% text stringlengths 9 7.94M |
% |---|
\begin{document}
\title[Brauer $p$-dimension of HDV-fields of residual characteristic $p$]{On the Brauer $p$-dimension of Henselian discrete valued fields of residual characteristic $p > 0$} \keywords{Henselian field, Brauer $p$-dimension, totally ramified extension, mixed characteristic, normal element\\ 2020 MSC Classification: 16K50, 12J10 (primary), 16K20, 12E15, 11S15 (secondary).}
\author{Ivan D. Chipchakov} \address{Institute of Mathematics and Informatics\\Bulgarian Academy of Sciences\\1113 Sofia, Bulgaria: E-mail address: chipchak@math.bas.bg}
\begin{abstract} Let $(K, v)$ be a Henselian discrete valued field with residue field $\widehat K$ of characteristic $p > 0$, and Brd$_{p}(K)$ be the Brauer $p$-dimension of $K$. This paper shows that Brd$_{p}(K) \ge n$ if $[\widehat K\colon \widehat K ^{p}] = p ^{n}$, for some $n \in \mathbb{N}$. It proves that Brd$_{p}(K) = \infty $ if and only if $[\widehat K\colon \widehat K ^{p}] = \infty $. \end{abstract}
\maketitle
\par
\section{\bf Introduction} \par
Let $E$ be a field, Br$(E)$ its Brauer group, $s(E)$ the class of associative finite-dimensional central simple algebras over $E$, and $d(E)$ the subclass of division algebras $D \in s(E)$. For each $A \in s(E)$, let $[A]$ be the equivalence class of $A$ in Br$(E)$, and let deg$(A)$, ind$(A)$, exp$(A)$ be the degree, the Schur index and the exponent of $A$, respectively. It is well-known (cf. \cite{P}, Sect. 14.4) that exp$(A)$ divides ind$(A)$ and shares with it the same set of prime divisors; also, ind$(A) \mid {\rm deg}(A)$, and deg$(A) = {\rm ind}(A)$ if and only if $A \in d(E)$. Note that if $B _{1}, B _{2} \in s(E)$ and g.c.d.$\{{\rm ind}(B _{1}), {\rm ind}(B _{2})\} = 1$, then ind$(B _{1} \otimes _{E} B _{2}) = {\rm ind}(B _{1}){\rm ind}(B _{2})$; equivalently, if $B _{j} ^{\prime } \in d(E)$, $j = 1, 2$, and g.c.d.$\{{\rm deg}(B _{1} ^{\prime }), {\rm deg}(B _{2} ^{\prime })\}$ $= 1$, then $B _{1} ^{\prime } \otimes _{E} B _{2} ^{\prime } \in d(E)$ (see \cite{P}, Sect. 13.4). Since Br$(E)$ is an abelian torsion group and ind$(A)$, exp$(A)$ are invariants both of $A$ and $[A]$, these results show that the study of the restrictions on the pairs ind$(A)$, exp$(A)$, $A \in s(E)$, reduces to the special case of $p$-primary pairs, for an arbitrary prime $p$. The Brauer $p$-dimensions Brd$_{p}(E)$, $p \in \mathbb P$, where $\mathbb P$ is the set of prime numbers, are defined as in \cite{ABGV}, and contain essential information on these restrictions. We say that Brd$_{p}(E) = n < \infty $, for a given $p \in \mathbb P$, if $n$ is the least integer $\ge 0$, for which ind$(P) \mid {\rm exp}(P) ^{n}$ whenever $P \in s(E)$ and $[P]$ lies in the $p$-component Br$(E) _{p}$ of Br$(E)$; if no such $n$ exists, we put Brd$_{p}(E) = \infty $. For instance, Brd$_{p}(E) \le 1$, for all $p \in \mathbb P$, if and only if $E$ is a stable field, i.e. 
deg$(D) = {\rm exp}(D)$, for each $D \in d(E)$; Brd$_{p'}(E) = 0$, for some $p ^{\prime } \in \mathbb P$, if and only if the $p'$-component Br$(E) _{p'}$ of Br$(E)$ is trivial. \par The absolute Brauer $p$-dimension abrd$_{p}(E)$ of $E$ is defined to be the supremum of Brd$_{p}(R)\colon R \in {\rm Fe}(E)$, where Fe$(E)$ is the set of finite extensions of $E$ in a separable closure $E _{\rm sep}$. This trivially implies abrd$_{p}(E) \ge {\rm Brd}_{p}(E)$, for each $p$. We have abrd$_{p}(E) \le 1$, $p \in \mathbb P$, if $E$ is an absolutely stable field, i.e. its finite extensions are stable fields. Class field theory gives examples of such fields: it shows that Brd$_{p}(\Phi ) = {\rm abrd}_{p}(\Phi ) = 1$, $p \in \mathbb P$, if $\Phi $ is a global or local field (see, e.g., \cite{Re}, (31.4) and (32.19)). The same equalities hold, if $\Phi = \Phi _{0}((X))((Y))$ is an iterated formal Laurent power series field in $2$ variables over a quasifinite field $\Phi _{0}$ (see \cite{Ch1}, Corollary~4.5 (ii)). \par The knowledge of the sequence Brd$_{p}(E), {\rm abrd}_{p}(E)\colon p \in \mathbb P$, is helpful for better understanding the behaviour of index-exponent relations over finitely-generated transcendental extensions of $E$ \cite{Ch4}. This is demonstrated by the description in \cite{Ch5} of the set of sequences Brd$_{p}(K _{q})$, abrd$_{p}(K _{q})$, $p \in \mathbb P$, $p \neq q$, where $K _{q}$ runs across the class of fields with Henselian valuations $v _{q}$ whose residue fields $\widehat K _{q}$ are perfect of characteristic $q \ge 0$, such that their absolute Galois groups $\mathcal{G}_{\widehat K _{q}} = \mathcal{G}(\widehat K _{q,{\rm sep}}/\widehat K _{q})$ are projective profinite groups, in the sense of \cite{S1}. The description relies on formulae for Brd$_{p}(K _{q})$, $p \neq q$, which depend only on whether $\widehat K _{q}$ contains a primitive $p$-th root of unity. 
Thus Brd$_{p}(K _{q})$ is determined, for each $p \neq q$, by two invariants: one of the value group $v _{q}(K _{q})$, and one of the Galois group $\mathcal{G}(\widehat K _{q}(p)/\widehat K _{q})$ of the maximal $p$-extension $\widehat K _{q}(p)$ of $\widehat K _{q}$ in $\widehat K _{q,{\rm sep}}$. \par A formula for Brd$_{q}(K _{q})$ in terms of invariants of $\widehat K _{q}$ and $v(K _{q})$ has also been found when char$(K _{q}) = q > 0$, $\widehat K _{q}$ is perfect and $(K _{q}, v _{q})$ is a maximally complete field (see \cite{Ch6}, Proposition~3.5). By definition, the imposed restriction on $(K _{q}, v _{q})$ means that it does not admit immediate proper extensions, i.e. valued extensions $(K _{q} ^{\prime }, v'_{q}) \neq (K _{q}, v _{q})$ with $\widehat K _{q} ^{\prime } = \widehat K _{q}$ and $v'_{q}(K _{q} ^{\prime }) = v _{q}(K _{q})$. The considered fields are singled out by the fact (established by Krull, see \cite{Wa}, Theorem~31.24 and page 483) that every valued field $(L _{0}, \lambda _{0})$ has an immediate extension $(L _{1}, \lambda _{1})$ that is a maximally complete field. Note here that no formula for Brd$_{q}(K _{q})$ as above exists if $(K _{q}, v _{q})$ is only Henselian. More precisely, one can show using suitably chosen valued subfields of maximally complete fields that if $(K, v)$ runs across the class of Henselian fields of characteristic $q$, then Brd$_{q}(K)$ does not depend only on $\widehat K$ and $v(K)$. Specifically, it has been proved (see \cite{Ch6}, Example~3.7) that for any integer $t \ge 2$, the iterated formal Laurent power series field $Y _{t} = \mathbb{F} _{q}((T _{1})) \dots ((T _{t}))$ in $t$ variables over the field $\mathbb{F} _{q}$ with $q$ elements possesses subfields $K _{\infty }$ and $K _{n}$, $n \in \mathbb{N}$, such that: \par
\noindent (1.1) (a) Brd$_{q}(K _{\infty }) = \infty $; $n + t - 1 \le {\rm Brd}_{q}(K _{n}) \le n + t$, for each $n \in \mathbb{N}$; \par (b) The valuations $v _{m}$ of $K _{m}$, $m \le \infty $, induced by the standard $\mathbb{Z} ^{t}$-valued valuation of $Y _{t}$ are Henselian with $\widehat K _{m} = \mathbb{F} _{q}$ and $v _{m}(K _{m}) = \mathbb{Z} ^{t}$; here $\mathbb{Z} ^{t}$ is viewed as an abelian group endowed with the inverse-lexicographic ordering. \par
Statement (1.1) attracts interest in the study of Brauer $p$-dimensions of Henselian fields of residual characteristic $p > 0$ from suitably chosen special classes. This paper considers Brd$_{p}(K)$, for a Henselian discrete valued field (abbr., an HDV-field) $(K, v)$ with char$(\widehat K) = p$. Our research is related to the problem of describing index-exponent relations over finitely-generated field extensions. It proves the right-to-left implication in the equivalence \par\noindent Brd$_{p}(K) = \infty $ $\Leftrightarrow $ the degree $[\widehat K\colon \widehat K ^{p}]$ is infinite (in case char$(K) = 0$, the converse implication is a consequence of \cite{PS}, Corollary~2.5, see also Fact \ref{fact3.5}), $\widehat K ^{p}$ being the subfield $\{u ^{p}\colon u \in \widehat K\}$. When $[\widehat K\colon \widehat K ^{p}] < \infty $, we prove the lower bound in the following conjecture (stated by Bhaskhar and Haase \cite{BH}\footnote{The Brauer $p$-dimension, in the sense of \cite{PS} and \cite{BH}, means the same as the absolute Brauer $p$-dimension in the present paper.} for complete discrete valued fields): \par
\begin{conj} \label{conj1.1} If $(K, v)$ is an {\rm HDV}-field with {\rm char}$(\widehat K) = p > 0$ and $[\widehat K\colon \widehat K ^{p}] = p ^{n}$, for some $n \in \mathbb{N}$, then $n \le {\rm abrd}_{p}(K) \le n + 1$. \end{conj} \par
\noindent Conjecture \ref{conj1.1} has been stated at the end of \cite{BH}, under the extra hypothesis that char$(K) = 0$ and char$(\widehat K) = p$. This restriction is not emphasized in the present paper, as we prove Conjecture \ref{conj1.1} in case char$(K) = p$ (see Proposition \ref{prop7.1}). Note also that the class of HDV-fields $(K, v)$ with char$(\widehat K) = p > 0$ and $[\widehat K\colon \widehat K ^{p}] = p ^{n}$ is closed under taking finite extensions (cf. \cite{E3}, Corollary~14.2.2, and \cite{BH}, Lemma~2.12). It is therefore clear that the upper bound in Conjecture \ref{conj1.1} will follow, if the inequality Brd$_{p}(K) \le n + 1$ holds, for an arbitrary HDV-field $(K, v)$ with $\widehat K$ as above. The inequality Brd$_{p}(K) \ge n$ implies trivially the lower bound $n \le {\rm abrd}_{p}(K)$ in Conjecture \ref{conj1.1}. It attracts interest in finding formulae for Brd$_{p}(K)$, for example, when $(K, v)$ belongs to some basic classes of HDV-fields of residual characteristic $p$ (see Conjecture \ref{conj7.3} and Problem \ref{prob7.4}).
\par\vskip0.8truecm\noindent
{\bf Basic notation and abbreviations used in the paper}
\begin{itemize}
\item $\mathbb{P}$ - the set of prime numbers; $\mathbb{N}$ - the set of positive integers; $\mathbb{Z}$ - the set (additive group, ring) of integers;
\item $\mathbb{Q}$ and $\mathbb{R}$ the additive groups (the fields) of rational numbers and of real numbers, respectively;
\item Abbreviations: HDV - Henselian discrete valued; TR - totally ramified;
\item For any field $E$, we use the following notation:
\item $E ^{\ast }$ is the multiplicative group of $E$; $E ^{\ast n}$ is the subgroup of $n$-th powers $E ^{\ast n} = \{\alpha ^{n}\colon \alpha \in E ^{\ast }\}$, for each $n \in \mathbb{N}$;
\item $s(E)$ - the class of associative finite-dimensional central simple $E$-algebras, $d(E)$ - the subclass of division algebras $D \in s(E)$, Br$(E)$ - the Brauer group of $E$;
\item $E _{\rm sep}$ is a separable closure of $E$, Fe$(E)$ is the set of finite extensions of $E$ in $E _{\rm sep}$, $\mathcal{G}_{E} := \mathcal{G}(E _{\rm sep}/E)$ is the absolute Galois group of $E$; $N(E _{1}/E)$ denotes the norm group of the extension $E _{1}/E$, for any $E _{1} \in {\rm Fe}(E)$;
\item For each $p \in \mathbb{P}$, $_{p}{\rm Br}(E) = \{b _{p} \in {\rm Br}(E)\colon \ pb _{p} = 0\}$ is the maximal subgroup of Br$(E)$ of period dividing $p$, Br$(E) _{p}$ - the $p$-component of Br$(E)$, Brd$_{p}(E)$ - the Brauer $p$-dimension of $E$, abrd$_{p}(E)$ - the absolute Brauer $p$-dimension of $E$; also, $E(p)$ is the maximal $p$-extension of $E$ (in $E _{\rm sep}$), and cd$_{p}(\mathcal{G}_{E})$ - the cohomological $p$-dimensions of $\mathcal{G}_{E}$, in the sense of \cite{S1};
\item For any field extension $E ^{\prime }/E$, $I(E ^{\prime }/E)$ denotes the set of intermediate fields of $E ^{\prime }/E$, and Br$(E ^{\prime }/E)$ is the relative Brauer group of $E ^{\prime }/E$;
\item Algebraic structures attached to a field $K$ with a nontrivial Krull valuation $v$: $O _{v}(K) = \{a \in K\colon \ v(a) \ge 0\}$ - the valuation ring of $(K, v)$; $M _{v}(K) = \{\mu \in K\colon \ v(\mu ) > 0\}$ - the maximal ideal of $O _{v}(K)$; $O _{v}(K) ^{\ast } = \{u \in K\colon \ v(u) = 0\}$ - the multiplicative group of $O _{v}(K)$; $v(K)$ - the value group of $(K, v)$; $\overline {v(K)}$ - a divisible hull of $v(K)$; for each $\gamma \in \overline {v(K)}$, $\gamma \ge 0$, $\nabla _{\gamma }(K)$ denotes the set $\{\lambda \in K\colon \ v(\lambda - 1) > \gamma \}$;
\item $\widehat K = O _{v}(K)/M _{v}(K)$ is the residue field of $(K, v)$, and for any $\lambda \in O _{v}(K)$, $\hat \lambda \in \widehat K$ is the residue class $\lambda + M _{v}(K)$; $(K, v)$ is said to be of mixed characteristic $(0, p)$ if char$(K) = 0$ and char$(\widehat K) = p > 0$; \label{k999} \item When $(K, v)$ is a real-valued field, $K _{v}$ stands for the completion of $K$ with respect to the topology induced by $v$, and $\bar v$ is the valuation of $K _{v}$ continuously extending $v$; \label{approx} \item Given an HDV-field $(K, v)$ of mixed characteristic $(0, p)$, and a primitive $p$-th root of unity $\varepsilon \in K _{\rm sep}$, we write $\beta \approx \beta '$, for some $\beta , \beta ' \in K(\varepsilon ) ^{\ast }$, if $v(\beta - \beta ') > p\kappa $, where $\kappa = v(p)/(p - 1)$; given an element $\pi \in K$ with $0 < v(\pi ) < p\kappa $, we write $\beta \sim \beta '$ if $v(\beta - \beta ') > p\kappa - v(\pi ) = v((1 - \varepsilon ) ^{p}\pi ^{-1})$. \end{itemize}
\section{\bf Statement of the main result}
\par
Let $(K, v)$ be an HDV-field with char$(\widehat K) = p > 0$. As shown in \cite{PS}, if char$(K) = 0$ and $[\widehat K\colon \widehat K ^{p}] = p ^{n}$, for some $n \in \mathbb{N}$, then $[n/2] \le {\rm abrd}_{p}(K) \le 2n$; abrd$_{p}(K) = \infty $ if and only if $[\widehat K\colon \widehat K ^{p}] = \infty $ (this is contained in \cite{PS}, Corollary~2.5 and Lemma~2.6). When $[\widehat K\colon \widehat K ^{p}] = p ^{n}$ and $n$ is odd, it has been proved in \cite{BH} that abrd$_{p}(K) \ge 1 + [n/2]$. The proofs of these results show their validity for Brd$_{p}(K)$ if $K$ contains a primitive $p$-th root of unity (see Remark \ref{rema6.2}). \par
The purpose of the present paper, in the first place, is to prove the inequality Brd$_{p}(K) \ge n$ in general, and thereby, to obtain the inequality abrd$_{p}(K) \ge n$ in Conjecture \ref{conj1.1}. Also, its major objective is to give an optimal infinitude criterion for Brd$_{p}(K)$. Our main result can be stated as follows: \par
\begin{theo} \label{theo2.1} Let $(K, v)$ be an {\rm HDV}-field with {\rm char}$(\widehat K) = p > 0$. Then: \par {\rm (a)} {\rm Brd}$_{p}(K)$ is infinite if and only if $\widehat K/\widehat K ^{p}$ is an infinite extension; \par {\rm (b)} There exists $D \in d(K)$ with {\rm exp}$(D) = p$ and {\rm deg}$(D) = p ^{n}$, provided that $[\widehat K\colon \widehat K ^{p}] = p ^{n}$, for some $n \in \mathbb{N}$; in particular, {\rm Brd}$_{p}(K) \ge n$. \end{theo} \par
Theorem \ref{theo2.1} (b) and the right-to-left implication in Theorem \ref{theo2.1} (a) are proved in Section 6. For our proof, we construct in Section 3 an algebra $D \in d(K)$ with exp$(D) = p$ and deg$(D) = p ^{\mu }$, assuming that $K$ has a TR and Galois extension $M _{\mu }$ of degree $[M _{\mu }\colon K] = p ^{\mu } \le [\widehat K\colon \widehat K ^{p}]$ with an abelian Galois group $\mathcal{G}(M _{\mu }/K)$ of period $p$. By a TR-extension of $K$, we mean here a finite extension $M/K$ with $\widehat M = \widehat K$. This agrees, by Lemma \ref{lemm3.2} (b), with the definition of a TR-extension over any valued field, given before the statement of Lemma \ref{lemm3.3} (for the case of a discrete valued field, see the paragraph before the statement of Lemma \ref{lemm5.2}). The existence of $M _{\mu }$ is a consequence of the following result, which is of independent interest when $(K, v)$ is of mixed characteristic $(0, p)$: \par
\begin{lemm} \label{lemm2.2} Let $(K, v)$ be an {\rm HDV}-field with {\rm char}$(\widehat K) = p > 0$ and $\widehat K$ infinite. Then $K$ has {\rm TR} extensions $M _{\mu }$, $\mu \in \mathbb{N}$, such that $[M _{\mu }\colon K] = p ^{\mu }$, $M _{\mu }/K$ is a Galois extension and the group $\mathcal{G}(M _{\mu }/K)$ is abelian of period $p$, for each $\mu $. \end{lemm} \par
Theorem \ref{theo2.1} and Lemma \ref{lemm2.2} have already been proved in case char$(K) = p$ (cf. \cite{Ch4}, Lemma~4.2). Moreover, it follows that, in the setting of the lemma, if char$(K) = p$, then each finite $p$-group $G$ is isomorphic to $\mathcal{G}(M _{G}/K)$, for some TR and Galois extension $M _{G}$ of $K$ (see \cite{Ch6}, Lemma~2.3). When char$(K) = 0$ and $(K, v)$ is an HDV-field of type II, in the sense of Kurihara, this is not true, for any cyclic $p$-group $G$ of sufficiently large order \cite{MKu}, 12.2, Theorem~(b). \par
Lemma \ref{lemm2.2} is proved in Sections 5 and 6. Section 3 contains valuation-theoretic preliminaries used in the sequel. We also show there how Theorem \ref{theo2.1} can be deduced from Lemma \ref{lemm2.2} (see Lemma \ref{lemm3.6}). For reasons noted above, here we focus our attention on the mixed characteristic case $(0, p)$. For the proof of Lemma \ref{lemm2.2}, we take into consideration whether or not $v(p) \in pv(K)$ (see Lemmas \ref{lemm4.8}, \ref{lemm6.1} and Lemma \ref{lemm5.2} (b), respectively). Section 4 is devoted to the technical preparation for the proof of Lemma \ref{lemm2.2}. As noted above, in Section 7, we prove Conjecture \ref{conj1.1} for an HDV-field of characteristic $p$. Open questions concerning Brd$_{p}(K)$ are also posed in two frequently considered special cases. \par
The basic notation, terminology and conventions kept in this paper are standard and essentially the same as in \cite{L}, \cite{TW} and \cite{Ch4}. Missing definitions concerning central simple algebras can be found in \cite{P}. Throughout, Brauer and value groups are written additively, Galois groups are viewed as profinite under the Krull topology, and by a profinite group homomorphism, we mean a continuous one. For any discrete valued field $(K, v)$, we suppose that $v(K)$ is chosen to be a subgroup of the additive group $\mathbb{Q}$ of rational numbers. By an $n$-dimensional local field, for some $n \in \mathbb{N}$, we mean a complete $n$-discretely valued field $K _{n}$, in the sense of \cite{F1} (see also \cite{Zh}), with a quasifinite $n$-th residue field $K _{0}$.
\section{\bf Preliminaries}
Let $K$ be a field with a (nontrivial) Krull valuation $v$. We say that $v$ is Henselian, if it extends uniquely, up-to equivalence, to a valuation $v _{L}$ on each algebraic extension $L$ of $K$. This holds, if $K = K _{v}$ and $(K, v)$ is a real-valued field, i.e. $v(K)$ is isomorphic to an ordered subgroup of the additive group $\mathbb{R}$ of real numbers (cf. \cite{L}, Ch. XII). Maximally complete fields are also Henselian, since Henselizations of valued fields are their immediate extensions (see \cite{E3}, Theorem~15.3.5). The valuation $v$ is Henselian if and only if any of the following two equivalent conditions holds (cf. \cite{E3}, Sect. 18.1, and \cite{Wa}, Theorem~32.19): \par
\noindent (3.1) (a) Given a polynomial $f(X) \in O _{v}(K) [X]$ and an element $a \in O _{v}(K)$, such that $2v(f ^{\prime }(a)) < v(f(a))$, where $f ^{\prime }$ is the formal derivative of $f$, there is a zero $c \in O _{v}(K)$ of $f$ satisfying the equality $v(c - a) = v(f(a)/f ^{\prime }(a))$; \par (b) For each normal extension $\Omega /K$, $v ^{\prime }(\tau (\mu )) = v ^{\prime }(\mu )$ whenever $\mu \in \Omega $, $v ^{\prime }$ is a valuation of $\Omega $ extending $v$, and $\tau $ is a $K$-automorphism of $\Omega $. \par
When $(K, v)$ is real-valued, it is Henselian if and only if $K$ is (relatively) separably closed in $K _{v}$ (cf. \cite{E3}, Theorems~15.3.5, 17.1.5). The following lemma allows one to extend to the Henselian case results on complete real-valued fields (e.g., the Grunwald-Wang theorem, see \cite{LR} and Remark \ref{rema5.3}). \par
\begin{lemm} \label{lemm3.1} Let $(K, v)$ be a real-valued field, $\bar v$ the continuous prolongation of $v$ on $K _{v}$, and $(\mathcal{K}, v')$ an intermediate valued field of $(K _{v}, \bar v)/(K, v)$. Suppose that $(\mathcal{K}, v')$ is Henselian, identify $\mathcal{K} _{\rm sep}$ with its $\mathcal{K}$-isomorphic copy in $K _{v,{\rm sep}}$, and let $f$ be the mapping {\rm Fe}$(\mathcal{K}) \to {\rm Fe}(K _{v})$, by the rule $\Lambda ^{\prime } \to \Lambda ^{\prime }K _{v}$. Then: \par {\rm (a)} $\mathcal{K} _{\rm sep} \cap K _{v} = \mathcal{K}$, and each $\Lambda \in {\rm Fe}(K _{v})$ contains a primitive element $\lambda \in \mathcal{K} _{\rm sep}$ over $K _{v}$, such that $[K _{v}(\lambda )\colon K _{v}] = [\mathcal{K}(\lambda )\colon \mathcal{K}]$; \par {\rm (b)} $\mathcal{K} _{\rm sep}K _{v} = K _{v,{\rm sep}}$ and $\mathcal{G}_{\mathcal{K}} \cong \mathcal{G}_{K _{v}}$; \par {\rm (c)} The correspondence $f$ is bijective and degree-preserving; moreover, $f$ and the inverse mapping $f ^{-1}: {\rm Fe}(K _{v}) \to {\rm Fe}(\mathcal{K})$, preserve the Galois property and the isomorphism class of the corresponding Galois groups; \par {\rm (d)} For each $\nu \in \mathbb{N}$ not divisible by {\rm char}$(K)$, $K _{v} ^{\ast \nu } \cap \mathcal{K} ^{\ast } = \mathcal{K} ^{\ast \nu }$. \end{lemm} \par
\begin{proof} The conditions on $(K, v)$ and $(\mathcal{K}, v')$ ensure that $\mathcal{K} _{\rm sep} \cap K _{v} = \mathcal{K}$. The latter part of Lemma \ref{lemm3.1} (a) can be deduced from Krasner's lemma (see \cite{L2}, Ch. II, Propositions~3, 4). Lemma \ref{lemm3.1} (c) follows from Lemma \ref{lemm3.1} (a) and Galois theory (cf. \cite{L}, Ch. VI, Theorem~1.12), and Lemma \ref{lemm3.1} (b) - from Lemma \ref{lemm3.1} (a), (c) and the definition of the Krull topology on $\mathcal{G}_{\mathcal{K}}$ and $\mathcal{G}_{K _{v}}$. Lemma \ref{lemm3.1} (d) is implied by the density of $\mathcal{K}$ in $K _{v}$, and by the fact that the set $\nabla _{\gamma }(K _{v}) = \{\alpha \in K _{v}\colon \bar v(\alpha - 1) > \gamma \}$ is an open subgroup of $K _{v} ^{\ast \nu }$, provided $\gamma \in \mathbb{R}$ is sufficiently large (one may put $\gamma = 0$ if char$(\widehat K) \nmid \nu $). \end{proof} \par
\noindent When $v$ is Henselian, so is $v _{L}$, for any algebraic field extension $L/K$; in this case, $\widehat L/\widehat K$ is algebraic as well. We write $v$ instead of $v _{L}$ and view $v(L)$ as an ordered subgroup of a fixed divisible hull $\overline {v(K)}$. This is allowed, since $v(K)$ is an ordered subgroup of $v(L)$, such that $v(L)/v(K)$ is a torsion group; hence, $v(L)$ embeds in $\overline {v(K)}$ as an ordered subgroup. These facts follow from Ostrowski's theorem (see \cite{E3}, Theorem~17.2.1), namely, the assertion that if $[L\colon K]$ is finite, then $[\widehat L\colon \widehat K]e(L/K)$ divides $[L\colon K]$ and $[L\colon K][\widehat L\colon \widehat K] ^{-1}e(L/K) ^{-1}$ has no divisor $p \in \mathbb P$, $p \neq {\rm char}(\widehat K)$; here $e(L/K)$ denotes the ramification index of $L/K$ (the index $\vert v(L)\colon v(K)\vert $ of $v(K)$ in $v(L)$). We state below several known criteria ensuring that $[L\colon K] = [\widehat L\colon \widehat K]e(L/K)$: \par
\begin{lemm} \label{lemm3.2} Let $(K, v)$ be a Henselian field and $L/K$ a finite extension. Then $[L\colon K] = [\widehat L\colon \widehat K]e(L/K)$ in the following cases: \par {\rm (a)} If char$(\widehat K) \nmid [L\colon K]$ (apply Ostrowski's theorem); \par {\rm (b)} If $(K, v)$ is HDV and $L/K$ is separable (see \cite{E3}, Sect. 17.4); \par {\rm (c)} When $(K, v)$ is maximally complete (cf. \cite{Wa}, Theorem~31.21). \par\noindent Under the hypotheses of (c), if {\rm char}$(K) = p > 0$, then $K ^{p}$ is maximally complete (relative to the valuation induced by $v$) with a residue field $\widehat K ^{p}$ and a value group $pv(K)$; this ensures that $[K\colon K^{p}]$ is finite if and only if so are $[\widehat K\colon \widehat K ^{p}]$ and the quotient group $v(K)/pv(K)$. \end{lemm} \par
\noindent Assume that $(K, v)$ is a nontrivially valued field. A finite extension $R$ of $K$ is said to be inertial with respect to $v$, if $R$ has a unique (up-to equivalence) valuation $v _{R}$ extending $v$, the residue field $\widehat R$ of $(R, v _{R})$ is separable over $\widehat K$, and $[R\colon K] = [\widehat R\colon \widehat K]$; $R/K$ is called a TR-extension with respect to $v$, if $v$ has a unique prolongation $v _{R}$ on $R$, and the index $\vert v _{R}(R)\colon v(K)\vert $ equals $[R\colon K]$. When $v$ is Henselian, $R/K$ is TR if and only if $e(R/K) = [R\colon K]$. Inertial extensions of Henselian fields have useful properties, some of which are presented by the following lemma (for a proof, see \cite{TW}, Theorem~A.23): \par
\begin{lemm} \label{lemm3.3} Let $(K, v)$ be a Henselian field. Then: \par {\rm (a)} An inertial extension $R ^{\prime }/K$ is Galois if and only if $\widehat R ^{\prime }/\widehat K$ is Galois. When this holds, $\mathcal{G}(R ^{\prime }/K)$ and $\mathcal{G}(\widehat R ^{\prime }/\widehat K)$ are canonically isomorphic. \par {\rm (b)} The compositum $K _{\rm ur}$ of inertial extensions of $K$ in $K _{\rm sep}$ is a Galois extension of $K$ with $\mathcal{G}(K _{\rm ur}/K) \cong \mathcal{G}_{\widehat K}$. \par {\rm (c)} Finite extensions of $K$ in $K _{\rm ur}$ are inertial, and the natural mapping of $I(K _{\rm ur}/K)$ into $I(\widehat K _{\rm sep}/\widehat K)$ is bijective. \end{lemm} \par
It is known (cf. \cite{Sch}, Ch. 2, Sect. 7, and \cite{TW}, Sect. 1.2.2) that if $(K, v)$ is Henselian, then $v$ extends on each $D \in d(K)$ to a unique valuation $v _{D}$, up-to equivalence. Put $v(D) = v _{D}(D)$ and denote by $\widehat D$ the residue division ring of $(D, v _{D})$. Note that $\widehat D$ is a division $\widehat K$-algebra with $[\widehat D\colon \widehat K] < \infty $, and $v(D)$ is an ordered abelian group including $v(K)$ as an ordered subgroup of finite index $e(D/K)$. In addition, the following holds, by \cite{TY}, Proposition~2.2: \par
\begin{lemm} \label{lemm3.4} If $(K, v)$ is an {\rm HDV}-field, then $[D\colon K] = [\widehat D\colon \widehat K]e(D/K)$, for every $D \in d(K)$. \end{lemm} Next we state results on any HDV-field $(K, v)$ that are used in Section 7 for proving Conjecture \ref{conj1.1} in the case of char$(K) = p$. They reduce the proof of the upper bound in this conjecture to considering only the case where $(K, v)$ is a complete discrete valued field (which allows one to apply results of \cite{PS} and \cite{BH}): \par
\begin{fact} \label{fact3.5} {\rm (a)} The scalar extension map {\rm Br}$(K) \to {\rm Br}(K _{v})$ is an injective homomorphism which preserves Schur indices and exponents (cf. \cite{Cohn}, Theorem~1, and \cite{Sch}, Ch. 2, Theorem~9); hence, {\rm Brd}$_{p'}(K) \le {\rm Brd}_{p'}(K _{v})$, for every $p' \in \mathbb P$; \par {\rm (b)} The valued field $(K _{v}, \bar v)$ (see page \pageref{k999}) is maximally complete (cf. \cite{Sch}, Ch. 2, Theorem~8, or \cite{TW}, Example~3.11); in addition, $(K _{v}, \bar v)/(K, v)$ is an immediate extension (cf. \cite{E3}, Theorem~9.3.2, or \cite{L}, Ch. XII, Sect. 5). \end{fact} \par
Let now $(K, v)$ be an HDV-field with char$(\widehat K) = p$. Suppose that there exists a Galois extension $M/K$ with $\mathcal{G}(M/K)$ abelian of period $p$ and order $p ^{\mu }$, for some $\mu \in \mathbb{N}$. Then, by Galois theory, $M$ equals the compositum $L _{1} \dots L _{\mu }$ of degree $p$ (Galois) extensions $L _{j}$ of $K$ in $M$, $j = 1, \dots , \mu $. This enables one to construct various algebras of degree $p ^{\mu }$ presentable as tensor products of cyclic $K$-algebras of degree $p$ (concerning cyclic algebras in general, see, e.g., \cite{P}, Sect. 15). When $M/K$ is a TR-extension and $p ^{\mu } \le [\widehat K\colon \widehat K ^{p}]$, our next lemma provides a criterion for an algebra of this type to lie in $d(K)$, which is used for proving Theorem \ref{theo2.1}. Before stating it, note that a finite system $\Theta $ of $m$ elements of a field $E$ with char$(E) = p$ is called $p$-independent over $E ^{p}$, if $[E ^{p}(\Theta )\colon E ^{p}] = p ^{m}$. \par
\begin{lemm} \label{lemm3.6} Let $(K, v)$ be an {\rm HDV}-field with {\rm char}$(\widehat K) = p > 0$, and let $M/K$ be a {\rm TR} and Galois extension with $\mathcal{G}(M/K)$ abelian of period $p$ and finite order $p ^{\mu } \le [\widehat K\colon \widehat K ^{p}]$. Fix a presentation $M = L _{1} \dots L _{\mu }$ as a compositum of degree $p$ extensions of $K$ in $M$, take a generator $\sigma _{j}$ of $\mathcal{G}(L _{j}/K)$, for each index $j$, and choose elements $a _{j} \in O _{v}(K)$, $j = 1, \dots , \mu $, so that the system $\hat a _{j} \in \widehat K$, $j = 1, \dots , \mu $, be $p$-independent over $\widehat K ^{p}$. Then the tensor product $D _{\mu } = \otimes _{j=1} ^{\mu } \Delta _{j}$ of the cyclic $K$-algebra $\Delta _{j} = (L _{j}/K, \sigma _{j}, a _{j})$, $j = 1, \dots , \mu $, lies in $d(K)$, where $\otimes = \otimes _{K}$. Moreover, $v(D _{\mu }) = v(M)$ and $\widehat D _{\mu }$ is a root field over $\widehat K$ of the binomials $X ^{p} - \hat a _{j}$, $j = 1, \dots , \mu $, so $[\widehat D _{\mu }\colon \widehat K] = p ^{\mu }$. \end{lemm} \par
The proof of Lemma \ref{lemm3.6} is done by induction on $\mu $, by the method of proving \cite{Ch4}, Lemma~4.2 (b) (which covers the case of $p = {\rm char}(K)$). For convenience of the reader, we outline its main steps. In fact, it suffices to prove that $D _{\mu } \in d(K)$; then the rest of the lemma can be deduced from Lemma \ref{lemm3.4}, the equality $[D _{\mu }\colon K] = p ^{2\mu }$, and the existence of $K$-subalgebras $\Theta _{\mu }$ and $W _{\mu }$ of $D _{\mu }$, such that $\Theta _{\mu } \cong M$ and $W _{\mu }$ is a root field over $K$ of the binomials $X ^{p} - a _{j}$, $j = 1, \dots , \mu $. If $\mu = 1$, then $\hat a _{1} \notin \widehat K ^{p} = \widehat L _{1} ^{p}$, which implies $a _{1} \notin N(L _{1}/K)$; hence, by \cite{P}, Proposition~15.1~b, $D _{1} \in d(K)$. When $\mu \ge 2$, it suffices to show that $D _{\mu } \in d(K)$, under the extra hypothesis that the centralizer $C = C _{D _{\mu }}(L _{\mu })$ lies in $d(L _{\mu })$. As $C = D _{\mu -1} \otimes _{K} L _{\mu }$, where $D _{\mu -1} = \otimes _{j=1} ^{\mu -1} \Delta _{j}$, it is easy to see that $v _{C}(C) = v(M)$ and $\widehat C$ equals the (commutative) field $\widehat K(\sqrt[p]{\hat a _{1}}, \dots , \sqrt[p]{\hat a _{\mu -1}})$; in particular, $\widehat C$ does not possess nontrivial $\widehat K$-automorphisms. Observing that $D _{\mu } \in s(K)$, consider the $K$-automorphism $\varphi $ of $C$ which induces the identity on $D _{\mu -1}$ and the automorphism $\sigma _{\mu }$ of $L _{\mu }$. It follows from the Skolem-Noether theorem (see \cite{P}, Sect. 12.6) that $\varphi $ is induced by the inner automorphism of $D _{\mu }$ defined by conjugation by an element $x _{\mu } \in \Delta _{\mu }$ that induces $\sigma _{\mu }$ on $L _{\mu }$, satisfies $x _{\mu } ^{p} = a _{\mu }$, and generates $D _{\mu }$ over $C$. Thus, $D _{\mu }$ is a cyclic generalized crossed product over $C$ as described in \cite{A2}, Ch. XI, Theorems~10, 11. 
In view of (3.1) (b), it is easily verified that the composition $v _{C} \circ \varphi $ is a valuation of $C$ extending the prolongation of $v$ on $L _{\mu }$. As $v$ is Henselian, this means that $v _{C} \circ \varphi = v _{C}$ which implies $v _{C}(d) = 0$ and $\hat d = \hat d ^{\prime p} \in \widehat C ^{p}$, provided that $d = \prod _{i=0} ^{p-1} \varphi ^{i}(d ^{\prime })$, for some $d ^{\prime } \in C$ with $v _{C}(d ^{\prime }) = 0$. Since $\hat a _{\mu } \notin \widehat C ^{p}$ (and $v _{C}(d) \neq 0$ if $v _{C}(d ^{\prime }) \neq 0$), one thereby concludes that $\prod _{i=0} ^{p-1} \varphi ^{i} (\tilde d) \neq a _{\mu }$, for any $\tilde d \in C$. Hence, by the equality $x _{\mu } ^{p} = a _{\mu }$ and the hypothesis that $C \in d(L _{\mu })$, the assertion that $D _{\mu } \in d(K)$ can be obtained from \cite{A2}, Ch. XI, Theorem~12, so Lemma \ref{lemm3.6} is proved. \par
Theorem \ref{theo2.1} is implied by Lemmas \ref{lemm3.6} and \ref{lemm2.2}, so our main goal in the rest of the paper is to prove Lemma \ref{lemm2.2}. As noted in Section 2, one may consider only the case of char$(K) = 0$. Our next lemma is used in Section 5 for proving Lemma \ref{lemm2.2}, under the extra hypothesis that $v(p) \notin pv(K)$. \par
\begin{lemm} \label{lemm3.7} Let $(K, v)/(\Phi , \omega )$ be a valued field extension, such that the index $\vert v(K)\colon \omega (\Phi )\vert $ of $\omega (\Phi )$ in $v(K)$ is finite, and let $\Psi $ be an extension of $\Phi $ in $K _{\rm sep}$ of degree $p ^{\mu }$, for some $p \in \mathbb{P}$, $\mu \in \mathbb{N}$. Suppose that $\Psi $ is {\rm TR} over $\Phi $ relative to $\omega $, and $p \nmid \vert v(K)\colon \omega (\Phi )\vert $. Then $\Psi K/K$ is {\rm TR} relative to $v$ and $[\Psi K\colon K] = p ^{\mu }$. \end{lemm} \par
\begin{proof} In view of \cite{E3}, Theorem~15.3.5, and our assumptions, one may suppose, for the proof, that the value groups of all valuations of $\Psi K$ extending $\omega $ are ordered subgroups of $\overline {v(K)}$. Let $v'$ be any valuation of $\Psi K$ extending $v$. By the Fundamental Inequality (cf. \cite{E3}, Theorem~17.1.5), \par
\noindent (3.2) $\vert v'(\Psi K)\colon v(K)\vert \le [\Psi K\colon K] \le [\Psi \colon \Phi ] = p ^{\mu }$. \par
\noindent As $\Psi /\Phi $ is TR relative to $\omega $, $\Psi $ has a unique valuation $\omega '$ extending $\omega $. This shows that $\omega '$ equals the valuation of $\Psi $ induced by $v'$. Note further that $$p ^{\mu } = \vert \omega '(\Psi )\colon \omega (\Phi )\vert ,$$ $$ \vert \omega '(\Psi )\colon \omega (\Phi )\vert \mid \vert v'(\Psi K)\colon \omega (\Phi )\vert $$ $${\rm and} \ \vert v'(\Psi K)\colon \omega (\Phi )\vert = \vert v'(\Psi K)\colon v(K)\vert . \vert v(K)\colon \omega (\Phi )\vert .$$ \par
\noindent Since $p \nmid \vert v(K)\colon \omega (\Phi )\vert $ by hypothesis, it follows that $p ^{\mu } \mid \vert v'(\Psi K)\colon v(K)\vert $, which implies that the inequalities in (3.2) must be equalities. Hence, $[\Psi K\colon K] = p ^{\mu }$, and by the Fundamental Inequality, it turns out that $v'$ is the unique valuation of $\Psi K$ extending $v$ and, moreover, $\Psi K/K$ is TR relative to $v$, as required. \end{proof} \par
The next lemma presents well-known properties of binomial extensions of prime degree, and of cyclotomic extensions. They are often used without an explicit reference (for a proof of the lemma, see \cite{L}, Ch. VI, Sects. 3, 9). \par
\begin{lemm} \label{lemm3.8} Let $E$ be a field and $p \in \mathbb{P}$. Then: \par {\rm (a)} For any $\theta \in E^{\ast }$, the polynomial $X ^{p} - \theta $ is irreducible over $E$ if and only if it has no root in $E$. \par {\rm (b)} If $L/E$ is a finite extension, such that $p \nmid [L\colon E]$, then $L ^{\ast p} \cap E ^{\ast } = E ^{\ast p}$. \par {\rm (c)} If $p \neq {\rm char}(E)$ and $\varepsilon $ is a primitive $p$-th root of unity in $E _{\rm sep}$, then $E(\varepsilon )/E$ is a Galois extension with $\mathcal{G}(E(\varepsilon )/E)$ cyclic and $[E(\varepsilon )\colon E] \mid p - 1$; in particular, $E(\varepsilon ) ^{\ast p} \cap E ^{\ast } = E ^{\ast p}$. \end{lemm} \par
At the end of this section we recall some known properties of cyclotomic extensions of valued fields that are used in the sequel. \par
\begin{lemm} \label{lemm3.9} Let $(K, v)$ be a valued field of mixed characteristic $(0, p)$ containing a primitive $p$-th root of unity $\varepsilon $. Then: \par {\rm (a)} $v(1 - \varepsilon ) = v(p)/(p - 1)$; \par {\rm (b)} $v(-i + \sum _{j=0} ^{i-1} \varepsilon ^{j}) \ge v(1 - \varepsilon )$, for each $i \in \mathbb{N}$ not divisible by $p$; \par {\rm (c)} $v((1 - \varepsilon ) ^{p-1} + p) \ge v((1 - \varepsilon ) ^{p}) = pv(p)/(p - 1)$. \end{lemm} \par
\begin{proof} The assumption on char$(\widehat K)$ ensures that $v(p) > 0$, $\varepsilon \in O _{v}(K)$ and the residue class $\hat \varepsilon $ equals the unit of $\widehat K$. Therefore, $v(1 - \varepsilon ) > 0$, and by the proof of Proposition~4.1.2 (i) of \cite{Co-Th}, $v(p) = (p - 1)v(1 - \varepsilon )$, as claimed by Lemma \ref{lemm3.9} (a). Also, the inequality $v(1 - \varepsilon ) > 0$ implies $\hat e _{i} = i \neq 0$, for each $i \in \mathbb{N}$ not divisible by $p$, where $e _{i} = \sum _{j=0} ^{i-1} \varepsilon ^{j}$. Lemma \ref{lemm3.9} (b) follows from the fact that $\mathbb{Z}[\varepsilon ] \subset O _{v}(K)$ and $\varepsilon - 1$ divides (in the ring $\mathbb{Z}[\varepsilon ]$) the elements $e _{i} - i = \sum _{j=0} ^{i-1} (\varepsilon ^{j} - 1)$, $i = 1, \dots , p - 1$. Clearly, Lemma \ref{lemm3.9} (b) shows that $v \big ((p - 1)! - \prod _{i=1} ^{p-1} e _{i}\big ) \ge v(1 - \varepsilon ),$ which implies together with the equalities $$\Phi _{p}(1) = \prod _{i=1} ^{p-1} (1 - \varepsilon ^{i}) = p = (1 - \varepsilon ) ^{p-1}\prod _{i=1} ^{p-1} e _{i},$$ where $\Phi _{p}(X) = \sum _{j=0} ^{p-1} X ^{j}$ is the $p$-th cyclotomic polynomial, that $$v((p - 1)!(1 - \varepsilon ) ^{p-1} - p) \ge v((1 - \varepsilon ) ^{p}) = pv(p)/(p - 1).$$ \par\noindent As $(p - 1)! \equiv -1 ({\rm mod} \ p)$ (Wilson's theorem), this proves Lemma \ref{lemm3.9} (c). \end{proof}
\section{\bf Normal elements and radical degree $p$ extensions of HDV-fields}
\par
Our goal in this section is to prepare technically the proof of Lemma \ref{lemm2.2}. In order to achieve it, we need information on the algebraic properties of $p$-th roots of elements of $\nabla _{0}(F)$, for a valued field $(F, v)$ of mixed characteristic $(0, p)$. A part of this information is contained in the following two lemmas. \par
\begin{lemm} \label{lemm4.1} Let $(F, v)$ be a valued field of mixed characteristic $(0, p)$, and let $\alpha \in F$, $\beta \in F ^{\ast }$ be elements, such that $(1 + \beta ) ^{p} = 1 + \alpha $ and $v(\alpha ) > 0$. Put $\eta = \alpha - \beta ^{p} - p\beta $ and $\kappa = v(p)/(p - 1)$. Then $v(\eta ) \ge v(p) + 2v(\beta )$. Moreover, \par {\rm (a)} $v(\alpha ) < p\kappa $ if and only if $v(\beta ) < \kappa $; when this holds, $v(\beta ) = v(\alpha )/p$ and $v(\beta ^{p} - \alpha ) > v(\alpha ) = v(\beta ^{p})$. \par {\rm (b)} If $v(\alpha ) = p\kappa $, then $v(\beta ) = \kappa $. \end{lemm} \par
\begin{proof} By Newton's binomial formula, one has $$(1 + \beta ) ^{p} = 1 + \alpha = 1 + \beta ^{p} + \sum _{i=1} ^{p-1} {p \brack i} \beta ^{i}.$$ Since $v(\alpha ) > 0$ and char$(\widehat F) = p$, this ensures that $v(\beta ) > 0$. The binomial \par\vskip0.05truecm\noindent formula also shows that $\eta = 0$ if $p = 2$, and $\eta = \sum _{i=2} ^{p-1} {p \brack i} \beta ^{i}$ if $p > 2$. Note \par\vskip0.05truecm\noindent further that $v({p \brack i}) = v(p)$, for all $i < p$, which implies that, in case $p > 2$, the sequence of values $v({p \brack i}\beta ^{i})$, $i = 1, \dots , p - 1$, strictly increases. These facts prove \par\vskip0.05truecm\noindent that $v(\eta ) \ge v(p) + 2v(\beta )$. The obtained inequality has the following consequences, which in turn imply statements (a) and (b) of Lemma \ref{lemm4.1}: \par
\noindent ${\rm (i)} \ {\rm If} \ v(\beta ) < \kappa , \ {\rm then} \ v(\alpha ) = v(\beta ^{p}) < pv(\beta ) < p\kappa $ \par\vskip0.08truecm\noindent ${\rm and} \ v(\alpha - \beta ^{p}) = v(p\beta + \eta ) = v(p\beta ) > v(\beta ^{p});$ \par \vskip0.08truecm\noindent ${\rm (ii)} \ {\rm If} \ v(\beta ) > \kappa , {\rm then} \ v(\beta ^{p}) > v(p\beta ),$ $ v(\alpha - p\beta ) = v(\beta ^{p} + \eta ) > v(p\beta ),$ \par \vskip0.09truecm\noindent ${\rm and} \ v(\alpha ) = v(p\beta ) = v(p) + v(\beta ) > p\kappa $; \par
\noindent ${\rm (iii)} \ {\rm If} \ v(\beta ) = \kappa , \ {\rm then} \ v(\beta ^{p}) = v(p\beta ) = p\kappa < v(\eta ), \ {\rm whence}, v(\alpha )\ge p\kappa .$ \end{proof} \par
\begin{lemm} \label{lemm4.2} Let $(F, v)$ be a valued field of mixed characteristic $(0, p)$, and suppose that there exists $\gamma \in F ^{\ast }$ with $v(\gamma ) = v(p)/(p - 1) =: \kappa $. Assume that $\alpha \in F$ and $\beta \in F ^{\ast }$ satisfy $v(\alpha ) \ge p\kappa $ and $(1 + \beta ) ^{p} = 1 + \alpha $, put $\delta = \beta /\gamma $, and denote by $g$ the polynomial $g(X) = \gamma ^{-p}[(1 + \gamma X) ^{p} - (1 + \alpha )] \in F[X]$. Then: \par {\rm (a)} $g$ is monic of degree $p$, $g(\delta ) = 0$ and $g \in O _{v}(F)[X]$; \par {\rm (b)} The reduction $\hat g \in \widehat F[X]$ of $g$ modulo $M _{v}(F)$ equals $X ^{p} + \hat cX - \hat d$, where $c = p/\gamma ^{p-1}$, $\hat c \neq 0$ and $d = \gamma ^{-p}\alpha $; also, $\hat d \neq 0$ if and only if $v(\alpha ) = p\kappa $. \end{lemm} \par
\begin{proof} (a): Evidently, $1 + \beta $ is a root of the binomial $X ^{p} - (1 + \alpha )$, so \par\vskip0.12truecm\noindent $h(\beta ) = 0$, where $h(X) = (X + 1) ^{p} - 1 - \alpha = X ^{p} + (\sum _{i=1} ^{p-1} {p \brack i} X ^{p-i}) - \alpha $. Observing also that $g(X) = \gamma ^{-p}h(\gamma X) = X ^{p} + \sum _{i=1} ^{p-1} ({p \brack i} /\gamma ^{i}).X ^{p-i} - (\alpha /\gamma ^{p}),$ one obtains that $g(X)$ is monic of degree $p$ and $g(\delta ) = 0$, as required by \par\vskip0.11truecm\noindent Lemma \ref{lemm4.2} (a). Since $v(\alpha ) \ge p\kappa $, $v(\gamma ) = \kappa $, and $p \mid {p \brack i}$, $i = 1, \dots , p - 1$, it \par\vskip0.12truecm\noindent is easily verified that $v(\alpha /\gamma ^{p}) \ge 0$ and $v({p \brack i} /\gamma ^{i}) = (p - i - 1)\kappa \ge 0$, for $i = 1, \dots , p - 1$, proving that $g(X) \in O _{v}(F)[X]$. \par\vskip0.1truecm (b): The preceding calculations show that $v({p \brack p-1} /\gamma ^{p-1}) = 0$, and in case $p > 2$, they yield $v({p \brack i} /\gamma ^{i}) > 0$, $i = 1, \dots , p - 2$. Also, by the assumptions on $v(\gamma )$ and $v(\alpha )$, there exist $c \in O _{v}(F) ^{\ast }$ and $d \in O _{v}(F)$, such that $p = \gamma ^{p-1}c$ and $\alpha = \gamma ^{p}d$. These observations show that $\hat g(X) = X ^{p} + \hat cX - \hat d \in \widehat F[X]$ and $\hat c \neq 0$. They also prove that $\hat d \neq 0$ if and only if $v(\alpha ) = p\kappa $, as claimed. \end{proof} \par
Our approach to the proof of Lemma \ref{lemm2.2} in the case where $v(p) \in pv(K)$ relies on the following lemma (which is an extended version of \cite{TY}, Lemma~2.1). \par
\begin{lemm} \label{lemm4.3} Let $(K, v)$ be a Henselian field of mixed characteristic $(0, p)$, $\varepsilon $ be a primitive $p$-th root of unity in $K _{\rm sep}$, and $\kappa = v(p)/(p - 1)$. Then: \par {\rm (a)} The polynomial $g _{\lambda }(X) = (1 - \varepsilon ) ^{-p}[((1 - \varepsilon )X + 1) ^{p} - \lambda ]$ lies in $O _{v}(K(\varepsilon ))[X]$ and has a root in $K(\varepsilon )$, for each $\lambda \in \nabla _{p\kappa }(K(\varepsilon ))$; in particular, $\lambda \in K(\varepsilon ) ^{\ast p}$. \par {\rm (b)} $\nabla _{\kappa '}(K) \subset K ^{\ast p}$, in case $\kappa ' \in v(K)$ and $\kappa ' \ge p\kappa $. \par {\rm (c)} For any pair $\lambda _{1} \in \nabla _{0}(K)$, $\lambda _{2} \in K$, such that $v(\lambda _{1} - \lambda _{2}) > p\kappa $, the elements $\lambda _{2}$ and $\lambda _{2}\lambda _{1} ^{-1}$ lie in $\nabla _{0}(K)$ and $K ^{\ast p}$, respectively. \end{lemm} \par
\begin{proof} (a): We have $v(1 - \varepsilon ) = \kappa $ and $v(\lambda - 1) > p\kappa $, whence, Lemma \ref{lemm4.2} applies to $g _{\lambda }(X)$ and yields $g _{\lambda }(X) \in O _{v}(K(\varepsilon ))[X]$. Denote by $\widehat K _{\varepsilon }$ the residue field of $(K(\varepsilon ), v)$. Lemma \ref{lemm4.2}, combined with Lemma \ref{lemm3.9} (c), shows that the reduction $\hat g _{\lambda }(X) \in \widehat K _{\varepsilon }[X]$ of $g _{\lambda }(X)$ modulo $M _{v}(K(\varepsilon ))$ equals the binomial $X ^{p} - X$ ($\hat g _{\lambda }(0) = 0$, since $v((\lambda - 1)/(1 - \varepsilon ) ^{p}) > 0$). This implies $\hat g _{\lambda }(X)$ has a simple zero in $\widehat K _{\varepsilon }$, so it follows from (3.1) (a) that $g _{\lambda }(X)$ has a zero in $O _{v}(K(\varepsilon ))$; hence, $\lambda \in K(\varepsilon ) ^{\ast p}$. \par\vskip0.05truecm (b): Lemmas \ref{lemm3.8} (c) and \ref{lemm4.3} (a) imply $\nabla _{\kappa '}(K) \subset K(\varepsilon ) ^{\ast p} \cap K ^{\ast } = K ^{\ast p}$. \par\vskip0.05truecm (c): Clearly, $\nabla _{0}(K)$ contains $\lambda _{2}$ and $\lambda _{1} ^{-1}$, and $\lambda _{2}\lambda _{1} ^{-1} = 1 + (\lambda _{2} - \lambda _{1})\lambda _{1} ^{-1}$ \par\vskip0.07truecm\noindent lies in $\nabla _{p\kappa }(K(\varepsilon ))$, whence, $\lambda _{2}\lambda _{1} ^{-1} \in (K(\varepsilon ) ^{\ast p} \cap K ^{\ast }) = K ^{\ast p}$, as claimed. \end{proof} \par
\noindent {\bf Definition~1.} An element $\lambda \in \nabla _{0}(K)$, where $(K, v)$ is an HDV-field of mixed characteristic $(0, p)$, is called normal over $K$ (or $K$-normal), if $\lambda \notin K ^{\ast p}$ and $v(\lambda - 1) \ge v(\lambda ^{\prime } - 1)$, for each element $\lambda ^{\prime }$ of the coset $\lambda K ^{\ast p}$. \par
When $\lambda \notin K ^{\ast p}$, $\lambda K ^{\ast p}$ contains $K$-normal elements, as Lemma \ref{lemm4.3} (b) and the cyclicity of $v(K)$ show that the system $v(\lambda ^{\prime } - 1)$, $\lambda ^{\prime } \in \lambda K ^{\ast p}$, contains a maximal element $v(\xi - 1)$ (and $\xi $ is $K$-normal). Our next lemma characterizes $K$-normal elements. Its conclusions follow from Lemma \ref{lemm3.1} and \cite{Hyo}, Lemma~(2-16), if $K$ contains a primitive $p$-th root of unity. Stating the lemma, we use the implication $pv(p)/(p - 1) \in v(K) \Rightarrow v(p)/(p - 1) \in v(K)$. \par
\begin{lemm} \label{lemm4.4} Let $(K, v)$ be an {\rm HDV}-field of mixed characteristic $(0, p)$, and let $\varepsilon $ be a primitive $p$-th root of unity in $K _{\rm sep}$. Suppose that $\lambda \in \nabla _{0}(K)$, put $\pi = \lambda - 1$, $\kappa = v(p)/(p - 1)$, and let $K ^{\prime }$ be an extension of $K$ in $K _{\rm sep}$ obtained by adjunction of a $p$-th root $\lambda ^{\prime }$ of $\lambda $. Then $\lambda $ is $K$-normal if and only if one of the following three conditions is fulfilled: \par {\rm (a)} $v(\pi ) < p\kappa $ and $v(\pi ) \notin pv(K)$; when this holds, $K ^{\prime }/K$ is {\rm TR}; \par {\rm (b)} $v(\pi ) < p\kappa $ and $\pi = \pi _{1} ^{p}a$, for some $\pi _{1} \in K$, $a \in O _{v}(K) ^{\ast }$ with $\hat a \notin \widehat K ^{\ast p}$; in this case, $\hat a \in \widehat K ^{\prime p}$ and $\widehat K ^{\prime }/\widehat K$ is purely inseparable of degree $p$; \par {\rm (c)} $v(\pi ) = p\kappa $, and for any $\pi _{1} \in K$ with $v(\pi _{1}) = \kappa $, the polynomial $X ^{p} + \hat bX - \hat d \in \widehat K[X]$ is irreducible over $\widehat K$, $\hat b$ and $\hat d$ being the residue classes of the elements $b = p/\pi _{1} ^{p-1}$ and $d = \pi /\pi _{1} ^{p}$, respectively; when this holds, $K ^{\prime }/K$ is inertial and $K(\sqrt[(p-1)]{-b} \ ) = K(\varepsilon )$. \end{lemm} \par
\begin{proof} Put $\pi ^{\prime } = \lambda ^{\prime } - 1$. The conditions of the lemma show that $v(\pi ) > 0$ and $\lambda ^{\prime } \in \nabla _{0}(K ^{\prime })$, i.e. $v(\pi ^{\prime }) > 0$. In view of Lemma \ref{lemm4.3} (b), one may assume, for the proof, that $v(\pi ) \le p\kappa $. Hence, by Lemma \ref{lemm4.1} (a) and (b) (applied to $(1 + \pi ^{\prime }) ^{p} = 1 + \pi$), $v(\pi ^{\prime }) \le \kappa $, where equality holds only in case $v(\pi ) = p\kappa $. Our proof proceeds in three steps. \par\vskip0.05truecm Step 1. Let $v(\pi ) < p\kappa $ and $\pi $ violate both conditions (a) and (b). Then \par\vskip0.05truecm\noindent $\lambda = 1 + \pi _{0} ^{p}a _{0} ^{p} + \pi _{0} ^{\prime }$, for some $a _{0} \in O _{v}(K) ^{\ast }$ and $\pi _{0}, \pi _{0} ^{\prime } \in K$, such that $v(\pi _{0} ^{p}) = v(\pi ) < v(\pi _{0} ^{\prime })$. Therefore, applying Lemma \ref{lemm4.1} to $(1 - \pi _{0}a _{0}) ^{p}$, one obtains that $v(\lambda (1 - \pi _{0}a _{0}) ^{p} - 1) > v(\pi ) = v(\lambda - 1)$; hence, $\lambda $ is not $K$-normal. \par\vskip0.05truecm Step 2. Assume now that $\pi $ satisfies condition (a) or (b) of Lemma \ref{lemm4.4}. Then, for each $\tilde \lambda \in \nabla _{0}(K)$ with $v(\tilde \lambda - 1) > v(\pi )$, the element $\lambda \tilde \lambda - 1$ has value $v(\lambda \tilde \lambda - 1) = v(\pi )$ and satisfies the same condition as $\pi $. Moreover, under condition (b), $(\lambda \tilde \lambda - 1)/\pi _{1} ^{p}$ lies in $O _{v}(K) ^{\ast }$ and its residue class equals $\hat a$. Observing also that $\tilde \lambda ^{-1} \in \nabla _{0}(K)$ and $v(\tilde \lambda ^{-1} - 1) = v(\tilde \lambda - 1)$, one concludes that the $K$-normality of $\lambda $ will be proved, if we show that $\lambda \notin K ^{\ast p}$. The equality $(1 + \pi ^{\prime }) ^{p} = 1 + \pi = \lambda $ and Lemma \ref{lemm4.1} (a) imply $v(\pi - \pi ^{\prime p}) > v(\pi ) = v(\pi ^{\prime p}) = pv(\pi ^{\prime })$, proving that $v(\pi ) \in pv(K ^{\prime })$. 
When $v(\pi ) \notin pv(K)$, this means that $K ^{\prime }/K$ is TR, $[K ^{\prime }\colon K] = p$ and $\lambda \notin K ^{\ast p}$. Similarly, it follows from Lemma \ref{lemm4.1} (a) that if $\pi = \pi _{1} ^{p}a$, where $\pi _{1} \in K$ and $a \in O _{v}(K) ^{\ast }$, then $\pi ^{\prime } = \pi _{1}a _{1}$, for some $a _{1} \in O _{v}(K ^{\prime }) ^{\ast }$ with $v(a - a _{1} ^{p}) > 0$; hence, $\hat a _{1} ^{p} = \hat a$, proving that $\hat a \in \widehat K ^{\prime p}$. This shows that if $\hat a \notin \widehat K ^{p}$, then $[K ^{\prime }\colon K] = [\widehat K ^{\prime }\colon \widehat K] = p$, $\widehat K ^{\prime }/\widehat K$ is purely inseparable and $\lambda \notin K ^{\ast p}$. Thus our assumptions on $\pi $ guarantee that, in both cases, $\tilde \lambda ^{-1}\lambda \notin K ^{\ast p}$, for any $\tilde \lambda \in \nabla _{0}(K)$ with $v(\tilde \lambda - 1) > v(\pi )$, which implies $\lambda $ is $K$-normal. \par Step 3. Suppose that $v(\pi ) = p\kappa $, take $\pi _{1} \in K$ so that $v(\pi _{1}) = \kappa $, define $b$ and $d$ as in Lemma \ref{lemm4.4} (c), and put $g(X) = \pi _{1} ^{-p}[(1 + \pi _{1}X) ^{p} - \lambda ]$. It is easily verified that $v(b) = v(d) = 0$, $g(\pi ^{\prime }/\pi _{1}) = 0$, and $g(X) \in K[X]$ is monic; also, it follows from Lemma \ref{lemm3.8} (a) that $g(X)$ is irreducible over $K$ if and only if $\lambda \notin K ^{\ast p}$. At the same time, Lemma \ref{lemm4.3} (b) implies $\lambda \notin K ^{\ast p}$ if and only if $\lambda $ is $K$-normal. Note further that, by Lemma \ref{lemm4.2}, $g(X) \in O _{v}(K)[X]$ and its reduction $\hat g(X)$ modulo $M _{v}(K)$ equals the trinomial $X ^{p} + \hat bX - \hat d \in \widehat K[X]$. In addition, the equality $v(b) = 0$ shows that $\hat g(X)$ is separable. Using (3.1)(a), one also proves that $\hat g(X)$ is irreducible over $\widehat K$ if and only if $\lambda \notin K ^{\ast p}$. 
It is now easy to see that $\lambda $ is $K$-normal if and only if $K ^{\prime }/K$ is inertial with $[K ^{\prime }\colon K] = p$. \par For the rest of the proof of Lemma \ref{lemm4.4} (c), we assume that $\lambda \notin K ^{\ast p}$, fix a root $\xi \in K _{\rm sep}$ of the binomial $b(X) = X ^{p-1} + b$, and put $B = K(\xi )$. We first show that $[B\colon K] \mid p - 1$. As char$(\widehat K) = p$, $\widehat K$ contains a primitive $(p - 1)$-th root of unity $\hat \rho $, and since $v$ is Henselian, (3.1) (a), applied to the binomial $X ^{p-1} - 1$, shows that $\hat \rho $ can be lifted to such a root $\rho \in K$. Hence, the fact that $[B\colon K] \mid p - 1$ follows from Galois theory (cf. \cite{L}, Ch. VI, Theorem~6.2). \par Finally, we prove that $B = K(\varepsilon )$. It is easily verified that $\pi ^{\prime }/(\pi _{1}\xi )$ is a root of the monic polynomial $h(X) = \xi ^{-p}g(\xi X)$. Observing that $v(\xi ) = 0$, one obtains from the already noted properties of $g(X)$ that $h(X) \in O _{v}(B)[X]$ and the reduction $\hat h(X) \in \widehat B[X]$ of $h(X)$ modulo $M _{v}(B)$ is an Artin-Schreier trinomial. Moreover, it becomes clear that $\hat h(X) = \hat \xi ^{-p}\hat g(\hat \xi X)$, which implies in conjunction with Lemma \ref{lemm3.8} (b) (and the divisibility of $p - 1$ by $[B\colon K]$) that $\hat g(X)$ and $\hat h(X)$ are irreducible over $\widehat B$. Hence, by Lemma \ref{lemm3.3} and the Artin-Schreier theorem (cf. \cite{L}, Ch. VI, Sect. 6), applied to $\hat h(X)$, $K ^{\prime }B/B$ is an inertial Galois extension of degree $p$. In view of the definition of $K ^{\prime }$, this proves that $\varepsilon \in B$. Let now $\widehat K _{\varepsilon }$ be the residue field of $(K(\varepsilon ), v)$, and set $g _{0}(X) = (1 - \varepsilon ) ^{-p}[(1 + (1 - \varepsilon )X) ^{p} - \lambda ]$. 
Then $g _{0}(X)$ is monic, and it follows from Lemmas \ref{lemm4.2} (a), \ref{lemm3.9} (a) that $g _{0}(\pi ^{\prime }/(1 - \varepsilon )) = 0$ and $g _{0}(X) \in O _{v}(K(\varepsilon ))[X]$. Moreover, Lemmas \ref{lemm3.8} (a) and \ref{lemm3.9} (c) imply that the reduction $\hat g _{0}(X) \in \widehat K _{\varepsilon }[X]$ is an Artin-Schreier trinomial irreducible over $\widehat K _{\varepsilon }$. Lemma \ref{lemm4.2} (b), applied to $g(X)$ and $g _{0}(X)$, further indicates that if $c = (1 - \varepsilon )/\pi _{1}$, then $v(c) = 0$ and $\hat c ^{p-1} = -\hat b \in \widehat K _{\varepsilon }$. Hence, by (3.1) (a), $b(X)$ has a root in $K(\varepsilon )$. As $K$ contains a primitive $(p - 1)$-th root of unity, this means that all roots of $b(X)$ in $K _{\rm sep}$ in fact lie in $K(\varepsilon )$. It is now obvious that $B = K(\varepsilon )$, so Lemma \ref{lemm4.4} is proved. \end{proof} \par
It follows from Lemmas \ref{lemm3.2} (b) and \ref{lemm4.4} that if $\alpha \in K$ is normal over $K$, then it is normal over any finite extension of $K$ of prime-to-$p$ degree. \par
\noindent {\bf Definition~2.} In the setting of Lemma \ref{lemm4.4}, an element $\lambda \in \nabla _{0}(K)$ is called (u)-normal over $K$, where $(u) \in \{(a), (b), (c)\}$, if it satisfies condition (u). \par
Next we present Albert's characterization \cite{A1}, Ch. IX, Theorem~6, of Galois extensions of prime degree different from the characteristic of the ground field. The characterization is based on Lemma \ref{lemm3.8} (c). \par
\begin{lemm} \label{lemm4.5} Assume that $K$ is an arbitrary field, $\varepsilon $ is a primitive $p$-th root of unity in $K _{\rm sep}$, for some $p \in \mathbb P \setminus \{{\rm char}(K)\}$, and $\varphi $ a generator of $\mathcal{G}(K(\varepsilon )/K)$. Fix an integer $s > 0$ satisfying $\varphi (\varepsilon ) = \varepsilon ^{s}$, and let $\lambda $ be an element of $K(\varepsilon ) ^{\ast }$. Then the following conditions are equivalent: \par {\rm (a)} $\lambda \notin K(\varepsilon ) ^{\ast p}$ and $\varphi (\lambda )\lambda ^{-s} \in K(\varepsilon ) ^{\ast p}$; \par {\rm (b)} If $L _{\lambda } ^{\prime } = K(\varepsilon )(\sqrt[p]{\lambda })$, then $L _{\lambda } ^{\prime }$ contains as a subfield a Galois extension $L _{\lambda }$ of $K$ of degree $p$ (equivalently, the extension $L _{\lambda } ^{\prime }/K$ is Galois with $\mathcal{G}(L _{\lambda } ^{\prime }/K)$ cyclic and $[L _{\lambda } ^{\prime }\colon K] = p[K(\varepsilon )\colon K]$). \end{lemm} \par
Denote by $K(p, 1)$ the compositum of the extensions of $K$ in $K(p)$ of degree $p$, put $K _{\mathcal{G}} = \{\alpha \in K(\varepsilon ) ^{\ast }\colon \ \varphi (\alpha )\alpha ^{-s} \in K(\varepsilon ) ^{\ast p}\}$, and fix $\ell \in \mathbb{N}$ so that $s\ell \equiv 1 ({\rm mod} \ p)$. Obviously, $K _{\mathcal{G}}$ is a subgroup of $K(\varepsilon ) ^{\ast }$ including $K(\varepsilon ) ^{\ast p}$. Note also that $K(p, 1)/K$ is a Galois extension with $\mathcal{G}(K(p, 1)/K)$ abelian of period $p$; this can be deduced from Galois theory and the normality of maximal subgroups of nontrivial finite $p$-groups (see \cite{L}, Ch. I, Sect. 6; Ch. VI, Theorem~1.14). With this notation, Lemma \ref{lemm4.5} can be supplemented as follows: \par
\begin{lemm} \label{lemm4.6} {\rm (a)} There is a bijection $\varrho $ of the set $\Sigma _{p}$ of finite extensions of $K$ in $K(p,1)$ upon the set $\mathcal{G}_{p}$ of finite subgroups of $K _{\mathcal{G}}/K(\varepsilon ) ^{\ast p}$, such that \par\noindent $\varrho (\Lambda ) \cong \mathcal{G}(\Lambda /K) \cong \mathcal{G}(\Lambda (\varepsilon )/K(\varepsilon ))$, for each $\Lambda \in \Sigma _{p}$; \par {\rm (b)} For each $\lambda \in K(\varepsilon ) ^{\ast }$, the product $\Omega (\lambda ) = \prod _{j=0} ^{m-1} \varphi ^{j}(\lambda ) ^{\ell (j)}$ lies in $K _{\mathcal{G}}$, where $m = [K(\varepsilon )\colon K]$ and $\ell (j) = \ell ^{j}$, $j = 0, \dots , m - 1$. \end{lemm} \par
\begin{proof} It follows from Lemma \ref{lemm3.8} (c) and Galois theory (cf. \cite{L}, Ch. VI, Theorem~1.12) that the mapping $\sigma $ of $\Sigma _{p}$ into the set $\Sigma _{p} ^{\prime }$ of finite extensions of $K(\varepsilon )$ in $K(p, 1)(\varepsilon )$, by the rule $\Lambda \to \Lambda (\varepsilon )$, is bijective with $\mathcal{G}(\Lambda /K) \cong \mathcal{G}(\Lambda (\varepsilon )/K(\varepsilon ))$, for each $\Lambda \in \Sigma _{p}$. Moreover, by Kummer theory and Lemma \ref{lemm4.5}, there is a bijection $\varrho ': \Sigma _{p} ^{\prime } \to \mathcal{G}_{p}$, such that $\varrho '(\Lambda ^{\prime }) \cong \mathcal{G}(\Lambda ^{\prime }/K(\varepsilon ))$, for each $\Lambda ^{\prime } \in \Sigma _{p} ^{\prime }$. Therefore, the composition $\varrho = \varrho ' \circ \sigma $ has the properties required by Lemma \ref{lemm4.6} (a). \par We prove Lemma \ref{lemm4.6} (b). If $\varepsilon \in K$, then the assertion is obvious, so we assume that $\varepsilon \notin K$, i.e. $m \ge 2$. It is easily verified that $$\varphi (\Omega (\lambda )) = \prod _{j=0} ^{m-1} \varphi ^{j+1}(\lambda ) ^{\ell (j)} = \prod _{j=1} ^{m} \varphi ^{j}(\lambda ) ^{\ell (j-1)} = \lambda ^{\ell (m-1)}\prod _{j=1} ^{m-1} \varphi ^{j}(\lambda ) ^{\ell (j-1)},$$ \par\vskip0.15truecm\noindent ${\rm and} \ \Omega (\lambda ) ^{s} = \Omega (\lambda ^{s}) = \prod _{j=0} ^{m-1} \varphi ^{j}(\lambda ) ^{s.\ell (j)}$, for each $\lambda \in K(\varepsilon )^{\ast }$. Since \par\vskip0.14truecm\noindent $s ^{m} \equiv s\ell \equiv 1 ({\rm mod} \ p)$, it follows that $\ell ^{m} \equiv 1 ({\rm mod} \ p)$, $s \equiv \ell ^{m-1} ({\rm mod} \ p)$, \par\vskip0.14truecm\noindent $${\rm and} \ s.\ell (j) \equiv \ell (j - 1) ({\rm mod} \ p), j = 1, \dots, m - 1,$$ so our calculations prove that $\varphi (\Omega (\lambda )).\Omega (\lambda ) ^{-s} \in K(\varepsilon ) ^{\ast p}$, as claimed. \end{proof} \par
\begin{rema} \label{rema4.7} Let $(K, v)$ be an HDV-field of mixed characteristic $(0, p)$, and let $\varepsilon $ be a primitive $p$-th root of unity in $K _{\rm sep}$. Then: \par {\rm (a)} The existence of a {\rm (c)}-normal element over $K$ ensures that $\varepsilon \in K _{\rm ur}$. \par {\rm (b)} It can be deduced from Lemma \ref{lemm4.5} that if $K(\varepsilon )/K$ is TR and $\varepsilon \notin K$ (this holds, for example, if $v(p)$ generates $v(K)$), then each Galois extension $L$ of $K$ of degree $p$ is $K$-isomorphic to $L _{\lambda (L)}$, for some $\lambda (L) \in K _{\mathcal{G}} \cap \nabla _{0}(K(\varepsilon ))$. \par {\rm (c)} When $\langle v(p)\rangle = v(K)$, we have $\langle v(1 - \varepsilon )\rangle = v(K(\varepsilon ))$, which enables one to obtain from Lemma \ref{lemm4.5}, the preceding observation and Lemma \ref{lemm4.4} (applied over $K(\varepsilon )$) that a Galois extension of $K$ of degree $p$ is either inertial or TR (this is a special case of Miki's theorem, see \cite{MKu}, 12.2). Moreover, it turns out that degree $p$ extensions of $K _{\rm ur}$ in $K _{\rm ur}(p)$ are TR (whereas finite extensions of $K _{\rm ur}$ in $K _{\rm ur}(p)$ need not be TR unless $\widehat K$ is perfect, see Lemmas \ref{lemm5.4} and \ref{lemm3.2} {\rm (b)}). \end{rema} \par
We conclude this section with the following lemma. As demonstrated in Section 6, it makes it possible to turn Lemmas \ref{lemm4.3}, \ref{lemm4.4} and \ref{lemm4.6} (a) into the tools we need for the proof of Lemma \ref{lemm2.2} in the case where $v(p) \in pv(K)$. \par
\begin{lemm} \label{lemm4.8} Let $(K, v)$ be an {\rm HDV}-field of mixed characteristic $(0, p)$. Fix a primitive $p$-th root of unity $\varepsilon \in K _{\rm sep}$, a generator $\varphi $ of $\mathcal{G}(K(\varepsilon )/K)$, and some $s \in \mathbb{N}$ so that $\varphi (\varepsilon ) = \varepsilon ^{s}$. Take any $\alpha \in K(\varepsilon )$ with $v(\alpha ) > v(p)$, and put $\lambda = 1 + \alpha $. Then $\varphi (\lambda )\lambda ^{-s} \in K(\varepsilon ) ^{\ast p}$ in case $v(\varphi (\alpha ) - s\alpha ) > pv(p)/(p - 1)$. \par\vskip0.12truecm\noindent This holds, if $\alpha = p(1 - \varepsilon )\xi ^{-1}$, where $\xi \in K ^{\ast }$ with $v(\xi ) < v(p)/(p - 1)$. \end{lemm} \par
\begin{proof} Put $\kappa = v(p)/(p - 1)$, and {\it use the relation $\approx $ introduced on page \pageref{approx}}. Since for $j \ge 2$, $v(\alpha ^{j}) > 2v(p) \ge p\kappa $, Newton's binomial formula shows that \par\vskip0.11truecm\noindent $\lambda ^{s} \approx 1 + s\alpha $; hence, $\lambda ^{-s} \approx 1 - s\alpha $. Note also that \par\vskip0.12truecm\noindent $v(\varphi (\alpha )) = v(\alpha )$ because $v$ is Henselian (apply (3.1) (b)). Thus, $$\varphi (\lambda )\lambda ^{-s} \approx (1 + \varphi (\alpha ))(1 - s\alpha ) \approx (1 + \varphi (\alpha ) - s\alpha ) \approx 1.$$ Hence, $\varphi (\lambda )\lambda ^{-s} \in K(\varepsilon ) ^{\ast p}$, by Lemma \ref{lemm4.3} (a). \par\vskip0.15truecm Let now $\alpha = p(1 - \varepsilon )\xi ^{-1}$, where $\xi \in K ^{\ast }$ with $0 < v(\xi ) < \kappa $. \par\vskip0.171truecm\noindent Then Lemma \ref{lemm3.9} (b) implies the following, for each $t \in \mathbb{N}$ not divisible by $p$: $$v(1 - \varepsilon ^{t} - t(1 - \varepsilon )) = v((1 - \varepsilon ) \sum _{j=0} ^{t-1} (\varepsilon ^{j} - 1)) \ge 2\kappa .$$ Therefore, $v(\alpha ) = v(p) + \kappa - v(\xi ) > v(p)$ and \par\vskip0.171truecm\noindent $v(\varphi (\alpha ) - s\alpha ) = v(p[(1 - \varepsilon ^{s}) - s(1 - \varepsilon )]\xi ^{-1}) \ge v(p) + 2\kappa - v(\xi ) > p\kappa .$ \end{proof}
\section{\bf Proof of Lemma \ref{lemm2.2} in case char$(K) = 0$ and $v(p) \notin pv(K)$} \par
In this section, we consider degree $p$ cyclic extensions related to Lemma \ref{lemm4.4} (a) and (b), which allows us to prove Lemma \ref{lemm2.2} and Theorem \ref{theo2.1} in the case where char$(K) = 0$ and $v(p) \notin pv(K)$. Our starting point is the following lemma. \par
\begin{lemm} \label{lemm5.1} Let $(K, v)$ be an {\rm HDV}-field of mixed characteristic $(0, p)$, and let $\varepsilon \in K _{\rm sep}$ be a primitive $p$-th root of unity, $\varphi $ a generator of $\mathcal{G}(K(\varepsilon )/K)$, $s$ and $\ell $ positive integers, such that $\varphi (\varepsilon ) = \varepsilon ^{s}$ and $s\ell \equiv 1 ({\rm mod} \ p)$. Assume that $[K(\varepsilon )\colon K] = m$, and $\lambda = 1 + (1 - \varepsilon ) ^{p}\pi ^{-1}$, for some $\pi \in K$ with $0 < v(\pi ) < p\kappa $, where $\kappa = v(p)/(p - 1)$. Denote by $\bar \lambda $ the element $\Omega (\lambda )$ defined in Lemma \ref{lemm4.6} {\rm (b)}, and let $L _{\bar \lambda }$ be the extension of $K$ in $K _{\rm sep}$ associated with $\bar \lambda $ in accordance with Lemma \ref{lemm4.5} {\rm (b)}. Then: \par {\rm (a)} If $v(\pi ) \notin pv(K)$, then $\lambda $ and $\bar \lambda $ are {\rm (a)}-normal over $K(\varepsilon )$; in addition, $[L _{\bar \lambda }\colon K] = p$, and $L _{\bar \lambda }/K$ is both Galois and {\rm TR}; \par {\rm (b)} If $\pi = \pi _{1} ^{p}a$, where $\pi _{1} \in K$, $a \in O _{v}(K) ^{\ast }$ and $\hat a \notin \widehat K ^{p}$, then $\lambda $ and $\bar \lambda $ are {\rm (b)}-normal over $K(\varepsilon )$; also, $L _{\bar \lambda }/K$ is Galois, $[L _{\bar \lambda }\colon K] = p$ and $\widehat L _{\bar \lambda } = \widehat K(\sqrt[p]{\hat a})$. \end{lemm} \par
\begin{proof} Our assumptions and Lemma \ref{lemm3.8} (c) imply $v(\pi ) \in pv(K)$ if and only if $v(\pi ) \in pv(K(\varepsilon ))$, and $v(\lambda - 1) \in pv(K(\varepsilon ))$ if and only if $v(\pi ) \in pv(K)$. They prove that $\widehat K _{\varepsilon } ^{p} \cap \widehat K = \widehat K ^{p}$, $\widehat K _{\varepsilon }$ being the residue field of $(K(\varepsilon ), v)$. \par\vskip0.11truecm\noindent Putting $e _{n} = \sum _{\nu =0} ^{n-1} \varepsilon ^{\nu }$, for each $n \in \mathbb{N}$, one obtains from Lemma \ref{lemm3.9} (a), (b) \par\vskip0.11truecm\noindent that $v(n - \varepsilon ^{u}.e _{n}) \ge v(1 - \varepsilon )$, for any pair $u, n \in \mathbb{N}$ with $p \nmid n$. Since $p \mid n ^{p} - n$ \par\vskip0.11truecm\noindent (by Fermat's little theorem), $v(1 - \varepsilon ) = \kappa $, and $n ^{p} - e _{n} ^{p} = \prod _{u=0} ^{p-1} (n - \varepsilon ^{u}.e _{n})$, \par\vskip0.11truecm\noindent this shows that $v(e _{n} ^{p} - n) \ge v(p),$ which implies the following: \par \vskip0.25truecm\noindent (5.1) $v((1 - \varepsilon ^{n}) ^{p} - n(1 - \varepsilon ) ^{p}) \ge v((1 - \varepsilon ) ^{p}) + v(p) > p\kappa .$ \par \vskip0.22truecm\noindent Our proof of Lemma \ref{lemm5.1} also relies on the following facts: \par \vskip0.22truecm\noindent (5.2) (a) $v(\bar \lambda - (1 + m(1 - \varepsilon ) ^{p}\pi ^{-1})) > v((1 - \varepsilon ) ^{p}\pi ^{-1})$; \par
(b) $v(\bar \lambda - 1) = v(m(1 - \varepsilon ) ^{p}\pi ^{-1}) = p\kappa - v(\pi )$. \par \vskip0.22truecm\noindent The equalities in (5.2) (b) follow from (5.2) (a) (and the equality $v(m) = 0$ implied by Lemma \ref{lemm3.8} (c)). To prove (5.2) (a) {\it we use the relation $\sim $ defined on page \pageref{approx} ($\sim $ depends on $\pi $)}. As $s\ell \equiv 1 ({\rm mod} \ p)$, the relations below, where $s(j) = s ^{j}$ and $\ell (j) = \ell ^{j}$, include the content of (5.2) (a) (and forms of (5.1)): $$\bar \lambda = \prod _{j=0} ^{m-1} [1 + (1 - \varepsilon ^{s(j)}) ^{p}\pi ^{-1}] ^{\ell (j)} \sim 1 + \sum _{j=0} ^{m-1} \ell (j)(1 - \varepsilon ^{s(j)}) ^{p}\pi ^{-1}$$ $$\sim 1 + \sum _{j=0} ^{m-1} \ell (j)s(j)(1 - \varepsilon ) ^{p}\pi ^{-1} \sim 1 + m(1 - \varepsilon ) ^{p}\pi ^{-1}.$$ \noindent Statements (5.2) and observations at the beginning of our proof imply the former parts of Lemma \ref{lemm5.1} (a) and (b), so we assume further that either $v(\pi ) \notin pv(K)$ or $\pi = \pi _{1} ^{p}a$, for some $\pi _{1} \in K$ and $a \in O _{v}(K) ^{\ast }$ with $\hat a \notin \widehat K ^{p}$. In the former case, $\lambda $ and $\bar \lambda $ are (a)-normal (over $K(\varepsilon )$), and in the latter one, they are (b)-normal. Let $L _{\bar \lambda } ^{\prime } = K(\varepsilon , \bar \lambda ')$, where $\bar \lambda ' \in K _{\rm sep}$ and $\bar \lambda '^{p} = \bar \lambda $. The normality of $\bar \lambda $ over $K(\varepsilon )$ ensures that $[L _{\bar \lambda } ^{\prime }\colon K(\varepsilon )] = p$. Using Lemma \ref{lemm4.4}, one obtains that: if $\bar \lambda $ is (a)-normal, then $L _{\bar \lambda } ^{\prime }/K(\varepsilon )$ is TR; when $\bar \lambda $ is (b)-normal, $\widehat L _{\bar \lambda } ^{\prime }/\widehat K _{\varepsilon }$ is inseparable of degree $p$ with $\hat a \in \widehat L _{\bar \lambda } ^{\prime p}$. 
Also, it follows from Lemmas \ref{lemm4.5}, \ref{lemm4.6} (b) and the $K(\varepsilon )$-normality of $\bar \lambda $ that $L _{\bar \lambda } ^{\prime } = L _{\bar \lambda }(\varepsilon )$, and the extension $L _{\bar \lambda }$ of $K$ in $L _{\bar \lambda } ^{\prime }$ pointed out in the statement of Lemma \ref{lemm5.1} is Galois with $[L _{\bar \lambda }\colon K] = p$. As $[L _{\bar \lambda } ^{\prime }\colon L _{\bar \lambda }] = m$ and $m \mid p - 1$, these observations prove the following: $L _{\bar \lambda }/K$ is TR if and only if so is $L _{\bar \lambda } ^{\prime }/K(\varepsilon )$; $\widehat L _{\bar \lambda }/\widehat K$ is inseparable of degree $p$ if and only if so is $\widehat L _{\bar \lambda } ^{\prime }/\widehat K _{\varepsilon }$. Note finally that $[\widehat L _{\bar \lambda } ^{\prime }\colon \widehat L _{\bar \lambda }] \mid [L _{\bar \lambda } ^{\prime }\colon L _{\bar \lambda }]$. This implies together with Lemma \ref{lemm3.8} (b) that if $\bar \lambda $ is (b)-normal, then $\hat a \in \widehat L _{\bar \lambda } ^{p}$, which completes our proof. \end{proof} \par
Lemma \ref{lemm3.6} and our next lemma prove Theorem \ref{theo2.1} in case char$(K) = 0$ and $v(p) \notin pv(K)$. In this situation, our proof of the lemma relies on the fact (see \cite{FV}, Ch. 2, (3.6), and \cite{E3}, Theorem~15.3.5) that a finite extension $E ^{\prime }$ of a discrete valued field $(E, w)$ is TR relative to $w$ if and only if $E ^{\prime }/E$ has a primitive element $\theta $ whose minimal polynomial $f$ over $E$ is Eisenstein at $w$, i.e. $f$ is monic, all of its coefficients but the leading one lie in $M _{w}(E)$, and the free coefficient of $f$ generates $M _{w}(E)$ as an ideal of $O _{w}(E)$. \par
\begin{lemm} \label{lemm5.2} Let $(K, v)$ be an {\rm HDV}-field of mixed characteristic $(0, p)$. Suppose that one of the following two conditions is satisfied: \par {\rm (a)} $\widehat K$ is an infinite perfect field; \par {\rm (b)} $\widehat K$ is imperfect and $v(p) \notin pv(K)$. \par\noindent Then there exist {\rm TR} and Galois extensions $M _{\mu }/K$, $\mu \in \mathbb{N}$, such that $[M _{\mu }\colon K]$ $= p ^{\mu }$ and $\mathcal{G}(M _{\mu }/K)$ is abelian of period $p$, for each $\mu $. \end{lemm} \par
\begin{proof} We assume, in agreement with conditions (a) and (b), that $\widehat K$ is infinite. Since the prime subfield, say $\mathbb{F}$, of $\widehat K$ is finite, this ensures that $\widehat K/\mathbb{F}$ is an infinite extension, whence, there is a sequence $\tilde b = b _{\mu } \in O _{v}(K) ^{\ast }$, $\mu \in \mathbb{N}$, such that the system $\bar b = \hat b _{\mu } \in \widehat K$, $\mu \in \mathbb{N}$, is linearly independent over $\mathbb{F}$. Denote by $V$ the $\mathbb{F}$-linear span of the set $\{\hat b _{\mu }\colon \mu \in \mathbb{N}\}$ and fix a primitive $p$-th root of unity $\varepsilon \in K _{\rm sep}$, a generator $\varphi $ of $\mathcal{G}(K(\varepsilon )/K)$, and integers $s$, $\ell $ as in Lemma \ref{lemm5.1}. Define $K _{\mathcal{G}}$ and $\Omega \colon K(\varepsilon ) ^{\ast } \to K _{\mathcal{G}}$ as in Lemma \ref{lemm4.6}, and put $m = [K(\varepsilon )\colon K]$ and $\lambda _{\mu } = \Omega (1 + (1 - \varepsilon ) ^{p}\pi ^{-1}b _{\mu })$, $\mu \in \mathbb{N}$, where $\pi \in K$ is fixed so that $v(\pi ) \notin pv(K)$ and $0 < v(\pi ) \le v(p)$. Take a $p$-th root $\eta _{\mu } \in K _{\rm sep}$ of $\lambda _{\mu }$, for each $\mu \in \mathbb{N}$, and consider the fields $L _{\mu } ^{\prime } = K(\varepsilon , \eta _{\mu })$, $\mu \in \mathbb{N}$. Lemmas \ref{lemm4.4} and \ref{lemm4.5} show that $[L _{\mu } ^{\prime }\colon K(\varepsilon )] = p$ and there is a unique Galois extension $L _{\mu }$ of $K$ in $L _{\mu } ^{\prime }$ of degree $[L _{\mu }\colon K] = p$. Let $L _{\infty } ^{\prime }$ be the compositum of the fields $L _{\mu } ^{\prime }$, $\mu \in \mathbb{N}$, and $\Lambda $ be the subgroup of $K(\varepsilon ) ^{\ast }$ generated by the set $K(\varepsilon ) ^{\ast p} \cup \{\lambda _{\mu }\colon \ \mu \in \mathbb{N}\}$. Obviously, $\Lambda $ is a subgroup of $K _{\mathcal{G}}$ including $K(\varepsilon ) ^{\ast p}$. 
It follows from the assumption on the sequence $\tilde b$ that, for each $h \in \Lambda \setminus K(\varepsilon ) ^{\ast p}$, the coset $hK(\varepsilon ) ^{\ast p}$ contains an element of the form $\lambda (h) = 1 + m(1 - \varepsilon ) ^{p}\pi ^{-1}\beta _{h} + \pi (h)$, where $\pi (h) \in K(\varepsilon )$, $v(\pi (h)) > v(m(1 - \varepsilon ) ^{p}\pi ^{-1})$, $\beta _{h} \in O _{v}(K) ^{\ast }$ and $\hat \beta _{h} \in V$. Therefore, by the assumptions on $\pi $, $\lambda (h)$ is (a)-normal over $K(\varepsilon )$ (so Lemma \ref{lemm5.1} (a) applies to it). This implies $\hat \beta _{h}$ is uniquely determined by $h$ and $\pi $, and does not depend on the choice of $\lambda (h)$ (see Step 2 of the proof of Lemma \ref{lemm4.4}). More precisely, if $h = \lambda _{\mu _{1}} ^{k_{1}} \dots \lambda _{\mu _{y}} ^{k _{y}}$, for some $y \in \mathbb{N}$, and $k _{1}, \dots , k _{y} \in \mathbb{N}$, with $p \nmid k _{j'}$, for at least one index $j'$, then $h \notin K(\varepsilon ) ^{\ast p}$, so one may put $\lambda (h) = h$ and $\beta _{h} = \sum _{j=1} ^{y} k _{j}b _{\mu _{j}}$, whence, $\hat \beta _{h} = \sum _{j=1} ^{y} k _{j}\hat b _{\mu _{j}}$. These observations prove that \par \vskip0.22truecm\noindent (5.3) $\{\lambda _{\mu }K(\varepsilon ) ^{\ast p}\colon \mu \in \mathbb{N}\}$ is a minimal generating set of $\Lambda /K(\varepsilon ) ^{\ast p}$, and there is a unique isomorphism $\rho $ of $\Lambda /K(\varepsilon ) ^{\ast p}$ upon the additive group of $V$, which maps the coset $\lambda _{\mu }K(\varepsilon ) ^{\ast p}$ into $\hat b _{\mu }$, for each $\mu \in \mathbb{N}$. \par \vskip0.22truecm\noindent Statement (5.3), the argument proving it, and Lemmas \ref{lemm4.6} and \ref{lemm5.1}~(a) imply that the fields $L _{\infty } ^{\prime }$ and $L _{\mu }$, $\mu \in \mathbb{N}$, satisfy the following: \par
\noindent (5.4) (a) $[L _{1} \dots L _{\mu }\colon K] = [L _{1} ^{\prime } \dots L _{\mu } ^{\prime }\colon K(\varepsilon )] = p ^{\mu }$, for each $\mu $; \par (b) The compositum $L _{\infty }$ of all $L _{\mu }$, $\mu \in \mathbb{N}$, is an infinite Galois extension of $K$ with $L _{\infty }(\varepsilon ) = L _{\infty } ^{\prime }$ and $\mathcal{G}(L _{\infty }/K)$ abelian of period $p$; \par (c) Every extension of $K$ in $L _{\infty }$ of degree $p$ is Galois and TR over $K$. \par
Suppose now that $\widehat K$ is perfect. Then every $R \in {\rm Fe}(K)$ contains as a subfield an inertial extension $R _{0}$ of $K$ with $\widehat R _{0} = \widehat R$ (cf. \cite{TW}, Proposition~A.17). In view of Lemmas \ref{lemm3.2} and \ref{lemm3.3} (c), this allows to deduce from (5.4) (b), (c) and Galois theory that finite extensions of $K$ in $L _{\infty }$ are TR. Thus the fields $M _{\mu } = L _{1} \dots L _{\mu }$, $\mu \in \mathbb{N}$, have the properties claimed by Lemma \ref{lemm5.2}. \par It remains for us to prove Lemma \ref{lemm5.2} (b). The idea of our proof has been borrowed from \cite{N}, 2.2.1. Identifying $\mathbb{Q}$ with the prime subfield of $K$, put $E _{0} = \mathbb{Q}(t _{0})$, where $t _{0} \in O _{v}(K) ^{\ast }$ is chosen so that $\hat t _{0} \notin \widehat K ^{p}$ (whence, $\hat t _{0}$ is transcendental over $\mathbb{F}$). Denote by $\omega $ and $v _{0}$ the valuations induced by $v$ upon $\mathbb{Q}$ and $E _{0}$, respectively, and fix a system $t _{\mu } \in K _{\rm sep}$, $\mu \in \mathbb{N}$, such that $t _{\mu } ^{p} = t _{\mu -1}$, for each $\mu > 0$. It is easy to see that $\mathbb{F}$ equals the residue field of $(\mathbb{Q}, \omega )$, and the fields $E _{\mu } = \mathbb{Q}(t _{\mu })$, $\mu \in \mathbb{N}$, are purely transcendental extensions of $\mathbb{Q}$. Let $v _{\mu }$ be the restricted Gauss valuation of $E _{\mu }$ extending $\omega $, in the sense of \cite{E3}, for each $\mu \in \mathbb{N}$. Clearly, for any pair of indices $\nu , \mu $ with $0 < \nu \le \mu $, $E _{\nu - 1}$ is a subfield of $E _{\mu }$ and $v _{\mu }$ is the unique prolongation of $v _{\nu - 1}$ on $E _{\mu }$. Hence, the union $E _{\infty } = \cup _{\mu =0} ^{\infty } E _{\mu }$ is a field with a unique valuation $v _{\infty }$ extending $v _{\mu }$, for every $\mu < \infty $. Denote by $\widehat E _{\mu }$ the residue field of $(E _{\mu }, v _{\mu })$, for each $\mu \in \mathbb{N} \cup \{0, \infty \}$. 
The Gaussian property of $v _{\mu }$, $\mu < \infty $, guarantees that $v _{\mu }(E _{\mu }) = \omega (\mathbb{Q})$, $v _{\mu}(t _{\mu }) = 0$, $\hat t _{\mu }$ is a transcendental element over $\mathbb{F}$ and $\widehat E _{\mu } = \mathbb{F}(\hat t _{\mu })$ (see \cite{E3}, Examples~4.3.2 and 4.3.3). Observing also that $\hat t _{\mu } ^{p} = \hat t _{\mu -1}$, $\mu \in \mathbb{N}$, $\widehat E _{\infty } = \cup _{\mu =1} ^{\infty } \widehat E _{\mu }$ and $\mathbb{F} ^{p} = \mathbb{F}$, one concludes that $\widehat E _{\infty }$ is infinite and perfect. It is therefore clear from \par\noindent Lemma \ref{lemm5.2} (a) and Grunwald-Wang's theorem (see Remark \ref{rema5.3}), that if $(E _{\infty } ^{\prime }, v _{\infty } ^{\prime })$ is a Henselization of $(E _{\infty }, v _{\infty })$ with $E _{\infty } ^{\prime } \subset K _{\rm sep}$, then there exist TR and Galois extensions $T _{\mu } ^{\prime }/E _{\infty } ^{\prime }$ and $T _{\mu }/E _{\infty }$, $\mu \in \mathbb{N}$, such that $[T _{\mu }\colon E _{\infty }] = [T _{\mu } ^{\prime }\colon E _{\infty } ^{\prime }] = p ^{\mu }$, $T _{\mu } ^{\prime } = T _{\mu }E _{\infty } ^{\prime }$, $\mathcal{G}(T _{\mu }/E _{\infty })$ is abelian of period $p$, and $\mathcal{G}(T _{\mu }/E _{\infty }) \cong \mathcal{G}(T _{\mu } ^{\prime }/E _{\infty } ^{\prime })$, for every $\mu $. Now fix an arbitrary index $\mu $, choose $\theta \in T _{\mu }$ so that the minimal polynomial $f(X)$ of $\theta $ over $E _{\infty }$ be Eisenstein at $v _{\mu }$, and take a sufficiently large index $k \ge \mu $ such that $f(X) \in E _{k}[X]$ and $f(X)$ splits over $E _{k}(\theta )$. Then the extension $E _{k}(\theta )/E _{k}$ is both TR and Galois with $\mathcal{G}(E _{k}(\theta )/E _{k}) \cong \mathcal{G}(T _{\mu }/E _{\infty })$, and $f(X)$ is Eisenstein at $v _{k}$. Let $\psi $ be the isomorphism $E _{k} \to E _{0}$ mapping $t _{k}$ into $t _{0}$. 
Then $\psi $ extends uniquely to a degree-preserving isomorphism $\psi ^{\prime }: E _{k}[X] \to E _{0}[X]$ of polynomial rings, such that $\psi ^{\prime }(X) = X$; also, $\psi ^{\prime }$ maps $O _{v _{k}}(E _{k})[X]$ into $O _{v _{0}}(E _{0})[X]$. Note that, for each $g(X) \in E _{k}[X]$, $\psi ^{\prime }$ induces canonically a ring isomorphism $\psi ^{\prime } _{g}: R _{k} \to R _{0}$ extending $\psi $, where $R _{k} = E _{k}[X]/(g(X))$ and $R _{0} = E _{0}[X]/(\psi ^{\prime }(g(X)))$. Clearly, $\psi ^{\prime }_{g}$ maps bijectively the set of roots of $g(X)$ in $R _{k}$ on the set of roots of $\psi ^{\prime }(g(X))$ in $R _{0}$. One also sees that $g(X)$ is irreducible over $E _{k}$ if and only if so is $\psi ^{\prime }(g(X))$ over $E _{0}$. Therefore, $R _{k}/E _{k}$ is a field extension if and only if so is $R _{0}/E _{0}$; when this occurs, $[R _{k}\colon E _{k}] = [R _{0}\colon E _{0}] = {\rm deg}(g)$. Moreover, it follows that $R _{k}/E _{k}$ is Galois if and only if so is $R _{0}/E _{0}$ (and this holds if and only if $g(X)$ is irreducible over $E _{k}$ and $R _{k}$ is a root field of $g(X)$ over $E _{k}$). Suppose now that $R _{k}/E _{k}$ is Galois. Then, for each $\sigma \in \mathcal{G}(R _{k}/E _{k})$, there is a unique $\sigma ^{\prime } \in \mathcal{G}(R _{0}/E _{0})$, such that $\sigma ^{\prime }(\psi ^{\prime } _{g}(r _{k})) = \psi ^{\prime } _{g}(\sigma (r _{k}))$, for every $r _{k} \in R _{k}$; in addition, the mapping of $\mathcal{G}(R _{k}/E _{k})$ into $\mathcal{G}(R _{0}/E _{0})$, by the rule $\sigma \to \sigma ^{\prime }$, is an isomorphism. Note finally that $v _{k}(e _{k}) = v _{0}(\psi (e _{k}))$, for every $e _{k} \in E _{k}$, which implies $g(X)$ is Eisenstein at $v _{k}$ if and only if so is $\psi ^{\prime }(g(X))$ at $v _{0}$; hence, $R _{k}/E _{k}$ is TR relative to $v _{k}$ if and only if so is $R _{0}/E _{0}$ relative to $v _{0}$. 
When $g(X) = f(X)$, these observations show that $\psi $ extends to an isomorphism of $E _{k}(\theta )$ on the root field $R \in {\rm Fe}(E _{0})$ of $\psi ^{\prime }(f(X))$ over $E _{0}$, and that $R/E _{0}$ is TR (relative to $v _{0}$) and Galois with $\mathcal{G}(R/E _{0}) \cong \mathcal{G}(E _{k}(\theta )/E _{k})$. As $v(p) \notin pv(K)$, one obtains from Lemma \ref{lemm3.7} and the described properties of $R/E _{0}$ (regarding $E _{0,{\rm sep}}$ as an $E _{0}$-subalgebra of $K _{\rm sep}$) that $RK/K$ is TR and Galois, $[RK\colon K] = p ^{\mu }$ and $\mathcal{G}(RK/K) \cong \mathcal{G}(R/E _{0})$ is abelian of period $p$. Because of the arbitrary choice of the index $\mu $, this proves Lemma \ref{lemm5.2} (b). \end{proof} \par
\begin{rema} \label{rema5.3} Lemma \ref{lemm3.1} shows that given a field $L$ with nonequivalent real-valued valuations $w _{1}, \dots , w _{n}$, for some $n \in \mathbb{N}$, Grunwald-Wang's theorem holds, if applied to a Henselization of $(L, w _{i})$ (instead of $(L _{w _{i}}, \bar w _{i})$), for $i = 1, \dots , n$. \end{rema} \par
\begin{lemm} \label{lemm5.4} Let $(K, v)$ be an {\rm HDV}-field of mixed characteristic $(0, p)$ with $v(p) \in pv(K)$ and $\widehat K \neq \widehat K ^{p}$. Let $\widetilde \Lambda /\widehat K$ be an inseparable extension of degree $p$. Then there exists $\Lambda \in I(K(p)/K)$ with $[\Lambda \colon K] = p$ and $\widehat \Lambda \cong \widetilde \Lambda $ over $\widehat K$. \end{lemm} \par
\begin{proof} The condition that $v(p) \in pv(K)$ means that there is $\pi _{1} \in K$ with $v(\pi _{1}) = v(p)/p$, so our conclusion follows at once from Lemma \ref{lemm5.1} (b). \end{proof} \par
\begin{prop} \label{prop5.5} Let $(K, v)$ be an HDV-field of mixed characteristic $(0, p)$ and with $\widehat K \neq \widehat K ^{p}$. Suppose that $v(p) \in pv(K)$ or $K$ contains a primitive $p$-th root of unity $\varepsilon $. Then each proper extension $\widetilde L$ of $\widehat K$ satisfying the inclusion $\widetilde L ^{p} \subseteq \widehat K$ is $\widehat K$-isomorphic to $\widehat L$, for some Galois extension $L$ of $K$, such that $v(L) = v(K)$ and $\mathcal{G}(L/K)$ is an abelian group of period $p$. \end{prop} \par
\begin{proof} If $v(p) \in pv(K)$, then our assertion follows from Lemmas \ref{lemm3.2}, \ref{lemm5.4} and Galois theory; when $\varepsilon \in K$, it can be deduced from Kummer theory. \end{proof} \par
\begin{rema} \label{rema5.6} Let $(K, v)$, $p$ and $\varepsilon \in K _{\rm sep}$ satisfy the conditions of Lemma \ref{lemm4.4}, and let $\widehat K \neq \widehat K ^{p}$. Take $c \in O _{v}(K)$ with $\hat c \notin \widehat K ^{p}$, and suppose that $K$ has a degree $p$ extension $C$ in $K(p)$, such that $\hat c \in \widehat C ^{p}$. By Lemma \ref{lemm5.4}, an extension of this kind exists if $v(p) \in pv(K)$ or $\varepsilon \in K$ (this need not hold in general, see Remark \ref{rema4.7} {\rm (c)}). It is easily verified that $v(C) = v(K)$, $v(z) \in pv(K)$, for each $z \in N(C/K)$, and $\hat z \in \widehat C ^{p}$ in case $v(z) = 0$. Therefore, if $[\widehat K\colon \widehat K ^{p}] \ge p ^{2}$, $\hat c, \hat b \in \widehat K$ are $p$-independent over $\widehat K ^{p}$, and $b \in O _{v}(K)$ is a pre-image of $\hat b$, then $b \notin N(C/K)$ and (by \cite{P}, Proposition~15.1~b) the cyclic $K$-algebra $V = (C/K, \tau , b)$ of degree $p$ lies in $d(K)$, $\tau $ being a generator of $\mathcal{G}(C/K)$. Since $v _{C}(\tau (\alpha ) - \alpha ) > v _{C}(\alpha )$, for any $\alpha \in C ^{\ast }$, this implies $\widehat V$ contains commuting $p$-th roots $\hat \eta _{c} = \sqrt[p]{\hat c}$ and $\hat \eta _{b} = \sqrt[p]{\hat b}$. Hence, by Lemma \ref{lemm3.4}, $v(V) = v(K)$ and $\widehat V$ equals the field $\widehat K(\hat \eta _{c}, \hat \eta _{b})$. Also, it follows from Kummer theory that $V$ is a symbol $K$-algebra, in the sense, e.g. of \cite{PS}, if and only if $\varepsilon \in K$. \end{rema} \par
\noindent A detailed and systematic study of algebras $W \in d(K)$, such that $v(W) = v(K)$ and $\widehat W/\widehat K$ is a purely inseparable extension would surely be of interest. This, however, goes beyond the scope of the present paper. \par
\section{\bf Proof of Theorem \ref{theo2.1}} \par
We begin this section with a lemma which completes our preparation for the proof of Lemma \ref{lemm2.2} in case char$(K) = 0$, $\widehat K \neq \widehat K ^{p}$ and $v(p) \in pv(K)$. Stating the lemma, we note that the imposed restriction on $v(p)$ ensures the existence of an element $\pi \in K$ satisfying the conditions $v(\pi ) \notin pv(K)$ and $v(p) < pv(p)/(p - 1) - v(p)/p \le v(\pi ) < pv(p)/(p - 1)$. \par
\begin{lemm} \label{lemm6.1} Let $(K, v)$ be an {\rm HDV}-field of mixed characteristic $(0, p)$ with $\widehat K$ infinite and $v(p) \in pv(K)$, and let $\mathbb{F}$ be the prime subfield of $\widehat K$. Fix an integer $\mu > 0$ and elements $\pi \in K$, $\alpha _{1}, \dots , \alpha _{\mu } \in O _{v}(K) ^{\ast }$, such that $v(\pi ) \notin pv(K)$, $v(p) < v(\pi ) < pv(p)/(p - 1)$, and the system $\hat \alpha _{1}, \dots , \hat \alpha _{\mu }$ is linearly independent over $\mathbb{F}$. Put $\lambda _{j} = 1 + \pi \alpha _{j} ^{p^{\mu }}$, $j = 1, \dots , \mu $, and for any $j$, let $L _{j} = K(\lambda _{j} ^{\prime })$, where $\lambda _{j} ^{\prime } \in K _{\rm sep}$ and $\lambda _{j} ^{\prime p} = \lambda _{j}$. Then the field $M = L _{1} \dots L _{\mu }$ is a {\rm TR}-extension of $K$ of degree $p ^{\mu }$. Moreover, if $K$ contains a primitive $p$-th root of unity, then $M/K$ is Galois with $\mathcal{G}(M/K)$ abelian of period $p$. \end{lemm} \par
\begin{proof} We first show that one may consider only the special case where $K$ contains a primitive $p$-th root of unity. Let $\varepsilon $ be such a root in $K _{\rm sep}$. Then $$[M(\varepsilon )\colon K] = [M(\varepsilon )\colon M][M\colon K] = [M(\varepsilon )\colon K(\varepsilon )][K(\varepsilon )\colon K].$$ Since, by Galois theory, $[M(\varepsilon )\colon M] \mid [K(\varepsilon )\colon K]$, we have $[M(\varepsilon )\colon K(\varepsilon )] \mid [M\colon K].$ In addition, $[K(\varepsilon )\colon K] \mid p - 1$, by Lemma \ref{lemm3.8}~(c), which implies $p \nmid e(K(\varepsilon )/K)$ (Lemma \ref{lemm3.2}~(b)), proving that $v(\pi ) \notin pv(K(\varepsilon ))$. Moreover, Lemmas \ref{lemm3.2} (b) and \ref{lemm3.8} imply $\pi $ and $\alpha _{1}, \dots , \alpha _{\mu }$ satisfy the conditions of Lemma \ref{lemm6.1} with respect to $(K(\varepsilon ), v)$. Also, it follows from the definition of $M$ that $[M\colon K] \le p ^{\mu }$. As $p \nmid e(M(\varepsilon )/M)$, these observations prove that if $M(\varepsilon )/K(\varepsilon )$ is a TR-extension of degree $p ^{\mu }$, then so is $M/K$. This leads to the desired reduction. \par Henceforth, we assume that $\varepsilon \in K$. Then the concluding assertion of Lemma \ref{lemm6.1} is implied by Kummer theory and the definition of $M$, so it remains to be seen that $M/K$ is TR (of degree $p ^{\mu }$). Put $\kappa = v(p)/(p - 1)$ and $\gamma = p\kappa - v(\pi )$. It follows from the conditions on $\pi $ that $0 < \gamma < \kappa $ and $\gamma \notin pv(K)$. The rest of our proof relies on the fact that, by Lemma \ref{lemm4.4} (a), $L _{1}/K$ is TR and $[L _{1}\colon K] = p$, which means that $M/K$ is TR, provided so is $M/L_{1}$. 
Using a standard inductive argument, one may assume for the rest of the proof that $\mu \ge 2$ and, when $\mu $ is replaced by $\mu - 1$, the assertion of Lemma \ref{lemm6.1} holds, for any HDV-field $(K ^{\prime }, v ^{\prime })$ of mixed characteristic $(0, p)$ with $\widehat K ^{\prime }$ infinite and $v ^{\prime }(p) \in pv ^{\prime }(K ^{\prime })$. Then the assertion that $M/L _{1}$ is TR of degree $p ^{\mu -1}$ can be deduced from the existence of elements $\pi _{1}$ and $\lambda _{1,j} \in L _{1} ^{\ast }$, $\alpha _{1,j} \in O _{v}(L _{1}) ^{\ast }$, $j = 2, \dots , \mu $, such that: \par
\noindent (6.1) $\hat \alpha _{1,2}, \dots , \hat \alpha _{1,\mu }$ are linearly independent over $\mathbb{F}$; $v(\pi _{1}) = p\kappa - (\gamma /p)$ (whence, $v(\pi _{1}) \notin pv(L _{1})$); $\lambda _{1,j} = 1 + \pi _{1}\alpha _{1,j} ^{p ^{\mu -1}}$ and \par\vskip0.04truecm\noindent $\lambda _{1,j}L _{1} ^{\ast p} = \lambda _{j}L _{1} ^{\ast p}$, $j = 2, \dots , \mu $. \par
\noindent Since the elements $\hat \alpha _{j}\hat \alpha _{1} ^{-1}$, $j = 1, \dots , \mu $, are linearly independent over $\mathbb{F}$, it suffices to prove the existence of elements satisfying the conditions of (6.1) only in the special case where $\alpha _{1} = 1$ (considering $\pi \alpha _{1} ^{p ^{\mu }}$ and $\alpha _{2}\alpha _{1} ^{-1}, \dots , \alpha _{\mu }\alpha _{1} ^{-1}$ \par\vskip0.04truecm\noindent instead of $\pi $ and $\alpha _{2}, \dots , \alpha _{\mu }$, respectively). Putting $\eta _{1} = \lambda _{1} ^{\prime } - 1$, we show that, in this case, $\pi _{1}$ and $\alpha _{1,j}, \lambda _{1,j}$, $j = 2, \dots , \mu $, can be chosen as follows: \par
\noindent (6.2) $\pi _{1} = -p\eta _{1}$, $\alpha _{1,j} = \alpha _{j} - \alpha _{j} ^{p}$, and $\lambda _{1,j} = 1 - p\eta _{j}$, where $\eta _{j} = \eta _{1}\alpha _{1,j} ^{p ^{\mu -1}}$. \par
\noindent In the rest of the proof, {\it we use the relation $\approx $ introduced on page \pageref{approx}}. As \par\noindent $(1 + \eta _{1}) ^{p} = 1 + \pi $ (and $p \ge 2$), Lemma \ref{lemm4.1} (a) shows that $$v(\eta _{1}) = v(\pi )/p > v(p)/p, \ {\rm so} \ v(p\eta _{1} ^{2}) > (p + 2)v(p)/p \ge p\kappa ;$$ hence, by the former conclusion of Lemma \ref{lemm4.1}, $\pi \approx \eta _{1} ^{p} + p\eta _{1}$. At the same time, the equality $\lambda _{j} = 1 + \pi \alpha _{j} ^{p ^{\mu }}$ implies $\lambda _{j} ^{-1} \approx 1 - \pi \alpha _{j} ^{p ^{\mu }}$. Likewise, from $\lambda _{1,j} = 1 - p\eta _{j}$, one obtains that $\lambda _{1,j} ^{-1} \approx 1 + p\eta _{j}$. Let $\Omega _{j} = 1 + \eta _{1}\alpha _{j} ^{p ^{\mu -1}}$. Then $$\Omega _{j} ^{p} \approx 1 + \eta _{1} ^{p}\alpha _{j} ^{p ^{\mu }} + p\eta _{1}\alpha _{j} ^{p ^{\mu -1}} = [1 + (\eta _{1} ^{p} + p\eta _{1})\alpha _{j} ^{p ^{\mu }}] + [p\eta _{1}(\alpha _{j} ^{p ^{\mu -1}} - \alpha _{j} ^{p ^{\mu }})]$$ $$\approx \lambda _{j} + [p\eta _{1}(\alpha _{j} - \alpha _{j} ^{p}) ^{p ^{\mu -1}}] = \lambda _{j} + p\eta _{j} = \lambda _{j}(1 + p\eta _{j}\lambda _{j} ^{-1}) \approx \lambda _{j}(1 + p\eta _{j}) \approx \lambda _{j}\lambda _{1,j} ^{-1}.$$ Hence, by Lemma \ref{lemm4.3} (c), $\lambda _{j}\lambda _{1,j} ^{-1} \in L _{1} ^{\ast p}$. \par\vskip0.14truecm We are now in a position to prove Lemma \ref{lemm6.1}. As already shown, $$v(p) < v(\pi _{1}) = v(p\eta _{1}) = v(p) + v(\eta _{1}) = p\kappa - (\gamma /p)$$ and $pv(L _{1}) = v(K)$, which implies $v(\pi _{1}) \notin pv(L _{1})$. 
Observing that $\alpha _{1} = 1$, \par\vskip0.11truecm\noindent the field $\mathbb{F}$ equals the set $\{\hat y \in \widehat K\colon \hat y ^{p} = \hat y\}$, ${\rm and} \ \alpha _{1,j} = \alpha _{j} - \alpha _{j} ^{p}$, $j = 2, \dots , \mu $, \par\vskip0.08truecm\noindent are elements of $O _{v}(L _{1}) ^{\ast }$, such that $\hat \alpha _{1}, \dots , \hat \alpha _{\mu }$ are linearly independent (over $\mathbb{F}$), one concludes that $\hat \alpha _{1,2}, \dots , \hat \alpha _{1,\mu }$ are linearly independent as well. Thus the field $M$ and the elements $\pi _{1} = -p\eta _{1}$, and $\alpha _{1,j}, \lambda _{1,j}$, $j = 2, \dots , \mu $, defined in (6.2) satisfy the conditions of Lemma \ref{lemm6.1} (over $L _{1}$), and by the inductive hypothesis, $M/L _{1}$ is a TR-extension of degree $p ^{\mu -1}$, so Lemma \ref{lemm6.1} is proved. \end{proof} \par
We can now take the final step towards the proof of Lemma \ref{lemm2.2} (and Theorem \ref{theo2.1}) in general. In view of Lemma \ref{lemm3.6} and \cite{Ch4}, Lemma~4.2, one may consider only the case of mixed characteristic $(0, p)$. We also assume that $v(p) \in pv(K)$ and $\widehat K \neq \widehat K ^{p}$, which is allowed by Lemma \ref{lemm5.2}. As $v(K)$ is cyclic, the condition on $v(p)$ ensures that there is $\xi \in K$ with $0 < v(\xi ) \le v(p)/p$ and $v(\xi ) \notin pv(K)$. Since $\widehat K$ is infinite, there are $\alpha _{\nu } \in O _{v}(K) ^{\ast }$, $\nu \in \mathbb{N}$, such that the system $\hat \alpha _{\nu } \in \widehat K$, $\nu \in \mathbb{N}$, is linearly independent over the prime subfield of $\widehat K$. Take a primitive $p$-th root of unity $\varepsilon \in K _{\rm sep}$, a generator $\varphi $ of $\mathcal{G}(K(\varepsilon )/K)$, and $s \in \mathbb{N}$ so that $\varphi (\varepsilon ) = \varepsilon ^{s}$. Fix any $\mu \in \mathbb{N}$, put $\lambda _{j} = 1 + p(1 - \varepsilon )\xi ^{-1}\alpha _{j} ^{p ^{\mu }}$, for $j = 1, \dots , \mu $, and denote by $M _{\mu } ^{\prime }$ the extension of $K(\varepsilon )$ generated by the set $\{\lambda _{j} ^{\prime }\colon j = 1, \dots , \mu \}$, where $\lambda _{j} ^{\prime } \in K _{\rm sep}$ and $\lambda _{j} ^{\prime p} = \lambda _{j}$, for any index $j$. It follows from Lemma \ref{lemm6.1} that $M _{\mu } ^{\prime }/K(\varepsilon )$ is TR and Galois of degree $p ^{\mu }$ with $\mathcal{G}(M _{\mu } ^{\prime }/K(\varepsilon ))$ abelian of period $p$. Furthermore, Lemma \ref{lemm4.8} and the conditions on $\xi $ and $\alpha _{1}, \dots , \alpha _{\mu }$ show that \par\vskip0.04truecm\noindent $\varphi (\lambda _{j})\lambda _{j} ^{-s} \in K(\varepsilon ) ^{\ast p}$, $j = 1, \dots , \mu $. Therefore, Lemmas \ref{lemm4.5} and \ref{lemm4.6}~(a) yield \par\vskip0.04truecm\noindent
$M _{\mu } ^{\prime } = M _{\mu }(\varepsilon )$, for some Galois extension $M _{\mu }$ of $K$ in $K(p)$, such that \par\vskip0.04truecm\noindent $\mathcal{G}(M _{\mu }/K) \cong \mathcal{G}(M _{\mu } ^{\prime }/K(\varepsilon ))$; hence, $[M _{\mu }\colon K] = p ^{\mu }$ and $[M _{\mu } ^{\prime }\colon M _{\mu }] = [K(\varepsilon )\colon K]$. As $p \nmid [K(\varepsilon )\colon K]$ and $M _{\mu } ^{\prime }/K(\varepsilon )$ is TR, it is now easy to see that $M _{\mu }/K$ is also TR. Because of the arbitrary choice of $\mu $, this proves Lemma \ref{lemm2.2}, Theorem \ref{theo2.1} (b) and the right-to-left implication in Theorem \ref{theo2.1} (a). Finally, by Fact \ref{fact3.5}, the converse implication follows from \cite{PS}, Corollary~2.5, so Theorem \ref{theo2.1} is proved. \par
\begin{rema} \label{rema6.2} It should be pointed out that in case $(K, v)$ is an HDV-field containing a primitive $p$-th root of unity $\varepsilon $, the right-to-left implication in Theorem \ref{theo2.1} (a) becomes obvious as a result of the proof of the lower bound for abrd$_{p}(K)$ in \cite{PS}, Lemma~2.6. The conditions of the cited lemma do not require that $\varepsilon \in K$. However, the assumption that $\varepsilon \in K$ is necessary to define over $K$ tensor products of symbol algebras like those used in the proof of \cite{PS}, Lemma~2.6. This allows us to show easily that if $\varepsilon \in K$, then the lower bound in the cited lemma is also such a bound for Brd$_{p}(K)$, which proves the right-to-left implication in Theorem \ref{theo2.1} (a). \end{rema} \par
To end the present section, we note that Theorem~2 of \cite{PS} and the conclusion of Theorem \ref{theo2.1} {\rm (b)} in case char$(K) = 0$ leave open the question of whether abrd$_{p}(E) > 2{\rm Brd}_{p}(E) + 1$, for any field $E$ with a primitive $p$-th root of unity and Brd$_{p}(E) < \infty $. Moreover, it seems to be unknown whether abrd$_{p}(E) = \infty $, for some field $E$ of this kind. \par
\section{\bf Open problems and further results} \par
We begin this section with a proof of Conjecture \ref{conj1.1} in case char$(K) = p$. \par
\begin{prop} \label{prop7.1} If $(K, v)$ is an {\rm HDV}-field with char$(K) = p > 0$, then: \par {\rm (a)} Brd$_{p}(K) = \infty $ if $[\widehat K\colon \widehat K ^{p}] = \infty $; when $(K, v)$ is complete, the equality $[\widehat K\colon \widehat K ^{p}] = \infty $ holds if and only if $[K\colon K ^{p}] = \infty $; \par {\rm (b)} $n \le {\rm Brd}_{p}(K) \le n + 1$, provided that $n < \infty $ and $[\widehat K\colon \widehat K ^{p}] = p ^{n}$; \par {\rm (c)} If $(K, v)$ is complete, $[\widehat K\colon \widehat K ^{p}] = p ^{n}$ and $K ^{\prime }/K$ is a finite field extension, then $[K ^{\prime }\colon K ^{\prime p}] = p ^{n+1}$. \end{prop} \par
\noindent \begin{proof} The former part of Proposition \ref{prop7.1} (a) and the lower bound on Brd$_{p}(K)$ in Proposition \ref{prop7.1} (b) are implied by \cite{Ch4}, Lemma~4.2 (b). Proposition \ref{prop7.1} (c) and the latter part of Proposition \ref{prop7.1} (a) follow from Fact \ref{fact3.5} (b), Lemma \ref{lemm3.2}, and the equality $[L\colon L ^{p}] = [K\colon K ^{p}]$, for every finite extension $L/K$ (cf. \cite{BH}, Lemma~2.12). It remains to prove the upper bound in Proposition \ref{prop7.1} (b). Let $\overline K$ be an algebraic closure of $K$. In view of \cite{Ch2}, Lemma~4.1, it suffices to show that, for any finite extension $K ^{\prime }$ of $K$ in $\overline K$, we have deg$(D ^{\prime }) \mid p ^{n+1}$ whenever $D ^{\prime } \in d(K ^{\prime })$ and exp$(D ^{\prime }) = p$. In addition, Fact \ref{fact3.5} (a) allows us to consider only the case of $K = K _{v}$. Let $K _{1} ^{\prime } = \{\lambda \in \overline K\colon \lambda ^{p} \in K ^{\prime }\}$. Then $K _{1} ^{\prime } \in I(\overline K/K ^{\prime })$, $K _{1} ^{\prime p} = K ^{\prime }$, and by Proposition \ref{prop7.1} (c), $[K _{1} ^{\prime }\colon K ^{\prime }] = p ^{n+1}$. Since, by Albert's theorem, $_{p}{\rm Br}(K ^{\prime })$ is a subgroup of Br$(K _{1} ^{\prime }/K ^{\prime })$ (cf. \cite{A2}, Ch. VII, Theorem~28), this yields deg$(D ^{\prime }) \mid p ^{n+1}$ (see \cite{P}, Sect. 13.4), so Proposition \ref{prop7.1} is proved. \end{proof} \par
Our next result proves Conjecture \ref{conj1.1} in the special case where $\widehat K$ is an $n$-dimensional local field of characteristic $p$ with a finite $n$-th residue field. \par
\begin{prop} \label{prop7.2} Assume that $(K, v)$ is an {\rm HDV}-field, such that $\widehat K$ is an $n$-dimensional local field with {\rm char}$(\widehat K) = p$. Then {\rm Brd}$_{p}(K) \ge n$. Moreover, if the $n$-th residue field $\widehat K _{0}$ of $\widehat K$ is finite, then {\rm abrd}$_{p}(K) \le n + 1$. \end{prop} \par
\begin{proof} As $[\widehat K\colon \widehat K ^{p}] = p ^{n}$, Theorem \ref{theo2.1} (b) yields Brd$_{p}(K) \ge n$, so it suffices to prove that if $\widehat K _{0}$ is finite, then abrd$_{p}(K) \le n + 1$. In view of Proposition \ref{prop7.1} (b) and Fact 3.5 (a), one may consider only the case of char$(K) = 0$ and $K = K _{v}$. Then $K$ is an $(n + 1)$-dimensional local field with last residue field $\widehat K _{0}$, whence, by \cite{Ch6}, Proposition~4.4, abrd$_{p}(K) \le n + 1$, as required. \end{proof} \par
It would be of interest to know whether an HDV-field $(K, v)$ with $\widehat K _{\rm sep} = \widehat K$ and $[\widehat K\colon \widehat K ^{p}] = p ^{n}$, for some $n \in \mathbb{N}$, satisfies Brd$_{p}(K) = n$ (see page \pageref{stconj}). This is the same as to find whether Brd$_{p}(K) = n$, provided that $p \nmid [\widehat K^{\prime }\colon \widehat K]$ when $\widehat K ^{\prime }$ runs across Fe$(\widehat K)$ (cf. \cite{P}, Sects. 13.4 and 14.4). The condition on $\widehat K$ means that cd$_{p}(\mathcal{G}_{\widehat K}) = 0$. If $\widehat K _{\rm sep} \neq \widehat K$, then it is possible that Brd$_{p}(K) \ge n + 1$; such is the case where $\widehat K/\mathbb{F} _{p}$ is a finitely-generated extension of transcendence degree $n$ (see the proof of \cite{Ch4}, Proposition~6.3, or \cite{BH}, Theorem~5.2). The same inequality for Brd$_{p}(K)$ is obtained by the method of proving \cite{Ch4}, Proposition~6.3, when char$(\widehat K) = p$ and $\widehat K$ is a finitely-generated extension of transcendence degree $n > 0$ over a perfect field $\widehat K _{0}$ with cd$_{p}(\mathcal{G}_{\widehat K _{0}}) \neq 0$ (see \cite{S1}, Ch. I, 3.3). Since cd$_{p}(\mathcal{G}_{\widehat K _{0}}) \le 1$ (cf. \cite{S1}, Ch. II, 2.2), Theorem \ref{theo2.1} (b) and the preceding observations attract interest in the following special case of Conjecture \ref{conj1.1}: \par
\begin{conj} \label{conj7.3} If $(K, v)$ is an HDV-field with char$(\widehat K) = p > 0$ and $\widehat K$ is a finitely-generated extension of transcendence degree $n > 0$ over its maximal perfect subfield $\widehat K _{0}$, then {\rm Brd}$_{p}(K) = n + {\rm cd}_{p}(\mathcal{G}_{\widehat K _{0}})$. \end{conj} \par
Theorem \ref{theo2.1} (b) and the upper bounds in \cite{PS}, Theorem~2, \cite{BH}, Corollary~4.7 and Theorem~4.16, and Proposition \ref{prop7.1} (b) of the present paper prove Conjecture \ref{conj1.1}, for $n = 1, 2, 3$. Note also that Conjecture \ref{conj7.3} holds, for $n = 1, 2$. In view of the remarks preceding the statement of Conjecture \ref{conj7.3}, this can be obtained by using Theorem \ref{theo2.1} (b), \cite{BH}, Theorem~4.16, and Case IV of the proof of \cite{BH}, Theorem~5.3. As to Conjecture \ref{conj7.3}, it need not be true if $(K, v)$ is merely HDV with char$(\widehat K) = p$ and $[\widehat K\colon \widehat K ^{p}] < \infty $. One may take as a counter-example the iterated formal power series field $K = \widehat K _{0}((X _{1})) \dots ((X _{n}))((Y))$ in a system of variables $X _{1}, \dots , X _{n}, Y$ over a quasifinite field $\widehat K _{0}$ with char$(\widehat K _{0}) = p$. Then Brd$_{p}(K) = n$, by \cite{Ch6}, Proposition~3.5 (implied by \cite{Ch2}, Lemma~4.3~(b), \cite{Ch4}, Lemma~4.2 and \cite{AJ}, Theorem~3.3), whereas the formula in Conjecture \ref{conj7.3} requires Brd$_{p}(K) = n + 1$ (the standard discrete valuation on $K$ is Henselian with $\widehat K = \widehat K _{0}((X _{1})) \dots ((X _{n}))$, whence, $[\widehat K\colon \widehat K ^{p}] = p ^{n}$ and cd$_{p}(\widehat K _{0}) = 1$). This example as well as Proposition \ref{prop7.2} draw one's attention to the following problem: \par
\begin{prob} \label{prob7.4} Let $(K, v)$ be an {\rm HDV}-field with char$(\widehat K) = p > 0$. Suppose that $\widehat K$ is an $n$-dimensional local field, for some $n \in \mathbb{N}$, with an $n$-th residue field $\widehat K _{0}$. Find whether {\rm Brd}$_{p}(K) = n$. \end{prob} \par
The conditions of Problem \ref{prob7.4} show that $K _{v}$ is an $(n + 1)$-dimensional local field with last residue field $\widehat K _{0}$ (and $\widehat K$ is isomorphic to an iterated formal power series field in $n$ variables over the quasifinite field $\widehat K _{0}$, see \cite{F1}, 2.5.2). Therefore, in case char$(K) = p$, Fact \ref{fact3.5} (a) and \cite{Ch6}, Proposition~3.5, give an affirmative answer to Problem \ref{prob7.4}. When $n = 1$, such an answer is contained in the following result of \cite{Ch7}, obtained as a final step towards a full characterization of stable HDV-fields by properties of their residue fields: \par
\begin{prop} \label{prop7.5} Let $(K, v)$ be an {\rm HDV}-field with {\rm char}$(\widehat K) = p > 0$. Then {\rm Brd}$_{p}(K) \le 1$ if and only if the following condition is fulfilled: \par $[\widehat K\colon \widehat K ^{p}] \le p$, and in case {\rm Brd}$_{p}(\widehat K) \neq 0$, every degree $p$ extension of $\widehat K$ in $\widehat K(p)$ is embeddable as a $\widehat K$-subalgebra in each $D _{p} \in d(\widehat K)$ of degree $p$. \par\noindent The equality {\rm Brd}$_{p}(K) = 0$ holds if and only if $\widehat K$ is perfect and $\widehat K(p) = \widehat K$. \end{prop} \par
\begin{rema} \label{rema7.6} The inequalities $n \le {\rm Brd}_{p}(K) \le n + 1$ hold, for any HDV-field $(K, v)$, such that $\widehat K$ is an $n$-dimensional local field with a finite $n$-th residue field and with char$(\widehat K _{1}) = p$, $\widehat K _{1}$ being the $(n - 1)$-th residue field of $\widehat K$. Proposition \ref{prop7.2} reduces the proofs to the case of char$(\widehat K) = 0$ (and $n \ge 3$, in view of Proposition \ref{prop7.5}). Then the stated inequalities are contained in \cite{Ch6}, Proposition~4.4. \end{rema} \par
Note finally that the interest in the question of whether Brd$_{p}(K) = n$, if $(K, v)$ is an HDV-field, char$(\widehat K) = p > 0$, $\widehat K _{\rm sep} = \widehat K$ and $[\widehat K\colon \widehat K ^{p}] = p ^{n}$, for some $n \in \mathbb{N}$, is motivated not only by Theorem \ref{theo2.1} (b) and \cite{BH}, Theorem~4.16, but also by the following well-known conjecture (see, e.g., \cite{ABGV}, Sect. 4): \par
\begin{conj} \label{conj7.7} Assume that $F$ is a field of type $C _{\nu }$, i.e. each homogeneous polynomial $f(X _{1}, \dots , X _{m}) \in F[X _{1}, \dots , X _{m}]$ of degree $d$ with $0 < d ^{\nu } < m$, has a nontrivial zero over $F$. Then abrd$_{p}(F) < \nu $. \end{conj} \label{stconj} \par
\noindent To show how Conjecture \ref{conj7.7} is related to the noted question, fix an HDV-field $(E, \omega )$ so that char$(\widehat E) = p > 0$, $\widehat E$ be algebraically closed, and when char$(E) = p$, $E = E _{\omega }$. Consider a finitely-generated extension $F/E$ of transcendence degree $n$. By Lang's theorem \cite{L1}, $E$ is of type $C _{1}$, whence, by the Lang-Nagata-Tsen theorem \cite{Na}, $F$ is of type $C _{n+1}$. The assumptions on $F$ and $E$ also imply the existence of a discrete valuation $\omega ^{\prime }$ of $F$ extending $\omega $, such that $\widehat F/\widehat E$ is a finitely-generated extension of transcendence degree $n$ (when $F/E$ is purely transcendental, one may take as $\omega ^{\prime }$ the restricted Gauss prolongation of $\omega $ on $F$). Thus it follows that $[\widehat F ^{\prime }\colon \widehat F ^{\prime p}] = p ^{n}$, for every finite extension $F ^{\prime }/F$. This enables one to deduce (e.g., from \cite{Ch4}, Lemmas~3.1 and 4.3) that if $(L, w)$ is a Henselization of $(F, \omega ^{\prime })$, then abrd$_{p}(L) \le {\rm Brd}_{p}(F)$. Hence, Conjecture \ref{conj7.7} and the $C _{n+1}$ type of $F$ require that abrd$_{p}(L) \le n$. On the other hand, $(L, w)/(F, \omega ^{\prime })$ is immediate, so $[\widehat L\colon \widehat L ^{p}] = p ^{n}$, and by Theorem \ref{theo2.1} (b), Brd$_{p}(L) \ge n$. Thus the assertion that Brd$_{p}(L) = n$ can be viewed as a special case of Conjecture \ref{conj7.7}.
\vskip0.38truecm \emph{Acknowledgment.} I would like to thank the referee for the careful reading of an earlier version of this paper, and for a number of suggestions used for improving the organization (and other aspects) of its presentation. The paper presents research partially supported by Grant KP-06 N 32/1 of 07.12.2019 ``Groups and Rings -- Theory and Applications'' of the Bulgarian National Science Fund. \vskip0.1truecm
\end{document} |
\begin{document}
\title{Extending small arcs to large arcs}
\begin{abstract} An arc is a set of vectors of the $k$-dimensional vector space over the finite field with $q$ elements ${\mathbb F}_q$, in which every subset of size $k$ is a basis of the space, i.e. every $k$-subset is a set of linearly independent vectors. Given an arc $G$ in a space of odd characteristic, we prove that there is an upper bound on the largest arc containing $G$. The bound is not an explicit bound but is obtained by computing properties of a matrix constructed from $G$. In some cases we can also determine the largest arc containing $G$, or at least determine the hyperplanes which contain exactly $k-2$ vectors of the large arc. The theorems contained in this article may provide new tools in the computational classification and construction of large arcs. The article also simplifies some of the proofs of the results found in \cite{Ball2012}, \cite[Chapter 7]{Ball2015}, \cite{BdB2012} and \cite{Chowdhury2015} and unifies the approach taken in those articles with that in \cite{BBT1990} and \cite{Segre1967}. \end{abstract}
\section{Introduction}
Let $\mathrm{V}_k({\mathbb F}_q)$ denote the $k$-dimensional vector space over ${\mathbb F}_q$, the finite field with $q$ elements.
An {\em arc} of $\mathrm{V}_k({\mathbb F}_q)$ is a set $S$ of vectors of $\mathrm{V}_k({\mathbb F}_q)$ in which every subset of size $k$ is a basis of $\mathrm{V}_k({\mathbb F}_q)$. Most authors define an arc, equivalently, as a set of points in the corresponding projective space. However, for the techniques that we wish to develop here it is more convenient to use the vector space. The set of columns of a generator matrix of a $k$-dimensional linear maximum distance separable (MDS) code over ${\mathbb F}_q$ is an arc of $\mathrm{V}_k({\mathbb F}_q)$ and vice-versa, so arcs and linear MDS codes are equivalent objects. As in coding theory, we define the {\em weight} of a vector to be the number of non-zero coordinates that it has.
We will assume throughout that $k\geqslant 3$.
Let $\det(v_1,\ldots,v_k)$ denote the determinant of the matrix whose $i$-th row is $v_i$, a vector of ${\mathrm V}_k({\mathbb F}_q)$. If $C=\{p_1,\ldots,p_{k-1} \}$ is an ordered set of $k-1$ vectors then we write $$ \det(u,C)=\det(u,p_1,\ldots,p_{k-1}), $$ where we evaluate the determinant with respect to a fixed canonical basis.
Given an arc $G$ of $\mathrm{V}_k({\mathbb F}_q)$ and a non-negative integer $n \leqslant |G|-k$, we order the elements of $G$ arbitrarily and construct a matrix $\mathrm{M}_n$, in the following way. For each subset $E$ of $G$ of size $|G|-n$ and subset $A$ of $E$ of size $k-2$, we get a column of the matrix $\mathrm{M}_{ n}$, whose rows are indexed by subsets $C$ of $G$ of size $k-1$, where a $(C,(A,E))$ entry is $$ \prod_{u \in G \setminus E} \det(u,C), $$ if and only if $A \subset C$, and zero otherwise.
We shall prove the following theorem.
\begin{theorem} \label{wone}
If there is a vector of weight one in the column space of $\mathrm{M}_{ n}$ then $G$ cannot be extended to an arc of size $q+2k+n-1-|G|$. \end{theorem}
If $q$ is odd then Theorem~\ref{wone} always provides at least some upper bound on the size of an arc $S$ containing $G$. To see this, consider $M_{|G|-k}$ and the columns for a fixed subset $E$ of $G$ of size $k$. Restricting to rows which are not the all-zero row vector, one obtains a copy of an inclusion matrix whose rows are indexed by the $(k-1)$-subsets of $E$ and whose columns are indexed by $(k-2)$-subsets of $E$. Since $E$ is fixed, the non-zero entries in any row are the same. Therefore, to calculate the rank we can divide out by this non-zero element and assume that each entry of the matrix is either $0$ or $1$. It's straightforward to verify that this matrix has rank $k$ if $q$ is odd (see \cite{Frankl1990} or \cite{Wilson1990} for a general formula for $p$-ranks of inclusion matrices) and hence full row rank, which implies there is a vector of weight one in the column space of $\mathrm{M}_{|G|-k}$.
A weaker version of Theorem~\ref{wone} appears in \cite{Chowdhury2015}, where the condition on $\mathrm{M}_n$ is that it should have full row rank, although the stronger Theorem~\ref{wone} is also observed as a remark. There are many examples where $M_n$ does not have full row rank but where it does have a vector of weight one in its column space. For example, the arc of size $7$ $$ G=\{ [ \tau^0, 0, 0 ], [ 0, \tau^0, 0 ],
[ 0, 0, \tau^0 ], [ \tau^0, \tau^5, \tau^0 ],
[ \tau^0, \tau^8, \tau^9 ], [ \tau^0, \tau, \tau^5 ],
[ \tau^0, \tau^3, \tau ] \}, $$ where $\tau$ is a primitive element of ${\mathbb F}_{11}$. The matrix $\mathrm{M}_{ 2}$ does not have full row rank (it has rank 20, whereas full row-rank would be 21), but it does have a vector of weight one in its column space. Therefore, Theorem~\ref{wone} implies that it cannot be extended to an arc of size $11$. It can be extended to an arc of size $10$.
We say that $(G,n)$ has {\em Property W} if for each subset $A$ of $G$ of size $k-2$, there exist subsets $C,C_1,\ldots,C_{|G|-n-k+1}$ of $G$ of size $k-1$ containing $A$, such that the column space of $\mathrm{M}_{ n}$ contains a vector (of weight two) with non-zero coordinates at only $C$ and $C_i$ for each $i=1,\ldots,|G|-n-k+1$.
We define a {\em co-secant} to an arc $S$ of $\mathrm{V}_k({\mathbb F}_q)$ to be a hyperplane containing precisely $k-2$ vectors of $S$. An arc is {\em complete} if it is not contained in a larger arc, and an arc is {\em uniquely completable} if there is only one complete arc containing it.
We shall also prove the following theorem.
\begin{theorem} \label{wtwo}
If $(G,n)$ has Property $W$ and $G$ can be extended to an arc $S$ of size $q+2k+n-1-|G|$ then the co-secants to $S$ containing only points of $G$ are determined by $G$. \end{theorem}
Theorem~\ref{wtwo} has the following corollary.
\begin{corollary} \label{ctwo}
If $\mathrm{M}_{ n}$ has rank one less than full row rank and $G$ can be extended to an arc $S$ of size $q+2k+n-1-|G|$ then the co-secants to $S$ containing only points of $G$ are determined by $G$. \end{corollary}
\begin{proof}
If $G$ can be extended to an arc $S$ of size $q+2k+n-1-|G|$ then by Theorem~\ref{wone} there are no vectors of weight one in the column space of $\mathrm{M}_{ n}$. Since the rank of $\mathrm{M}_{ n}$ is one less than full row rank, using column operations one can transform the matrix $\mathrm{M}_{ n}$, into a matrix that has the identity matrix (of size one less than the number of rows) in the top left hand corner. By taking one of these columns, or a linear combination of two of them, we have that every vector of weight two is in the column space of $\mathrm{M}_{ n}$, so Property $W$ holds. \end{proof}
There is an inclusion-reversing duality between the $r$-dimensional subspaces of ${\mathrm V}_k({\mathbb F}_q)$ and the $(k-r)$-dimensional subspaces. Under this duality the set of co-secants to an arc $S$ of size $q+k-1-t$ is a set of ${|S| \choose k-2}t$ vectors. In \cite{Segre1967} and \cite{BBT1990} respectively, Segre (the case $k$ is three) and Blokhuis, Bruen and Thas prove that this set of vectors is contained in an algebraic hypersurface $\phi_S$ of degree $t$ if $q$ is even and of degree $2t$ if $q$ is odd. Moreover, $\phi_S$ can be constructed from a sub-arc $G$ of $S$ of size $k+t-1$ if $q$ is even and $k+2t-1$ if $q$ is odd, if one knows all the co-secants to $S$ containing only points of $G$, see Section~\ref{appendix}. Therefore, Theorem~\ref{wtwo} can be strengthened to the following theorem.
\begin{theorem} \label{wtwo2}
If $(G,n)$ has Property $W$ and $G$ can be extended to an arc $S$ of size $q+2k+n-1-|G|$ then $\phi_S$ is determined by $G$, provided that $2n \geqslant |G|-k-1$ in the case that $q$ is odd. \end{theorem}
Theorem~\ref{wtwo2} has the following corollary, the proof of which is identical to the proof of Corollary~\ref{ctwo}.
\begin{corollary} \label{ctwo2}
If $\mathrm{M}_{ n}$ has rank one less than full row rank and $G$ can be extended to an arc $S$ of size $q+2k+n-1-|G|$ then $\phi_S$ is determined by $G$, provided that $2n \geqslant |G|-k-1$ in the case that $q$ is odd. \end{corollary}
We will prove in Section~\ref{appendix} that $G$ never satisfies the hypotheses of Theorem~\ref{wone}, Theorem~\ref{wtwo} and Theorem~\ref{wtwo2} when $q$ is even, see Theorem~\ref{forgetqeven}, apart from in the trivial case that $|G|=k+n$ in which case the hypotheses of Theorem~\ref{wtwo} and Theorem~\ref{wtwo2} are satisfied. In \cite{Segre1967} and \cite{BBT1990} respectively, Segre (the case $k$ is three) and Blokhuis, Bruen and Thas prove that if $q$ is odd and $G$ has size at least $k-1+(2q/3)$ or if $q$ is even and $G$ has size at least $k+(q/2)$ then $G$ is uniquely completable. Thus, Theorem~\ref{wtwo} and Theorem~\ref{wtwo2} are of interest only when $q$ is odd and $G$ is smaller than $k-1+(2q/3)$.
To illustrate the applicability of Theorem~\ref{wtwo} and Theorem~\ref{wtwo2} we first consider some examples. It is perhaps surprising that $G$ can be relatively small and still have Property $W$. Let $$ G=\{ [ \epsilon^0, 0 ,0 ], [ 0, \epsilon^0, 0 ], [ 0, 0, \epsilon^0 ], [ \epsilon^0, \epsilon^2, \epsilon^3 ], [ \epsilon^0, \epsilon^6, \epsilon^4 ], [ \epsilon^0, \epsilon^{9}, \epsilon^{9} ], [ \epsilon^0, \epsilon^4, \epsilon^6 ], [ \epsilon^0, \epsilon^{11}, \epsilon^5 ], [ \epsilon^0, \epsilon^0, \epsilon^8 ]\}, $$ where $\epsilon$ is a primitive element of ${\mathbb F}_{13}$. Then $(G,3)$ has Property $W$, so Theorem~\ref{wtwo} implies that if it can be extended to an arc $S$ of $\mathrm{V}_3({\mathbb F}_{13})$ of size $12$ (and it can) then $\phi_S$ is determined by $G$.
Let $$ G=\{
[ \epsilon^0, 0, 0], [ 0, \epsilon^0, 0 ],
[ 0, 0, \epsilon^0 ], [ \epsilon^0, \epsilon^{10}, \epsilon^2 ],
[ \epsilon^0, \epsilon^2, \epsilon^{11} ], [ \epsilon^0, \epsilon^9, \epsilon^4 ] \}, $$ where $\epsilon$ is a primitive element of ${\mathbb F}_{13}$. Then $(G,2)$ has Property $W$, so Theorem~\ref{wtwo} implies that if it can be extended to an arc $S$ of $\mathrm{V}_3({\mathbb F}_{13})$ of size $14$ (and it can) then $\phi_S$ is determined by $G$. In this case $\phi_S$ also determines $S$. Note that by Segre's theorem \cite{Segre1955a}, we know that $S$ is a conic. The small arc $G$ also completes to arcs of size $9$, $10$ and $12$, so it is not uniquely completable.
In \cite{Ball2012}, it is shown that if $k\leqslant p$, where $p$ is the prime such that $q$ is a power of $p$, then there are no arcs of size $q+2$. This follows from Theorem~\ref{wone} by considering the $p$-rank formula for inclusion matrices from \cite{Frankl1990} or \cite{Wilson1990}. If $k\leqslant p$ then for any arc $G$ of size $2k-3$ the matrix $\mathrm{M}_{ 0}$ has full row-rank and therefore a vector of weight one in its column space. In \cite{BdB2012}, it is shown that if $k\leqslant 2p-2$, where $q$ is a non-prime prime power, then there are no arcs of size $q+2$. Indeed it can be shown that, see \cite{Chowdhury2015}, for an arc $G$ of size $2k-2$ the matrix $\mathrm{M}_{ 1}$ has a vector of weight one in its column space, so Theorem~\ref{wone} implies that if $k\leqslant 2p-2$ then there are no arcs of size $q+2$. Computational evidence for small $k$ and small $p$ suggests that the following is true.
\begin{conjecture} \label{weakmds} If $k \leqslant p+n(p-2)$ and $G$ has size $2k-3+n$ then the matrix $\mathrm{M}_{ n}$ has a vector of weight one in its column space. \end{conjecture}
If Conjecture~\ref{weakmds} is true then Theorem~\ref{wone} would imply that there are no arcs of size $q+2$ for $k \leqslant (pq-2q+6p-10)/(2p-3)$. This would verify the MDS conjecture for these $k$. For more on the MDS conjecture, see for example \cite{Ball2012}, \cite{HS2001}, \cite{MS1977} or \cite{Vardy2006}. A version of Conjecture~\ref{weakmds} in which $\mathrm{M}_{ n}$ is conjectured to have full row-rank appears in \cite{Chowdhury2015}. Since there are examples of arcs $G$ of size $2k-3+n$ with $k>p+n(p-2)$ for which $\mathrm{M}_{ n}$ does not have vectors of weight one in its column space, this would appear to put a limit on these methods for verifying the MDS conjecture in its entirety. To verify the MDS conjecture one must show that there are no arcs of size $q+2$ for $4 \leqslant k \leqslant (q+2)/2$, so one would fall short.
Theorem~\ref{wone}, Theorem~\ref{wtwo} and Theorem~\ref{wtwo2} may be of some use in classifying or at least constructing large arcs computationally. See \cite{Coolsaet2015}, \cite{CS2011} and \cite{Keri2006} for recent computational results regarding arcs. To classify arcs of size $q+k-r$ one would need to classify arcs of size $k+r$. If one could classify arcs of size $k+r$ then one could quickly check for each arc to see if $\mathrm{M}_1$ has a vector of weight one in the column space, for each projectively distinct arc $G$. In the positive case, this would then rule out the possibility that $G$ can be extended to an arc of size $q+k-r$. In the negative case, one can then extend the arc to an arc $H$ of size $k+r+1$ and check to see if $\mathrm{M}_2$ (calculated using $H$) has a vector of weight one in the column space. In the positive case, this would then rule out the possibility that $H$ can be extended to an arc of size $q+k-r$. This should dramatically reduce the set of possible sub-arcs of arcs of size $q+k-r$. Those small arcs which cannot be ruled out as possibly extending to a large arc of course may well extend. In this case Theorem~\ref{wtwo} and Theorem~\ref{wtwo2} come into play. If $(G,1)$ or $(H,2)$ etc., has Property $W$ then Theorem~\ref{wtwo} and possibly Theorem~\ref{wtwo2} apply. Knowing $G$ and the co-secants to $S$ containing the points of $G$ may well be enough to determine $S$ and even if it doesn't, it will certainly drastically reduce the possible vectors which might extend $G$.
For example, consider the following arc of size $11$ of $\mathrm{V}_6({\mathbb F}_{81})$ $$ G=\{ [ \rho^0, 0, 0, 0, 0, 0 ],
[ 0, \rho^0, 0, 0, 0, 0 ],
[ 0, 0, \rho^0, 0, 0, 0 ],
[ 0, 0, 0, \rho^0, 0, 0 ],
[ 0, 0, 0, 0, \rho^0, 0 ],
$$
$$
[ 0, 0, 0, 0, 0, \rho^0 ],
[ \rho^0, \rho^0, \rho^0, \rho^0, \rho^0, \rho^0 ],
[ \rho^0, \rho^{58}, \rho^{41}, \rho^{14}, \rho^{54}, \rho^{48} ],
$$
$$
[ \rho^0, \rho^{25}, \rho^{55}, \rho^{43}, \rho^{74}, \rho^{58} ],
[ \rho^0, \rho, \rho^{66}, \rho^{22}, \rho^{42}, \rho^{65} ],
[ \rho^0, \rho^{76}, \rho^{44}, \rho^{21}, \rho^{43}, \rho^5 ] \}, $$ where $\rho$ is a primitive element of ${\mathbb F}_{81}$. The matrix $\mathrm{M}_1$ has rank $461$ and full row rank would be $462$. It does not have a vector of weight one in its column space, so Theorem~\ref{wone} does not apply. However, Corollary~\ref{ctwo} does apply. Suppose that $G$ extends to an arc $S$ of size $82$. From a vector which is a basis of the null space of the row space of $\mathrm{M}_1$ we can calculate $f_A$ (see the next section) for every subset $A$ of $G$ of size $4$. These polynomials of degree $4$ must all be fully reducible into linear factors. Moreover, for each linear form $\alpha$, which is a factor of $f_A$ for some $A$, the hyperplane $\ker \alpha$ contains only the points $A$ of $S$ and no other points of $S$. This severely restricts how one can extend $G$.
Even if the classification of arcs of size $k+r$ for a certain $q$ is infeasible computationally, one may be able to construct new examples of size $q+k-r$. By identifying the small arcs which can appear as sub-arcs of an arc of size $q+k-r$ one can apply Theorem~\ref{wtwo} and Theorem~\ref{wtwo2}, as explained in the previous paragraph.
Theorem~\ref{wone}, Conjecture~\ref{weakmds}, and their applications to computationally classifying and constructing large arcs are joint work with Ameera Chowdhury. The results in Section 2 and Section 3, which are simplifications of previous results from \cite{Ball2012}, \cite{Ball2015}, and \cite{BdB2012}, are also joint work with Ameera Chowdhury. She has written a separate exposition of these results in \cite{Chowdhury2015}.
\section{The functions $f_A$}
If $C=\{p_1,\ldots,p_{k-1} \}$ is an ordered set of $k-1$ vectors then we write $$ \det(u,C)=\det(u,p_1,\ldots,p_{k-1}), $$ and if $A=\{p_1,\ldots,p_{k-2} \}$ is an ordered set of $k-2$ vectors then we write $$ \det(u,v,A)=\det(u,v,p_1,\ldots,p_{k-2}), $$ where we evaluate the determinant with respect to a fixed canonical basis. This defines a bilinear form on $\mathrm{V}_k({\mathbb F}_q) \times \mathrm{V}_k({\mathbb F}_q)$ by $$ d_A(u,v)=\det (u,v,A). $$
Let $S$ be an arc of $\mathrm{V}_k({\mathbb F}_q)$. We order the elements of $S$ arbitrarily and maintain this order throughout, unless otherwise stated. Let $A$ and $B$ be subsets of the ordered set $S$. If we write $A,B$ in place of $A \cup B$ then this means order the elements of $A$ first and then order the elements of $B$.
Let $A$ be a subset of $S$ of size $k-2$.
\begin{lemma} \label{minusone} The bilinear form $d_A$ is alternating and in particular $d_{A}(u,v)=-d_{A}(v,u)$. \end{lemma}
\begin{proof} By the definition of $d_A$ we have $d_A(u,u)=\det(u,u,A)=0$ and $$ d_{A}(u,v)=\det(u,v,A)=-\det(v,u,A)=-d_{A}(v,u). $$ \end{proof}
Let $t=q+k-1-|S|$.
\begin{lemma} There are $t$ hyperplanes which contain the vectors of $A$ and no other vectors of $S$. \end{lemma}
\begin{proof}
There are $q+1$ hyperplanes containing the subspace spanned by the $k-2$ vectors of $A$. Since $S$ is an arc, a hyperplane can contain at most $k-1$ vectors of $S$. Therefore, there are $|S|-|A|$ of them which contain one more vector of $S$ and so $q+1-|S|+k-2$ of them which contain no more vectors of $S$. \end{proof}
Let $\alpha_1,\ldots,\alpha_t$ be pairwise linearly independent forms with the property that $\ker \alpha_i \cap S=A$. Define $$ f_A(x)=\prod_{i=1}^t \alpha_i(x), $$ a function from $\mathrm{V}_k({\mathbb F}_q)$ to ${\mathbb F}_q$.
\begin{lemma} \label{interp} If $ E$ is a subset of $S$ of size $t+k-1$ containing $A$ then $$ f_A(x)=\sum_{e \in E \setminus A} f_A(e) \prod_{u \in E \setminus (A\cup \{e\})} \frac{d_{A}(u,x)}{d_{A}(u,e)}. $$ \end{lemma}
\begin{proof} With respect to a basis $B$ whose last $k-2$ elements are $A$, $f_A$ is a homogeneous polynomial in two variables, and so is $$ \sum_{e \in E \setminus A} f_A(e) \prod_{u \in E \setminus (A\cup \{e\})} \frac{d_{A}(u,x)}{d_{A}(u,e)}. $$
These two polynomials are equal when evaluated at an element of $E \setminus A$. Two homogeneous polynomials of degree $t$ in two variables which are equal at $t+1$ linearly independent points are the same, since their difference is a homogeneous polynomial in two variables of degree at most $t$ and can be zero at at most $t$ linearly independent points. Note that it follows from the arc property that any two points of $E \setminus A$ are linearly independent, even after deleting the $k-2$ coordinates corresponding to the elements of $A$ in the basis. \end{proof}
\begin{lemma} \label{thesumiszero} If $ E$ is a subset of $S$ of size $t+k$ containing $A$ then $$ \sum_{e \in E \setminus A} f_A(e) \prod_{u \in E \setminus (A\cup \{e\})} d_{A}(u,e)^{-1}=0. $$ \end{lemma}
\begin{proof} Suppose that $v \in E \setminus A$ and apply Lemma~\ref{interp} with $E$ replaced by $E\setminus \{ v \}$, $$ f_A(v)=\sum_{e \in E \setminus (A \cup \{v\})} f_A(e) \prod_{u \in E \setminus (A\cup \{v, e\})} \frac{d_{A}(u,v)}{d_{A}(u,e)}. $$ Dividing by $$ \prod_{u \in E \setminus (A\cup \{v\})} d_{A}(u,v) $$ gives $$ f_A(v)\prod_{u \in E \setminus (A\cup \{v\})} d_{A}(u,v)^{-1}=\sum_{e \in E \setminus (A \cup \{v\})} f_A(e) \frac{d_{A}(v,e)}{d_{A}(e,v)}\prod_{u \in E \setminus (A\cup \{e\})} d_{A}(u,e)^{-1}, $$ and so the lemma follows from Lemma~\ref{minusone}. \end{proof}
The aim of the following section is to show that we can multiply the equation in Lemma~\ref{thesumiszero} by an element of ${\mathbb F}_q$, dependent on $A$, so that the terms depend only on $C=A \cup \{ e \}$ and not on $A$. This implies that we will get an equation for each $(k-2)$-subset of $E$ whose ``variables'' depend only on the $(k-1)$-subsets of $E$.
\section{A set of equations associated with an arc}
The following lemma is called the co-ordinate free version of Segre's lemma of tangents, proved in \cite{Ball2012} and also \cite[Lemma 7.15]{Ball2015}. In order that this article be self-contained we include a proof.
\begin{lemma} \label{segre1} Let $D$ be a subset of $S$ of size $k-3$ and let $\{x,y,z \}$ be a subset of $S \setminus D$. Interchanging $x$ and $y$ in $$ \frac{f_{D \cup \{x \}}(y)f_{D \cup \{z \}}(x)}{f_{D \cup \{x \}}(z)} $$ changes the sign by $(-1)^{t+1}$. \end{lemma}
\begin{proof} Let $B= \{x,y,z \} \cup D$. Since $B$ is a subset of $S$ of size $k$, it is a basis of $\mathrm{V}_k({\mathbb F}_q)$.
There are $q+1$ hyperplanes containing $\langle z,D\rangle$, since it is a $(k-2)$-dimensional subspace of $\mathrm{V}_k({\mathbb F}_q)$. We start off by identifying these $q+1$ hyperplanes.
Suppose that $u \in S \setminus B$ and that $(u_1,\ldots,u_k)$ are the coordinates of $u$ with respect to the basis $B$. The hyperplane $\langle u,z,D \rangle$ is $$ \ker (u_2X_1-u_1X_2), $$
since $\{z \} \cup D$ is the set of the last $k-2$ vectors of the basis $B$. For each $ u \in S \setminus B$, since $S$ is an arc, we have a distinct hyperplane $\langle u,z,D \rangle$, and so $|S \setminus B|=q-1-t$ of them in all.
Suppose that the function $f_{D \cup \{z\}}$ is $$ f_{D \cup \{z\}}(u)=\prod_{i=1}^t \alpha_i(u), $$ where $\ker \alpha_i \cap S=D\cup \{z \}$ and $\alpha_1,\ldots,\alpha_t$ are pairwise linearly independent linear forms.
With respect to the basis $B$, the linear form $\alpha_i(X)$ is $$ \alpha_i(X)=\alpha_{i1}X_1+\alpha_{i2}X_2, $$ for some $\alpha_{i1}, \alpha_{i2} \in {\mathbb F}_q$. Since $\ker \alpha_i \supset D \cup \{z \}$, this gives us a further $t$ hyperplanes containing $\langle z,D \rangle$.
The other two hyperplanes are $\ker X_1 = \langle y,z,D \rangle$ and $\ker X_2=\langle x,z,D \rangle$.
The $q-1$ hyperplanes containing $\langle z,D \rangle$, and not containing $x$ or $y$, are $$ \ker (aX_1+X_2), $$ where $a \in {\mathbb F}_q \setminus \{0 \}$. Therefore, $$ \prod_{i=1}^t \frac{\alpha_{i1}}{\alpha_{i2}} \prod_{u \in S \setminus B} \frac{(-u_2)}{u_1}=-1, $$ since it is the product of all non-zero elements of ${\mathbb F}_q$, which is $-1$.
With respect to the basis $B$, $x$ has coordinates $(1,0,\ldots,0)$, and so $$ f_{D \cup \{z \} }(x)=f_{D\cup \{z \} } ((1,0,\ldots,0))=\prod_{i=1}^t \alpha_{i1}. $$ Similarly $$f_{D \cup \{z\} }(y)=\prod_{i=1}^t \alpha_{i2},$$ so the equation above implies $$ f_{D \cup \{z\} } (y)\prod_{u \in S \setminus B} u_1=(-1)^{t+1} f_{D \cup \{z\}}(x) \prod_{u \in S \setminus B} u_2. $$ Repeating the above, switching $y$ and $z$ gives, $$ f_{D \cup \{y\} } (z)\prod_{u \in S \setminus B} u_1=(-1)^{t+1} f_{D \cup \{y\}}(x) \prod_{u \in S \setminus B} u_3. $$ And switching $x$ and $y$ gives, $$ f_{D \cup \{x\} } (z)\prod_{u \in S \setminus B} u_2=(-1)^{t+1} f_{D \cup \{x\}}(y) \prod_{u \in S \setminus B} u_3. $$ Combining these three equations gives, $$ f_{D \cup \{x \}}(y)f_{D \cup \{y \}}(z)f_{D \cup \{z \}}(x) =(-1)^{t+1}f_{D \cup \{x \}}(z)f_{D \cup \{y \}}(x)f_{D \cup \{z \}}(y), $$ since $$ \prod_{u \in S \setminus B} u_1u_2 u_3 \neq 0. $$ Thus, we have $$ \frac{f_{D \cup \{x \}}(y)f_{D \cup \{z \}}(x)}{f_{D \cup \{x \}}(z)}=(-1)^{t+1}\frac{f_{D \cup \{y \}}(x)f_{D \cup \{z \}}(y)}{f_{D \cup \{y \}}(z)} $$
\end{proof}
Let $F$ be the subset of the first $k-2$ elements of $S$ with respect to the ordering of $S$.
For a subset $A$ of $S$ of size $k-2$, let $$ \alpha_A=(-1)^{(r+s)(t+1)} \prod_{i=1}^r \frac{f_{D \cup \{z_r,\ldots,z_{i},x_{i-1},\ldots,x_1 \} } (x_{i})}{f_{D \cup \{z_r,\ldots,z_{i+1},x_{i},\ldots,x_1 \} } (z_i)}, $$ where $D=A \cap F$, $A \setminus F=\{x_1,\ldots,x_{r} \}$, $F \setminus A=\{z_1,\ldots,z_r \}$ and $s$ is the number of transpositions required to order $(F \cap A,F\setminus A)$ as $F$.
For a subset $C$ of $S$ of size $k-1$, let $$ \alpha_C=(-1)^{(r+s)(t+1)} f_{D\cup \{ x_r \ldots,x_1\}}(x_{r+1})\prod_{i=1}^r \frac{f_{D \cup \{z_r,\ldots,z_{i},x_{i-1},\ldots,x_1 \} } (x_{i})}{f_{D \cup \{z_r,\ldots,z_{i+1},x_{i},\ldots,x_1 \} } (z_i)}, $$ where $D=C \cap F$, $C \setminus F=\{x_1,\ldots,x_{r+1} \}$, $F \setminus C=\{z_1,\ldots,z_r \}$ and $s$ is the number of transpositions required to order $(F \cap C,F\setminus C)$ as $F$.
The following is from \cite[Lemma 7.19]{Ball2015}. Again, in order that this article be self-contained, we include a proof.
\begin{lemma} \label{atoc} For a subset $A$ of $S$ of size $k-2$, and $e \in S \setminus A$, $$ \alpha_{A \cup \{ e \}}=(-1)^{d(t+1)} \alpha_A f_A(e), $$ where $d$ is the number of elements of $A$ that come after $e$ in the ordering. \end{lemma}
\begin{proof} If $e \not\in F$ then $F \setminus (A \cup \{ e \})=F \setminus A$ and $A \cap F=(A\cup \{ e \}) \cap F$ is immediate. We have to reorder the numerator of $\alpha_Af_A(e)$ so that it coincides with $\alpha_{A \cup \{ e \}}$. Then we can write $\alpha_{A \cup \{ e \}}$ in place of $\alpha_Af_A(e)$. By Lemma~\ref{segre1}, this changes the sign by $$ (-1)^{m(t+1)}, $$ where $m$ is the number of elements of $A \setminus F$ that come after $e$ in the ordering. Since $e \not\in F$, and the elements of $F$ come first in the ordering, $m$ is the number of elements of $A$ that come after $e$ in the ordering. Therefore, $m=d$ and this case is done.
If $e \in F$ then we have to reorder the denominator of $\alpha_A$ to move the $e \in F \setminus A$ so that it is $z_r$. Then, up to getting the sign right, we are able to write $\alpha_{A \cup \{e \}}$ in place of $\alpha_Af_A(e)$, since the $f_A(e)$ cancels with one in the denominator. Note that $e \in F \cap (A \cup \{ e \})$.
This reordering, according to Lemma~\ref{segre1}, changes the sign by $$ (-1)^{m_1(t+1)}, $$ where $m_1$ is the number of elements of $F \setminus (A \cup \{ e\})$ which come after $e$ in the ordering.
Note that $|F \setminus (A \cup \{ e \})|=|F \setminus A|-1$, so we have to decrease $r$ by $1$ when we replace $\alpha_Af_A(e)$ by $\alpha_{A \cup \{e \}}$, while $s$ increases by the number of elements of $F\setminus (A \cup \{ e\})$ which come before $e$ in the ordering plus the number of elements of $F \cap A$ that come after $e$ in the ordering. So, all in all, the sign changes by $$
(-1)^{(|F\setminus (A \cup \{ e\})|+m_2-1)(t+1)} $$
where $m_2$ is the number of elements of $F \cap A=(F\setminus \{ e\}) \cap A$ that come after $e$ in the ordering. Since the elements of $F$ come first in the ordering and $e \in F$, we have that $m_3=|(F\setminus \{ e\}) \cap A|-m_2$ is the number of elements of $A$ that come before $e$ in the ordering. Therefore, the sign changes by $$
(-1)^{(|F \setminus \{ e \}|+m_3-1)(t+1)}. $$
The lemma follows since $d=k-2-m_3$ and $|F\setminus \{ e \}|=k-3$. \end{proof}
\begin{lemma} \label{theeqn} Let $S$ be an arbitrarily ordered arc of size $q+k-1-t$ and let $E$ be a subset of $S$ of size $k+t$. For any subset $A$ of $E$ of size $k-2$, $$ \sum \alpha_C \prod_{u \in E \setminus C} \det(u,C)^{-1}=0, $$ where the sum runs over the subsets $C$ of $E$ of size $k-1$ containing $A$. \end{lemma}
\begin{proof} By Lemma~\ref{thesumiszero}, since $E$ is a subset of $S$ of size $t+k$ containing $A$, $$ \sum_{e \in E \setminus A} f_A(e) \prod_{u \in E \setminus (A\cup \{e\})} \det(u,e,A)^{-1}=0. $$ Observe that $$ \det(u,e,A)=(-1)^{k-2}\det(u,A,e)=(-1)^{k-2+d} \det(u,A \cup \{ e \}), $$ where $d$ is the number of elements of $A$ which come after $e$ in the ordering. Since there are $t+1$ terms in the product, when we multiply by $\alpha_A$ and apply Lemma~\ref{atoc}, the lemma follows. \end{proof}
\section{Proofs of Theorem~\ref{wone}, Theorem~\ref{wtwo} and Theorem~\ref{wtwo2}}
Let $n$ be a non-negative integer and let $G$ be an arc of $\mathrm{V}_k({\mathbb F}_q)$ of size at least $k+n$. Order the elements of $G$ arbitrarily and let $F$ be the set of the first $k-2$ vectors of $G$.
Recall that we defined the matrix $\mathrm{M}_{ n}$ as follows. For each subset $E$ of $G$ of size $|G|-n$ and subset $A$ of $E$ of size $k-2$, we get a column of the matrix $\mathrm{M}_{ n}$, whose rows are indexed by subsets $C$ of $G$ of size $k-1$, where a $(C,(A,E))$ entry is $$ \prod_{u \in G \setminus E} \det(u,C), $$ if and only if $A \subset C$ and zero otherwise.
Let $v_G$ be a vector whose coordinates are indexed by the subsets $C$ of $G$ of size $k-1$ and whose $C$ coordinate is $$ \alpha_C \prod_{z \in G \setminus C} \det(z,C)^{-1}. $$
\begin{lemma} \label{upsolution}
If $G$ can be extended to an arc of size $q+2k+n-1-|G|$ then $v_G\mathrm{M}_{ n}=0$. \end{lemma}
\begin{proof}
Suppose that $G$ can be extended to $S$, an arc of size $q+2k+n-1-|G|$. Let $t=|G|-k-n$. For each subset $E$ of $G$ of size $k+t$, the equation in Lemma~\ref{theeqn} for a subset $A$ of $E$ of size $k-2$, is the equation obtained by multiplying $v_G$ with the $(A,E)$ column of $\mathrm{M}_{ n}$. \end{proof}
\begin{proof} (of Theorem~\ref{wone}). If there is a vector of weight one in the column space of $\mathrm{M}_{ n}$ then the scalar product of this vector with $v_G$, according to Lemma~\ref{upsolution}, gives us the equation $$ \alpha_C \prod_{z \in G \setminus C} \det(z,C)^{-1}=0, $$ for some subset $C$ of $G$ of size $k-1$. This is a contradiction, since all terms in this product are non-zero. \end{proof}
\begin{proof} (of Theorem~\ref{wtwo})
Let $A$ be a subset of $G$ of size $k-2$ and let $S$ be an arc of size $q+2k+n-1-|G|$ containing $G$. As before, let $t=q+k-1-|S|$, so $|G|=k+t+n$.
Since $(G,n)$ has property $W$ there are elements $x,y_1,\ldots,y_{t+1}$ of $G$ and vectors $u_1,\ldots,u_{t+1}$ in the column space of $\mathrm{M}_{ n}$, where $u_i$ has non-zero $C$ coordinates if and only if $C=A\cup \{x\}$ or $C=A \cup \{y_i \}$. By Lemma~\ref{upsolution}, the scalar product of $u_i$ with $v_G$ gives the equation $$ \alpha_{A\cup \{x\}} \prod_{z \in G \setminus (A \cup \{x\})} \det(z,A \cup \{x \})^{-1}= a_i \alpha_{A\cup \{y_i\}} \prod_{z \in G \setminus (A \cup \{y_i\})} \det(z,A \cup \{y_i \})^{-1}, $$ for some $a_i \in {\mathbb F}_q$.
By Lemma~\ref{atoc}, this determines $$ \frac{f_A(y_i)}{f_A(x)}, $$ so this quantity is determined by $G$. Hence, $G$ determines the value of $$ \frac{f_A(X)}{f_A(x)} $$ at $t+1$ linearly independent points. Since $f_A(X)$ is a homogeneous polynomial of degree $t$, this determines $f_A(X)$, so $G$ determines $f_A(X)$. Each factor of $f_A(X)$ must be a linear form $\alpha$, where $\ker \alpha$ is a hyperplane intersecting $S$ in $A$. Therefore, $G$ determines all these hyperplanes, which is what we wanted to prove. \end{proof}
\begin{proof} (of Theorem~\ref{wtwo2})
The algebraic hypersurface $\phi_S$ can be constructed from a subset $E$ of $S$ of size $k+t-1$ if $q$ is even and size $k+2t-1$ if $q$ is odd, if one knows all the co-secants to $S$ containing $A$ for every subset $A$ of $E$, see \cite{BBT1990} or Section~\ref{appendix}. In Section~\ref{appendix} we give an explicit description of $\phi_S$. The condition $2n \geqslant |G|-k-1$ implies $|G| \geqslant |E|$ if $q$ is odd, so by Theorem~\ref{wtwo} we can construct $\phi_S$. \end{proof}
\section{The algebraic hypersurface associated with an arc} \label{appendix}
In this section we explicitly construct the algebraic hypersurface $\phi_S$ associated with an arc $S$ of $\mathrm{V}_k({\mathbb F}_q)$, introduced in \cite{BBT1990}.
As before, let $t=q+k-1-|S|$. Let $E$ be a subset of $S$ of size $k+t-1$ if $q$ is even and $k+2t-1$ if $q$ is odd. The requirement that such an $E$ exists, which is needed to construct $\phi_S$, imposes a lower bound on the size of $S$.
For $q$ even, define a polynomial in $k-1$ vector variables, so $k(k-1)$ indeterminates, $$ \phi_S(Y_1,\ldots,Y_{k-1})=\sum_{C} \alpha_C \prod_{z \in E \setminus C} \frac{\det(z,Y_1,\ldots,Y_{k-1})}{\det(z,C)}, $$ where the sum runs over all subsets $C$ of size $k-1$ of $E$.
For $q$ odd, define a polynomial in $k-1$ vector variables, $$ \phi_S(Y_1,\ldots,Y_{k-1})=\sum_{C} \alpha_C^2 \prod_{z \in E \setminus C} \frac{\det(z,Y_1,\ldots,Y_{k-1})}{\det(z,C)} , $$ where the sum runs over all subsets $C$ of size $k-1$ of $E$.
Although $\phi_S$ is defined as a polynomial in $k-1$ vector variables, a simple change of variables shows that in fact it can be written as a polynomial in $k$ indeterminates. Let $$ Z_i=(-1)^{i-1}\det(Y_1,\ldots,Y_{k-1}), $$ where the $i$-th coordinate of $Y_j$ has been deleted, so the determinant is of a $(k-1) \times (k-1)$ matrix. Then $$ \phi_S=\phi_S(Z_1,\ldots,Z_k). $$ Let $\{ c_1,\ldots,c_{k-1} \}$ be a set of $k-1$ linearly independent vectors of $\mathrm{V}_k({\mathbb F}_q)$. With $Y_j=c_j$ for $j=1,\ldots,k-1$, this defines $z_i=Z_i$, for $i=1,\ldots,k$. The vector $(z_1,\ldots,z_k)$ is a vector in the dual space, dual to the hyperplane spanned by $\{ c_1,\ldots,c_{k-1} \}$. Suppose that $\{ c_1,\ldots,c_{k-1} \}$ spans a co-secant hyperplane to $S$. Then $(c_1,\ldots,c_{k-1})=(x,a_1,\ldots,a_{k-2})$, where $A=\{a_1,\ldots,a_{k-2}\}$ is a subset of $S$ and $x$ is a zero of $f_A(X)$, for some subset $A$ of $S$. By Theorem~\ref{hyp}, the vector $(z_1,\ldots,z_k)$ is a zero of $\phi_S$ and if $q$ is odd, it is a zero of multiplicity two on the line dual to the subspace spanned by $A$. These are precisely the properties that the hypersurface constructed by Blokhuis, Bruen and Thas in \cite{BBT1990} has. Therefore, we have an explicit description of this hypersurface in terms of the $\alpha_C$'s.
\begin{theorem} \label{hyp} For any subset $A=\{a_1,\ldots,a_{k-2} \}$ of $S$ of size $k-2$ $$ \phi_S(X,a_1,\ldots,a_{k-2})=\alpha_A f_A(X), $$ if $q$ is even and $$ \phi_S(X,a_1,\ldots,a_{k-2})=\alpha_A^2 f_A(X)^2, $$ if $q$ is odd. \end{theorem}
\begin{proof} Suppose $q$ is even. If $A$ is a subset of $E$ then $$ \phi_S(X,a_1,\ldots,a_{k-2})=\sum_{e \in E \setminus A} \alpha_{A \cup \{e\} } \prod_{z \in E \setminus (A \cup \{ e \})} \frac{\det(z,X,A)}{\det(z,e,A)}. $$ By Lemma~\ref{atoc}, $\alpha_{A \cup \{e\} }=\alpha_A f_A(e)$. Therefore, by Lemma~\ref{interp}, $$ \phi_S(X,a_1,\ldots,a_{k-2})=\alpha_A f_A(X). $$
Suppose $|A \cap E|=k-2-j$.
The above proves the theorem for $j=0$ and now we proceed by induction on $j$.
Let $x,y \in E\setminus A$ and $a \in A \setminus E$. Since $q$ is even, $\phi_S(x,A)=\phi_S(a,(A \setminus \{a \}) \cup \{x\})$. By induction, $$ \phi_S(a,(A \setminus \{a \}) \cup \{x\})=\alpha_{(A \setminus \{a \}) \cup \{x\}}f_{(A \setminus \{a \}) \cup \{x\}}(a)=\alpha_{A \cup \{ x\}}=\alpha_A f_A(x), $$ where the last two equalities use Lemma~\ref{atoc}. Hence, $$ \phi_S(x,A)=\alpha_A f_A(x), $$ and $$ \frac{\phi_S(y,A)}{\phi_S(x,A)}=\frac{f_A(y)}{f_A(x)}. $$ Thus, the evaluation of $$ \frac{f_A(x)}{\phi_S(x,A)} \phi_S(X,A) $$ is the evaluation of $f_A(X)$ at all points of $E \setminus A$. Now, we argue as in Lemma~\ref{interp}. Both are homogeneous polynomials of degree $t$ which, with respect to a basis containing $A$, are polynomials in two variables. Since they agree at at least $t+1$ linearly independent points they are the same. We have already observed that $\phi_S(x,A)=\alpha_A f_A(x)$, which completes the proof.
Suppose $q$ is odd. If $A$ is a subset of $E$ then $$ \phi_S(X,a_1,\ldots,a_{k-2})=\sum_{e \in E \setminus A} \alpha_{A \cup \{e\} }^2 \prod_{z \in E \setminus (A \cup \{ e \})} \frac{\det(z,X,A)}{\det(z,e,A)}. $$ By Lemma~\ref{atoc}, $\alpha_{A \cup \{e\} }^2=\alpha_A^2 f_A(e)^2$ so, as in Lemma~\ref{interp} but interpolating at $2t+1$ linearly independent points, $$ \phi_S(X,a_1,\ldots,a_{k-2})=\alpha_A^2 f_A(X)^2. $$
Suppose $|A \cap E|=k-2-j$.
The above proves the theorem for $j=0$ and now we proceed by induction on $j$.
Let $x,y \in E\setminus A$ and $a \in A \setminus E$. Arguing as in the $q$ even case, $$ \phi_S(x,A)=\alpha_A ^2f_A(x)^2, $$ and so $$ \frac{\phi_S(y,A)}{\phi_S(x,A)}=\frac{f_A(y)^2}{f_A(x)^2}. $$ Thus, the evaluation of $$ \frac{f_A(x)^2}{\phi_S(x,A)} \phi_S(X,A) $$ is the evaluation of $f_A(X)^2$ at all points of $E \setminus A$. Now, we argue as in Lemma~\ref{interp}. Both are homogeneous polynomials of degree $2t$ which, with respect to a basis containing $A$, are polynomials in two variables. Since they agree at at least $2t+1$ linearly independent points they are the same. \end{proof}
\begin{theorem} \label{forgetqeven} If $q$ is even then the dimension of the null space of the row space of $\mathrm{M}_{ n}$ is $$
{|G|-n-1 \choose k-1}. $$
Furthermore, the hypotheses in Theorem~\ref{wone}, Theorem~\ref{wtwo} and Theorem~\ref{wtwo2} are never satisfied, apart from in the case $|G|=k+n$ where the hypotheses of Theorem~\ref{wtwo} and Theorem~\ref{wtwo2} are trivially satisfied. \end{theorem}
\begin{proof}
Let $E$ be a subset of $G$ of size $k+t-1$, where $t=|G|-n-k$. For any choice of $\alpha_C$, where $C$ is a subset of $E$ of size $k-1$, we define $$ \phi(Y_1,\ldots,Y_{k-1})=\sum_{C \subset E } \alpha_C \prod_{z \in E \setminus C} \frac{\det(z,Y_1,\ldots,Y_{k-1})}{\det(z,C)}, $$ where the sum runs over all subsets of size $k-1$ of $E$.
Define $f_A(X)=\phi(X,A)$ for each subset $A$ of $G$ of size $k-2$, and from this we define $\alpha_C$ for all $C \subset G$, as before. With respect to a basis containing $A$, $f_A(X)$ is a homogeneous polynomial of degree $t$ in two variables, so satisfies Lemma~\ref{interp}. Moreover $f_{D \cup \{ x \}}(y)=\phi(D,x,y)$, so $$ \frac{f_{D \cup \{ x \}}(y)}{f_{D \cup \{ y \}}(x)}=1, $$ so Lemma~\ref{segre1} is also satisfied. Lemma~\ref{theeqn} is derived from these two lemmas, so we conclude that Lemma~\ref{theeqn} holds for these $f_A$. Since Lemma~\ref{theeqn} gives the set of equations defined by $\mathrm{M}_n$ (see Lemma~\ref{upsolution}), the vector $v_G$, whose coordinates are indexed by the subsets $C$ of $G$ of size $k-1$ and whose $C$ coordinate is $$ \alpha_C \prod_{z \in G \setminus C} \det(z,C)^{-1}, $$ is in the null space of the row space of $\mathrm{M}_n$.
Thus, the dimension of the null space of the row space of $\mathrm{M}_{ n}$ is at least ${|G|-n-1 \choose k-1}$.
Suppose that $v$ is in the null space of the row space of $\mathrm{M}_{ n}$ and let $\alpha_C$ be defined by $$ (v)_C=\alpha_C \prod_{z \in G \setminus C} \det(z,C)^{-1}. $$ Let $E$ be a subset of $G$ of size $k+t-1$ and define $$ \phi(Y_1,\ldots,Y_{k-1})=\sum_{C } \alpha_C \prod_{z \in E \setminus C} \frac{\det(z,Y_1,\ldots,Y_{k-1})}{\det(z,C)}, $$ where the sum runs over all subsets of size $k-1$ of $E$. For any $C\subset E$, $\phi(C)=\alpha_C$.
Moreover, since $v$ is in the null space of the row space of $\mathrm{M}_{ n}$ we have that the equation in Lemma~\ref{theeqn} holds. This implies that for any $C$ where $|C \cap E|=k-2$, $\phi(C)=\alpha_C$. Therefore the $\alpha_C$'s, where $C \subset E$ determine $\alpha_C$
where $|C \cap E|=k-2$. Now we can deduce that $\alpha_C$, where $C \subset E'$ is determined by the same $\alpha_C$'s, when $|E' \cap E|=k+t-2$ and extrapolate to deduce that all $\alpha_C$'s are determined by the $\alpha_C$'s, where $C \subset E$.
Thus, the dimension of the null space of the row space of $\mathrm{M}_{ n}$ is at most ${|G|-n-1 \choose k-1}$.
If $\mathrm{M}_{ n}$ has a vector of weight one in its column space then this forces $\alpha_C=0$ for some $C$, which would make the dimension of the row space of $\mathrm{M}_{ n}$ strictly less than ${|G|-n-1 \choose k-1}$. If $\mathrm{M}_{ n}$ has property $W$ then there is a $C$ and a $C'$, subsets of $G$ of size $k-1$, intersecting in $k-2$ vectors, such that $\alpha_C=a \alpha_{C'}$, for some $a \in {\mathbb F}_q$. This would impose a condition on the null space of the row space and again imply that the dimension of the row space of $\mathrm{M}_{ n}$ is strictly less than ${|G|-n-1 \choose k-1}$. \end{proof}
\section{Conclusions}
Although the proofs are quite technical, the main results in this article are easily stated and potentially useful. Given an arc $G$ in a space of odd characteristic, one can quickly determine an upper bound on how large an arc one can hope to extend it to. This is done by increasing $n$ one by one to obtain the minimum $n_0$ such that $\mathrm{M}_{ n_0+1}$ has a vector of weight one in its column space. Then Theorem~\ref{wone} implies that $G$ cannot be extended to an arc of size $q+2k+n_0-|G|$. If $(G,n_0)$ satisfies property $W$ and $S$ is an arc of size $q+2k+n_0-1-|G|$ containing $G$ then, by Theorem~\ref{wtwo}, we can determine the co-secants to $S$ containing only points of $G$. Furthermore, if $2n_0 \geqslant |G|-k-1$ then, by Theorem~\ref{wtwo2}, we can determine the algebraic hypersurface $\phi_S$ associated with $S$.
It should be possible to prove explicit upper bounds if one assumes that $G$ has some structure. For example one might assume that in the planar case when $k$ is three, $G$ is contained in a cubic curve, or all but one or a few of the points of $G$ are contained in a conic. Likewise, for general $k$ one might assume that $G$ is contained in a normal rational curve or all but one or a few of the points of $G$ are contained in a normal rational curve. Using this structure one may then be able to calculate the column space of $\mathrm{M}_{ n}$. If this is possible, one may also be able to determine precisely the large arcs to which $G$ extends. There should be many results of this type.
\affiliationone{
Simeon Ball\\
Departament de Matem\`atiques, \\ Universitat Polit\`ecnica de Catalunya, \\ M\`odul C3, Campus Nord,\\ c/ Jordi Girona 1-3,\\ 08034 Barcelona, Spain \\
\email{simeon@ma4.upc.edu}}
\end{document} |
\begin{document}
\fancyhead[R]{\ifnum\value{page}<2\relax\else\thepage\fi}
\title{Quantum Walk Inspired Dynamic Adiabatic Local Search}
\author{Chen-Fu Chiang}
\affiliation{Department of Computer Science, State University of New York Polytechnic Institute, Utica, NY 13203 USA}
\email{chiangc@sunypoly.edu} \author{Paul M. Alsing} \affiliation{ Information Directorate, Air Force Research Laboratory, Rome, NY 13441, USA} \email{corresponding author: paul.alsing@us.af.mil}
\date{\today}
\begin{abstract}
We investigate the irreconcilability issue that arises in translating the search algorithm from the Continuous-Time Quantum Walk (CTQW) framework to the Adiabatic Quantum Computing (AQC) framework. \black{For the AQC formulation to evolve along the same path as the CTQW requires a constant energy gap in the former Hamiltonian throughout the AQC schedule. To resolve the issue, we modify the CTQW-inspired AQC catalyst Hamiltonian with a $Z$ oracle operator. Through simulation we demonstrate that the total running time for the proposed approach remains optimal.} Inspired by this solution, we further investigate \black{adaptive scheduling for the catalyst Hamiltonian and its coefficient function in the adiabatic path to improve the adiabatic local search.} \end{abstract} \maketitle
\thispagestyle{fancy}
\section{Introduction}
Quantum technologies have advanced dramatically in the past decade, both in theory and experiment. \black{From the view of theoretical computational complexity, Shor’s factoring algorithm \cite{shor1994algorithms} and Grover’s search algorithm \cite{grover1996fast} are well-known for their improvements over the best possible classical algorithms designed for the same purpose.}
From a perspective of universal computational models, Quantum Walks (QWs) have become a prominent model of quantum computation due to their direct relationship to the physics of the quantum system \cite{farhi1998quantum, kempe2003quantum}. It has been shown that the QW computational framework is universal for quantum computation \cite{childs2009universal, lovett2010universal}, and many algorithms now are presented directly in the quantum walk formulation rather than through a circuit model or other abstracted method \cite{farhi1998quantum, qiang2016efficient}. Besides being search algorithms, CTQWs have been applied in fields such as quantum transport\cite{caruso2009highly, mohseni2008environment, rebentrost2009environment,plenio2008dephasing}, state transfer \cite{bose2003quantum, kay2010perfect}, link prediction in complex networks \cite{omar2019quantum} and the creation of Bell pairs in a random network \cite{chakraborty2016spatial}. Some other well-known universal models include the quantum circuit model \cite{shor1998quantum, yao1993quantum, jordan2012quantum}, topological quantum computation \cite{nayak2008non}, adiabatic quantum computation (AQC) \cite{mizel2007simple}, resonant transition based quantum computation \cite{chiang2017resonant} and measurement based quantum computation \cite{morimae2012blind, gross2007novel, briegel2009measurement, raussendorf2003measurement}. \black{Each model might have its own bottleneck. Investigating the relationships among the frameworks helps identify the violations that occur when mapping between frameworks, as well as potential solutions. By studying the mapping, one can extend the techniques from one framework to another for some potential speedup \cite{Cutugno:2022}}.
\black{ In this work we investigate the irreconcilability issue that arises when translating the search algorithm from the Continuous-Time Quantum Walk (CTQW) framework to the Adiabatic Quantum Computing (AQC) framework as first pointed out by Wong and Meyer \cite{wong2016irreconcilable}. This irreconcilability issue can be described as follows. One first notes that the CTQW is the unique continuous-time quantum walk formulation of Grover’s discrete search algorithm. While the CTQW search evolves the initial unbiased (equal amplitude) state to the unknown (marked) state on the order of time $T\sim \mathcal{O}(\sqrt{N})$ (where $N$ is the size of search space), it does not follow the same evolution path (on the Bloch sphere) as that of Grover's algorithm. The uniqueness of the CTQW formulation stems from the fact that the unknown marked state only acquires a (time-dependent) phase from the oracle operation. Most importantly the marked state does not undergo evolution, and thus the CTQW effectively employs a dichotomous ``Yes/No" oracle, for which the discrete Grover's algorithm has been proven to be optimal. }
\black{ The AQC formulation of the search algorithm with a non-uniform adiabatic evolution schedule \cite{roland2002quantum} also finds the marked state in time $T\sim \mathcal{O}(\sqrt{N})$ while at the same time following the same path as Grover's algorithm. Thus, if one investigates what adiabatic Hamiltonian gives rise to the same evolution path as the CTQW formulation, one finds \cite{wong2016irreconcilable} that the AQC formulation introduces an extra ``catalyst" Hamiltonian which introduces structure beyond the standard ``Yes/No" oracle employed in the CTQW or discrete (Grover's) search algorithm. A scaled version of the AQC Hamiltonian leads to a constant energy gap that implies that the marked state can be found in time $T\sim \mathcal{O}(1)$. This discrepancy between the formulations of the two versions of a continuous time search algorithm was termed the ``irreconcilability (difference) issue" between CTQW and AQC by Wong and Meyer \cite{wong2016irreconcilable}. }
\black{In this work we address the CTQW/AQC search algorithm irreconcilability issue by modifying the constant energy gap Hamiltonian of the AQC formulation. Our contribution is twofold}. We first adapt the result from the mapping of CTQW to AQC by selecting the regular oracle $Z$ operator as the catalyst Hamiltonian and explore an alternative for the coefficient function for the catalyst Hamiltonian in order to attempt to avoid the irreconcilability issue. Through the simulation, the modified model provides optimal results in terms of time required for search. \black{We then apply this modification to adiabatic local search by adding an additional sluggish parameter $\delta$ which delineates the width of the adiabatic run time schedule over which the catalyst Hamiltonian effectively acts (i.e. the ``slowdown" region in the vicinity of the system's smallest energy gap $\Delta$). The sluggish parameter tracks the increase of running time $t=t(s)$ with respect to
schedule parameter $0\le s\le 1$ where $\delta=|d^2t/ds^2|$. The catalyst is employed when $\delta \ge\delta_0$ to facilitate the process, where we have found that the threshold value of $\delta_0=64$ provides good results.}
The outline of this work is as follows. The background information regarding CTQW and AQC is given in section \ref {sect:background} where the translation of CTQW to AQC is described in section \ref{sect:CTQW-AQC}. The irreconcilability issue that occurs during the translation is explained in section \ref{sect:irrecon} and our proposed solution is provided in section \ref{sect:m-qw-aqc}. The mapping of Grover search to AQC as an adiabatic local search is summarized in section \ref{sect:adpt-ags}. We propose and describe the catalyst Hamiltonian mechanism in section \ref{sect:catrelease} and determine the sluggish interval where it is employed. We further explore three coefficient functions of the catalyst Hamiltonian in section \ref{sect:catcoefficient}. The simulation results for proposed modifications are discussed in section \ref{sect:experiment}. Finally, our conclusions are given in section \ref{sect:conclude}.
\section{Background}\label{sect:background}
\subsection{Continuous-Time Quantum Walk}\label{sect:ctqw}
Given a graph $G=(V, E)$, where $V$ is the set of vertices and $E$ is the set of edges, the CTQW on $G$ is defined as follows.
Let $A$ be the adjacency matrix of $G$, the $|V| \times |V|$ matrix is defined component-wise as \begin{equation} A_{ij} =
\begin{cases}
1 & \text{if } (i,j) \in E, \\
0 & \text{otherwise}
\end{cases} \end{equation} where $i, j \in V$. A CTQW starts with a uniform superposition state $\ket{\psi_0}$ in the space, spanned by nodes in $V$, evolves according to the Schr\"odinger equation with Hamiltonian $A$. After time $t$, the output state is thus \begin{equation} \ket{\psi_{t}} = e^{-iAt}\ket{\psi_0}. \end{equation} \newline The probability that the walker is in the state
$\ket{\tau}$ at time $t$ is given by $|\mel{\tau}{e^{-iAt}}{\psi_0}|^2$. To find the marked node $\ket{\omega}$ starting from an initial state $\ket{\psi_0}$ via a CTQW, one has to maximize the success probability
\begin{equation} |\mel{\omega}{e^{-iAt}}{\psi_0}|^2 \end{equation} while minimizing the time $t$. For instance, initially at time $t = 0$, the success probability is \begin{equation}
|\mel{\omega}{e^{-iA0}}{\psi_0}|^2 = O(\frac{1}{|V|}). \end{equation} The success probability is extremely small when the
search space $|V| = N $ is large and $\ket{\psi_0}$ is a uniform superposition state.
When applied to spatial search, the purpose of a CTQW is to find a marker basis state $\ket{\omega}$\cite{childs2004spatial,childs2003exponential}. For this purpose, the CTQW starts with the initial state $\ket{\psi_0} =\sum_{i=1}^{N}\frac{1}{\sqrt{N}}\ket{i}$, and evolves according to the Hamiltonian\cite{novo2015systematic} \begin{equation}\label{eqn:CTQW_gamma}
H = -\gamma A - \ket{\omega}\bra{\omega} \end{equation}
where $\gamma$ is the coupling factor between connected nodes. \black{The value of $\gamma$ has to be determined based on the graph structure} such that the quadratic speedup of CTQW can be preserved. Interested readers can refer to \cite{childs2004spatial, novo2015systematic} for more details.
\subsection{Adiabatic Quantum Computing} \label{sect:aqc}
In the AQC model, $H_0$ is the initial Hamiltonian, $H_f$ is the final Hamiltonian. The evolution path for the time-dependent Hamiltonian is \begin{equation}\label{eqn:aqc} H(s) = (1-s)H_0 + sH_f \end{equation} where $0 \leq s \leq 1$ is a schedule function of time $t$. For convenience, we denote $s$ as $s(t)$ and use them interchangeably. The variable $s$ increases slowly enough such that the initial ground state evolves and remains as the instantaneous ground state of the system. More specifically, \begin{align} H(s(t))\ket{\lambda_{k,t}} = \lambda_{k,t}\ket{\lambda_{k,t}} \end{align} where $\lambda_{k,t}$ is the eigenvalue corresponding to the eigenstate $\ket{\lambda_{k,t}}$ at time $t$ and $k$ labels the $k$-th excited eigenstate.
The minimal eigenvalue gap is defined as \begin{align} g = \min_{0 \leq t \leq T_a}(\lambda_{1, t} - \lambda_{0,t}) \end{align} where $T_a$ is the total evolution time of the AQC. Let $\ket{\psi (T_a)}$ be the state of the system at time $T_a$ evolving under the Hamiltonian $H(s(t))$ from the ground state $\ket{\lambda_{0,0}}$ at time $t=0$. The Adiabatic theorem \cite{farhi2000quantum, albash2018Adiabatic} states that the final state $\ket{\psi (T_a)}$ is $\epsilon$-close to the real ground state $\ket{\lambda_{0,T_a}}$ as \begin{align}\label{eqn:aqc_limit_approx}
|\braket{\lambda_{0, T_a}}{\psi(T_a)}|^2 \leq 1 - \epsilon^2, \end{align} provided that \begin{align}
\frac{|\bra{\lambda_{1,t}}\frac{dH}{dt}\ket{\lambda_{0,t}}|}{g^2} \leq \epsilon. \end{align}
There are several variations of AQC to improve the performance. The variations are based on modifying the initial Hamiltonian and the final Hamiltonian \cite{10.5555/2011395.2011396, perdomo2011study} or adding a catalyst Hamiltonian $H_e$ \cite{10.5555/2011395.2011396}, which is turned on/off at the beginning/end of the adiabatic evolution. In this work, we are interested in the catalyst approach.
A conventional catalyst Hamiltonian assisted AQC path is expressed as \begin{align}\label{eqn:typical_catalyst_aqc} H(s) = (1-s)H_0 + s(1-s)H_e + sH_f. \end{align}
\section{Continuous Time Quantum Walk to Adiabatic Search Mapping} \label{sect:CTQW-AQC}
\noindent One can construct a time-dependent AQC Hamiltonian $H(s)$ as shown in \cite{wong2016irreconcilable} where the Adiabatic search follows the CTQW search on a complete graph with $N$ vertices. Let us define the following variables. The coupling factor $\gamma$ is set to $1/N$ and $\ket{\psi_0}$ is the uniform superposition of all states in the search space. State $\ket{r}$ is the uniform superposition of non-solution states, state $\ket{\omega}$ is the solution state. Treating the state evolving in the CTQW system as the time-dependent ground state of $H(s)$, one constructs $H(s)$ in the $\{\ket{\omega}, \ket{r}\}$ basis as \cite{wong2016irreconcilable} \begin{align}\label{eqn:ctqw_aqc} H(s) =& \sqrt[4]{\frac{s(1-s)}{4\epsilon^2N}}[(1-s)H_0 + \sqrt{s(1-s)}H_e + s H_f] \end{align} where $s(t) = \sin^2(\frac{t}{\sqrt{N}})$ with \black{ \begin{align}\label{eqn:h0hf}
H_0 &= \dyad{\psi_0^{\perp}}{\psi_0^{\perp}} - \dyad{\psi_0}{\psi_0}, \quad H_f =\dyad{r}{r} - \dyad{\omega}{\omega}, \nonumber \\ H_e &= 2i\sqrt{\frac{N-1}{N}}( \dyad{r}{\omega} - \dyad{\omega}{r}), \end{align} or explicitly in the $\{\ket{\omega}, \ket{r}\}$ basis as \begin{align} \label{eqn:HamiltoniansCTQW} H_0 &= \begin{pmatrix} \frac{N-2}{N} & -2\frac{\sqrt{N-1}}{N} \\ -2\frac{\sqrt{N-1}}{N} & -\frac{N-2}{N} \\ \end{pmatrix}, \\ H_e & = \begin{pmatrix} 0 & -2i\sqrt{\frac{N-1}{N}} \\ 2i\sqrt{\frac{N-1}{N}} & 0\\ \end{pmatrix},\; H_f = \begin{pmatrix} -1 & 0 \\ 0 & 1\\ \end{pmatrix}. \nonumber \end{align} }
\subsection{The Irreconcilability Issue: Constant Gap Catalyst Hamiltonian and Small Norm} \label{sect:irrecon}
\noindent The main concerns that are raised from Eqn. (\ref{eqn:ctqw_aqc}) are twofold. The first issue is the factor $ \sqrt[4]{\frac{s(1-s)}{4\epsilon^2N}}$ of $H(s)$. The adiabatic theorem \cite{griffiths2018introduction} states that
the system achieves a fidelity of $1-\epsilon$ to the target state, provided that \begin{equation}\label{eqn:dhdtgmin}
\frac{|\langle\frac{dH}{dt}\rangle_{0,1}|}{g_{min}^2} \leq \epsilon \text{, where } g_{min} = \min_{0\leq t \leq T}E_1(t) - E_0(t) . \end{equation} Here $\langle\frac{dH}{dt}\rangle_{0,1}$ are the matrix elements of $dH/dt$ between the two corresponding eigen-states. $E_0(t)$ and $E_1(t)$ are the ground energy and the first excited energy of the system at time $t$. Given the $H(s)$ in
Eqn. (\ref{eqn:ctqw_aqc}), one might conclude that a factor of $ O(\sqrt[4]{1/N})$ significantly reduces the required time to achieve $1-\epsilon$ precision. This might be misleading as the $g_{min}$ of $H(s)$ also carries the same factor.
The second issue is that the catalyst $H_e$ provides power greater than a typical Yes$\slash$No oracle as it maps non-solution states to a solution state and a solution state to non-solution states. Provided that we initially start with a superposition state with amplitude of
$\sqrt{\frac{N-1}{N}}$ for a non-solution, it takes time of $O(1)$ for this catalyst to drive the initial (unbiased, equal amplitude) state to the solution state. In the following we will relax this constraint by using a normal oracle. For the rest of the paper, let us simply treat $\epsilon\ll 1$ as some small negligible constant.
\subsection{Modified CTQW-Inspired Adiabatic Search} \label{sect:m-qw-aqc}
\noindent In Eqn.(\ref{eqn:ctqw_aqc}), the following parameters were computed during the mapping \cite{wong2016irreconcilable}: \begin{itemize}
\item the scaling factor $ \sqrt[4]{\frac{s(1-s)}{4\epsilon^2N}}$\; of Hamiltonian\, $H_0$,
\item $H_e = 2i\sqrt{\frac{N-1}{N}}( \dyad{r}{\omega} - \dyad{\omega}{r})$, catalyst Hamiltonian
\item the coefficient function of $H_e$ as $\sqrt{s(1-s)}$. \end{itemize} In \cite{aharonov2008Adiabatic} the cost of the adiabatic algorithm was defined to be the dimensionless quantity (using $\hbar=1$) \begin{equation}\label{eqn:Adiabatic_cost}
cost = t_f \max_{s}||H(s)||, \end{equation} where $t_f$ is the running time. To prevent the cost from being manipulated to be arbitrarily small by changing the time units, or distorting the scaling of the algorithm by multiplying the Hamiltonians by some size-dependent factor as shown in the irreconcilability concern \cite{wong2016irreconcilable}, the norm of $H(s)$ should be fixed to some constant, such as 1. \\
\noindent
To address the irreconcilability issue, the scaling factor is dropped and the catalyst Hamiltonian $H_e$ is modified. Since $H_e = 2\sqrt{\frac{N-1}{N}} i XZ$ in the $\{\ket{\omega}, \ket{r}\}$ basis provides more power than a standard Oracle, for our modification we remove the imaginary number $i$ and the $X$ operator. The operator $Z$ alone behaves as a conventional ``Yes \slash No" oracle in the $\{\ket{\omega}, \ket{r}\}$ basis.
Let $M=2\sqrt{\frac{N-1}{N}}$ and choose the modified adiabatic path $H_m(s)$ as \begin{align}\label{eqn:ctqw_m_aqc} H_m(s) =&(1-s)H_0 + f_{z}(s)MZ + s H_f, \end{align} where $f_{z}(s)$ is our chosen $s$-dependent coefficient for catalyst $Z$. In addition to $f_{z}(s) = \sqrt{s(1-s)}$ that was used in \cite{wong2016irreconcilable}, functions that reach their maximum when $s=1/2$ are good candidates for $f_{z}(s)$, such as $f_{z}(s)=\frac{\sin(s \pi)}{2}$.
\black{The use of the factor $1/2$ on the sine function is to offset the magnitude $M$ to bound the norm of $H_e$ as described in Eqn. (\ref{eqn:Adiabatic_cost}).}
\section{Grover Search to Adiabatic Local Search Mapping}\label{sect:adpt-ags}
In this section we consider the mapping of Grover's algorithm to an adiabatic search. Given the initial driving Hamiltonian $H_0$ and the final Hamiltonian $H_f$ as \black{ \begin{equation}
H_0 =I - \dyad{\psi_0}{\psi_0}, \quad H_f =I - \dyad{\omega}{\omega}, \end{equation} where \begin{align} \label{eqn:HamiltoniansAGS} H_0 = \begin{pmatrix} \frac{N-1}{N} & -\frac{\sqrt{N-1}}{N} \\ -\frac{\sqrt{N-1}}{N} & \frac{1}{N} \\ \end{pmatrix}, H_f = \begin{pmatrix} 0 & 0 \\ 0 & 1\\ \end{pmatrix}, \end{align} } in the $\{\ket{\omega}, \ket{r}\}$ basis. The adiabatic path \cite{roland2002quantum, wong2016irreconcilable} in the $\{\ket{\omega}, \ket{r}\}$ basis is given by \begin{align}\label{eqn:grover_hs} H(s) &= (1-s)H_0 + sH_f \\ &= \begin{pmatrix} (1-s)\frac{N-1}{N} & -(1-s)\frac{\sqrt{N-1}}{N} \\ -(1-s)\frac{\sqrt{N-1}}{N} & 1 - (1-s)\frac{N-1}{N} \\ \end{pmatrix}. \end{align}
Instead of employing a linear evolution of $s(t)$, Eqn.(\ref{eqn:grover_hs}) adapts the evolution $ds/dt$ to the local adiabaticity condition \cite{roland2002quantum} such that \begin{equation}\label{eqn:dsdt}
|\frac{ds}{dt}| = \epsilon g^2(t) \end{equation} where $g(t)$ is the energy gap of the system at time $t$. The running time $t$ is then a function of schedule $s$ such that \begin{align} t(s) &= \frac{N}{2\epsilon \sqrt{N-1}}\Big\{\arctan(\sqrt{N-1}(2s-1)) \\ &+ \arctan(\sqrt{N-1})\Big\}. \end{align}
The relation between the schedule $s$ and the running time $t$ is shown in Figure \ref{fig:Adiabaticgrover_org}. It is clear that the system evolves quickly when the gap is large ($s$ away from $1/2$) and slowly when the gap is small ($s \simeq 1/2$) \cite{roland2002quantum}. In this example, the sluggish period is $s \in [0.4, 0.6]$. For completeness, we provide the formal proof of the closed form of the squared gap function $g^2(t)$ (second order in $s$) with respect to the schedule $s$ in Appendix \ref{sect:sTot}.
\begin{figure}
\caption{ Schedule $s$ in terms of time $t$ with $N=64$ in adiabatic local search, as observed in \cite{roland2002quantum}.}
\label{fig:Adiabaticgrover_org}
\end{figure}
\subsection{Adaptive Scheduling }\label{sect:qw-ags}
\noindent \black{For a fixed schedule of an adiabatic path, the schedule $s$ moves fast when the eigen-energy gap is large, and slowly when the gap is small. We desire to employ the catalyst Hamiltonians $H_e$ to amplify the eigen-energy gap during the ``slow down" period such that the total time to pass through the sluggish period is reduced ($s \in [0.4, 0.6]$ in Fig.(\ref{fig:Adiabaticgrover_org})).}
\subsubsection{Schedule Dependent Gap Function}\label{sect:gapmin}
In this section, we consider employing gap-dependent scheduling functions. Let $H_f$ be an arbitrary 2 by 2 Hermitian Hamiltonian. Let the time-dependent Hamiltonian $H(s)$ be \begin{equation}\label{time_dep_H} H(s) = (1-s) H_o + f_x(s) \sigma_x + f_z(s)\sigma_z + sH_f. \end{equation} Operators $\sigma_x$ and $\sigma_z$ are chosen as catalyst Hamiltonians. Let $H_o = \begin{bmatrix} a & c \\ c & b \\ \end{bmatrix}, H_f = \begin{bmatrix} p & r \\ r & q \\ \end{bmatrix} $ where $a, b, c, p, q, r$ are some given constants. The matrix form of the time-dependent Hamiltonian is given by \begin{equation} H(s) = \begin{bmatrix} (1-s)a +sp +f_z(s)& (1-s)c+sr+f_x(s) \\ (1-s)c+sr+f_x(s) & (1-s)b +sq-f_z(s) \\ \end{bmatrix} \end{equation} and the schedule-dependent gap can be analytically computed to yield \begin{align}\label{eqn:gminsqr} g^2(s) &= ((1-s)(a-b)+s(p-q) +2f_z(s))^2 \nonumber \\
&+ 4( (1-s)c+sr+f_x(s) )^2, \end{align} (see Appendix \ref{sect:simpleAmp} for a derivation).
By using Eqn. (\ref{eqn:dsdt}), the total running time $T_{strt}^{stp}$ from $s = s_{strt}$ to $s= s_{stp}$
is thus \begin{equation}\label{eqn:totalTimestrtstp} T_{s_{strt}}^{s_{stp}} = \int_{s_{strt}}^{s_{stp}} \frac{ds} {\epsilon g^2(s)} \end{equation} where $0 \leq s_{strt} \leq s_{stp} \leq 1$. \black{In brief, the time spent during a certain period of a schedule can be obtained by use of gap function. The gap function can be expressed via the entries of $H_0$, $H_e$, $H_f$, schedule $s$ and the coefficient functions of the catalyst Hamiltonians.}
\subsubsection{Determining the Sluggish Interval \\ for the Catalyst Hamiltonian}\label{sect:catrelease}
\noindent By using the condition $f'(s) = dt/ds = \frac{1}{\epsilon g^2(s)}$ (see Appendix \ref{sect:sTot}), the region where the gap rapidly decreases or increases corresponds to the sluggish period of $s$. That is the portion of the schedule $s$ where the catalyst should be employed. The region where
$|d^2f(s)/ds^2| \geq \delta_0$ is the sluggish period. \black{The threshold value $\delta_0=64$ was chosen because if we choose a threshold proportional to $N$, as $N$ increases exponentially, the quantity $d^2t/ds^2$ might never reach the $N$-dependent threshold within the adiabatic evolution schedule $0\le s\le 1$.} By using this threshold, the starting point $s_{strt}^{slug}$ and the stopping point $s_{stp}^{slug}$ used to mark the sluggish period can be identified. Using the example in \cite{roland2002quantum}, we can re-plot and get $t$ as a function of $s$ as $t = f(s)$ and $f'(s) = dt/ds$ in Figure \ref{fig:timeschedule_1} - \ref{fig:timeschedule_2} with $N=64$.
\begin{figure}
\caption{Time $t$ as a function of schedule $s$ for adiabatic local search with $N=64$.}
\label{fig:timeschedule_1}
\end{figure} \begin{figure}
\caption{$dt/ds$ for adiabatic local search with $N=64$.}
\label{fig:timeschedule_2}
\end{figure}
\subsubsection{Catalyst Coefficient Functions}\label{sect:catcoefficient}
\noindent \black{As discussed in section \ref{sect:m-qw-aqc}, we are interested in the $H_e = Z$ case in Eqn.(\ref{eqn:ctqw_m_aqc}) and its coefficient function $f_z(s)$. Three coefficient functions of the catalyst Hamiltonian $Z$ are proposed as follows} \begin{align}\label{adapt_impv_func} f_{z}^{sine} (s) &= \sin (((s-s_{strt}^{slug})* \pi)/(s_{stp}^{slug} - s_{strt}^{slug})), \\ f_{z}^{ss} (s) & = (s-s_{strt}^{slug})(s_{stp}^{slug} -s), \nonumber \\ f_{z}^{grid} (s) &= a *f_{z}^{sine} (s) + b*(f_{z}^{sine} (s) )^2 \nonumber \end{align} where $ 0 \leq a, b \leq 1$ under the constraint that $a^2+b^2=1$. In the grid search $a$ is increased from 0 to 1 by $0.1$ in each iteration. From the 10 pairs of $(a,b)$, we find the values of $a, b$ that give the shortest sluggish time interval.
\section{Experiment \& Result }\label{sect:experiment}
For our simulations we used (Wolfram) Mathematica (version 12.3 run on a Linux Ubuntu 20.04 LTS laptop). The code is available upon request.
The running time is based on Eqn.(\ref{eqn:totalTimestrtstp}). The size $N$ (number of nodes) ranges from $2^5, 2^6, \cdots$ to $2^{25}$. We observe the corresponding running time and sluggish time for each of the proposed models. \black{The result of the original adiabatic local search serves as the baseline for comparison, which used $N=64$ \cite{roland2002quantum}.} In this work, we generalize the setting for any arbitrary size $N$.
\black{Given an arbitrary complete graph of size $N$ with coupling factor $1/N$, one can compute the entries in the reduced Hamiltonian for $H_0$ and $H_f$ in the $\{\ket{\omega}, \ket{r}\}$ basis. The values of variables $a, b, c, p, q$ and $r$ as discussed in section \ref{sect:gapmin} can be obtained from Eqn.(\ref{eqn:HamiltoniansCTQW}) for the CTQW case, and from Eqn.(\ref{eqn:HamiltoniansAGS}) for the adiabatic local search. It is worth noticing that the ground state energy is $-1$ in the CTQW case, but is $0$ in the adiabatic local search case. Based on the adiabatic path Eqn.(\ref{time_dep_H}), and the gap function in Eqn. (\ref{eqn:gminsqr}) with given schedule $s$, coefficient function $f_z(s)$ for $\sigma_z$, we perform the simulation with the running time computed from Eqn.(\ref{eqn:totalTimestrtstp}).}
\subsection{Modified CTQW-Inspired Adiabatic Search Simulation} \label{sect:m-qw-aqc-sim}
\noindent This experiment aimed to demonstrate that the modified adiabatic paths addressing the irreconcilability issues remain optimal. The three proposed modifications we explored are as follows:
\begin{itemize}
\item \black{$H_{org}(s)$ takes Eqn. (\ref{eqn:ctqw_aqc}) and drops the scaling factor as explained in section \ref{sect:m-qw-aqc}.} The adiabatic path is
$H_{org}(s) = (1-s)H_0 + \sqrt{s(1-s)}H_e + s H_f$
\item $H_{m1}(s)$ replaces the computed catalyst Hamiltonian $H_e$ with an ordinary $Z$ oracle operator and keeps the magnitude $M$. This was used to address the constant gap $H_e$ irreconcilability issue. We have
\newline $H_{m1}(s) = (1-s)H_0 + \sqrt{s(1-s)}MZ + s H_f$
\item $H_{m2}(s)$ uses $\frac{\sin(s \pi)}{2}$ as the coefficient function for the catalyst Hamiltonian $Z$. The adiabatic path is
$H_{m2}(s) = (1-s)H_0 + \frac{\sin(s \pi)}{2}MZ + s H_f$ \end{itemize}
\black{For the above three models, simulations were run on Hamiltonians of size $N \in [2^5, 2^{6}, \cdots, 2^{25}]$. In the following figures, the abscissa is $\log_2 N$ while the ordinate is the required total running time $T$. The time is computed based on Eqn.(\ref{eqn:totalTimestrtstp}). As the dimension of the Hamiltonian increases, the difference in running times for the three models considered is magnified.}
\begin{figure}
\caption{Case when $N \in [2^5, 2^{25}]$ and the running times of $H_{org}(s)$ (orange), $H_{m1}(s)$ (red), and $H_{m2}(s)$ (green) with the original adiabatic local search (blue) serving as the baseline. }
\label{fig:ctqw-aqc-m-comparison-N25}
\end{figure}
\noindent The simulation results are shown in Figure \ref{fig:ctqw-aqc-m-comparison-N25}.
\black{It is clear to see that $H_{org}$ is a constant time scheme as it does not scale as the size $N$ increases. This indicates the original catalyst Hamiltonian $H_e = MXZ$ in $H_{org}(s)$ indeed is a constant gap Hamiltonian. This also shows the irreconcilability issue as suggested in \cite{wong2016irreconcilable}.}
From the simulations we can conclude that both $H_{m1}(s), H_{m2}(s)$ perform optimally with respect to running time, \black{namely $T\sim\mathcal{O}(\sqrt{N})$}, similar to that of the original adiabatic local search but with a minor constant factor which can be ignored in the Big O notation. \black{As the simulation suggests, both modified CTQW-inspired approaches outperform the original adiabatic local search. When $N \leq 2^{21}$, $H_{m2}(s)$ outperforms $H_{m1}(s)$. When the problem size $N$ is larger than $2^{21}$, $H_{m1}(s)$ is a better choice over $H_{m2}(s)$.}
\subsection{Adaptive Adiabatic Local Search Simulation With Various Coefficient Functions} \label{sect:m-gs-aqc-sim}
\black{In the previous section \ref{sect:m-qw-aqc-sim}, the proposed modifications were shown to be optimal, \black{in the sense that $T\sim\mathcal{O}(\sqrt{N})$ up to a minor constant factor}. For further improvement, the adaptive scheduling scheme is applied. The adiabatic path to be explored is therefore \begin{equation}
H_{adapt}(s) = (1-s)H_0 + f(s)Z + s H_f
\end{equation}
where $f(s) \in [f_{z}^{sine}, f_{z}^{ss}, f_{z}^{grid}]$ as seen in Eqn. (\ref{adapt_impv_func}). The catalyst Hamiltonian $Z$ operator is only employed during the sluggish period and hence $f(s) = 0 $ when $s \notin [s_{strt}^{slug}, s_{stp}^{slug}]$. The $H_0$ and $H_f$ are based on Eqn. (\ref{eqn:HamiltoniansAGS}).} As the catalyst is only employed within the sluggish period, to compare the performance of each proposed modification, one only needs to compute the running time within this period.
\begin{figure}
\caption{Case when $N \in [2^5, 2^{25}]$ and the time spent during the sluggish period for adiabatic paths with
($f_{z}^{ss}, f_{z}^{sine}, f_{z}^{grid}$) coefficient functions where the original adiabatic
local search serves as the baseline.}
\label{fig:ags-m-comparison-small_1}
\end{figure}
\noindent In Figure \ref{fig:ags-m-comparison-small_1}, $f_{z}^{ss}$ provides the smallest reduction in sluggish time while $f_{z}^{sine}$ and $f_{z}^{grid}$ provide significant improvements. The difference in the runtimes becomes significant for $N\ge 2^{15}$. \begin{figure}
\caption{Case when $N \in [2^5, 2^{25}]$ and the time spent during the sluggish period for adiabatic paths with
($f_{z}^{sine}, f_{z}^{grid}$) coefficient functions where the original adiabatic
local search serves as the baseline.}
\label{fig:ags-m-comparison-small_2}
\end{figure}
\noindent
In Figure \ref{fig:ags-m-comparison-small_2}, both $f_{z}^{sine}$ and $f_{z}^{grid}$ reduce the sluggish time by over $75\%$ in comparison to the original adiabatic local search when $N$ reaches $2^{25}$.
$f_{z}^{sine}$ gradually outperforms the original adiabatic local search after $N=2^{10}$ and remains almost as good as $f_{z}^{grid}$ till $N=2^{23}$. When $N =2^{25}$, the sluggish time of $f_{z}^{sine}$ is only twice that of $f_{z}^{grid}$. In general, the grid search is a costly procedure as we have to run 10 pairs of $(a, b)$ for slightly different $H(s)$ for each value of $N=2^n$. If the time reduction of sluggish period is not greater than $90\%$ of the original, it might be a better choice to use $f_{z}^{sine}$. For the near term it might be more beneficial to use $f_{z}^{sine}$ model, instead of the grid search model $f_{z}^{grid}$.
\section{Conclusion}\label{sect:conclude}
In this work, we investigated different Hamiltonians for resolving the irreconcilability issue \cite{wong2016irreconcilable} when mapping the CTQW search algorithm to AQC. We modified the time-dependent Hamiltonian by (1) removing the original scaling CTQW factor $\sqrt[4]{\frac{s(1-s)}{4\epsilon^2N}}$, and (2) replacing $i\,X\,Z\to Z$ in the original catalyst $H_e$ Hamiltonian obtained from mapping CTQW to AQC. These modifications were made in order to resolve the irreconcilability issue. We further optimized the schedule $s$ of the CTQW-inspired adiabatic path by an adaptive scheduling procedure.
The modified CTQW-inspired adiabatic search simulation experiment demonstrates that indeed the $H_e$ without any modification leads to a constant time in the total running time, regardless of the search space size $N$. This result echoes the irreconcilability issue stated in \cite{wong2016irreconcilable}. On the other hand, the modified CTQW-inspired adiabatic path with catalyst Hamiltonian coefficient $\frac{\sin({s \pi})}{2}$ behaves similarly to the behavior of the optimal adiabatic local search. \black{Furthermore, the modifications are optimal and outperform the original adiabatic local search.}
Lastly, in the adaptive adiabatic local search simulation with various coefficient functions experiment, we further investigated how to \black{reduce the time wasted in the sluggish period of an adiabatic local search path.}
\black{As our numerical experiments show, the functions $f_z^{sine}(s)$ and $f_z^{grid}(s)$ provide significant improvements and both outperform the original adiabatic local search.} Even though the grid search $f_z^{grid}(s)$ approach could have further reduced the length of the sluggish (``slow down") interval, the benefit was offset by the additional cost incurred from implementation over that of the other two methods.
\begin{acknowledgments} C.~C. gratefully acknowledges the support from the seed grant funding (917035-13) from the State University of New York Polytechnic Institute and the support from the Air Force Research Laboratory Summer Faculty Fellowship Program (AFSFFP). PMA would like to acknowledge support of this work from the Air Force Office of Scientific Research (AFOSR). Any opinions, findings and conclusions or recommendations expressed in this material are those of the author(s) and do not necessarily reflect the views of Air Force Research Laboratory. The appearance of external hyperlinks does not constitute endorsement by the United States Department of Defense (DoD) of the linked websites, or the information, products, or services contained therein. The DoD does not exercise any editorial, security, or other control over the information you may find at these locations.
\end{acknowledgments}
\appendix
\section{Time Integration of Adiabatic Local Search} \label{sect:sTot}
Given a spectral gap polynomial of the second order, that is \begin{equation} g^2(s) = A (s^2 + bs + c) \end{equation} where $s$ is the adiabatic schedule, and \footnote{this is the same as $g^2(t)$ as for each $t$ there is only one corresponding $s$} $\frac{ds}{dt} = \epsilon g^2(s)$, by integration on $t$ one obtains \begin{eqnarray}
T = \int dt &=& \int_{0}^{1} \frac{ds} {\epsilon g^2(s)} = \frac{1}{\epsilon A} \int_{0}^{1} \frac{ds}{(s^2 + bs + c)}. \end{eqnarray} \noindent (I) Case $b^2 - 4c > 0$: Let $r_{\pm} = \frac{-b\pm \sqrt{b^2 -4c}}{2}$. \begin{equation} \int_{0}^{1} \frac{ds}{(s^2 + bs + c)} = \frac{1}{r_+ - r_-} \int_{0}^{1} (\frac{1}{s-r_+}- \frac{1}{s-r_-})ds \end{equation}
since $\int \frac{1}{s-a} ds = \ln |s-a|$. Thus we have \begin{align}
T &= \frac{1}{\epsilon A (r_+ - r_-)} \ln \Big |\frac{s-r_+}{s-r_-}\Big|_0^1, \\
t &= \frac{1}{\epsilon A (r_+ - r_-)} (\ln \Big |\frac{s-r_+}{s-r_-}\Big| - \ln \Big |\frac{r_+}{r_-}\Big | ). \end{align} \\ (II) Case $b^2 -4c = 0$: \begin{equation} \int_{0}^{1} \frac{ds}{(s^2 + bs + c)} = \int_{0}^{1} \frac{1}{(s+b/2)^2}ds \end{equation} since $\int (s-a)^{-2} ds =-(s-a)^{-1}$, hence \begin{align}
T&= \frac{-1}{\epsilon A}{\frac{1}{(s+(b/2))}\Big|_0^1} \\ t&= \frac{1}{\epsilon A}\Big ({\frac{s}{(b/2)(s+(b/2))}}\Big) \end{align} (III) Case $b^2 -4c <0$: \begin{align} \int_{0}^{1} \frac{ds}{(s^2 + bs + c)} &=\int_{0}^{1} \frac{1}{(s+b/2)^2 + \frac{4c-b^2}{4}}ds \\ &=\int_{b/2}^{1+(b/2)} \frac{1}{x^2 + (\sqrt{\frac{4c-b^2}{4}})^2}dx \end{align} since $\int\frac{1}{a^2 +x^2} dx =\frac{1}{a} \arctan \frac{x}{a}$. With $a =\sqrt{\frac{4c-b^2}{4}}$, we obtain \begin{align}
T&=\frac{1}{\epsilon A}(\frac{1}{a})(\arctan \frac{x}{a})\Big|_{b/2}^{1+(b/2)} \\ t&=\frac{1}{\epsilon A}(\frac{1}{a})(\arctan \frac{s+(b/2)}{a} -\arctan \frac{(b/2)}{a}) \end{align} \newline
\section{Energy Gap} \label{sect:simpleAmp}
Given an arbitrary 2 by 2 non-negative-entry Hermitian matrix $H$ as \begin{equation} H = \begin{bmatrix} \alpha & \gamma \\ \gamma & \beta \\ \end{bmatrix}, \end{equation} via computing the determinant and eigenvalues, the energy gap $\Delta E$ is \begin{equation}
\Delta E = | \lambda_+ - \lambda_-| = \sqrt{(\alpha - \beta )^2 + 4\gamma^2}. \end{equation}
Simply from the viewpoint of the energy gap, as $|\gamma|$ increases or the difference $|\alpha -\beta|$ between the diagonal entries increases, the energy gap increases. The increase of $|\gamma|$ can be
adapted by $\sigma_x$ while $|\alpha -\beta|$ can be increased by $\sigma_z$. They should be good candidates for the catalyst perturbation in the AQC path. Similarly, if the Hamiltonian has an imaginary part in the off diagonal entries, \begin{align} H & = \begin{bmatrix} \alpha & \gamma - di \\ \gamma+di & \beta \\ \end{bmatrix} \\
\Delta E &= | \lambda_+ - \lambda_-| = \sqrt{(\alpha -\beta)^2 + 4(\gamma^2+d^2)}. \end{align} The Hamiltonian $H$ (with no imaginary entries) can be expressed in terms of Pauli matrices as \begin{align} H &= \frac{\alpha + \beta}{2} \mathbb{I} + \frac{\Delta E}{2}((\frac{2\gamma}{\Delta E})\sigma_x + ((\frac{\alpha -\beta}{2})(\frac{2}{\Delta E}) \sigma_z)) \\
&= \frac{\alpha + \beta}{2} \mathbb{I} + \frac{\Delta E}{2} A \end{align} such that, by use of power of Pauli matrices, \begin{equation} e^{-iHt} = \cos(\frac{\Delta E t}{2})\mathbb{I} -i \sin(\frac{\Delta E t}{2})A. \end{equation}
\begin{thebibliography}{38} \makeatletter \providecommand \@ifxundefined [1]{
\@ifx{#1\undefined} } \providecommand \@ifnum [1]{
\ifnum #1\expandafter \@firstoftwo
\else \expandafter \@secondoftwo
\fi } \providecommand \@ifx [1]{
\ifx #1\expandafter \@firstoftwo
\else \expandafter \@secondoftwo
\fi } \providecommand \natexlab [1]{#1} \providecommand \enquote [1]{``#1''} \providecommand \bibnamefont [1]{#1} \providecommand \bibfnamefont [1]{#1} \providecommand \citenamefont [1]{#1} \providecommand \href@noop [0]{\@secondoftwo} \providecommand \href [0]{\begingroup \@sanitize@url \@href} \providecommand \@href[1]{\@@startlink{#1}\@@href} \providecommand \@@href[1]{\endgroup#1\@@endlink} \providecommand \@sanitize@url [0]{\catcode `\\12\catcode `\$12\catcode
`\&12\catcode `\#12\catcode `\^12\catcode `\_12\catcode `\%12\relax} \providecommand \@@startlink[1]{} \providecommand \@@endlink[0]{} \providecommand \url [0]{\begingroup\@sanitize@url \@url } \providecommand \@url [1]{\endgroup\@href {#1}{\urlprefix }} \providecommand \urlprefix [0]{URL } \providecommand \Eprint [0]{\href } \providecommand \doibase [0]{https://doi.org/} \providecommand \selectlanguage [0]{\@gobble} \providecommand \bibinfo [0]{\@secondoftwo} \providecommand \bibfield [0]{\@secondoftwo} \providecommand \translation [1]{[#1]} \providecommand \BibitemOpen [0]{} \providecommand \bibitemStop [0]{} \providecommand \bibitemNoStop [0]{.\EOS\space} \providecommand \EOS [0]{\spacefactor3000\relax} \providecommand \BibitemShut [1]{\csname bibitem#1\endcsname} \let\auto@bib@innerbib\@empty
\bibitem [{\citenamefont {Shor}(1994)}]{shor1994algorithms}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {P.~W.}\ \bibnamefont
{Shor}},\ }\bibfield {title} {\bibinfo {title} {Algorithms for quantum
computation: Discrete logarithms and factoring},\ }in\ \href@noop {} {\emph
{\bibinfo {booktitle} {Foundations of Computer Science, 1994 Proceedings.,
35th Annual Symposium on}}}\ (\bibinfo {organization} {IEEE},\ \bibinfo
{year} {1994})\ pp.\ \bibinfo {pages} {124--134}\BibitemShut {NoStop} \bibitem [{\citenamefont {Grover}(1996)}]{grover1996fast}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {L.~K.}\ \bibnamefont
{Grover}},\ }\bibfield {title} {\bibinfo {title} {A fast quantum mechanical
algorithm for database search},\ }in\ \href@noop {} {\emph {\bibinfo
{booktitle} {Proceedings of the twenty-eighth annual ACM symposium on Theory
of computing}}}\ (\bibinfo {organization} {ACM},\ \bibinfo {year} {1996})\
pp.\ \bibinfo {pages} {212--219}\BibitemShut {NoStop} \bibitem [{\citenamefont {Farhi}\ and\ \citenamefont
{Gutmann}(1998)}]{farhi1998quantum}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {E.}~\bibnamefont
{Farhi}}\ and\ \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont
{Gutmann}},\ }\bibfield {title} {\bibinfo {title} {Quantum computation and
decision trees},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Physical Review A}\ }\textbf {\bibinfo {volume} {58}},\ \bibinfo {pages}
{915} (\bibinfo {year} {1998})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Kempe}(2003)}]{kempe2003quantum}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont
{Kempe}},\ }\bibfield {title} {\bibinfo {title} {Quantum random walks: an
introductory overview},\ }\href@noop {} {\bibfield {journal} {\bibinfo
{journal} {Contemporary Physics}\ }\textbf {\bibinfo {volume} {44}},\
\bibinfo {pages} {307} (\bibinfo {year} {2003})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Childs}(2009)}]{childs2009universal}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {A.~M.}\ \bibnamefont
{Childs}},\ }\bibfield {title} {\bibinfo {title} {Universal computation by
quantum walk},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Physical review letters}\ }\textbf {\bibinfo {volume} {102}},\ \bibinfo
{pages} {180501} (\bibinfo {year} {2009})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Lovett}\ \emph {et~al.}(2010)\citenamefont {Lovett},
\citenamefont {Cooper}, \citenamefont {Everitt}, \citenamefont {Trevers},\
and\ \citenamefont {Kendon}}]{lovett2010universal}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {N.~B.}\ \bibnamefont
{Lovett}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Cooper}},
\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Everitt}}, \bibinfo
{author} {\bibfnamefont {M.}~\bibnamefont {Trevers}},\ and\ \bibinfo {author}
{\bibfnamefont {V.}~\bibnamefont {Kendon}},\ }\bibfield {title} {\bibinfo
{title} {Universal quantum computation using the discrete-time quantum
walk},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Physical
Review A}\ }\textbf {\bibinfo {volume} {81}},\ \bibinfo {pages} {042330}
(\bibinfo {year} {2010})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Qiang}\ \emph {et~al.}(2016)\citenamefont {Qiang},
\citenamefont {Loke}, \citenamefont {Montanaro}, \citenamefont
{Aungskunsiri}, \citenamefont {Zhou}, \citenamefont {O’Brien},
\citenamefont {Wang},\ and\ \citenamefont {Matthews}}]{qiang2016efficient}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {X.}~\bibnamefont
{Qiang}}, \bibinfo {author} {\bibfnamefont {T.}~\bibnamefont {Loke}},
\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Montanaro}}, \bibinfo
{author} {\bibfnamefont {K.}~\bibnamefont {Aungskunsiri}}, \bibinfo {author}
{\bibfnamefont {X.}~\bibnamefont {Zhou}}, \bibinfo {author} {\bibfnamefont
{J.~L.}\ \bibnamefont {O’Brien}}, \bibinfo {author} {\bibfnamefont {J.~B.}\
\bibnamefont {Wang}},\ and\ \bibinfo {author} {\bibfnamefont {J.~C.}\
\bibnamefont {Matthews}},\ }\bibfield {title} {\bibinfo {title} {Efficient
quantum walk on a quantum processor},\ }\href@noop {} {\bibfield {journal}
{\bibinfo {journal} {Nature communications}\ }\textbf {\bibinfo {volume}
{7}},\ \bibinfo {pages} {1} (\bibinfo {year} {2016})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Caruso}\ \emph {et~al.}(2009)\citenamefont {Caruso},
\citenamefont {Chin}, \citenamefont {Datta}, \citenamefont {Huelga},\ and\
\citenamefont {Plenio}}]{caruso2009highly}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {F.}~\bibnamefont
{Caruso}}, \bibinfo {author} {\bibfnamefont {A.~W.}\ \bibnamefont {Chin}},
\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Datta}}, \bibinfo
{author} {\bibfnamefont {S.~F.}\ \bibnamefont {Huelga}},\ and\ \bibinfo
{author} {\bibfnamefont {M.~B.}\ \bibnamefont {Plenio}},\ }\bibfield {title}
{\bibinfo {title} {Highly efficient energy excitation transfer in
light-harvesting complexes: The fundamental role of noise-assisted
transport},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {The
Journal of Chemical Physics}\ }\textbf {\bibinfo {volume} {131}},\ \bibinfo
{pages} {09B612} (\bibinfo {year} {2009})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Mohseni}\ \emph {et~al.}(2008)\citenamefont
{Mohseni}, \citenamefont {Rebentrost}, \citenamefont {Lloyd},\ and\
\citenamefont {Aspuru-Guzik}}]{mohseni2008environment}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont
{Mohseni}}, \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Rebentrost}},
\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Lloyd}},\ and\ \bibinfo
{author} {\bibfnamefont {A.}~\bibnamefont {Aspuru-Guzik}},\ }\bibfield
{title} {\bibinfo {title} {Environment-assisted quantum walks in
photosynthetic energy transfer},\ }\href@noop {} {\bibfield {journal}
{\bibinfo {journal} {The Journal of chemical physics}\ }\textbf {\bibinfo
{volume} {129}},\ \bibinfo {pages} {11B603} (\bibinfo {year}
{2008})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Rebentrost}\ \emph {et~al.}(2009)\citenamefont
{Rebentrost}, \citenamefont {Mohseni}, \citenamefont {Kassal}, \citenamefont
{Lloyd},\ and\ \citenamefont {Aspuru-Guzik}}]{rebentrost2009environment}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {P.}~\bibnamefont
{Rebentrost}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Mohseni}},
\bibinfo {author} {\bibfnamefont {I.}~\bibnamefont {Kassal}}, \bibinfo
{author} {\bibfnamefont {S.}~\bibnamefont {Lloyd}},\ and\ \bibinfo {author}
{\bibfnamefont {A.}~\bibnamefont {Aspuru-Guzik}},\ }\bibfield {title}
{\bibinfo {title} {Environment-assisted quantum transport},\ }\href@noop {}
{\bibfield {journal} {\bibinfo {journal} {New Journal of Physics}\ }\textbf
{\bibinfo {volume} {11}},\ \bibinfo {pages} {033003} (\bibinfo {year}
{2009})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Plenio}\ and\ \citenamefont
{Huelga}(2008)}]{plenio2008dephasing}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {M.~B.}\ \bibnamefont
{Plenio}}\ and\ \bibinfo {author} {\bibfnamefont {S.~F.}\ \bibnamefont
{Huelga}},\ }\bibfield {title} {\bibinfo {title} {Dephasing-assisted
transport: quantum networks and biomolecules},\ }\href@noop {} {\bibfield
{journal} {\bibinfo {journal} {New Journal of Physics}\ }\textbf {\bibinfo
{volume} {10}},\ \bibinfo {pages} {113019} (\bibinfo {year}
{2008})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Bose}(2003)}]{bose2003quantum}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont
{Bose}},\ }\bibfield {title} {\bibinfo {title} {Quantum communication
through an unmodulated spin chain},\ }\href@noop {} {\bibfield {journal}
{\bibinfo {journal} {Physical review letters}\ }\textbf {\bibinfo {volume}
{91}},\ \bibinfo {pages} {207901} (\bibinfo {year} {2003})}\BibitemShut
{NoStop} \bibitem [{\citenamefont {Kay}(2010)}]{kay2010perfect}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont
{Kay}},\ }\bibfield {title} {\bibinfo {title} {Perfect, efficient, state
transfer and its application as a constructive tool},\ }\href@noop {}
{\bibfield {journal} {\bibinfo {journal} {International Journal of Quantum
Information}\ }\textbf {\bibinfo {volume} {8}},\ \bibinfo {pages} {641}
(\bibinfo {year} {2010})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Omar}\ \emph {et~al.}(2019)\citenamefont {Omar},
\citenamefont {Moutinho}, \citenamefont {Melo}, \citenamefont {Coutinho},
\citenamefont {Kovacs},\ and\ \citenamefont {Barabasi}}]{omar2019quantum}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont
{Omar}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Moutinho}},
\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Melo}}, \bibinfo {author}
{\bibfnamefont {B.}~\bibnamefont {Coutinho}}, \bibinfo {author}
{\bibfnamefont {I.}~\bibnamefont {Kovacs}},\ and\ \bibinfo {author}
{\bibfnamefont {A.}~\bibnamefont {Barabasi}},\ }\bibfield {title} {\bibinfo
{title} {Quantum link prediction in complex networks},\ }\href@noop {}
{\bibfield {journal} {\bibinfo {journal} {APS}\ }\textbf {\bibinfo {volume}
{2019}},\ \bibinfo {pages} {R28} (\bibinfo {year} {2019})}\BibitemShut
{NoStop} \bibitem [{\citenamefont {Chakraborty}\ \emph {et~al.}(2016)\citenamefont
{Chakraborty}, \citenamefont {Novo}, \citenamefont {Ambainis},\ and\
\citenamefont {Omar}}]{chakraborty2016spatial}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont
{Chakraborty}}, \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Novo}},
\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Ambainis}},\ and\
\bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Omar}},\ }\bibfield
{title} {\bibinfo {title} {Spatial search by quantum walk is optimal for
almost all graphs},\ }\href@noop {} {\bibfield {journal} {\bibinfo
{journal} {Physical review letters}\ }\textbf {\bibinfo {volume} {116}},\
\bibinfo {pages} {100501} (\bibinfo {year} {2016})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Shor}(1998)}]{shor1998quantum}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {P.~W.}\ \bibnamefont
{Shor}},\ }\bibfield {title} {\bibinfo {title} {Quantum computing},\
}\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Documenta
Mathematica}\ }\textbf {\bibinfo {volume} {1}},\ \bibinfo {pages} {1}
(\bibinfo {year} {1998})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Yao}(1993)}]{yao1993quantum}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {A.~C.-C.}\
\bibnamefont {Yao}},\ }\bibfield {title} {\bibinfo {title} {Quantum circuit
complexity},\ }in\ \href@noop {} {\emph {\bibinfo {booktitle} {Proceedings of
1993 IEEE 34th Annual Foundations of Computer Science}}}\ (\bibinfo
{organization} {IEEE},\ \bibinfo {year} {1993})\ pp.\ \bibinfo {pages}
{352--361}\BibitemShut {NoStop} \bibitem [{\citenamefont {Jordan}\ \emph {et~al.}(2012)\citenamefont {Jordan},
\citenamefont {Lee},\ and\ \citenamefont {Preskill}}]{jordan2012quantum}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {S.~P.}\ \bibnamefont
{Jordan}}, \bibinfo {author} {\bibfnamefont {K.~S.}\ \bibnamefont {Lee}},\
and\ \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Preskill}},\
}\bibfield {title} {\bibinfo {title} {Quantum algorithms for quantum field
theories},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Science}\ }\textbf {\bibinfo {volume} {336}},\ \bibinfo {pages} {1130}
(\bibinfo {year} {2012})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Nayak}\ \emph {et~al.}(2008)\citenamefont {Nayak},
\citenamefont {Simon}, \citenamefont {Stern}, \citenamefont {Freedman},\ and\
\citenamefont {Sarma}}]{nayak2008non}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {C.}~\bibnamefont
{Nayak}}, \bibinfo {author} {\bibfnamefont {S.~H.}\ \bibnamefont {Simon}},
\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Stern}}, \bibinfo
{author} {\bibfnamefont {M.}~\bibnamefont {Freedman}},\ and\ \bibinfo
{author} {\bibfnamefont {S.~D.}\ \bibnamefont {Sarma}},\ }\bibfield {title}
{\bibinfo {title} {Non-abelian anyons and topological quantum computation},\
}\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Reviews of Modern
Physics}\ }\textbf {\bibinfo {volume} {80}},\ \bibinfo {pages} {1083}
(\bibinfo {year} {2008})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Mizel}\ \emph {et~al.}(2007)\citenamefont {Mizel},
\citenamefont {Lidar},\ and\ \citenamefont {Mitchell}}]{mizel2007simple}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont
{Mizel}}, \bibinfo {author} {\bibfnamefont {D.~A.}\ \bibnamefont {Lidar}},\
and\ \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Mitchell}},\
}\bibfield {title} {\bibinfo {title} {Simple proof of equivalence between
adiabatic quantum computation and the circuit model},\ }\href@noop {}
{\bibfield {journal} {\bibinfo {journal} {Physical review letters}\
}\textbf {\bibinfo {volume} {99}},\ \bibinfo {pages} {070502} (\bibinfo
{year} {2007})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Chiang}\ and\ \citenamefont
{Hsieh}(2017)}]{chiang2017resonant}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {C.-F.}\ \bibnamefont
{Chiang}}\ and\ \bibinfo {author} {\bibfnamefont {C.-Y.}\ \bibnamefont
{Hsieh}},\ }\bibfield {title} {\bibinfo {title} {Resonant transition-based
quantum computation},\ }\href@noop {} {\bibfield {journal} {\bibinfo
{journal} {Quantum Information Processing}\ }\textbf {\bibinfo {volume}
{16}},\ \bibinfo {pages} {120} (\bibinfo {year} {2017})}\BibitemShut
{NoStop} \bibitem [{\citenamefont {Morimae}\ and\ \citenamefont
{Fujii}(2012)}]{morimae2012blind}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {T.}~\bibnamefont
{Morimae}}\ and\ \bibinfo {author} {\bibfnamefont {K.}~\bibnamefont
{Fujii}},\ }\bibfield {title} {\bibinfo {title} {Blind topological
measurement-based quantum computation},\ }\href@noop {} {\bibfield {journal}
{\bibinfo {journal} {Nature communications}\ }\textbf {\bibinfo {volume}
{3}},\ \bibinfo {pages} {1036} (\bibinfo {year} {2012})}\BibitemShut
{NoStop} \bibitem [{\citenamefont {Gross}\ and\ \citenamefont
{Eisert}(2007)}]{gross2007novel}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {D.}~\bibnamefont
{Gross}}\ and\ \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Eisert}},\
}\bibfield {title} {\bibinfo {title} {Novel schemes for measurement-based
quantum computation},\ }\href@noop {} {\bibfield {journal} {\bibinfo
{journal} {Physical review letters}\ }\textbf {\bibinfo {volume} {98}},\
\bibinfo {pages} {220503} (\bibinfo {year} {2007})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Briegel}\ \emph {et~al.}(2009)\citenamefont
{Briegel}, \citenamefont {Browne}, \citenamefont {D{\"u}r}, \citenamefont
{Raussendorf},\ and\ \citenamefont {Van~den Nest}}]{briegel2009measurement}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {H.~J.}\ \bibnamefont
{Briegel}}, \bibinfo {author} {\bibfnamefont {D.~E.}\ \bibnamefont {Browne}},
\bibinfo {author} {\bibfnamefont {W.}~\bibnamefont {D{\"u}r}}, \bibinfo
{author} {\bibfnamefont {R.}~\bibnamefont {Raussendorf}},\ and\ \bibinfo
{author} {\bibfnamefont {M.}~\bibnamefont {Van~den Nest}},\ }\bibfield
{title} {\bibinfo {title} {Measurement-based quantum computation},\
}\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Nature Physics}\
}\textbf {\bibinfo {volume} {5}},\ \bibinfo {pages} {19} (\bibinfo {year}
{2009})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Raussendorf}\ \emph {et~al.}(2003)\citenamefont
{Raussendorf}, \citenamefont {Browne},\ and\ \citenamefont
{Briegel}}]{raussendorf2003measurement}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {R.}~\bibnamefont
{Raussendorf}}, \bibinfo {author} {\bibfnamefont {D.~E.}\ \bibnamefont
{Browne}},\ and\ \bibinfo {author} {\bibfnamefont {H.~J.}\ \bibnamefont
{Briegel}},\ }\bibfield {title} {\bibinfo {title} {Measurement-based quantum
computation on cluster states},\ }\href@noop {} {\bibfield {journal}
{\bibinfo {journal} {Physical review A}\ }\textbf {\bibinfo {volume} {68}},\
\bibinfo {pages} {022312} (\bibinfo {year} {2003})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Cutugno}\ \emph {et~al.}(2022)\citenamefont
{Cutugno}, \citenamefont {Giani}, \citenamefont {Alsing}, \citenamefont
{Wessing},\ and\ \citenamefont {Schnore}}]{Cutugno:2022}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont
{Cutugno}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Giani}},
\bibinfo {author} {\bibfnamefont {P.~M.}\ \bibnamefont {Alsing}}, \bibinfo
{author} {\bibfnamefont {L.}~\bibnamefont {Wessing}},\ and\ \bibinfo {author}
{\bibfnamefont {S.}~\bibnamefont {Schnore}},\ }\bibfield {title} {\bibinfo
{title} {Quantum computing approaches for mission covering optimization},\
}\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Algorithms}\
}\textbf {\bibinfo {volume} {15}},\ \bibinfo {pages} {963} (\bibinfo {year}
{2022})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Wong}\ and\ \citenamefont
{Meyer}(2016)}]{wong2016irreconcilable}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {T.~G.}\ \bibnamefont
{Wong}}\ and\ \bibinfo {author} {\bibfnamefont {D.~A.}\ \bibnamefont
{Meyer}},\ }\bibfield {title} {\bibinfo {title} {Irreconcilable difference
between quantum walks and adiabatic quantum computing},\ }\href@noop {}
{\bibfield {journal} {\bibinfo {journal} {Physical Review A}\ }\textbf
{\bibinfo {volume} {93}},\ \bibinfo {pages} {062313} (\bibinfo {year}
{2016})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Roland}\ and\ \citenamefont
{Cerf}(2002)}]{roland2002quantum}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont
{Roland}}\ and\ \bibinfo {author} {\bibfnamefont {N.~J.}\ \bibnamefont
{Cerf}},\ }\bibfield {title} {\bibinfo {title} {Quantum search by local
adiabatic evolution},\ }\href@noop {} {\bibfield {journal} {\bibinfo
{journal} {Physical Review A}\ }\textbf {\bibinfo {volume} {65}},\ \bibinfo
{pages} {042308} (\bibinfo {year} {2002})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Childs}\ and\ \citenamefont
{Goldstone}(2004)}]{childs2004spatial}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {A.~M.}\ \bibnamefont
{Childs}}\ and\ \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont
{Goldstone}},\ }\bibfield {title} {\bibinfo {title} {Spatial search by
quantum walk},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Physical Review A}\ }\textbf {\bibinfo {volume} {70}},\ \bibinfo {pages}
{022314} (\bibinfo {year} {2004})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Childs}\ \emph {et~al.}(2003)\citenamefont {Childs},
\citenamefont {Cleve}, \citenamefont {Deotto}, \citenamefont {Farhi},
\citenamefont {Gutmann},\ and\ \citenamefont
{Spielman}}]{childs2003exponential}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {A.~M.}\ \bibnamefont
{Childs}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Cleve}},
\bibinfo {author} {\bibfnamefont {E.}~\bibnamefont {Deotto}}, \bibinfo
{author} {\bibfnamefont {E.}~\bibnamefont {Farhi}}, \bibinfo {author}
{\bibfnamefont {S.}~\bibnamefont {Gutmann}},\ and\ \bibinfo {author}
{\bibfnamefont {D.~A.}\ \bibnamefont {Spielman}},\ }\bibfield {title}
{\bibinfo {title} {Exponential algorithmic speedup by a quantum walk},\ }in\
\href@noop {} {\emph {\bibinfo {booktitle} {Proceedings of the thirty-fifth
annual ACM symposium on Theory of computing}}}\ (\bibinfo {organization}
{ACM},\ \bibinfo {year} {2003})\ pp.\ \bibinfo {pages} {59--68}\BibitemShut
{NoStop} \bibitem [{\citenamefont {Novo}\ \emph {et~al.}(2015)\citenamefont {Novo},
\citenamefont {Chakraborty}, \citenamefont {Mohseni}, \citenamefont {Neven},\
and\ \citenamefont {Omar}}]{novo2015systematic}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {L.}~\bibnamefont
{Novo}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Chakraborty}},
\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont {Mohseni}}, \bibinfo
{author} {\bibfnamefont {H.}~\bibnamefont {Neven}},\ and\ \bibinfo {author}
{\bibfnamefont {Y.}~\bibnamefont {Omar}},\ }\bibfield {title} {\bibinfo
{title} {Systematic dimensionality reduction for quantum walks: optimal
spatial search and transport on non-regular graphs},\ }\href@noop {}
{\bibfield {journal} {\bibinfo {journal} {Scientific reports}\ }\textbf
{\bibinfo {volume} {5}} (\bibinfo {year} {2015})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Farhi}\ \emph {et~al.}(2000)\citenamefont {Farhi},
\citenamefont {Goldstone}, \citenamefont {Gutmann},\ and\ \citenamefont
{Sipser}}]{farhi2000quantum}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {E.}~\bibnamefont
{Farhi}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Goldstone}},
\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Gutmann}},\ and\ \bibinfo
{author} {\bibfnamefont {M.}~\bibnamefont {Sipser}},\ }\bibfield {title}
{\bibinfo {title} {Quantum computation by adiabatic evolution},\ }\href@noop
{} {\bibfield {journal} {\bibinfo {journal} {arXiv preprint
quant-ph/0001106}\ } (\bibinfo {year} {2000})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Albash}\ and\ \citenamefont
{Lidar}(2018)}]{albash2018Adiabatic}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {T.}~\bibnamefont
{Albash}}\ and\ \bibinfo {author} {\bibfnamefont {D.~A.}\ \bibnamefont
{Lidar}},\ }\bibfield {title} {\bibinfo {title} {Adiabatic quantum
computation},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Reviews of Modern Physics}\ }\textbf {\bibinfo {volume} {90}},\ \bibinfo
{pages} {015002} (\bibinfo {year} {2018})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Farhi}\ \emph {et~al.}(2011)\citenamefont {Farhi},
\citenamefont {Goldstone}, \citenamefont {Gosset}, \citenamefont {Gutmann},
\citenamefont {Meyer},\ and\ \citenamefont {Shor}}]{10.5555/2011395.2011396}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {E.}~\bibnamefont
{Farhi}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Goldstone}},
\bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Gosset}}, \bibinfo
{author} {\bibfnamefont {S.}~\bibnamefont {Gutmann}}, \bibinfo {author}
{\bibfnamefont {H.~B.}\ \bibnamefont {Meyer}},\ and\ \bibinfo {author}
{\bibfnamefont {P.}~\bibnamefont {Shor}},\ }\bibfield {title} {\bibinfo
{title} {Quantum adiabatic algorithms, small gaps, and different paths},\
}\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Quantum Info.
Comput.}\ }\textbf {\bibinfo {volume} {11}},\ \bibinfo {pages} {181--214}
(\bibinfo {year} {2011})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Perdomo-Ortiz}\ \emph {et~al.}(2011)\citenamefont
{Perdomo-Ortiz}, \citenamefont {Venegas-Andraca},\ and\ \citenamefont
{Aspuru-Guzik}}]{perdomo2011study}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont
{Perdomo-Ortiz}}, \bibinfo {author} {\bibfnamefont {S.~E.}\ \bibnamefont
{Venegas-Andraca}},\ and\ \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont
{Aspuru-Guzik}},\ }\bibfield {title} {\bibinfo {title} {A study of heuristic
guesses for adiabatic quantum computation},\ }\href@noop {} {\bibfield
{journal} {\bibinfo {journal} {Quantum Information Processing}\ }\textbf
{\bibinfo {volume} {10}},\ \bibinfo {pages} {33} (\bibinfo {year}
{2011})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Griffiths}\ and\ \citenamefont
{Schroeter}(2018)}]{griffiths2018introduction}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {D.~J.}\ \bibnamefont
{Griffiths}}\ and\ \bibinfo {author} {\bibfnamefont {D.~F.}\ \bibnamefont
{Schroeter}},\ }\href@noop {} {\emph {\bibinfo {title} {Introduction to
quantum mechanics}}}\ (\bibinfo {publisher} {Cambridge University Press},\
\bibinfo {year} {2018})\BibitemShut {NoStop} \bibitem [{\citenamefont {Aharonov}\ \emph {et~al.}(2008)\citenamefont
{Aharonov}, \citenamefont {Van~Dam}, \citenamefont {Kempe}, \citenamefont
{Landau}, \citenamefont {Lloyd},\ and\ \citenamefont
{Regev}}]{aharonov2008Adiabatic}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {D.}~\bibnamefont
{Aharonov}}, \bibinfo {author} {\bibfnamefont {W.}~\bibnamefont {Van~Dam}},
\bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Kempe}}, \bibinfo
{author} {\bibfnamefont {Z.}~\bibnamefont {Landau}}, \bibinfo {author}
{\bibfnamefont {S.}~\bibnamefont {Lloyd}},\ and\ \bibinfo {author}
{\bibfnamefont {O.}~\bibnamefont {Regev}},\ }\bibfield {title} {\bibinfo
{title} {Adiabatic quantum computation is equivalent to standard quantum
computation},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{SIAM review}\ }\textbf {\bibinfo {volume} {50}},\ \bibinfo {pages} {755}
(\bibinfo {year} {2008})}\BibitemShut {NoStop} \bibitem [{Note1()}]{Note1}
\BibitemOpen
\bibinfo {note} {This is the same as $g^2(t)$, since for each $t$ there is only
one corresponding $s$}\BibitemShut {NoStop} \end{thebibliography}
\end{document}
\begin{document}
\title{Counterfactual Definiteness and Bell's Inequality}
\author{Karl Hess\affil{1}{Center for Advanced Study, University of Illinois, Urbana, Illinois}, Hans De Raedt\affil{2}{ Department of Applied Physics, Zernike Institute for Advanced Materials, University of Groningen, Nijenborgh 4, NL-9747 AG Groningen, The Netherlands }, Kristel Michielsen\affil{3}{ Institute for Advanced Simulation, J\"ulich Supercomputing Centre, Forschungszentrum J\"ulich, D-52425 J\"ulich, RWTH Aachen University, D-52056 Aachen, Germany } }
\contributor{Member submission to the Proceedings of the National Academy of Sciences of the United States of America}
\maketitle
\begin{article} \begin{abstract} Counterfactual definiteness must be used as at least one of the postulates or axioms that are necessary to derive Bell-type inequalities. It is considered by many to be a postulate that is not only commensurate with classical physics (as for example Einstein's special relativity), but also separates and distinguishes classical physics from quantum mechanics. It is the purpose of this paper to show that Bell's choice of mathematical functions and independent variables implicitly includes counterfactual definiteness and reduces the generality of the physics of Bell-type theories so significantly that no meaningful comparison of these theories with actual Einstein-Podolsky-Rosen experiments can be made. \end{abstract}
\keywords{Bell Inequality | Foundations of Quantum Mechanics | Foundations of probability}
\dropcap{B}ell's theorem \cite{BELL01} has an unusual standing among mathematical-physical theorems. No other theorem has ever been discussed with respect to so many ``loopholes'', physical situations that make it possible to escape the mathematical strictures of the theorem. It is shown that the reason for this fact is that Bell's theorem is based on the postulate of counterfactual definiteness. The use of the postulate of counterfactual definiteness to derive Bell-type inequalities is clearly asserted in the books of Peres \cite{PERE95} and Leggett \cite{LEGG85b}.
Some of Einstein's reasoning regarding Einstein-Podolsky-Rosen (EPR) experiments also contains counterfactual realism and Einstein's special relativity is counterfactually definite in the mathematical sense presented below. This fact may have contributed to the opinion that counterfactual realism is the major defining trait of ``classical'' theories. It will be shown, however, that great care must be exercised with respect to the choice of independent variables in the argument of the functions that are used to formulate a counterfactually definite physical theory. It will also be shown that the particular choice of variables, that are used for the derivation of Bell's inequality and Bell's theorem, imposes significant restrictions on the physical situations that can be described by Bell's functions and excludes dynamic processes of classical physics, no matter whether deterministic or stochastic. To show this fact, we first repeat the main features of Bell's functions that describe Einstein-Podolsky-Rosen-Bohm (EPRB) experiments and then connect them to a precise definition of counterfactual definiteness.
\section{EPRB experiments and Bell's functions representing them}
EPRB experiments are performed at two space-like separated locations. The two particles of an entangled pair emanating from a source are spatially separated and propagate to the space-like separated locations. The properties of these particles are measured by instruments that are described by a ``setting'' such as the direction of a polarizer or magnet which is characterized by a unit vector of three dimensional space denoted by $\mathbf{j} = \mathbf{a}, \mathbf{b}, \mathbf{c},\ldots$. Measurements of this type have been performed by a number of researchers and have had a checkered history with respect to the results. These, at first, contradicted and then confirmed quantum theory~\cite{GILD08}. There are still significant deviations from quantum theory in current experiments, which are, however, mostly ignored~\cite{RAED13a}. We proceed here by just stipulating that indeed these experiments showed a violation of the, by now, famous Bell inequality and describe in the following only Bell's postulates and assumptions, thereby focusing on the simplest case involving only three settings and not four, as used in actual experiments, see also~\cite{RAED16a}. Bell's postulates and assumptions are considered by many researchers to be entirely general and valid for all EPR-like experiments and Gedanken-experiments as long as they can be described by classical physics such as Einstein's relativity.
Bell's classical-physics model for the system of measurement equipment and entangled pairs of the EPRB experiments is constructed as follows~\cite{BELL01}. He assumed that all experimental results, all data, can be described by using functions $A$ that map the independent measurement results onto $\pm 1$ or the segment $[-1, +1]$ of the real axis. The variables in the argument of the function always include the settings $\bf j = a, b, c,...$ and another variable, or set of variables, that Bell denoted by $\lambda$. Bell then proceeded to present a proof of his now celebrated inequality: \begin{equation} \langle A(\mathbf{a}, \lambda)A(\mathbf{b}, \lambda) + A(\mathbf{a}, \lambda)A(\mathbf{c}, \lambda) - A(\mathbf{b}, \lambda)A(\mathbf{c}, \lambda)\rangle \leq +1 , \label{5july15n1} \end{equation} where $\langle \cdot \rangle$ indicates the average over many measurements. The left and right factor of each term correspond to the data taken at the two corresponding space like separated measurement stations. The events of measurements and corresponding data are linked to clock times of two synchronized laboratory clocks. Therefore, the functions $A$ as well as the variables ${\bf j}$ and $ \lambda$ must for each of the products correspond to pairs of clock times $t_n, t_n'$ where $n$ is the measurement number.
Note that Bell's original paper assigned to $\lambda$ only properties of the entangled pair. It is now generally assumed~\cite{BRAN87} that $\lambda$ may stand for a set of arbitrary physical variables including space and time coordinates or even Einstein's space-time $\bf st$. Therefore, $\lambda$ may also describe some properties of the measurement equipment (in addition to the magnet or polarizer orientation $\bf j$), such as dynamical effects arising from many-body interactions of the entangled pair with the constituent particles and fields of the measurement equipment. Bell agreed with this assumption in his later work \cite{BELL01}.
It is the purpose of this paper to show that the postulate of counterfactual definiteness in conjunction with the use of a setting variable $\bf j$ does not permit the introduction of general space and time related variables that describe the said many body dynamics. Therefore, Bell's assumptions are not general enough to describe classical theories of EPRB experiments that include dynamic processes involving the measurement equipment.
\section{Counterfactual reasoning and EPRB experiments}
Peres~\cite{PERE95} gave the following definition of counterfactual realism, which roughly agrees with the definition of Leggett \cite{LEGG85b}. Peres claims, as does Leggett, not to use traditional concepts of mathematics and physics to start with, but only ``what could have possibly been the results of unperformed experiments'' and bases his definition of counterfactual realism on the following statement:
\begin{center}
\framebox{ \parbox[t]{0.8\hsize}{
It is possible to imagine hypothetical results for any unperformed test, and to do calculations where these unknown results are treated as if they were numbers.
}}
\end{center}
We agree that it is possible, as a purely intellectual activity, to imagine hypothetical results for any unperformed tests. However, without significant additional assumptions, it is not possible ``to do calculations where these unknown results are treated as if they were numbers''. Here we encounter the so often unrecognized gulf between sense impressions, even just imagined ones, and conceptual frameworks such as the axiomatic system of numbers or the probability theory of Kolmogorov. Peres, Leggett and a majority of quantum information theorists did not and do not recognize that giant gulf, that giant separation, between events of nature, recorded as data, and the axiomatic edifices of human thought.
If one wishes to treat hypothetical ``results'' of unperformed tests as if they were numbers, one must be sure that these abstractions at least follow the axioms of numbers. There are several steps necessary to connect the ``events'' of the physical world to numbers. Boole derived ultimate alternatives and a Boolean algebra while Kolmogorov's axiomatic system introduces an event algebra and probability space. It is true that mathematicians often describe experimental situations or ideas about them by the Kolmogorov framework and just postulate that a probability space and $\sigma$-algebra exist. It is known, however, from the work of Boole \cite{BO1862} and Vorob'ev \cite{VORO62} that a given particular set of variables may not be able to describe certain correlations in any given set of data.
In more elementary terms, we have to consider the following facts. If we perform ``calculations where these unknown results are treated as if they were numbers'', then we must use the mathematical concept of functions or something equivalent in order to link the imagined but possible tests with numbers. A one to one correspondence of the possible tests and the numbers needs to be established and it needs to be shown that no logical-mathematical contradictions arise from such a procedure. If no such correspondence exists, then the ``purely intellectual activity'' is nothing more than child's play and the mathematical abstractions of such activity can certainly not be treated as if they were numbers with some relation to physics.
Take any set of data derived from measurements on spin-$1/2$ particles with Stern-Gerlach magnets that lists the measured spins as ``up'' or ``down'' together with magnet settings ${\mathbf j}={\mathbf a}, {\mathbf b}, {\mathbf c}, \ldots$.
Can we replace ``up'' with $+1$ and ``down'' with $-1$ and expect that the so obtained set follows the axioms of integers? The ``trespass'' to deal with tests as if they were numbers has been committed by several textbook authors, in particular by Peres~\cite{PERE95} and Leggett~\cite{LEGG85b}. This point appears in clear relief, if we write down the data according to the way in which they are imagined to be taken in testing e.g.\ the Bell-type inequality. The data are recorded in pairs corresponding to detector-events that are registered together with equipment settings and the clock times of synchronized laboratory clocks. Thus we obtain data lists of the kind:
$ (D_{{\mathbf j}_1}^{t_1}, D_{{\mathbf j}_1^\prime}^{t_1^\prime}), (D_{{\mathbf j}_2}^{t_2}, D_{{\mathbf j}_2^\prime}^{t_2^\prime}),\ldots, (D_{{\mathbf j}_M}^{t_M}, D_{{\mathbf j}_{M}^\prime}^{t_{M}^\prime}),
$
the ${\mathbf j}_n, {\mathbf j}_n'$ representing the randomly chosen setting pair and $t_n, t'_n$ denoting the times of measurement. The number of times that the setting $(\mathbf{a},\mathbf{b})$, $(\mathbf{a},\mathbf{c})$, and $(\mathbf{b},\mathbf{c})$ was chosen is denoted by $N_{\mathbf{a},\mathbf{b}}$, $N_{\mathbf{a},\mathbf{c}}$, and $N_{\mathbf{b},\mathbf{c}}$, respectively. The total number of pairs is then $M=N_{\mathbf{a},\mathbf{b}}+N_{\mathbf{a},\mathbf{c}}+N_{\mathbf{b},\mathbf{c}}$. One cannot do justice to the number of different data-pairs by using models with three pairs of mathematical symbols such as $A_{\mathbf{a}}, A_{\mathbf{b}}$, $A_{\mathbf{a}}, A_{\mathbf{c}}$ , and $A_{\mathbf{b}}, A_{\mathbf{c}}$ as they are used in Bell-type proofs. One runs into problems even if one regards these mathematical symbols as ``variables" (such as Boolean variables~\cite{HESS15a}) and not just as numbers; the reason being that one cannot cover all the different possible correlations of the data by such few variables. If we admit the two values $+1$ and $-1$ for the variables at different times of the same experiment, then we obtain $N_{\mathbf{a},\mathbf{b}}+1$ different values for the sum of the pair product $\sum_{n=1}^{M} \delta_{{\bf j}_n,\mathbf{a}}\delta_{{\bf j}_n^\prime,\mathbf{b}} D_{\mathbf{a}}^{t_n}D_{\mathbf{b}}^{t^\prime_{n}}$. If we have three such sums with all independent variables, the number of possibilities is $(N_{\mathbf{a},\mathbf{b}}+1)(N_{\mathbf{a},\mathbf{c}}+1)(N_{\mathbf{b},\mathbf{c}}+1)\approx (M/3 + 1)^3$ for $M$ sufficiently large. In contrast, we have for the Bell type variables $A_{\mathbf{a}}, A_{\mathbf{b}}$, $A_{\mathbf{a}}, A_{\mathbf{c}}$, and $A_{\mathbf{b}}, A_{\mathbf{c}}$ only about $(M/3 +1)^2$ independent choices of all possible different correlations of possible outcomes of these variables. 
This fact arises from Bell's description of $3M$ different \textbf{pairs} of measurements ($6M$ measurements) by only 3 different variables and represents another typical trespass that is explicitly made in both the book of Peres~\cite{PERE95} and Leggett~\cite{LEGG85b}: they use a model with a severe restriction of choices before any physics is introduced and thus ``overburden'' their variables in a way which cannot do justice to the complexity of the data. In real EPRB experiments, one uses four not three different randomly chosen settings~\cite{ASPE82b,WEIH98} but the above argument equally holds for this case, with $(M/3 + 1)^3$ and $(M/3 + 1)^2$ being replaced by $(M/4 + 1)^4$ and $(M/4 + 1)^3$ for $4M$ different pairs ($8M$ measurements), respectively.
This more subtle problem, a well known problem in the area of computer simulations, reveals once more the enormous gulf between data and mathematical abstractions that describe the data. In the framework of Boole~\cite{HESS15a}, we need to be sure that the data can be described by ultimate alternatives (the Boolean variables) and in the framework of Kolmogorov we must be sure to deal with random variables (functions on a Kolmogorov probability space). But how can we be sure? As a minimum requirement we need to introduce functions, with sufficiently many physical variables in their arguments, to enable the description of all the possible correlations and to guarantee a one to one correspondence of mathematical abstractions and the massive amount of data.
To describe EPRB experiments in the general way that Bell intended and purported to actually have done, we need to introduce functions $A$ with variables additional to $\bf j$ in their argument (or indexes, see below). We need to have variables such as $t_n, s_n, {\bf st}_n,\ldots$ that are taken out of the realm of Einsteinian physics and do indeed guarantee the one to one correspondence to the data. For example, we may need to include $t_n$, the time of measurement at one location and $s_n$ representing any property of the objects emanating from the source. It may also be necessary to include a more general four dimensional space-time vector ${\mathbf st}_n$ instead or in addition to the measurement time $t_n$ and we include it here just for completeness. This way we obtain functions $A = A({\bf j}_n, t_n, s_n, {\bf st}_n,\ldots)$.
Some may ask whether that is not precisely what Bell used by introducing his $\lambda$ that, as he claimed \cite{BELL01}, can stand for any set of variables and, therefore, also for the set $(t_n, s_n, {\bf st}_n,\ldots)$. We thus may have $A = A({\bf j}_n, t_n, s_n, {\bf st}_n,\ldots)= A({\bf j}_n, \lambda_n)$.
Indeed it is true that this is what Bell claimed. However, as we will see below, his claim is incorrect, because he and followers have postulated complete independence of $\lambda$ and $\bf j$ and thus postulated counterfactual definiteness in conjunction with the setting variable $\bf j$ according to the precise definition given in the next section. Einstein locality does not require independence of $\lambda$ from the local setting (see corresponding section).
Note that quantum mechanics does not use any setting-type of variable as independent variable in the argument of the wave-function. There, the setting-type variables label the operators. A helpful discussion of explicit and implicit assumptions of Bell, with emphasis of the mathematical structure and consistency, was given by Khrennikov~\cite{KHRE09}.
\section{Mathematical definition of counterfactual definiteness and Bell's inequality}
Counterfactual definiteness requires the following. We must be able to describe a measurement or test by using a given set of variables in the argument of the function $A$, and thus for example a setting $\mathbf{j} = \mathbf{b}$. Then, we must also be able to reason that we could have used instead of setting $\bf b$ the setting $\bf c$ and would have obtained the outcome corresponding to the value of $A$, now calculated with setting $\bf c$ and all other variables in its argument unchanged. Although this type of reasoning is not permitted in the courts of law, its mathematical restatement looks natural and general enough:
\begin{center}
\framebox{ \parbox[t]{0.8\hsize}{
\it A counterfactually definite theory is described by a function (or functions) that map(s) tests onto numbers. The variables of the function(s) argument(s) must be chosen in a one to one correspondence to physical entities that describe the test(s) and must be independent variables in the sense that they can be arbitrarily chosen from their respective domains.
}}
\end{center}
This definition means that the outcomes of measurements must be described by functions of a set of independent variables. The definition applies, of course, to the major theories of classical physics, including Einstein's special relativity. Counterfactual definiteness appears, therefore, as a reasonable and even necessary requirement of classical theories. However, most importantly, counterfactual definiteness restricts the use of variables to those that can be independently picked from their respective domains. However, a magnet- or polarizer-orientation, mathematically represented by the variable $\bf j$, cannot be picked independently of the measurement times, which are mathematically represented by $t_n$ and registered by the clocks of the measurement stations. Once a setting is picked at a certain space-time coordinate, no other setting can be linked to that coordinate, because of the relativistic limitations for the movement of massive bodies and the fact that Bell's theory is confined to the realm of Einsteinian physics and, therefore, excludes quantum superpositions. Thus any measurement is related to spatio-temporal equipment changes and the mathematical variables that describe the measurement need to represent the possible physical situations.
Enter probability theory and we certainly cannot use the setting $\bf j$ as a random variable and the measurement time $t$ as another {\it independent} random variable on the same probability space. The reason for this fact is rooted in the above explanation and can be further crystallized as follows. It is possible to define the setting $\bf j$ as a random variable on one probability space meaning that we may regard $\bf j$ as a function which assigns to each elementary event $\omega$ of a sample space $\Omega$ a so called realization of $\bf j$ e.g. ${\bf j}(\omega_1) = \mathbf{b}$. It is also possible, at least under very general circumstances, to formulate the measurement times as another random variable $t(\omega')$, where $\omega'$ is an elementary event of a second sample space $\Omega'$. Again, given some specific $\omega_1'$ we obtain a realization e.g. $t(\omega_1') = t_1$.
However, the formation of a product probability space on which both random variables $\bf j$ and $t$ are defined presents now a problem. That space would necessarily contain impossible events (such as different settings for the same measurement times) with a non-zero product probability measure assigned to them.
These facts can actually be formulated as a theorem stating that setting and time variables of EPRB experiments cannot be defined on one probability space~\cite{HESS05b}.
Thus, the postulate of counterfactual definiteness in conjunction with the use of a setting variable restricts the independent variables additional to $\bf j$ in the argument of Bell's functions $A$ to a, physically speaking, narrow subset of variables that we denote by $N_B$. This subset permits the physical description of static properties but cannot handle dynamic properties expressed by space-time dependencies.
As a consequence, the choices that can be made for variables in addition to the setting variable $\bf j$ in Bell's theory are extremely limited, particularly if these variables are related to space-time (or space and time). This limitation is so severe that it is impossible to describe general dynamic processes of classical physics with Bell's independent variables. The way to describe general dynamic processes in Kolmogorov's framework is by using stochastic processes.
To describe a dynamics of EPRB experiments one needs to use two dimensional vector stochastic processes, which involves several subtleties that, if neglected, lead to incorrect conclusions. A general vector stochastic process is in essence a vector of random variables, such as $(A_1(t_n), A_2(t_n), A_3(t_n),\ldots)$, whose statistical properties change in time (we use here discrete time only). A precise mathematical definition can be found in Ref.~\cite{BREU02}, pp 11--15.
In relation to EPRB experiments we thus consider vectors such as $(A_1(t_n), A_2(t_n))$.
A first difficulty that is usually encountered is related to the physics of spin measurements. According to Bohr, the outcomes of measurements on each separate side of the EPRB experiment are spin-up or spin down with equal likelihood, which appears to suggest stationarity or time-independence of the random variables $A_1(t_n)$ and $A_2(t_n)$. Bohr's postulate, however, does not necessitate a time-independence of the statistical correlations between the random variables. This fact has been explained on the basis of a mathematical model involving time in Ref.~\cite{HESS15}(pp 55--60) and demonstrated by actual EPRB related computer experiments~\cite{RAED16a}.
A second difficulty arises from the fact, explained in detail above, that the time and setting related variables of EPRB experiments cannot be treated as independent. This difficulty can be resolved by use of the following two-dimensional system of functions (vector stochastic process) on a probability space $\Omega$: \begin{equation} (A_{{\bf j}_n}^{t_n}(\omega), A_{{\bf j'}_n}^{t^\prime_n}(\omega)). \label{6aug15n1} \end{equation} Settings and times are now included as indexes that are not independent. ${\bf j}_n = {\bf a, b}$ represents the randomly chosen settings at one measurement place and ${\bf j'}_n = {\bf b, c}$ at the second. $t_n$ as well as $t^\prime_n$ are the respective measurement times. $n = 1, 2, 3...$ indicates just the number of the experiment. Only one setting can occur at one given time in order to avoid physical contradictions and incorrect assignments of probability measures. (Note that a generalization of the time-indexes to space-time ${\bf st }_n $ is straightforward.)
Bell's inequality then transforms to: \begin{equation}
A_{\bf a}^{t_n}(\omega)A_{\bf b}^{t^\prime_n}(\omega)
+
A_{\bf a}^{t_k}(\omega)A_{\bf c}^{t^\prime_k}(\omega)
-
A_{\bf b}^{t_m}(\omega)A_{\bf c}^{t^\prime_m}(\omega)
\leq 3 , \label{6aug15n2} \end{equation} where the labels $n, k, m$ are the appropriate, all different, experiment numbers for which the particular settings have been chosen. Eq~(\ref{6aug15n2}) puts no restrictions on the correlations of EPRB experiments, because the actual experiments may now be represented by a countable infinite number of different functions instead of the three or four functions used by Bell.
There do exist theorems that appear to prove the validity of Bell's inequality for stochastic processes (the Martingales discussed in \cite{GILL03a} are just special forms of stochastic processes). These theorems, however, do not use two-dimensional vector stochastic processes as used in Eq~(\ref{6aug15n1}). They use, instead, counterfactual definiteness in conjunction with setting variables to arrive at three-, four- or higher dimensional stochastic processes (Martingales). Thus these theorems cannot encompass dynamic measurement processes~\cite{NIEU13} and time- (space-time-) related variables, because they would then imply the existence of events with more than one setting at a given measurement time and, therefore, involve impossible events with non-zero probability measure. Such theorems apply, therefore, only to the set of variables $N_B$ as defined above and do not apply to EPRB types of experiments that may involve dynamical processes in the measurement equipment.
It is, therefore, imperative to view EPRB experiments in a different light. A violation of Bell-type inequalities need not be seen as crossing the border between the reasoning of classical Einstein type of physics and quantum mechanics, but indicating a possible dynamics in the interactions of particles and measurement equipment. This possible dynamics is what needs to be investigated, particularly as contrasted to the characterization of the measurement equipment by a completely static symbol~\cite{NIEU11}.
\section{Einstein locality and Bell's reasoning revisited}
Experimentalists have up to now not used Bell's theorem and its implications to search for a many body dynamics of local equipment, but instead to ``uncover" the instantaneous dynamic influence of remote measurements, the so called quantum non-localities. Some consider these non-localities to be the most profound development of modern physics \cite{HESS15}. They maintain that the measurement of the entangled partner causes instantaneous influences over arbitrary distances.
This search for influences due to distant events is based on the conviction, dating back to Bell's original paper, that Einstein locality is necessary to derive his inequality. However, this is not the case. Bell's assumption that $\lambda$ is independent of the setting variable $\bf j$ is already contained in the postulate of counterfactual definiteness and Einstein locality is not only redundant because of this fact, but does not require at all that $\lambda$ be independent of all settings. Variables dependent on the local setting and describing local many body interactions with the incoming particles are entirely permitted and necessary. It is counterfactual definiteness that requires that all additional variables such as $\lambda$ are independent of the setting variable. But why does our classical theory need to involve the setting variable in the way Bell has included it? One can use the setting variable as an index together with another index related to or representing space-time. These indexes are, of course not independent as was pointed out above for stochastic processes.
From these facts we can deduce that Einstein locality is not a necessary condition for Bell's derivation, rather the opposite. Its correct implementation prevents the derivation of Bell to go forward, as shown in Eq~(\ref{6aug15n2}).
\section{Conclusions}
The major premise for the derivation of Bell's inequality is counterfactual definiteness, which in connection with Bell's use of setting variables restricts the domain of the variables in the argument of Bell's functions $A$ to a subset $N_B$ of general physical independent variables. $N_B$ does not include the variables necessary to describe a general dynamics describing many body interactions in the measurement equipment. Using only the independent variables defined by $N_B$, it is impossible to find a violation of Bell's inequality, which therefore represents a demarcation between possible and impossible experience~\cite{BO1862}, not between classical and quantum physics. For a wider parameter space that permits the description of dynamic processes and includes space-time coordinates, the validity of Bell-type inequalities cannot be and has not been derived. This situation is reminiscent of that with the last theorem of Fermat before 1994. There existed only rather trivial proofs of Fermat's theorem for subsets of conditions (e.g. $n=3, 5$), while a general proof was not known until Andrew Wiles supplied it in 1994. Such more complicated and general proofs of Bell's theorem have not been presented and, in the authors' opinion, are not likely to be presented in the future, because they would need to remove the use of the setting variable $\bf j$.
\begin{acknowledgments} We would like to thank the referees for very valuable suggestions that improved the manuscript. \end{acknowledgments}
\end{article}
\end{document}
\begin{document}
\def{\mathbb{N}}^{\mathbb{N}}{{\mathbb{N}}^{\mathbb{N}}} \def{\mathcal I}{{\mathcal I}} \def2^{< \omega}{2^{< \omega}} \def\mathbb{N}{\mathbb{N}} \def2^{\nat}{2^{\mathbb{N}}} \def\mbox{\sf Fin}{\mbox{\sf Fin}} \def{\mathcal F}{{\mathcal F}} \def$\mbox{p}^+${$\mbox{p}^+$} \def\mbox{$ \text{p}^-$}{\mbox{$ \text{p}^-$}} \def$\mbox{q}^+${$\mbox{q}^+$} \def\not\in{\not\in} \def\mbox{\sf nwd}{\mbox{\sf nwd}} \defF_\sigma{F_\sigma} \def\mathbb{X}{\mathbb{X}} \def\mathbb{Y}{\mathbb{Y}}
\newtheorem{definition}{Definition}[section] \newtheorem{theorem}[definition]{Theorem} \newtheorem{example}[definition]{Example} \newtheorem{corollary}[definition]{Corollary} \newtheorem{lemma}[definition]{Lemma} \newtheorem{proposition}[definition]{Proposition} \newtheorem{question}[definition]{Question} \newtheorem{claim}[definition]{Claim}
\title{Combinatorial properties on nodec countable spaces with analytic topology}
\author{Javier Murgas and Carlos Uzc\'ategui} \address{Escuela de Matem\'aticas, Facultad de Ciencias, Universidad Industrial de
Santander, Ciudad Universitaria, Carrera 27 Calle 9, Bucaramanga,
Santander, A.A. 678, COLOMBIA. } \email{javier\_murgas@hotmail.com. } \address{Escuela de Matem\'aticas, Facultad de Ciencias, Universidad Industrial de
Santander, Ciudad Universitaria, Carrera 27 Calle 9, Bucaramanga,
Santander, A.A. 678, COLOMBIA. Centro Interdisciplinario de L\'ogica y \'Algebra, Facultad de Ciencias, Universidad de Los Andes, M\'erida, VENEZUELA.} \email{cuzcatea@saber.uis.edu.co.} \thanks{The second author thanks Vicerrector\'ia de Investigaci\'on y Extensi\'on de la Universidad Industrial de Santander for the financial support for this work, which is part of the VIE project \#2422.}
\date{}
\begin{abstract}We study some variations of the product topology on families of clopen subsets of $2^{\nat}\times\mathbb{N}$ in order to construct countable nodec regular spaces (i.e. in which every nowhere dense set is closed) with analytic topology which in addition are not selectively separable and do not satisfy the combinatorial principle $q^+$. \end{abstract}
\maketitle
\noindent {\em Keywords:} nodec countable spaces; analytic sets, selective separability, $q^+$
\noindent {\em MSC: 54G05, 54H05, 03E15}
\section{Introduction}
A topological space $X$ is {\em selectively separable} ($SS$), if for any sequence $(D_n)_n$ of dense subsets of $X$ there is a finite set $F_n\subseteq D_n$, for $n\in\mathbb{N}$, such that $\bigcup_n F_n$ is dense in $X$. This notion was introduced by Scheepers \cite{Scheeper99} and has received a lot of attention ever since (see for instance \cite{BarmanDow2011,BarmanDow2012,Bella2009,Bella_et_al2008,Bella2013,CamargoUzca2018b,Gruenhage2011,Reposvetal2010}). Bella et al. \cite{Bella_et_al2008} showed that every separable space with countable fan tightness is $SS$. On the other hand, Barman and Dow \cite{BarmanDow2011} showed that every separable Fr\'echet space is also $SS$ (see also \cite{CamargoUzca2018b}).
A topological space is {\em maximal} if it is a dense-in-itself regular space such that any strictly finer topology has an isolated point. It was shown by van Douwen \cite{Vand} that a space is maximal if, and only if, it is {\em extremely disconnected} (i.e. the closure of every open set is open), {\em nodec} (i.e. every nowhere dense set is closed) and every open set is {\em irresolvable} (i.e. if $U$ is open and $D\subseteq U$ is dense in $U$, then $U\setminus D$ is not dense in $U$). He constructed a countable maximal regular space.
A countable space $X$ is $\mbox{q}^+$\ at a point $x\in X$, if given any collection of finite sets $F_n\subseteq X$ such that $x\in \overline{\bigcup_n F_n}$, there is $S\subseteq \bigcup_n F_n$ such that $x\in \overline{S}$ and $ S\cap F_n$ has at most one point for each $n$. We say that $X$ is a {\em $\mbox{q}^+$-space} if it is $\mbox{q}^+$\ at every point. Every countable sequential space is $\mbox{q}^+$\ (see \cite[Proposition 3.3]{Todoruzca2000}). The collection of clopen subsets of $2^{\nat}$ with the product topology is not $\mbox{q}^+$\ at any point. This notion is motivated by the analogous concept of a $\mbox{q}^+$\ filter (or ideal) from Ramsey theory.
A problem stated in \cite{Bella_et_al2008} was to analyze the behavior of selective separability on maximal spaces. The existence of a maximal regular SS space is independent of ZFC. In fact, in ZFC there is a maximal non SS space \cite{BarmanDow2011} and it is consistent with ZFC that no countable maximal space is SS \cite{BarmanDow2011, Reposvetal2010}. On the other hand, it is also consistent that there is a maximal, countable, SS regular space \cite{BarmanDow2011}.
In this paper we are interested in these properties on countable spaces with an analytic topology (i.e. the topology of the space $X$ is an analytic set as a subset of $2^X$ \cite{todoruzca}). Maximal topologies are not analytic. In fact, in \cite{Todoruzca2014} it was shown that there are neither extremely disconnected nor irresolvable analytic topologies, nevertheless there are nodec regular spaces with analytic topology. In view of the above mentioned results about maximal spaces, it seems natural to wonder about the behavior of selective separability on nodec spaces with an analytic topology. Nodec regular spaces are not easy to construct. We continue the study of the method introduced in \cite{Todoruzca2014} in order to construct similar nodec regular spaces with analytic topology that are neither SS nor $\mbox{q}^+$. A countable regular space has an analytic topology if, and only if, it is homeomorphic to a subspace of $C_p({\mathbb{N}}^{\mathbb{N}})$ \cite{todoruzca}. Thus our examples are constructed using some special topologies on a collection of clopen subsets of $2^{\nat}\times \mathbb{N}$. It is an open question whether there is a nodec $SS$ regular space with analytic topology.
\section{Preliminaries}
An {\em ideal} on a set $X$ is a collection ${\mathcal I}$ of subsets of $X$ satisfying: (i) $A\subseteq B$ and $B\in {\mathcal I}$, then $A\in {\mathcal I}$. (ii) If $A,B\in{\mathcal I}$, then $A\cup B\in {\mathcal I}$. (iii) $\emptyset \in {\mathcal I}$. We will always assume that an ideal contains all finite subsets of $X$. If ${\mathcal I}$ is an ideal on $X$, then ${\mathcal I}^+=\{A\subseteq X:\, A\not\in {\mathcal I}\}$.
\mbox{\sf Fin}\ denotes the ideal of finite subsets of the non negative integers $\mathbb{N}$. An ideal ${\mathcal I}$ on $X$ is {\em tall}, if for every $A\subseteq X$ infinite, there is $B\subseteq A$ infinite with $B\in {\mathcal I}$. We denote by $A^{<\omega}$ the collection of finite sequences of elements of $A$. If $s$ is a finite sequence on $A$ and $i\in A$, $|s|$ denotes its length and $ s\widehat{\;\;}i$ the sequence obtained concatenating $s$ with $i$. For $s\in2^{< \omega}$ and $\alpha\in 2^{\nat}$, let $s\prec \alpha$ if $\alpha(i)=s(i)$ for all $i<|s|$ and
$$
[s]=\{\alpha\in 2^{\nat}: \; s\prec \alpha\}.
$$
If $\alpha\in2^{\nat}$ and $n\in \mathbb{N}$, we denote by $\alpha\restriction n$ the finite sequence $(\alpha(0),\cdots,\alpha(n-1))$ if $n>0$ and $\alpha\restriction 0$ is the empty sequence. The collection of all $[s]$ with $s\in2^{< \omega}$ is a basis of clopen sets for $2^{\nat}$. As usual we identify each $n\in \mathbb{N}$ with $\{0,\cdots, n-1\}$.
The ideal of nowhere dense subsets of $X$ is denoted by $\mbox{\sf nwd}(X)$. Now we recall some combinatorial properties of ideals. We put $A\subseteq^*B$ if $A\setminus B$ is finite. \begin{enumerate}
\item[({$p^+$})] ${\mathcal I}$ is $\mbox{p}^+$, if for every decreasing sequence $(A_n)_n$ of sets in ${\mathcal I}^+$, there is $A\in {\mathcal I}^+$ such that $A\subseteq^* A_n$ for all $n\in\mathbb{N}$. Following \cite{HMTU2017}, we say that ${\mathcal I}$ is $\mbox{$ \text{p}^-$}$, if for every decreasing sequence $(A_n)_n$ of sets in ${\mathcal I}^+$ such that $A_n\setminus A_{n+1}\in {\mathcal I}$, there is $B\in {\mathcal I}^+$ such that $B\subseteq^* A_n$ for all $n$.
\item[($q^+)$] ${\mathcal I}$ is $\mbox{q}^+$\ , if for every $A\in {\mathcal I}^+$ and every partition $(F_n)_n$ of $A$ into finite sets, there is $S\in{\mathcal I}^+$ such that $S\subseteq A$ and $S\cap F_n$ has at most one element for each $n$. Such sets $S$ are called (partial) {\em selectors} for the partition. \end{enumerate}
A point $x$ of a topological space $X$ is called a {\em Fr\'echet point}, if for every $A$ with $x\in \overline{A}$ there is a sequence $(x_n)_n$ in $A$ converging to $x$. We will say that $x$ is a $\mbox{q}^+$-{\em point}, if ${\mathcal I}_x$ is $\mbox{q}^+$. We say that a space is a $\mbox{q}^+$-space, if every point is $\mbox{q}^+$. We define analogously the notion of a $\mbox{p}^+$ and $\mbox{$ \text{p}^-$}$ points. Notice that if $x$ is isolated, then ${\mathcal I}_x$ is trivially $\mbox{q}^+$\ as ${\mathcal I}_x^+$ is empty. Thus a space is $\mbox{q}^+$\ if, and only if, ${\mathcal I}_x$ is $\mbox{q}^+$\ for every non isolated point $x$. The same occurs with the other combinatorial properties defined in terms of ${\mathcal I}_x$.
We say that a space $Z$ is {\em wSS} if for every sequence $(D_n)_n$ of dense subsets of $Z$, there is $F_n\subseteq D_n$ a finite set, for each $n$, such that $\bigcup_n F_n$ is not nowhere dense in $Z$. In the terminology of selection principles \cite{Scheeper99}, $wSS$ corresponds to $S_{fin}(\mathcal{D}, \mathcal{B})$ where $\mathcal{D}$ is the collection of dense subsets and $\mathcal{B}$ the collection of non nowhere dense sets. Seemingly this notion has not been considered before. Notice that if $Z$ is $SS$ and $W$ is not $SS$, then the direct sum of $Z$ and $W$ is $wSS$ but not $SS$.
A subset $A$ of a Polish space is called {\em analytic}, if it is a continuous image of a Polish space. Equivalently, if there is a continuous function $f:{\mathbb{N}}^{\mathbb{N}}\rightarrow X$ with range $A$, where ${\mathbb{N}}^{\mathbb{N}}$ is the space of irrationals. For instance, every Borel subset of a Polish space is analytic. A general reference for all descriptive set theoretic notions used in this paper is \cite{Kechris94}. We say that a topology $\tau$ over a countable set $X$ is {\em analytic}, if $\tau$ is analytic as a subset of the cantor cube $2^X$ (identifying subsets of $X$ with characteristic functions) \cite{todoruzca, Todoruzca2000,Todoruzca2014}, in this case we will say that $X$ is an {\em analytic space}. A regular countable space is analytic if, and only if, it is homeomorphic to a subspace of $C_p({\mathbb{N}}^{\mathbb{N}})$ (see \cite{todoruzca}). If there is a base $\mathcal B$ of $X$ such that $\mathcal B$ is an $F_\sigma$ (Borel) subset of $2^X$, then we say that $X$ has an {\em $F_\sigma$ (Borel) base}. In general, if $X$ has a Borel base, then the topology of $X$ is analytic.
We end this section recalling some results about countable spaces that will be used in the sequel.
\begin{theorem} \label{Fsesp} \cite[Corollary 3.8]{CamargoUzca2018b} Let $X$ be a countable space with an $F_{\sigma}$ base, then $X$ is $p^+$. \end{theorem}
The next result is essentially Lemma 4.6 of \cite{Todoruzca2014}.
\begin{lemma}\label{scompact} Let $X$ be a $\sigma$-compact space and $W$ a countable collection of clopen subsets of $X$. Then $W$, as a subspace of $2^{X}$, has an $F_{\sigma}$ base. \end{lemma}
\begin{theorem}\label{pesSS}\cite[Theorem 3.5]{CamargoUzca2018b} Let $X$ be a countable space. If $X$ is $\mbox{$ \text{p}^-$}$, then $X$ is $SS$. In particular, if $X$ has an $F_{\sigma}$ base, then $X$ is $SS$. \end{theorem}
A space $X$ is {\em discretely generated} (DG) if for every $A\subseteq X$ and $x\in\overline A$, there is $E\subseteq A$ discrete such that $x\in \overline E$. This notion was introduced by Dow et al. in \cite{DTTW2002}. It is not easy to construct spaces which are not DG, the typical examples are maximal spaces (which are nodec).
\begin{theorem} \label{sq-disc-generated} Let $X$ be a regular countable space. Suppose every non isolated point is \mbox{$ \text{p}^-$}, then $X$ is discretely generated. \end{theorem}
\proof Let $A\subset X$ with $x\in \overline A$. Fix a maximal family $(O_n)_n$ of relatively open disjoint subsets of $A$ such that $x\not\in \overline{O_n}$. Let $B=\bigcup_n O_n$. From the maximality we get that $x\in \overline{B}$. Since each $O_n$ does not accumulate to $x$ and $x$ is a \mbox{$ \text{p}^-$}-point, there is $E$ such that $x\in \overline{E}$ and $E\cap O_n$ is finite for every $n$. Clearly $E$ is a discrete subset of $A$. \qed
\begin{theorem}(Dow et al \cite[Theorem 3.9]{DTTW2002}) \label{seq-disc-generated} Every Hausdorff sequential space is discretely generated. \end{theorem}
In summary, we have the following implications for countable regular spaces (see \cite{CamargoUzca2018b}).
\[ \begin{array}{cccccccccclcl} &&&&&&& \\ & & & & && & &F_\sigma\mbox{-base}\\
&&& &&&&\swarrow\\
& & \mbox{Fr\'echet} && & & \mbox{$\mbox{p}^+$} & \\
&\swarrow & &\searrow && \swarrow&& \\ \mbox{Sequential} & && &\mbox{\mbox{$ \text{p}^-$}}\\ \downarrow & \searrow &&\swarrow && \searrow&\\ \mbox{$\mbox{q}^+$} && \mbox{DG}& & &&\mbox{$SS$} && \\
&& \downarrow& & && \downarrow && \\
&& \text{non nodec}& & &&\text{$wSS$} && \end{array} \]
\subsection{A $SS$, $\mbox{q}^+$\ nodec analytic non regular topology} As we said in the introduction, nodec regular spaces are not easy to construct. However, non regular nodec spaces are fairly easy to define. We recall a well known construction given in \cite{Njastad1965}. Let $\tau$ be a topology and define \[ \tau^\alpha=\{V\setminus N:\: V\in\tau\;\mbox{and $N\in\mbox{\sf nwd}(\tau)$}\}. \] Then $\tau^\alpha$ is a topology finer than $\tau$ (see \cite{Njastad1965}).
\begin{lemma}\cite{Njastad1965} \label{tau-alpha} Let $(X,\tau)$ be a space. \begin{itemize}
\item[(i)] $V\in\tau^\alpha$ iff $V\subseteq int_\tau (cl_\tau(int_\tau(V)))$.
\item[(ii)] Let $A\subseteq X$ and $x\not\in A$. Then $x\in cl_{\tau^\alpha} (A)$ if, and only if, $x\in cl_\tau(int_\tau(cl_\tau(A)))$.
\item[(iii)] $(X,\tau^\alpha)$ is a nodec space. \end{itemize} \end{lemma}
\begin{proposition} \label{tau-alpha-q-point} Let $(X,\tau)$ be a countable space. \begin{itemize} \item[(i)] If $(X,\tau)$ is Fr\'echet, then $(X,\tau^\alpha)$ is a $\mbox{q}^+$-space.
\item[(ii)] $(X,\tau)$ is $SS$ if, and only if, $(X,\tau^\alpha)$ is $SS$. \end{itemize} \end{proposition}
\proof (i) Suppose $x\in cl_\alpha(A)\setminus A$ and $(F_n)_n$ is a partition of $A$ with each $F_n$ finite. Let $V=int_\tau(cl_\tau(A))$. By Lemma \ref{tau-alpha} we have that $x\in cl_\tau(V)$. Let $(y_m)_m$ be an enumeration of $V$. Since $A$ is $\tau$-dense in $V$, for every $m$ there is a sequence $(x^m_i)_i$ in $A$ such that $x^m_i\rightarrow y_m$ when $i \to \infty$ (with respect to $\tau$). Since each $F_n$ is finite, we can assume (by passing to a subsequence if necessary) that each $(x^m_i)_i$ is a selector for the partition $(F_n)_n$. Let $S_m$ be the range of $(x^m_i)_i$. Notice that $x\not\in cl_\tau(S_m)$ and every infinite subset of $S_m$ is also a selector for $(F_n)_n$. By a straightforward diagonalization, for each $m$, there is $T_m\subseteq S_m$ such that each $T_m$ is a selector and moreover $\bigcup_m T_m$ is also a selector. Hence we can assume that $S=\bigcup_m\{x^m_i:\;i\in\mathbb{N}\}$ is a selector for the partition. But clearly $S$ is $\tau$-dense in $V$ and thus $V\subseteq int_\tau(cl_\tau(S))$. Hence $x\in cl_\alpha(S)$ (by Lemma \ref{tau-alpha}(i)).
(ii) By Lemma \ref{tau-alpha}(ii), a set is $\tau$-dense iff it is $\tau^\alpha$-dense. \qed
Let $\tau$ be the usual metric topology on the rational ${\mathbb Q}$. It is not difficult to verify that $\tau^\alpha$ is analytic (in fact, it is Borel) and non regular (see \cite{Todoruzca2014}). Thus $({\mathbb Q}, \tau^\alpha)$ is a SS, $\mbox{q}^+$ and nodec non regular space with analytic topology. It is not known if there is a regular space with the same properties.
\section{The spaces $\mathbb{X}({\mathcal I})$ and $\mathbb{Y}(\mathcal{I})$}
We recall the definitions of the spaces $\mathbb{X}({\mathcal I})$ and $\mathbb{Y}({\mathcal I})$ for an ideal ${\mathcal I}$, which were introduced in \cite{Todoruzca2014}.
For each non empty ${\mathcal A}\subseteq 2^{\nat}$, let $\rho_{\mathcal A}$ be the topology on $2^{2^{\nat}\times \mathbb{N}}$ generated by the following sets:
\begin{equation*} (\alpha,p)^{+} = \{ \theta \in 2^{2^{\nat}\times \mathbb{N}}: \theta(\alpha,p)=1\}, \hspace{1.2cm} (\alpha,p)^{-} = \{ \theta \in 2^{2^{\nat}\times \mathbb{N}}: \theta(\alpha,p)=0 \}, \end{equation*}
with $\alpha\in \mathcal{A}$. A basic $\rho_{\mathcal A}$-open set is as follows: $$ V=\bigcap_{i=1}^{m} (\alpha_i,p_i)^+ \cap \bigcap_{i=1}^{n} (\beta_{i},q_i)^- $$
for some $\alpha_1,\cdots,\alpha_m,\beta_1,\cdots, \beta_{n} \in {\mathcal A}$, $p_1,\ldots,p_m,q_1,\ldots,q_n \in \mathbb{N}$. We always assume that $(\alpha_i, p_i)\neq (\beta_j, q_j)$ for all $i$ and $j$, which is equivalent to saying that any set $V$ as above is not empty.
Let $\mathbb{X}$ be the collection of all finite unions of clopen sets of the form $[s] \times \{n\}$ with $n \in \mathbb{N}$ and $s \in 2^{< \omega}$. We also include $\emptyset$ as an element of $\mathbb{X}$. As usual, we regard $\mathbb{X}$ as a subset of $2^{2^{\mathbb{N}} \times \mathbb{N} }$. Let $\{\varphi_n : n \in \mathbb{N} \}$ be an enumeration of $\mathbb{X}$ and for convenience we assume that $\varphi_0$ is $\emptyset$. Each $\varphi_n$, regarded as a function from $2^{\mathbb{N}}\times \mathbb{N}$ to $\{0,1\}$, is continuous. Notice that $\mathbb{X}$ is a group with the symmetric difference as operation.
Let $\psi _n: 2^{\mathbb{N}} \times \mathbb{N} \to \{0,1\} $ be defined by \begin{equation*} \psi _n (\alpha,m)=\left\{ \begin{array}{cl} \varphi_n (\alpha,m), & \text{if } \alpha(n)=0. \\ 1, & \text{if } \alpha(n)=1. \\ \end{array} \right. \end{equation*} Then $\psi_n$ is a continuous function. Let \[ \mathbb{Y}=\{\psi_n:\;n\in \mathbb{N}\}. \]
Given $\mathcal{I}\subseteq 2^{\nat}$, we define
\begin{eqnarray*} \mathbb{X}(\mathcal{I}) & = & (\mathbb{X},\rho_{\mathcal I}),\\ \mathbb{Y}(\mathcal{I}) & = & (\mathbb{Y},\rho_{\mathcal I}). \end{eqnarray*} Also notice that $\mathbb{X}({\mathcal I})$ is a topological group.
To each $F \subseteq \mathbb{N}$, we associate two sets $F'\subseteq \mathbb{X}$ and $\widehat{F}\subseteq \mathbb{Y}$: $$ F':= \{ \varphi_n:\; n \in F\}, $$ $$ \widehat{F}:= \{ \psi_n:\; n \in F\}. $$ The topological similarities between $F'$ and $\widehat{F}$ are crucial to establish some properties of $\mathbb{Y}({\mathcal I})$.
As usual, we identify a subset $A\subseteq \mathbb{N}$ with its characteristic function. So from now on, an ideal ${\mathcal I}$ over $\mathbb{N}$ will be also viewed as a subset of $2^{\nat}$. The properties of $\mathbb{Y}({\mathcal I})$ naturally depend on the ideal ${\mathcal I}$.
\begin{lemma} \label{complexity} If ${\mathcal I}$ is analytic, then $\mathbb{X}({\mathcal I})$ and $\mathbb{Y}({\mathcal I})$ have analytic topologies. \end{lemma}
\begin{proof} It is easy to see that the standard subspace subbases for $\mathbb{X}({\mathcal I})$ and $\mathbb{Y}({\mathcal I})$ are also analytic when ${\mathcal I}$ is analytic. Thus the topology is analytic (see \cite[Proposition 3.2]{todoruzca}). \end{proof}
\begin{theorem} \label{fsigY} If $\mathcal{I}$ is an $F_{\sigma}$ ideal over $\mathbb{N}$, then $\mathbb{Y}(\mathcal{I})$ has an $F_{\sigma}$ base and thus it is SS and DG. \end{theorem}
\begin{proof} It follows from Lemma \ref{scompact} and Theorems \ref{pesSS} and \ref{sq-disc-generated}. \end{proof}
The reason to study the space $\mathbb{Y}( {\mathcal I})$ is the following theorem. Let
$$ \mathcal{I}_{nd}:=\{ F \subseteq \mathbb{N} : \{ \varphi_n : n \in F \} \text{ is nowhere dense in } \mathbb{X} \}. $$
\begin{theorem}\cite{Todoruzca2014} \label{yind} $\mathbb{Y}(\mathcal{I}_{nd})$ is a nodec regular space without isolated points and with an analytic topology. \end{theorem}
$\mathbb{Y}(\mathcal{I}_{nd})$ was so far the only space we knew with the properties stated above. We will present a generalization of this theorem showing other ideals ${\mathcal I}$ such that $\mathbb{Y}({\mathcal I})$ has the same properties.
\subsection{The space $\mathbb{X}({\mathcal I})$}
We present some properties of the space $\mathbb{X}({\mathcal I})$ that will be needed later. We are interested in whether $\mathbb{X}({\mathcal I})$ is DG, SS or $\mbox{q}^+$. We start with a general result which is proved in the same way as Theorem \ref{fsigY}.
\begin{theorem} If $\mathcal{I}$ is an $F_{\sigma}$ ideal over $\mathbb{N}$, then $\mathbb{X}(\mathcal{I})$ has an $F_{\sigma}$ base, and thus it is SS and DG. \end{theorem}
We will show that $\mathbb{X}({\mathcal I})$ is not $\mbox{q}^+$\ except in the extreme case when ${\mathcal I}$ is $\mbox{\sf Fin}$. The key lemma to show this is the following result.
\begin{lemma} \label{xnoq} There is a pairwise disjoint family $\{ A_n : n \in \mathbb{N} \}$ of finite subsets of $\mathbb{X}$ such that $\bigcup_{k \in E} A_k$ is dense in $\mathbb{X}$ (with the product topology) for any infinite $E \subseteq \mathbb{N}$. Moreover, for each infinite set $E\subseteq \mathbb{N}$, each selector $S$ for the family $\{ A_n : n \in E\}$ and each $\varphi \notin S \cup \{ \emptyset \}$, there is $p\in\mathbb{N}$ and $\alpha \in 2^{\mathbb{N}}$ such that $\alpha^{-1}(1) \subseteq^* E$, $\varphi \in (\alpha,p)^+$ and $(\alpha,p)^+ \cap S$ is finite. \end{lemma}
\begin{proof} We say that a $\varphi\in \mathbb{X}$ has the property $(*^m)$, for $m\in\mathbb{N}$, if there are $k \in \mathbb{N}$ and finite sequences $s_i$, for $i=1,...,k$, of length $m+1$ such that $ \varphi = \bigcup _{i=1}^{k} [s_i]\times\{m_i\}$, $m_i\leq m$ and $s_i \restriction m \neq s_j \restriction m$, whenever $m_i=m_j$ (i.e. $[s_j]\cup [s_i]$ is not a basic clopen set). Let $$ A_m = \{ \varphi \in \mathbb{X}: \varphi \text{ has the property }(*^m) \}. $$ Let $E\subseteq \mathbb{N}$ be an infinite set. We will show that $A:=\bigcup_{k \in E} A_k$ is dense in $2^{2^{\nat}\times\mathbb{N}}$. Let $V$ be a basic open set of $2^{2^{\nat}\times\mathbb{N}}$, let us say
$$ V=\bigcap_{i=1}^{m} (\alpha_i,p_i)^+ \cap \bigcap_{i=1}^{n} (\beta_{i},q_i)^- $$
for some $\alpha_1,\cdots,\alpha_m,\beta_1,\cdots, \beta_{n} \in 2^{\nat} $, $p_1,\cdots,p_m,q_1,\cdots,q_n \in \mathbb{N}$. We need to show that $V\cap A$ is not empty. Pick $l$ large enough such that $l+1\in E$, $l+1>\max\{p_i,q_j: i\leq m, j\leq n\}$, $\alpha_i\restriction l\neq \alpha_j\restriction l$ for all $i$ and $j$ such that $\alpha_i\neq \alpha_j$, $\beta_i\restriction l\neq \beta_j\restriction l$ for all $i$ and $j$ such that $\beta_i\neq \beta_j$ and $\alpha_i\restriction l\neq \beta_j\restriction l$ for all $i$ and $j$ such that $\alpha_i\neq\beta_j$. Let $\varphi = \bigcup_{i=1}^{m} [\alpha_i \restriction (l+2)]\times\{p_i\}$. Then $\varphi$ belongs to $A_{l+1}\cap V$.
To see the second claim, let $E\subseteq\mathbb{N}$ be an infinite set and let $S=\{z_n: n \in E \}$ be a selector, that is, $z_n \in A_n$ for all $n \in E$. Fix $\varphi \notin S \cup \{\emptyset\}$, say $\varphi= \bigcup_{i=1}^{l} [t_i]\times\{p_i\}$ for some $t_i\in 2^{<\omega}$ and $p_i\in \mathbb{N}$. The required $\alpha $ is recursively defined as follows: $$ \alpha (n)=\left\{
\begin{array}{cl}
t_1(n), & \text{if } n<|t_1|, \\
1, &\mbox{if $n \geq |t_1|, n \in E$ and $[(\alpha\restriction n) \widehat{\;\;} 0]\times\{p_1\} \subseteq z_n$}, \\
0, & \text{otherwise.}
\end{array}
\right. $$
From the definition of the sets $A_m$, it is easily shown that $(\alpha,p_1) \notin \bigcup\{ z_k: k \geq |t_1| \text{ and }k \in E \}$. Clearly $(\alpha,p_1)^+ \cap S \subseteq \{ z_k : k < |t_1| \text{ and } k \in E \}$ is finite and $\varphi\in (\alpha, p_1)^+$. Finally, it is also clear from the definition of $\alpha$ that $\alpha^{-1}(1) \subseteq^* E$.
\end{proof}
\begin{theorem} \label{eqclq} Let $\mathcal{I}$ be an ideal on $\mathbb{N}$. Then $\mathbb{X}({\mathcal I})$ is $\mbox{q}^+$\ at some (every) point if, and only if, ${\mathcal I}=\mbox{\sf Fin}$. \end{theorem}
\begin{proof} If ${\mathcal I} =\mbox{\sf Fin}$, then $\mathbb{X}({\mathcal I})$ has a countable basis and thus it is $\mbox{q}^+$\ at every point. Since $\mathbb{X}({\mathcal I})$ is homogeneous (as it is a topological group), then if $\mathbb{X}({\mathcal I})$ is $\mbox{q}^+$\ at some point, then it is $\mbox{q}^+$\ at every point. Suppose now that there is $E \in \mathcal{I} \setminus \mbox{\sf Fin}$. We will show that $\mathbb{X}({\mathcal I})$ is not $\mbox{q}^+$\ at some point. Let $\{ A_n : n \in \mathbb{N}\}$ be the sequence, given by Lemma \ref{xnoq}, of pairwise disjoint finite subsets of $\mathbb{X}$ such that $A:=\bigcup_{k \in E} A_k$ is dense in $\mathbb{X}$. Since the topology of $\mathbb{X}$ is finer than the topology of $\mathbb{X}({\mathcal I})$, then $A$ is dense in $\mathbb{X}({\mathcal I})$. Let $\varphi \not\in A\cup\{\emptyset\}$. We will show that $\mathbb{X}({\mathcal I})$ fails the property $\mbox{q}^+$\ at $\varphi$. Let $S$ be a selector of $\{ A_n : n \in E\}$. Let $\alpha\in 2^{\nat}$ and $p\in \mathbb{N}$ be as in the conclusion of Lemma \ref{xnoq}, that is, $\alpha^{-1}(1) \subseteq^* E$, $\varphi \in (\alpha,p)^+$ and $(\alpha,p)^+ \cap S$ is finite. Notice that $\alpha\in{\mathcal I}$ and hence $\varphi$ is not in the $\rho_{\mathcal I}$-closure of $S$. Hence $\mathbb{X}({\mathcal I})$ is not $\mbox{q}^+$\ at $\varphi$.
\end{proof}
Now we look at the $SS$ property. The following result provides a method to construct dense subsets of $\mathbb{X}({\mathcal I})$.
\begin{lemma} \label{DA} For each $A\subseteq \mathbb{N}$ infinite, let $\mathbf{D}(A)$ be the following subset of $\mathbb{X}$: \[ \left\lbrace \bigcup_{i=0}^{k} [s_i] \times \{ m_i \} \in \mathbb{X}:\; A \cap s_i^{-1}(0) \neq \emptyset\; \mbox{ for all } i\in \{0,...,k\}, k\in \mathbb{N}, s_i \in 2^{< \omega} \right\rbrace \cup \{\emptyset\}. \] Then $A\in {\mathcal I}$ if, and only if, $\mathbf{D}(A)$ is not dense in $\mathbb{X}({\mathcal I})$ if, and only if, $\mathbf{D}(A)$ is nowhere dense and closed in $\mathbb{X}({\mathcal I})$. \end{lemma}
\begin{proof}
We first show that $\mathbf{D}(A)$ is closed for every $A\in {\mathcal I}$. We shall show that the complement of $\mathbf{D}(A)$ is open in $\mathbb{X}({\mathcal I})$. Let $\varphi\in \mathbb{X}\setminus \mathbf{D}(A)$. Since $\varphi\neq\emptyset$, we have that $\varphi= \bigcup_{i=1}^{k} [s_i] \times \{m_i\}$ and we can assume that $A \cap s_1^{-1}(0) = \emptyset$. Let $B=A \cup s_1^{-1}(1)$. Notice that $B\in {\mathcal I}$. Let $\beta$ be the characteristic function of $B$. Clearly $\beta\in [s_1]$ and thus $\varphi\in (\beta,m_1)^+$. On the other hand, suppose that $\varphi'= \bigcup_{i=1}^{l} [t_i] \times \{p_i\}\in (\beta,m_1)^+$. Assume that $\beta\in [t_1]$ and $p_1=m_1$, then $t_1^{-1}(0)\subset \beta^{-1}(0)$ and hence $t_1^{-1}(0)\cap A=\emptyset$. This shows that $\varphi'\not\in \mathbf{D}(A)$ and thus $(\beta,m_1)^+\cap \mathbf{D}(A)=\emptyset$.
Now we show that if $A\in {\mathcal I}$, then $\mathbf{D}(A)$ is nowhere dense. Since $\mathbf{D}(A)$ is closed, it suffices to show that it has empty interior. Let $V$ be a basic $\rho_{\mathcal I}$-open set. Let us say
\begin{equation} \label{basicopen1} V=\bigcap_{i=1}^{m} (\alpha_i,p_i)^+ \cap \bigcap_{i=1}^{n} (\beta_{i},q_i)^- \end{equation}
for some $\alpha_1,\cdots,\alpha_m,\beta_1,\cdots, \beta_{n} \in \mathcal{I} $, $p_1,...,p_m,q_1,...,q_n \in \mathbb{N}$. Recall that $(\alpha_i,p_i)\neq (\beta_j,q_j)$ for all $i$ and $j$. Since $\beta_i\in {\mathcal I}$, then $\beta_i^{-1}(0)\neq\emptyset$ for all $i$. Let $l=\max \{ \min(\beta_i^{-1}(0) ) : 1 \leq i \leq n \}$ and $t$ be the constant sequence 1 of length $l$. Since $\mathbb{X}$ is clearly $\rho_{\mathcal I}$-dense, let $\varphi\in V\cap \mathbb{X}$. Then $\varphi\cup ([t] \times \{0\})\in V\setminus \mathbf{D}(A)$.
Finally, we show that if $A\not\in {\mathcal I}$, then $\mathbf{D}(A)$ is dense. Let $V$ be a basic $\rho_{\mathcal I}$-open set as given by \eqref{basicopen1}. Pick $l$ large enough such that $\alpha_i\restriction l\neq \alpha_j\restriction l$ for $i\neq j$, $\beta_i\restriction l\neq \beta_j\restriction l$ for $i\neq j$ and $\alpha_i\restriction l\neq \beta_j\restriction l$ for all $i$ and $j$ such that $\alpha_i\neq\beta_j$. Then pick $k\geq l$ such that $k\geq \min (\alpha_i^{-1}(0)\cap A)$ for all $i\leq m$ (notice that $\alpha_i^{-1}(0)\cap A\neq \emptyset$ as $A\not\in {\mathcal I}$ and $\alpha_i\in {\mathcal I}$). Let $s_i=\alpha_i\restriction k$ for $i\leq m$ and $\varphi=\bigcup_{i=1}^m [s_i]\times\{p_i\}$. Then $\varphi\in V\cap \mathbf{D}(A)$.
\end{proof}
We remind the reader that $F'$ denotes the set $\{\varphi_n:\; n\in F\}$ for each $F\subseteq \mathbb{N}$.
\begin{theorem}\label{xinoSS} Let ${\mathcal I}$ be an ideal over $\mathbb{N}$. If ${\mathcal I} $ is not $p^+$, then $\mathbb{X}(\mathcal{I})$ is not $wSS$. \end{theorem}
\begin{proof} Suppose that $\mathcal I$ is not $p^+$ and fix a sequence $(A_n)_{n \in \mathbb{N}}$ of subsets of $\mathbb{N}$ such that $A_n \notin \mathcal{I}$, $n \in \mathbb{N}$, and $\bigcup_{n \in \mathbb{N}} F_n \in \mathcal{I}$ for all $F_n \subseteq A_n$ finite.
Let $D_n= \mathbf{D}(A_n)$ as in Lemma \ref{DA}. We show that the property $wSS$ fails at the sequence $(D_n)_n$. Let $K_n \subseteq D_n$ be a finite set for each $n$; we need to show that $\bigcup_n K_n$ is nowhere dense in $\mathbb{X}({\mathcal I})$. Let us enumerate each $K_n$ as follows: $$
K_n= \left\lbrace \bigcup_{i=0}^{k_{n,l}} [s_i^{n, l}]\times\{p^{n,l}_i\}: l < |K_n| \right\rbrace. $$
Let $q_n > \max \{ |s_i^{n, l}|: l < |K_n|, i \leq k_{n,l} \}$. By hypothesis, $B=\bigcup _{n \in \mathbb{N}} (A_n \cap \{0, \cdots, q_n \}) \in \mathcal{I}$. Let $\beta$ be the characteristic function of $B$. We claim that for all $m\in \mathbb{N}$ $$ (\beta,m)^+ \cap (\bigcup_{n \in \mathbb{N}} K_n) = \emptyset. $$
Otherwise, there are $n \in \mathbb{N}$, $l < |K_n|$ and $i \leq k_{n,l}$ such that $\beta \in [s_i^{n, l}]$, that is, $s_i^{n, l} \preceq \beta$. But this contradicts the fact that $(A_n \cap \{0, \cdots, q_n\}) \cap \left( s_i^{n, l} \right)^{-1} (0) \neq \emptyset$ for all $i $ and $l$ (recall that $D_n=\mathbf{D}(A_n)$). Thus $(\bigcup_{n \in \mathbb{N}} K_n)\cap (\bigcup_m (\beta,m)^+)=\emptyset$. Since $\bigcup_m (\beta,m)^+$ is $\rho_{\mathcal I}$-open dense, $\bigcup_n K_n$ is $\rho_{\mathcal I}$-nowhere dense. \end{proof}
\begin{proposition} \label{converseque} Let ${\mathcal I}$ be an ideal over $\mathbb{N}$. Any element of $\mathbb{X}({\mathcal I})$ is a limit of a non trivial sequence. \end{proposition}
\begin{proof} Since $\mathbb{X}({\mathcal I})$ is a topological group, it suffices to show that there is a sequence converging to $\emptyset$ (i.e. to $\varphi_0$).
Let $(\alpha_n)_{n \in \mathbb{N}}$ be a sequence in $2^{\mathbb{N}}$ such that $\alpha_k \restriction (k+1) \neq \alpha_l \restriction (k+1)$ for each $k<l$. Let $(x_n)_{n}$ be defined by $x_n= [\alpha_n \restriction (n+1)]\times\lbrace 0\rbrace$. Let $V$ be a neighborhood of $\emptyset$, namely, $V= \bigcap_{i=1}^{m} (\beta_i,n_i)^-$ for some $\beta_i\in {\mathcal I}$ and $n_i \in \mathbb{N}$. We have that $\alpha_n \restriction (n+1) \not\preceq \beta_i$ for almost every $n$, therefore $x_n \in V$ and $x_n \to \emptyset$.
\end{proof}
\begin{question} When is $\mathbb{X}({\mathcal I})$ discretely generated? \end{question}
\subsection{The space $c({\mathcal I})$}
It is natural to wonder what can be said if instead of $\mathbb{X}$ we use the more familiar space $CL(2^{\nat})$ of all clopen subsets of $2^{\nat}$.
Exactly as before we can define a space $c({\mathcal I})$ as follows.
\begin{definition} Let $\mathcal{I}$ be an ideal over $\mathbb{N}$ and $c(\mathcal{I})$ be $(CL(2^{\mathbb{N}}), \tau_{\mathcal{I}} )$, where $\tau_{{\mathcal I}}$ is generated by the following subbasis: $$ \alpha^+=\{x \in CL(2^{\mathbb{N}}): \alpha \in x \} \hspace{1cm} \text{and}\hspace{1cm} \alpha^-=\{x \in CL(2^{\mathbb{N}}): \alpha \notin x \}, $$ where $\alpha \in \mathcal{I}$. \end{definition}
In fact, it is easy to see that $c({\mathcal I})$ is homeomorphic to $\{\bigcup_{i=0}^{k} [s_i] \times \{ 0 \} \in \mathbb{X}: k\in \mathbb{N}, s_i \in 2^{< \omega} \}$ and by a simple modification of the proofs above we have the following.
\begin{theorem} Let $\mathcal{I}$ be an ideal over $\mathbb{N}$. Then $c({\mathcal I})$ is $\mbox{q}^+$\ at some (every) point if, and only if, ${\mathcal I}=\mbox{\sf Fin}$. \end{theorem}
\begin{theorem} Suppose that $\mathcal{I}$ is an ideal over $\mathbb{N}$. If ${\mathcal I} $ is not $p^+$, then $c(\mathcal{I})$ is not $wSS$. \end{theorem}
\subsection{The space $\mathbb{Y}({\mathcal I})$}
In this section we work with the space $\mathbb{Y}({\mathcal I})$ in order to construct nodec spaces. To that end we introduce an operation $\star$ on ideals. We remind the reader that to each $F \subseteq \mathbb{N}$ we associate the sets $F'= \{ \varphi_n:\; n \in F\}$ and $\widehat{F}= \{ \psi_n:\; n \in F\}$.
\begin{definition} Let ${\mathcal I}$ be a nonempty subset of $2^{\nat}$. We define: $$ {\mathcal I} ^{\star} =\{F \subseteq \mathbb{N}:\; F' \text{ is nowhere dense in }\mathbb{X}({\mathcal I}) \}. $$ \end{definition}
Notice that ${\mathcal I}^{\star}$ is a free ideal and ${\mathcal I}_{nd}=(2^{\nat})^{\star}$. We are going to present several results that are useful to compare $\mathbb{X}({\mathcal I})$ and $\mathbb{Y}({\mathcal I})$.
The following fact will be used several times in the sequel.
\begin{lemma} \label{simetricdif} Let ${\mathcal I}$ be an ideal over $\mathbb{N}$. Let $V$ be a basic $\rho_{\mathcal I}$-open set. Then \[ \{n\in\mathbb{N}: \varphi_n\in V \} \triangle \{n\in\mathbb{N}: \psi_n\in V\}\in {\mathcal I}. \] \end{lemma}
\begin{proof} Let $V$ be a non empty basic open set, that is, \begin{equation} \label{basicopen} V=\bigcap_{i=1}^{m} (\alpha_i,p_i)^{+} \cap \bigcap_{j=1}^{l} (\beta_j,q_j)^{-}. \end{equation} From the very definition of $\psi_n$ and viewing it as a clopen set, we have that $$ \psi_n=\varphi_n\cup (\{\alpha\in 2^{\nat}:\;\alpha(n)=1\}\times \mathbb{N}). $$ From this we have the following: \[ \{n\in\mathbb{N}: \varphi_n\in V \} \setminus \{n\in\mathbb{N}: \psi_n\in V\}\subseteq \bigcup_{j=1}^l \beta_j^{-1}(1) \] and \[ \{n\in\mathbb{N}: \psi_n\in V \} \setminus \{n\in\mathbb{N}: \varphi_n\in V\}\subseteq \bigcup_{i=1}^m \alpha_i^{-1}(1). \] Thus when each $\alpha_i$ and each $\beta_j$ belongs to ${\mathcal I}$, the unions on the right also belong to ${\mathcal I}$.
\end{proof}
In the following we compare $\mathbb{X}({\mathcal I})$ and $\mathbb{Y}({\mathcal I})$ in terms of their dense and nowhere dense subsets. Some results require that the ideals ${\mathcal I}$ and ${\mathcal I}^\star$ are comparable, i.e. ${\mathcal I}\subseteq {\mathcal I}^\star$ or ${\mathcal I}^\star \subseteq {\mathcal I}$; it is unclear whether this is always the case.
We are mostly interested in crowded spaces. The following fact gives a sufficient condition for $\mathbb{Y}({\mathcal I})$ to be crowded.
\begin{lemma}\label{crowded} Let ${\mathcal I}$ be an ideal on $\mathbb{N}$. Then \begin{enumerate} \item $\mathbb{X}$ is dense in $(2^{2^{\nat}\times\mathbb{N}},\rho_{\mathcal I})$.
\item $int_{\mathbb{X}({\mathcal I})} ( F')= \emptyset$, for all $F\in {\mathcal I}$ if, and only if, $\mathbb{Y}$ is dense in $(2^{2^{\nat}\times\mathbb{N}},\rho_{\mathcal I})$.
\item If ${\mathcal I}\subseteq {\mathcal I}^\star$, then $\mathbb{Y}$ is dense in $(2^{2^{\nat}\times\mathbb{N}},\rho_{{\mathcal I}})$.
\item If ${\mathcal I}^\star\subseteq {\mathcal I}$, then $\mathbb{Y}$ is dense in $(2^{2^{\nat}\times\mathbb{N}},\rho_{{\mathcal I}^\star})$.
\end{enumerate} \end{lemma}
\begin{proof} (1) is clear. The {\em only if} part of (2) was shown in \cite[Lemma 4.2]{Todoruzca2014}, but we include a proof for the sake of completeness. Let $V$ be a nonempty basic $\rho_{\mathcal I}$-open set. We need to find $n$ such that $\psi_n\in V$. From Lemma \ref{simetricdif} we have that \[ E=\{n\in\mathbb{N}: \varphi_n\in V \text{ and } \psi_n\not\in V\}\in {\mathcal I}. \] Since $int_{\mathbb{X}({\mathcal I})} ( E')=\emptyset$, there is $n$ such that $\varphi_n\in V$ and $n\not\in E$. Therefore $\psi_n\in V$.
For the {\em if} part, suppose that $\mathbb{Y}$ is dense in $(2^{2^{\nat}\times\mathbb{N}},\rho_{\mathcal I})$ and, towards a contradiction, that there is a nonempty basic $\rho_{\mathcal I}$-open set $V$ such that $F=\{n\in \mathbb{N}:\; \varphi_n\in V\}$ belongs to ${\mathcal I}$. From this and Lemma \ref{simetricdif} the following set belongs to ${\mathcal I}$: \[ E=F\cup \{n\in\mathbb{N}: \varphi_n\not\in V \text{ and } \psi_n\in V\}. \] Let $\beta$ be the characteristic function of $E$. Since $V$ is a basic open set of the form \eqref{basicopen}, there is $m$ such that $V\cap (\beta, m)^-\neq\emptyset$. Since $\mathbb{Y}$ is $\rho_{\mathcal I}$-dense, there is $n$ such that $\psi_n\in V\cap (\beta, m)^-$. Hence $\psi_n\not\in (\beta, m)^+$ and, by the definition of $\psi_n$, we have that $\beta(n)=0$. Therefore $n\not\in E$ and $\psi_n\in V$, then $\varphi_n\in V$. Thus $n\in F$, a contradiction.
(3) follows immediately from (2). To see (4), it suffices to show that $int_{\mathbb{X}({\mathcal I}^\star)} ( F')= \emptyset$, for all $F\in {\mathcal I}^\star$. Let $F\in {\mathcal I}^\star$. By definition, $F'$ is nowhere dense in $\mathbb{X}({\mathcal I})$. In particular $int_{\mathbb{X}({\mathcal I}^\star)}( F')= \emptyset$, as ${\mathcal I}^\star\subseteq {\mathcal I}$. \end{proof}
Now we show that the operation $\star$ is monotone.
\begin{lemma}\label{IssinIs} Let ${\mathcal I}$ and $\mathcal{J}$ be ideals over $\mathbb{N}$ with $\mathcal{J}\subseteq {\mathcal I}$. Then \begin{enumerate} \item For every basic $\rho_{\mathcal I}$-open set $V$ of $2^{2^{\nat}\times \mathbb{N}}$ there are sets $W$, $U$ such that $V=W\cap U$, $W$ is a $\rho_{\mathcal{J}}$-open set and $U$ is a basic $\rho_{\mathcal I}$-open set which is also $\rho_{\mathcal{J}}$-dense.
\item If $A\subseteq 2^{2^{\nat}\times \mathbb{N}}$ is $\rho_\mathcal{J}$-nowhere dense, then $A$ is $\rho_\mathcal{I}$-nowhere dense.
\item $\mathcal{J}^\star\subseteq {\mathcal I}^{\star}$. Moreover, if $\mathcal{J}\subsetneq {\mathcal I}$, then $\mathcal{J}^\star\subsetneq {\mathcal I}^{\star}$. \end{enumerate}
\end{lemma}
\begin{proof} (1) Let $V$ be a basic open set, that is, \begin{equation} \nonumber V=\bigcap_{i=1}^{m} (\alpha_i,p_i)^{+} \cap \bigcap_{j=1}^{l} (\beta_j,q_j)^{-}. \end{equation} Notice that if every $\alpha$ and $\beta$ belongs to ${\mathcal I}\setminus\mathcal{J}$, then $V$ is $\rho_{\mathcal{J}}$-dense. Thus given such basic open set $V$ where every $\alpha$ and $\beta$ belongs to ${\mathcal I}$, we can separate them and form $W$ and $U$ as desired: For $W$, we use the $\alpha$'s and $\beta$'s belonging to $\mathcal J$ (put $W=2^{2^{\nat}\times \mathbb{N}}$ in case there is none in $\mathcal{J})$ and for $U$, we use the $\alpha$'s and $\beta$'s belonging to ${\mathcal I}\setminus\mathcal J$.
(2) Let $A\subseteq 2^{2^{\nat}\times \mathbb{N}}$ be a $\rho_\mathcal{J}$-nowhere dense set. Let $V$ be a basic $\rho_{\mathcal I}$-open set of $2^{2^{\nat}\times \mathbb{N}}$. Then $V=W\cap U$ where $W$ and $U$ are as given by part (1). As $A$ is $\rho_\mathcal{J}$-nowhere dense, there is a non empty $\rho_\mathcal{J}$-open set $W'\subseteq W$ such that $W'\cap A=\emptyset$. Since $W'$ is also $\rho_{\mathcal I}$-open and $U$ is $\rho_\mathcal{J}$-dense, then $U\cap W'$ is a non empty $\rho_{\mathcal I}$-open set disjoint from $A$ and contained in $V$.
(3) Since $\mathbb{X}$ is dense in $2^{2^{\nat}\times\mathbb{N}}$, then $A\in \mbox{\sf nwd}(\mathbb{X}({\mathcal I}))$ if, and only if, $A$ is nowhere dense in $(2^{2^{\nat}\times\mathbb{N}},\rho_{\mathcal I})$. From this and (2) we immediately get that $\mathcal{J}^\star\subseteq {\mathcal I}^{\star}$. Finally, notice that from Lemma \ref{DA}, we have that for $A\in {\mathcal I}\setminus \mathcal{J}$, the set $\mathbf{D}(A)$ is nowhere dense in $\mathbb{X}({\mathcal I})$ and dense in $\mathbb{X}(\mathcal{J})$. \end{proof}
The next result gives a sufficient condition for $\mathbb{Y}({\mathcal I}^\star)$ to be nodec. It is a generalization of a result from \cite{Todoruzca2014}.
\begin{lemma} \label{Lemanodec} Let ${\mathcal I}$ be an ideal over $\mathbb{N}$ and $F\subseteq \mathbb{N}$.
\begin{enumerate} \item If $F \in \mathcal{I}$, then $\widehat{F}$ is closed discrete in $\mathbb{Y}(\mathcal{I})$.
\item Let ${\mathcal I}$ be such that ${\mathcal I}^{\star} \subseteq {\mathcal I}$. If $\widehat{F}$ is nowhere dense in $\mathbb{Y}({\mathcal I}^{\star})$, then $F \in {\mathcal I}^{\star}$. \item If ${\mathcal I}^{\star} \subseteq {\mathcal I}$, then $\mathbb{Y}({\mathcal I}^{\star})$ is nodec. \end{enumerate} \end{lemma}
\begin{proof} (1) is Lemma 4.1 from \cite{Todoruzca2014}; we include the proof for the reader's convenience.
Since ${\mathcal I}$ is hereditary, it suffices to show that $\widehat{F}$ is closed for every $F\in {\mathcal I}$. Let $F\in{\mathcal I}$ and let $F$ denote also its characteristic function. Notice that for each $m\in \mathbb{N}$, if $C=\{n\in\mathbb{N}:\; \psi_n\in (F,m)^+\}$, then $\widehat{C}$ is closed in $\mathbb{Y}({\mathcal I})$. We claim that \[ F=\bigcap_{m\in\mathbb{N}}\{n\in\mathbb{N}:\; \psi_n\in (F,m)^+\}. \] From this it follows that $\widehat{F}$ is closed in $\mathbb{Y}({\mathcal I})$. To show the equality above, let $n\in F$, then by the definition of $\psi_n$, we have that $\psi_n\in (F,m)^+$ for all $m\in\mathbb{N}$. Conversely, suppose $n\not\in F$ and let
$\varphi_n$ be $[s_1]\times \{m_1\}\cup\cdots \cup [s_k]\times \{m_k\}$. Pick $m\not\in\{m_1,\cdots,m_k\}$, then $\varphi_n\not\in (F,m)^+$ and thus $\psi_n\not\in (F,m)^+$ by the definition of $\psi_n$.
(2) is a generalization of Lemma 4.3 of \cite{Todoruzca2014}. Let $\widehat{F}$ be nowhere dense in $\mathbb{Y}({\mathcal I}^\star)$ and suppose, towards a contradiction, that $F\not\in {\mathcal I}^\star$. Let $V$ be a basic $\rho_{{\mathcal I}}$-open set such that $F'\cap V$ is $\rho_{\mathcal I}$-dense in $V$. By Lemma \ref{IssinIs}, there are sets $W$ and $U$ such that $V=W\cap U$, $W$ is a $\rho_{{\mathcal I}^\star}$-open set, $U$ is a basic $\rho_{\mathcal I}$-open set and $U$ is also $\rho_{{\mathcal I}^\star}$-dense. Since $\widehat{F}$ is nowhere dense in $\mathbb{Y}({\mathcal I}^\star)$, there is a basic $\rho_{{\mathcal I}^\star}$-open set $W'\subseteq W$ such that $\widehat{F}\cap W'=\emptyset$, that is \[ F\cap\{n\in \mathbb{N}:\; \psi_n\in W'\}=\emptyset. \] From Lemma \ref{simetricdif} we know that \[ \{n\in\mathbb{N}: \varphi_n\in W' \} \setminus \{n\in\mathbb{N}: \psi_n\in W'\}\in {\mathcal I}^\star. \] From this and the previous fact we get \[ F\cap \{n\in \mathbb{N}:\; \varphi_n\in W'\}\in {\mathcal I}^\star. \] This says that $F'\cap W'$ is nowhere dense in $\mathbb{X}({\mathcal I})$, which is a contradiction, as by construction, $F'\cap V$ is $\rho_{\mathcal I}$-dense in $V$ and $W'\cap U\subseteq V$ is a non empty $\rho_{\mathcal I}$-open set (it is non empty as $U$ is $\rho_{{\mathcal I}^\star}$-dense).
(3) follows immediately from (1) and (2). \end{proof}
The natural bijection $\psi_n\mapsto \varphi_n$ is not continuous (neither is its inverse), however it has some form of semi-continuity as we show below.
\begin{proposition} Let ${\mathcal I}$ be an ideal over $\mathbb{N}$. Let $\Gamma:\mathbb{Y}\to \mathbb{X}$ be given by $\Gamma (\psi_n)=\varphi_n$. Let $\alpha\in {\mathcal I} $ and $p\in \mathbb{N}$. Then $\Gamma^{-1} ((\alpha, p)^+\cap \mathbb{X})$ is open in $\mathbb{Y}({\mathcal I})$. In general, if $V$ is a $\rho_{\mathcal I}$-basic open set, then there is $D\subseteq \mathbb{Y}$ closed discrete in $\mathbb{Y}({\mathcal I})$ and a $\rho_{\mathcal I}$-open set $W$ such that $\Gamma^{-1}(V\cap \mathbb{X})= (W\cap \mathbb{Y})\cup D$. \end{proposition}
\begin{proof} Let $\alpha\in {\mathcal I}$ and $p\in\mathbb{N}$. Let $O=\{\psi_n:\; \varphi_n \in (\alpha, p)^+\}$. We need to show that $O$ is open in $\mathbb{Y}({\mathcal I})$. Let $F=\alpha^{-1}(1)$. Since $((\alpha, p)^+\cap \mathbb{Y})\setminus \widehat{F}\subseteq O\subseteq (\alpha, p)^+\cap \mathbb{Y}$, there is $A\subseteq F$ such that $O =((\alpha, p)^+\cap \mathbb{Y})\setminus \widehat{A}$. As $A\in {\mathcal I}$, then by Lemma \ref{Lemanodec}, $\widehat{A}$ is closed discrete in $\mathbb{Y}({\mathcal I})$. Thus $O$ is open in $\mathbb{Y}({\mathcal I})$. On the other hand, $\{\psi_n: \varphi_n\in (\alpha,p)^-\}= ((\alpha,p)^-\cap \mathbb{Y})\cup ( \{\psi_n: \varphi_n\in (\alpha,p)^{-}\}\cap \widehat{F})$. \end{proof}
The derivative operator on $\mathbb{Y}({\mathcal I})$ can be characterized as follows.
\begin{proposition} Let ${\mathcal I}$ be an ideal over $\mathbb{N}$ and $A\subseteq \mathbb{N}$. Then $\psi_l$ is a $\rho_{\mathcal I}$-accumulation point of $\widehat{A}$ if, and only if, for every non empty $\rho_{{\mathcal I}}$-open set $V$ with $\psi_l\in V$ we have \[ \{n\in A:\;\varphi_n\in V\}\not\in {\mathcal I}. \] \end{proposition} \begin{proof} Let $V$ be a $\rho_{{\mathcal I}}$-open set with $\psi_l\in V$. Suppose $F=\{n\in A:\;\varphi_n\in V\}\in {\mathcal I}$. Then by Lemma \ref{Lemanodec}, $\widehat{F}$ is closed discrete in $\mathbb{Y}({\mathcal I})$ which is a contradiction as $\psi_l$ is an accumulation point of $\widehat{F}$. Conversely, let $V$ be a basic $\rho_{\mathcal I}$-open set containing $\psi_l$. By Lemma \ref{simetricdif} the following set belongs to ${\mathcal I}$: \[ E=\{n\in\mathbb{N}: \varphi_n\in V \text{ and } \psi_n\not\in V\}. \] We also have \[ F=\{n\in A:\;\varphi_n\in V\}\subseteq \{n\in A: \varphi_n\in V \text{ and } \psi_n\in V\}\cup E. \] Since $E\in {\mathcal I}$ and by hypothesis $F\not\in {\mathcal I}$, then there are infinitely many $n\in A$ such that $\psi_n\in V$ and we are done.
\end{proof}
Now we show that the spaces $\mathbb{X}({\mathcal I})$ and $\mathbb{Y}({\mathcal I})$ are not homeomorphic in general.
\begin{proposition} Let ${\mathcal I}$ be a tall ideal over $\mathbb{N}$. There are no non trivial convergent sequences in $\mathbb{Y}({\mathcal I})$. In particular, $\mathbb{Y}({\mathcal I})$ is not homeomorphic to $\mathbb{X}({\mathcal I})$. \end{proposition}
\begin{proof} Let $A\subseteq \mathbb{N}$ be an infinite set. We will show that $\widehat{A}=\{\psi_n:\; n\in A\}$ is not convergent in $\mathbb{Y}({\mathcal I})$. Since ${\mathcal I}$ is tall, pick $B\subseteq A$ infinite with $B\in {\mathcal I}$. Then $\widehat{B}$ is closed discrete in $\mathbb{Y}({\mathcal I})$ (by Lemma \ref{Lemanodec}). Thus $\widehat{A}$ is not convergent. From this, the last claim follows since $\mathbb{X}({\mathcal I})$ has plenty of convergent sequences (see Proposition \ref{converseque}). \end{proof}
The next result shows that our spaces are analytic.
\begin{lemma} \label{staranalytic} Let ${\mathcal I}$ be an analytic ideal over $\mathbb{N}$. Then ${\mathcal I}^{\star}$ is analytic. \end{lemma}
\begin{proof} The argument is analogous to that of Lemma 4.8 of \cite{Todoruzca2014}. We include a sketch of it for the sake of completeness. First, we recall a result from \cite{Todoruzca2014} (see Lemma 4.7).
\noindent{\em Claim:} Let $J$ be an infinite set. Then $M \subseteq 2^{J}$ is nowhere dense if, and only if, there is $C \subseteq J$ countable such that $M\restriction C=\{ x \restriction C: x \in M \}$ is nowhere dense in $2^C$.
Let $Z$ be the set of all $z \in (2^{\nat} \times \mathbb{N})^{\mathbb{N}}$ such that $z(k) \neq z(j)$ for all $k \neq j$ and $\{z(k): k \in \mathbb{N} \} \subseteq {\mathcal I} \times \mathbb{N}$. Since ${\mathcal I}$ is an analytic set, then $Z$ is an analytic subset of $(2^{\nat} \times \mathbb{N})^{\mathbb{N}}$.
Consider the following relation $R \subseteq \mathcal{P}(\mathbb{N}) \times (2^{\nat} \times \mathbb{N})^{\mathbb{N}}$: $$ (F,z) \in R \Leftrightarrow \; z\in Z \;\mbox{and }\; \{\varphi_n \restriction \{z(k): k \in \mathbb{N} \} : n \in F \} \text{ is nowhere dense in }2^{ \{z(k):\; k \in \mathbb{N} \} }. $$ Then $R$ is an analytic set. From the claim above, we have $$ F \in {\mathcal I}^{\star} \Leftrightarrow (\exists z \in (2^{\nat} \times \mathbb{N})^{\mathbb{N}}) R(F,z). $$ Thus, ${\mathcal I}^{\star}$ is analytic. \end{proof}
Finally, we can show one of our main results. Let us define a sequence $({\mathcal I}^k)_{k \in \mathbb{N}}$ of ideals on $\mathbb{N}$ as follows:
$${\mathcal I}^k=\left\{ \begin{array}{cl} 2^{\nat}, & \text{if }k=0, \\ ({\mathcal I}^{k-1})^{\star}, & \text{if }k>0. \\ \end{array} \right.$$
Notice that ${\mathcal I}^{k+1} \subsetneq {\mathcal I}^{k}$ for each $k \in \mathbb{N}$ by Lemma \ref{IssinIs}.
\begin{theorem}\label{Ykesnod} For all $k>0$, $\mathbb{Y}({\mathcal I}^k)$ is analytic, nodec and crowded. \end{theorem}
\begin{proof} That $\mathbb{Y}({\mathcal I}^k)$ is analytic and nodec follows from Lemmas \ref{staranalytic}, \ref{complexity}, \ref{Lemanodec} and \ref{IssinIs}. Since ${\mathcal I}^k\subseteq {\mathcal I}_{nd}$, then by Lemma \ref{crowded}, $\mathbb{Y}({\mathcal I}^k)$ is crowded. \end{proof}
Thus we do not know whether $\mathbb{Y}({\mathcal I}^\star)$ is nodec for ideals such that ${\mathcal I}^\star \not\subseteq {\mathcal I}$. The reason is that it is not clear if part (2) in Lemma \ref{Lemanodec} holds in general without the assumption that ${\mathcal I}^\star\subseteq {\mathcal I}$. In this respect, we only were able to show the following.
\begin{lemma} Let ${\mathcal I}$ be an ideal on $\mathbb{N}$ such that ${\mathcal I}\subseteq {\mathcal I}^\star$. Let $A\subseteq \mathbb{N}$. Then
\begin{enumerate} \item Let $V$ be a non empty $\rho_{\mathcal I}$-open set. If $A'$ is $\rho_{\mathcal I}$-dense in $V$, then $\widehat{A}$ is $\rho_{\mathcal I}$-dense in $V$.
\item If $\widehat{A}$ is nowhere dense in $\mathbb{Y}({\mathcal I})$, then $A'$ is nowhere dense in $\mathbb{X}({\mathcal I})$ (i.e., $A \in {\mathcal I}^\star$). In particular, if $\widehat{A}$ is nowhere dense in $\mathbb{Y}({\mathcal I})$, then $\widehat{A}$ is closed discrete in $\mathbb{Y}({\mathcal I}^\star)$. \end{enumerate} \end{lemma}
\begin{proof} (1) Let $V$ be a non empty $\rho_{\mathcal I}$-open set and suppose $A'$ is $\rho_{\mathcal I}$-dense in $V$. Let $W$ be a basic $\rho_{\mathcal I}$-open set with $W\subseteq V$. We need to find $n\in A$ such that $\psi_n\in W$. By Lemma \ref{simetricdif} the following set belongs to ${\mathcal I}$: \[ E=\{n\in\mathbb{N}: \varphi_n\in W \text{ and } \psi_n\not\in W\}. \] As ${\mathcal I}\subseteq {\mathcal I}^\star$, then $E'$ is nowhere dense in $\mathbb{X}({\mathcal I})$. Since $A'$ is dense in $V$, then $A'\cap W\not\subseteq E'$. Let $n\in A\setminus E$ such that $\varphi_n\in W$. As $n\not\in E$, then $\psi_n\in W$.
(2) Follows from (1) and part (1) in Lemma \ref{Lemanodec}. \end{proof}
Now we compare the dense sets in $\mathbb{Y}({\mathcal I})$ and $\mathbb{X}({\mathcal I})$.
\begin{lemma} \label{denytodenxstar} Let ${\mathcal I}$ be an ideal on $\mathbb{N}$ such that $\mathbb{Y}$ is dense in $(2^{2^{\nat}\times\mathbb{N}},\rho_{\mathcal I})$ and $D\subseteq \mathbb{N}$. If $\widehat{D}$ is dense in $\mathbb{Y}({\mathcal I})$, then $D'$ is dense in $\mathbb{X}({\mathcal I})$. \end{lemma}
\begin{proof} Suppose $\widehat{D}$ is dense in $\mathbb{Y}({\mathcal I})$. Let $V$ be a basic $\rho_{\mathcal I}$-open set. We need to find $n\in D$ such that $\varphi_n\in V$. By Lemma \ref{simetricdif} the following set belongs to ${\mathcal I}$: \[ E=\{n\in\mathbb{N}: \varphi_n\not\in V \text{ and } \psi_n\in V\}. \] Let $F=\{n\in D:\; \psi_n\in V\}$. Since $\widehat{D}$ is $\rho_{{\mathcal I}}$-dense, then $F\not\in {\mathcal I}$ (by part (1) of Lemma \ref{Lemanodec} and the assumption that $\mathbb{Y}$ is dense in $(2^{2^{\nat}\times\mathbb{N}},\rho_{\mathcal I})$). Thus there is $n\in F\setminus E$. Then $\psi_n\in V$ and $\varphi_n\in V$.
\end{proof}
Observe that $\mbox{\sf Fin}\subseteq \mbox{\sf Fin}^\star\subseteq \mbox{\sf Fin}^{\star\star}\subseteq \cdots\subseteq {\mathcal I}^k$ for all $k$. Notice that $\mbox{\sf Fin}^\star$ is isomorphic to $\mbox{\sf nwd} (\mathbb{Q})$ as $\mathbb{X}(\mbox{\sf Fin})$ is homeomorphic to $\mathbb{Q}$. The following is a natural and intriguing question.
\begin{question} Is $\mathbb{Y}(\mbox{\sf Fin}^\star)$ nodec? \end{question}
It is unclear when an ideal ${\mathcal I}$ satisfies either ${\mathcal I}\subseteq {\mathcal I}^\star$ or ${\mathcal I}^\star\subseteq {\mathcal I}$. The following question asks a concrete instance of this problem.
\begin{question} Two ideals that naturally extend $\mbox{\sf Fin}$ are $\{\emptyset\}\times\mbox{\sf Fin}$ and $\mbox{\sf Fin}\times \{\emptyset\}$ (where $\times$ denotes the Fubini product). Let ${\mathcal I}$ be any of those two ideals. Is ${\mathcal I}\subseteq {\mathcal I}^\star$? \end{question}
\subsection{SS property in $\mathbb{Y}({\mathcal I})$}
We do not know whether $\mathbb{Y}({\mathcal I}_{nd})$ is $SS$. However, we show below that $\mathbb{Y}({\mathcal I}^k)$ is not $wSS$ for all $k>1$; this was the reason to introduce the ideals ${\mathcal I}^\star$.
We need an auxiliary result.
\begin{lemma} \label{densostar} Let ${\mathcal I}$ be an ideal over $\mathbb{N}$ such that ${\mathcal I}^\star \subseteq {\mathcal I}$. Let $V$ be a non empty $\rho_{{\mathcal I}^\star}$-open set and $D \subseteq \mathbb{N}$. If $D'$ is $\rho_{\mathcal I}$-dense in $V$, then $\widehat{D}$ is $\rho_{{\mathcal I}^\star}$-dense in $V$. \end{lemma}
\begin{proof} Let $V$ be a non empty $\rho_{{\mathcal I}}$-open set and suppose that $D'$ is $\rho_{\mathcal I}$-dense in $V$. Let $W$ be a $\rho_{{\mathcal I}^\star}$-basic open set such that $W\subseteq V$. We need to show that there is $n\in D$ such that $\psi_n\in W$. By Lemma \ref{simetricdif} the following set belongs to ${\mathcal I}^\star$: \[ E=\{n\in\mathbb{N}: \varphi_n\in W \text{ and } \psi_n\not\in W\}. \] Since $W$ is also $\rho_{\mathcal I}$-open (as ${\mathcal I}^\star \subseteq {\mathcal I}$) and $E'$ is $\rho_{\mathcal I}$-nowhere dense, then there is a non empty $\rho_{\mathcal I}$-open set $V_1\subseteq W$ such that $V_1\cap E'=\emptyset$. Since $D'$ is $\rho_{\mathcal I}$-dense in $V$, there is $n\in D$ such that $\varphi_n\in V_1$. Notice that $n\not\in E$. Since $\varphi_n\in W$, then $\psi_n\in W$. \end{proof}
\begin{theorem}\label{Yknoss} Let ${\mathcal I}$ be an ideal over $\mathbb{N}$ such that ${\mathcal I}^\star\subseteq {\mathcal I}$. Then $\mathbb{Y}({\mathcal I}^{\star\star})$ is not $wSS$. \end{theorem}
\begin{proof} Notice that $\mathbb{X}$ is $\rho_{{\mathcal I}}$-crowded (see Lemma \ref{crowded}). Also, observe that ${\mathcal I}^{\star\star}\subseteq {\mathcal I}^\star$ (see Lemma \ref{IssinIs}). Let $(U_n)_{n \in \mathbb{N}}$ be a pairwise disjoint sequence of non empty $\rho_{{\mathcal I}^{\star\star}}$-open sets. Let $A_n = \{ m \in \mathbb{N}: \varphi_m \in U_n \}$. It is clear that $A_n \notin {\mathcal I}^{\star}$ for each $n \in \mathbb{N}$. It is easy to verify that the sequence $(A_m)_m$ witnesses that ${\mathcal I}^\star$ is not $\mbox{p}^+$. Let $D_n=\mathbf{D}(A_n)$, as defined in Lemma \ref{DA}. Let $$ E_n= \{\psi_m \in Y:\; \varphi_m \in D_n \}. $$ We claim that the sequence $(E_n)_{n \in \mathbb{N}}$ witnesses that the space $\mathbb{Y}(\mathcal{I}^{\star\star})$ is not $wSS$. In fact, since $A_n \notin {\mathcal I}^{\star}$, then $D_n$ is dense in $\mathbb{X}({\mathcal I}^{\star})$ (by Lemma \ref{DA}), so $E_n$ is dense in $\mathbb{Y}({\mathcal I}^{\star\star})$ (by Lemma \ref{densostar}). Let $K_n \subseteq E_n$ be a finite set and $L_n= \{ \varphi_m: \psi_m \in K_n \}$ for each $n\in \mathbb{N}$. Since $A_n\not\in{\mathcal I}^{\star}$ and ${\mathcal I}^{\star}$ is not $\mbox{p}^+$, then, by the proof of Theorem \ref{xinoSS}, $L=\bigcup_{n \in \mathbb{N}} L_n$ is nowhere dense in $\mathbb{X}(\mathcal{I}^{\star})$. Thus $L\in {\mathcal I}^{\star\star}$. Therefore $\widehat{L}=\bigcup_{n \in \mathbb{N}} K_n$ is closed discrete in $\mathbb{Y}(\mathcal{I}^{\star\star})$ (by Lemma \ref{Lemanodec}).
\end{proof}
We have seen in Theorem \ref{Ykesnod} that $\mathbb{Y}({\mathcal I}^k)$ is nodec for every $k\geq 1$. From Theorem \ref{Yknoss} we have the following.
\begin{corollary} $\mathbb{Y}({\mathcal I}^{k})$ is not $wSS$ for every $k>1$. \end{corollary}
Recall that ${\mathcal I}^{1}$ is ${\mathcal I}_{nd}$. We do not know whether $\mathbb{Y}({\mathcal I}_{nd})$ is SS. We only know the following. Suppose $\widehat{D_n}=\{\psi_m: m \in D_n \}$ is open dense in $\mathbb{Y}(\mathcal{I}_{nd})$, for every $n\in \mathbb{N}$. Then there is $F_n \subseteq D_n$ finite for each $n$ such that $\bigcup_{n \in \mathbb{N}} \widehat{F_n}$ is dense.
\begin{question} Is there an ideal ${\mathcal I}$ on $\mathbb{N}$ such that ${\mathcal I}\subseteq {\mathcal I}^\star$ and $\mathbb{Y}({\mathcal I}^\star)$ is $wSS$? In particular, is $\mathbb{Y}(\mbox{\sf Fin}^\star)$ $wSS$? \end{question}
\subsection{$\mbox{q}^+$\ in $\mathbb{Y}({\mathcal I})$}
We shall prove that for certain kinds of ideals, $\mathbb{Y}({\mathcal I})$ is not $q^+$. We use a construction quite similar to that in the proof of Theorem \ref{eqclq}.
We recall that in the proof of Lemma \ref{xnoq} we have introduced the following property: Let $m \in \mathbb{N}$. We say that $\varphi \in \mathbb{X}$ has the property $(*^m)$ if there are $k \in \mathbb{N}$, $s_i \in 2^{m+1}$ $(i=1,...,k)$ finite sequences and $m_i \leq m$ $(i=1,...,k)$ natural numbers such that $ \varphi= \bigcup _{i=1}^{k} [s_i]\times \{m_i\}$ and if $m_i=m_j$ with $i \neq j$, then $s_i \restriction m \neq s_j \restriction m$.
\begin{lemma} \label{xnoq2} Let ${\mathcal I}$ be an ideal over $\mathbb{N}$ such that ${\mathcal I}\subseteq {\mathcal I}_{nd}$. Let $$ A_m = \{ \varphi \in \mathbb{X}: \varphi \text{ has the property }(*^m) \} $$ and \[ B_m=\{\psi_n\in \mathbb{Y}:\; \varphi_n \in A_m\}. \]
Let $L= \{n \in \mathbb{N}: \varphi_n \notin \bigcup _{m \in \mathbb{N}} A_m \}$ and suppose there is an infinite set $L'=\{m_k:\;k\in \mathbb{N}\}\subseteq L$ such that $L'\in \mathcal{I}$. Let \[ B=\bigcup_k B_{m_k}. \] Let $q\in \mathbb{N}$ be such that $\varphi_q=2^{\nat}\times \{0\}$. Then \begin{enumerate} \item $B$ is dense in $\mathbb{Y}({\mathcal I})$ and, in particular, $\psi_q \in cl_{\rho_{\mathcal I}} (B)$.
\item Let $S\subseteq B$ be such that $S\cap B_{m_k}$ has at most one element for each $k$, then $\psi_q\not\in cl_{\rho_{\mathcal I}} (S)$. \end{enumerate}
\end{lemma}
\proof (1) Let $A=\bigcup_k A_{m_k}$. By Lemma \ref{xnoq}, $A$ is dense in $\mathbb{X}$. Thus by Lemma \ref{densostar}, $B$ is dense in $\mathbb{Y}({\mathcal I}_{nd})$ (recall that ${\mathcal I}_{nd}=(2^{\nat})^\star$). As
${\mathcal I}\subseteq {\mathcal I}_{nd}$, then $B$ is also dense in $\mathbb{Y}({\mathcal I})$.
(2) Let $S=\{\psi_{n_k}:\; k\in \mathbb{N}\}$ be such that $\psi_{n_k}\in B_{m_k}$ for all $k\in \mathbb{N}$. We will show that $\psi_q\not\in cl_{\rho_{\mathcal I}} (S)$.
Let $\alpha\in2^{\nat}$ be defined as follows: If $0 \in L'$ and $[\langle 0\rangle] \times \{0\} \subseteq \varphi_{n_0} $, then $\alpha(0)=1$. Otherwise, $\alpha(0)=0$. For $n>1$, $$ \alpha (n)=\left\{
\begin{array}{cl}
1, & \text{if }n \in L' \text{ , } \text{$n=m_k$ for some $k$} \\ & \text{ and }[\langle \alpha(0),...,\alpha(n-1),0\rangle] \times \{0\} \subseteq \varphi_{n_k}. \\
0, & \text{ otherwise.}
\end{array}
\right. $$ Observe that $\alpha^{-1}(1) \in \mathcal{I}$, as $\alpha^{-1}(1) \subseteq L' \in \mathcal{I}$.
It is clear that $\psi_q\in (\alpha,0)^+$. To finish the proof, it suffices to show that $(\alpha,0) \notin \bigcup_{k \in \mathbb{N}} \psi_{n_k}$. Suppose, towards a contradiction, that there is $l \in \mathbb{N}$ such that $(\alpha,0) \in \psi_{n_l}$, that is, $(\alpha,0) \in \varphi_{n_l} \cup ([n_l] \times \mathbb{N})$. There are two cases to be considered.
(i) Suppose $\alpha(n_l)=1$. Then $n_l\in L'$ and thus $\varphi_{n_l}\not\in A_{m_l}$ which contradicts that $\psi_{n_l}\in B_{m_l}$.
(ii) Suppose $\alpha(n_l)=0$ and thus $(\alpha, 0)\in \varphi_{n_l}$. Let $\varphi_{n_l}= \bigcup_{i=1}^{r} [s_i] \times \{p_i\}$ with $s_i \in 2^{m_l+1}$. Then $\alpha \in [s]$, where $s$ is $s_i$ for some $i$ with $p_i=0$. Hence $\alpha(n)=s(n)$ for all $n \leq m_l$. We consider two cases. Suppose $\alpha(m_l)=1$. Then $s(m_l)=1$. Let $t$ be such that $s=t\widehat{\;} 1$. Then by the definition of $\alpha$, we have that $[t\widehat{\;}0]\times\{0\}\subseteq \varphi_{n_l}$. But also $[s]\times\{0\}=[t\widehat{\;}1]\times\{0\}\subseteq \varphi_{n_l}$ which contradicts that $\varphi_{n_l}\in A_{m_l}$ (i.e. that it has property $(*^{m_l}))$. Now suppose that $\alpha(m_l)=0$. Then $[s]\times\{0\}=[t\widehat{\;}0]\times\{0\}\not\subseteq \varphi_{n_l}$, but this contradicts that $[s]\times\{0\}$ is $[s_i]\times\{p_i\}$ for some $i$. \endproof
From the previous lemma we immediately get the following.
\begin{theorem}\label{YI no q} Let ${\mathcal I}$ be a tall ideal over $\mathbb{N}$ such that $\mathcal{I} \subseteq \mathcal{I}_{nd}$. Then $\mathbb{Y}(\mathcal{I})$ is not $q^+$. \end{theorem}
\begin{question} Is there an ideal (necessarily non tall) different from $\mbox{\sf Fin}$ such that $\mathbb{Y}({\mathcal I})$ is $\mbox{q}^+$? Two natural candidates are $\{\emptyset\}\times\mbox{\sf Fin}$ and $\mbox{\sf Fin}\times \{\emptyset\}$. \end{question}
Finally, we have the following.
\begin{theorem} $\mathbb{Y}({\mathcal I}^k)$ is a non SS, non $\mbox{q}^+$\ nodec regular space with analytic topology for every $k>1$. \end{theorem}
\noindent \textbf{Acknowledgment:} We are thankful to the referee for the comments that improved the presentation of the paper.
\end{document} |
\begin{document}
\begin{frontmatter}
\title{Exact Density matrix of an oscillator-bath system: Alternative derivation}
\author{Fardin Kheirandish} \address{Department of Physics, Faculty of Science, University of Kurdistan, P.O.Box 66177-15175, Sanandaj, Iran}
\begin{abstract} \noindent Starting from a total Lagrangian describing an oscillator-bath system, an alternative derivation of exact quantum propagator is presented. Having the quantum propagator, the exact density matrix, reduced density matrix of the main oscillator and thermal equilibrium fixed point are obtained. The modified quantum propagator is obtained in the generalised case where the main oscillator is under the influence of a classical external force. By introducing auxiliary classical external fields, the generalised quantum propagator or generating functional of position correlation functions is obtained. \end{abstract}
\begin{keyword} \texttt{Density matrix}\sep Oscillator-Bath\sep Propagator \sep Generating function \end{keyword}
\end{frontmatter}
\linenumbers
\section{Introduction}
\noindent The quantum propagator is the most important function in quantum theories \cite{Propagator-1,Propagator-2}. Knowing the quantum propagator, we can obtain all measurable quantities related to the physical system exactly, that is, we have a complete physical description of the underlying system at any time. Unfortunately, except for some simple physical systems, obtaining the exact form of quantum propagator is usually a difficult task and we have to invoke perturbative methods. Among different approaches to find the quantum propagator, we can refer to two main approaches. In the first method, quantum propagator is written as a bilinear function in eigenvectors of the Schr\"{o}dinger equation. The main task in this method is to find the eigenfunctions of the Hamiltonian which are usually difficult to find and even having these eigenfunctions, extracting a closed form quantum propagator from them may be cumbersome. The second approach is based on the Feynman path integral technique \cite{Feynman-1,Kleinert,Zin}. One of the most efficient features of this method is its perturbative technique known as Feynman diagrams which extends the applicability of the method to the realm of non-quadratic Lagrangians. The path integral technique has been applied to oscillator-bath system in \cite{Path-0,Path-1,Path-2,Path-3,Path-4,Path-5,Path-6}.
Here we follow an alternative approach to find the quantum propagator. This approach which we will describe in detail is based on the position and momentum operators in Heisenberg picture. In this scheme, using elementary quantum mechanical relations, two independent partial differential equations are found that the quantum propagator satisfies. The solutions of these partial differential equations are easily found and unknown functions are determined from basic properties of quantum propagators. The first message of the present paper is that this method compared to other methods to derive the quantum propagator of an oscillator-bath system with a linear coupling is easier to apply and in particular, compared with the path integral technique, there is no need to introduce more advanced mathematical notions like infinite integrations, operator determinant and Weyl ordering. The second message is that since we will find a closed form for the total quantum propagator, we will find a closed form density matrix describing the combined oscillator-bath system. Also, by tracing out the bath degrees of freedom, we find a reduced density matrix describing the main oscillator at any time. In the following, we will generalise the oscillator-bath model by including external classical sources in the Hamiltonian, and find the modified quantum propagator under the influence of classical forces. The modified quantum propagator can be interpreted also as a generating functional from which time-ordered correlation functions among different position operators can be determined \cite{Greiner}. The basic ingredient of the approach is a symmetric time-independent matrix $B$ (Eq.~(\ref{14-2})) depending on natural frequencies of the bath oscillators and coupling constants. Therefore, from a numerical or simulation point of view, the only challenge is finding the inverse of the matrix $B$ or equivalently diagonalizing it.
The efficiency of the method introduced here in determining the exact form of the quantum propagator for quadratic Lagrangians inspires the idea of developing a perturbative approach to include non-quadratic Lagrangians too. The process presented to determine the quantum propagator suggests that these perturbative techniques may be based on perturbative solutions of nonlinear partial differential equations. This development deserves to be investigated in an independent work.
\section{Lagrangian}
\noindent In this section, we set the stage for what will be investigated in the following sections. We start with a total Lagrangian describing an interacting oscillator-bath system. Then from the corresponding Hamiltonian and Heisenberg equations of motion, we find explicit expressions for position and momentum operators as the main ingredients of an approach that will be applied in the next section. The Lagrangian describing a main oscillator interacting linearly with a bath of oscillators is given by \cite{Weiss} \begin{equation}\label{1}
L=\haf \dot{x}^2-\haf\omega_0^2 x^2+\sum_{i=1}^N \haf (\dot{X}^2_i-\omega_i^2 X^2_i)+\sum_{i=1}^N g_i X_i x, \end{equation} Eq.(\ref{1}) can be rewritten in a more compact form as \begin{equation}\label{2}
L=\haf \sum_{\mu=0}^N (\dot{Y}^2_\mu-\omega_\mu^2 Y_\mu^2)+\haf\sum_{\mu,\nu=0}^N Y_\mu \Omega_{\mu\nu}^2 Y_\nu, \end{equation} where the matrix $\Omega^2_{\mu\nu}$ is given by \begin{equation}\label{3}
\Omega^2_{\mu\nu}=\left(
\begin{array}{ccccc}
0 & g_1 & g_2 & \cdots & g_N \\
g_1 & 0 & 0 & \cdots & 0 \\
g_2 & 0 & 0 & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
g_N & 0 & 0 & \cdots & 0 \\
\end{array}
\right), \end{equation} and \begin{equation}\label{4}
Y_0=x,\,\,\,\,Y_k=X_k,\,\,\,\,k=1,\cdots,N. \end{equation} The corresponding Hamiltonian is \begin{equation}\label{5}
H=\haf \sum_{\mu=0}^N (P^2_\mu+\omega_\mu^2 Y_\mu^2)-\haf\sum_{\mu,\nu=0}^N Y_\mu \Omega_{\mu\nu}^2 Y_\nu, \end{equation} where $P_\mu=\dot{Y}_\mu$ is the canonical conjugate momentum corresponding to the canonical position $Y_\mu$. The system is quantized by imposing the equal-time commutation relations \begin{eqnarray}\label{6} && [\hat{Y}_\mu, \hat{P}_\nu] = i\hbar\,\delta_{\mu\nu},\nonumber\\ && [\hat{Y}_\mu, \hat{Y}_\nu] = [\hat{P}_\mu, \hat{P}_\nu]=0, \end{eqnarray} and from Heisenberg equations of motion one finds \begin{equation}\label{7}
\ddot{\hat{Y}}_\mu +\omega_\mu^2 \hat{Y}_\mu=\sum_\nu \Omega_{\mu\nu}^2 \hat{Y}_\nu. \end{equation} Note that $(\hat{Y}_0, \hat{P}_0)$ refer to the position and momentum of the main oscillator and $(\hat{Y}_k, \hat{P}_k),\,\,(k=1,\cdots,N)$ refer to position and momentum operators of bath oscillators. Taking the Laplace transform from both sides of Eq.(\ref{7}) we find \begin{equation}\label{8}
\sum_{\nu}\Lambda_{\mu\nu}(s)\hat{\tilde{Y}}_\mu (s)=s \hat{Y}_\mu (0)+\hat{P}_\mu (0), \end{equation} where the $N+1$-dimensional matrix $\Lambda$ is defined by \begin{equation}\label{9}
\Lambda_{\mu\nu} (s)=[(s^2 +\omega_\mu^2)\delta_{\mu\nu}-\Omega_{\mu\nu}^2]. \end{equation} Therefore, applying the inverse matrix, we find \begin{equation}\label{10}
\hat{\tilde{Y}}_\mu (s)=\sum_{\nu} [s\Lambda^{-1}_{\mu\nu} (s)\hat{Y}_\nu (0)+\Lambda^{-1}_{\mu\nu} (s)\hat{P}_\nu (0)], \end{equation} and a formal solution is obtained by inverse Laplace transform as \begin{equation}\label{11}
\hat{Y}_\mu (t)=\dot{F}_{\mu\nu} (t) \hat{Y}_\nu (0)+F_{\mu\nu} (t) \hat{P}_\nu (0), \end{equation} where we defined \begin{equation}\label{12}
F_{\mu\nu} (t)=\mathcal{L}^{-1}[\Lambda^{-1}(s)]_{\mu\nu}. \end{equation} The matrix $\Lambda$ is explicitly given by \begin{equation}\label{13}
\Lambda (s)=\left(
\begin{array}{ccccc}
s^2 + \omega_0^2 & -g_1 & -g_2 & \cdots & -g_N \\
-g_1 & s^2 + \omega_1^2 & 0 & \cdots & 0 \\
-g_2 & 0 & s^2 + \omega_2^2 & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
-g_N & 0 & 0 & \cdots & s^2 + \omega_N^2 \\
\end{array}
\right), \end{equation} which can be rewritten as \begin{equation}\label{14-1}
\Lambda (s)=s^2 \,\mathbb{I}+B, \end{equation} wherein \begin{equation}\label{14-2}
B=\left(
\begin{array}{ccccc}
\omega_0^2 & -g_1 & -g_2 & \cdots & -g_N \\
-g_1 & \omega_1^2 & 0 & \cdots & 0 \\
-g_2 & 0 & \omega_2^2 & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
-g_N & 0 & 0 & \cdots & \omega_N^2 \\
\end{array}
\right). \end{equation} The inverse matrix can be formally written as \begin{eqnarray}\label{14-3}
\Lambda^{-1} (s) &=& \frac{1}{s^2 \,\mathbb{I}+B}=\frac{1}{s^2}\,\frac{1}{\mathbb{I}+\frac{1}{s^2}\,B}\nonumber \\
&=& \frac{1}{s^2}\,\bigg(\mathbb{I}-\frac{1}{s^2}\,B+\frac{1}{s^4}B^2-\cdots\bigg)\nonumber \\
&=& \sum_{n=0}^\infty \frac{(-1)^n}{s^{2n+2}}\,B^n,\,\,\,\,(B^0=\mathbb{I}). \end{eqnarray} Therefore, from Eq.(\ref{12}) we have \begin{eqnarray}\label{15}
&& F_{\mu\nu}(t)=\sum_{n=0}^\infty \frac{(-1)^n \,t^{2n+1}}{(2n+1)!}\,(B^n)_{\mu\nu},\nonumber\\
&& \dot{F}_{\mu\nu}(t)=\Big(\frac{dF}{dt}\Big)_{\mu\nu}=\sum_{n=0}^\infty \frac{(-1)^n \,t^{2n}}{(2n)!}\,(B^n)_{\mu\nu}. \end{eqnarray} The equations Eqs.(\ref{15}) can be formally written as \begin{eqnarray}\label{15-1}
&& F(t)=\frac{1}{\sqrt{B}}\,\sin(\sqrt{B}\,t),\nonumber\\
&& \dot{F}(t)=\cos(\sqrt{B}\,t). \end{eqnarray} From Eqs.(\ref{15}) we deduce that the matrices $F_{\mu\nu}(t)$ and $\dot{F}_{\mu\nu}(t)$ are odd and even in $t$, respectively. \subsection{Connection to the previous works} \noindent Eq.~(\ref{11}) has appeared in \cite{Haake} with a minor change of notation in the framework of the Ullersma diagonalisation technique \cite{Ullersma}. Let the matrix $X_{\mu\nu}$ be a unitary matrix that diagonalizes the symmetric matrix $B$ given by Eq. (\ref{14-2}) with corresponding eigenvalues $z^2_{\alpha},\,(\alpha=0,1,\cdots,N)$. Therefore, in matrix notation we have \begin{equation}\label{C1}
(X^t B X)_{\alpha\beta}=z^2_{\alpha}\,\delta_{\alpha\beta}, \end{equation} and using the first equation of Eq. (\ref{15-1}), we find \cite{Haake} \begin{equation}\label{C2} F_{\mu\nu} (t)=\sum_{\alpha=0}^N X_{\mu\alpha}X_{\nu\alpha}\frac{1}{z_{\alpha}}\sin z_{\alpha}t. \end{equation} The eigenvalues $z^2_{\alpha}$ of the matrix $B$, satisfy the characteristic equation \begin{equation}\label{C3}
\det(B-z^2\mathbb{I})=0\Rightarrow \left|
\begin{array}{ccccc}
\omega_0^2-z^2 & -g_1 & -g_2 & \cdots & -g_N \\
-g_1 & \omega_1^2-z^2 & 0 & \cdots & 0 \\
-g_2 & 0 & \omega_2^2-z^2 & \cdots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
-g_N & 0 & 0 & \cdots & \omega_N^2-z^2 \\
\end{array}
\right|=0, \end{equation} the determinant can be evaluated using the mathematical induction leading to the following characteristic equation \begin{equation}\label{C4}
g(z)=z^2-\omega_0^2-\sum_{n}\frac{g_n^2}{z^2-\omega_n^2}=0. \end{equation} By making use of Eq. (\ref{7}), we find the following quantum Langevin equation for the main oscillator \begin{equation}\label{C5} \ddot{\hat{Y}}_0 (t)-\int_0^t dt'\,\chi(t-t')\,\hat{Y}_0 (t')+\omega_0^2 \,Y_0 (t)=\Upsilon(t), \end{equation} where the susceptibility of the environment is defined by \begin{equation}\label{C6} \chi(t)=\sum_{k=1}^N g^2_k\,\frac{\sin(\omega_k t)}{\omega_k}, \end{equation} and the noise operator by \begin{equation}\label{C7} \hat{\Upsilon}(t)=\sum_{k=1}^N g_k \,\big[\cos(\omega_k t)\hat{Y}_k (0)+\frac{\sin(\omega_k t)}{\omega_k}\hat{P}_k (0)\big], \end{equation} where $\hat{Y}_k (0)$ and $\hat{P}_k (0)$ are the position and momentum operators at initial time ($t=0$). Taking the Laplace transform of the Langevin equation Eq. (\ref{C5}), we will find the Laplace transform of the corresponding Green's function as \begin{equation}\label{C8} \tilde{G}(s)=\frac{1}{s^2-\tilde{\chi}(s)+\omega_0^2}=\frac{1}{s^2+\omega_0^2-\sum\limits_{k=1}^N \frac{g_k^2}{s^2+\omega_k^2}}, \end{equation} where \begin{equation}\label{C9} \tilde{\chi}(s)=\sum_{k=1}^N \frac{g_k^2}{s^2+\omega_k^2}. \end{equation} The Green's function in frequency space ($G(\omega)$) can be obtained from the Laplace transformed Green's function $\tilde{G}(s)$ using the identity $G(\omega)=\tilde{G}(i\omega)$, therefore, \begin{equation}\label{C10} G(\omega)=\frac{-1}{\omega^2-\omega_0^2-\sum\limits_{k=1}^N \frac{g_k^2}{\omega^2-\omega_k^2}}=\frac{-1}{g(\omega)}, \end{equation} that is the roots of the characteristic equation $g(z)=0$ are the poles of the Green's function $G(\omega)$ in frequency domain.
\section{Quantum Propagator}
\noindent In this section a novel scheme to derive the quantum propagator of the combined oscillator-bath system is introduced in detail. Let $|y_0{\rangle}$ be an eigenket of $\hat{Y}_0$ and $|y_k{\rangle}$ an eigenket of $\hat{Y}_k$, then in Heisenberg picture, we can write \begin{equation}\label{16}
\hat{Y}_\mu (t)\,|\mathbf{y},t{\rangle}=y_\mu \,|\mathbf{y},t{\rangle}, \end{equation} where for notational simplicity the tensor product is abbreviated as \begin{equation}\label{17}
|\mathbf{y},t{\rangle}=|y_0,t{\rangle}\otimes|y_1,t{\rangle}\otimes\cdots\otimes|y_N,t{\rangle}=|y_0,\cdots,y_N,t{\rangle}. \end{equation}
Multiplying Eq.(\ref{16}) from the left by ${\langle} \mathbf{y}'|$ and using Eq.(\ref{11}), we find \begin{equation}\label{18}
\sum_{\nu=0}^N\bigg(\dot{F}_{\mu\nu} (t)\,y'_\nu-i\hbar\,F_{\mu\nu} (t)\,\frac{{\partial}}{{\partial} y'_\nu}\bigg)
\,\mathcal{K}(\mathbf{y}'|\mathbf{y},t)=y_\mu\,\mathcal{K}(\mathbf{y}'|\mathbf{y},t), \end{equation} where we have defined the function $\mathcal{K}$ as \begin{equation}\label{18-1}
{\langle} \mathbf{y}'|\mathbf{y},t{\rangle}=\mathcal{K}(\mathbf{y}'|\mathbf{y},t), \end{equation} and made use of the identities \begin{eqnarray}\label{19}
{\langle} \mathbf{y}'|\hat{Y}_\mu (0) &=& y'_\mu {\langle} \mathbf{y}'|,\nonumber \\
{\langle} \mathbf{y}'|\hat{P}_\mu (0) &=& -i\hbar\,\frac{{\partial}}{{\partial} y'_\mu}{\langle} \mathbf{y}'|. \end{eqnarray} Eq.(\ref{18}) can be rewritten as \begin{equation}\label{20}
\sum_{\nu=0}^N F_{\mu\nu} (t)\,\frac{{\partial}}{{\partial} y'_\nu}\ln \mathcal{K}(\mathbf{y}'|\mathbf{y},t)=
\frac{i}{\hbar}\bigg(y_\mu-\sum_{\nu}\dot{F}_{\mu\nu} (t)\,y'_\nu\bigg). \end{equation} The right hand side of Eq.(\ref{20}) is linear in $y'_\mu$, so the following quadratic form can be assumed for $\ln \mathcal{K}$ \begin{equation}\label{21}
\ln\mathcal{K}(\mathbf{y}'|\mathbf{y},t)=A(\mathbf{y},t)+\sum_{\mu=0}^N A_\mu (\mathbf{y},t)y'_\mu+\haf \sum_{\mu,\nu=0}^N y'_\mu C_{\mu\nu} (\mathbf{y},t) y'_\nu, \end{equation} where $C_{\mu\nu}=C_{\nu\mu}$. By inserting Eq.(\ref{21}) into Eq.(\ref{20}), we easily find \begin{eqnarray}\label{22}
A_\mu (y,t) &=& \frac{i}{\hbar}\,\sum_{\nu=0}^N F^{-1}_{\mu\nu} (t)\,y_\nu,\nonumber\\
C_{\mu\nu} (t) &=& -\frac{i}{\hbar}\,\sum_{\sigma=0}^N F^{-1}_{\mu\sigma} (t)\,\dot{F}_{\sigma\nu} (t), \end{eqnarray} therefore, in dyadic notation, we can write \begin{equation}\label{23}
\mathcal{K}(\mathbf{y}'|\mathbf{y},t)=e^{A(\mathbf{y},t)}e^{\frac{i}{\hbar}\mathbf{y}'\cdot\mathbf{F}^{-1}(t)\cdot \mathbf{y}}
e^{-\frac{i}{2\hbar}\,\mathbf{y}'\cdot \mathbf{F}^{-1} (t)\dot{\mathbf{F}}(t)\cdot \mathbf{y}'}. \end{equation} The form of $A(\mathbf{y},t)$ can be determined from the properties of propagators. Since the Hamiltonian Eq.(\ref{5}) is time-independent, we can write \begin{equation}\label{24}
\mathcal{K}(\mathbf{y}'|\mathbf{y},t)={\langle} \mathbf{y}'|\mathbf{y},t{\rangle}={\langle} \mathbf{y}'|e^{\frac{it}{\hbar}\hat{H}}|\mathbf{y}{\rangle}. \end{equation} Eq.(\ref{24}), is invariant under successive transformations (i) complex conjugation (ii) $\mathbf{y}\leftrightarrow \mathbf{y}'$ (iii) $t\rightarrow -t$, therefore, \begin{equation}\label{25}
\mathcal{K}(\mathbf{y}'|\mathbf{y},t)=\mathcal{K}^{*}(\mathbf{y}|\mathbf{y}',-t), \end{equation} leading to \begin{eqnarray}\label{26}
e^{A(\mathbf{y},t)} &=& e^{\varphi(t)}\,e^{-\frac{i}{2\hbar}\,\mathbf{y}\cdot \mathbf{F}^{-1} (t)\dot{\mathbf{F}}(t)\cdot \mathbf{y}},\nonumber\\
\varphi^{*} (-t) &=& \varphi(t). \end{eqnarray} Note that in Sec.VI, the Hamiltonian will be time-dependent and to find $A(\mathbf{y},t)$ we can not use these transformations and we will follow another approach. Up to now the form of the propagator is as follows \begin{eqnarray}\label{27}
\mathcal{K}(\mathbf{y}'|\mathbf{y},t) &=& e^{\varphi(t)}\,e^{-\frac{i}{2\hbar}\,\mathbf{y}\cdot \mathbf{F}^{-1} (t)\dot{\mathbf{F}}(t)\cdot \mathbf{y}}e^{\frac{i}{\hbar}\mathbf{y}'\cdot\mathbf{F}^{-1}(t)\cdot \mathbf{y}}
e^{-\frac{i}{2\hbar}\,\mathbf{y}'\cdot \mathbf{F}^{-1} (t)\dot{\mathbf{F}}(t)\cdot \mathbf{y}'},\nonumber\\
&=& e^{\varphi(t)}\,e^{-\frac{i}{2\hbar}[\mathbf{y}\cdot \mathbf{F}^{-1} \dot{\mathbf{F}}\cdot \mathbf{y}+
\mathbf{y}'\cdot \mathbf{F}^{-1} \dot{\mathbf{F}}\cdot \mathbf{y}'-2\mathbf{y}'\cdot \mathbf{F}^{-1}\cdot \mathbf{y}]}. \end{eqnarray} From Eqs.(\ref{15-1}) we find the following asymptotic behaviours of Matrices $\mathbf{F},\,\mathbf{F}^{-1}$, and $\dot{\mathbf{F}}$ \begin{eqnarray}\label{28}
\lim_{t\rightarrow 0} \mathbf{F}(t) \thickapprox t\,\mathbb{I},\nonumber \\
\lim_{t\rightarrow 0} \mathbf{F}^{-1}(t) \thickapprox \frac{1}{t}\,\mathbb{I},\nonumber \\
\lim_{t\rightarrow 0} \dot{\mathbf{F}}(t) \thickapprox \mathbb{I}, \end{eqnarray} By inserting these asymptotic behaviours into Eq.(\ref{27}) we find \begin{equation}\label{29}
\lim_{t\rightarrow 0}\mathcal{K}(\mathbf{y}'|\mathbf{y},t)=\delta(\mathbf{y}'-\mathbf{y})=
\lim_{t\rightarrow 0} e^{\varphi(t)}\,e^{-\frac{i}{2\hbar t}(\mathbf{y}'-\mathbf{y})^2}, \end{equation} comparing Eq.(\ref{29}) with the following one-dimensional representation of Dirac delta function \begin{equation}\label{30}
\lim_{t\rightarrow 0} \sqrt{\frac{A}{\pi t}}\,e^{-\frac{A}{t}\,(x-x')^2}=\delta(x-x'), \end{equation} we deduce immediately \begin{equation}\label{31}
\lim_{t\rightarrow 0} e^{\varphi(t)}=\bigg(\frac{i}{2\pi\hbar\,t}\bigg)^{\frac{N+1}{2}}, \end{equation} so we can assume \begin{equation}\label{32}
e^{\varphi(t)}=\bigg(\frac{i}{2\pi\hbar\,t}\bigg)^{\frac{N+1}{2}}e^{\lambda(t)}, \end{equation} where the unknown function $\lambda(t)$ satisfies \begin{equation}\label{33} \lim_{t\rightarrow 0} \lambda(t)=0. \end{equation} The function $\mathcal{K}$ now has the form \begin{eqnarray}\label{34}
\mathcal{K}(\mathbf{y}'|\mathbf{y},t) &=& e^{\lambda(t)}\bigg(\frac{i}{2\pi\hbar t}\bigg)^{\frac{N+1}{2}}\,e^{-\frac{i}{2\hbar}[\mathbf{y}\cdot \mathbf{F}^{-1} \dot{\mathbf{F}}\cdot \mathbf{y}+\mathbf{y}'\cdot \mathbf{F}^{-1} \dot{\mathbf{F}}\cdot \mathbf{y}'-2\mathbf{y}'\cdot \mathbf{F}^{-1}\cdot \mathbf{y}]}. \end{eqnarray} To find $\lambda(t)$ we make use of the following identity \begin{equation}\label{35}
\delta(\mathbf{y}'-\mathbf{y})=\int d\mathbf{y}''\,\mathcal{K}(y'|y'',t)\,\mathcal{K}^{*}(y|y'',t), \end{equation} which can be easily checked using the definition of $\mathcal{K}$, Eq.(\ref{18-1}). By inserting Eq.(\ref{34}) and its complex conjugation into Eq.(\ref{35}) and doing the integral we will find \begin{equation}\label{36}
e^{\lambda(t)}=\frac{t^{\frac{N+1}{2}}}{\sqrt{|\det \mathbf{F}(t)|}}\,e^{i\theta}, \end{equation} where $\theta$ is a real function that will be determined from a limiting case where the coupling constants are turned off ($g_1=\cdots=g_N=0$) and also the fact that the propagator should satisfy the Schr\"{o}dinger equation.
It should be noted that according to the definition Eq.(\ref{18-1}), the Feynman propagator has the following relation to the function $\mathcal{K}$ \begin{equation}\label{37}
K(\mathbf{y},t;\mathbf{y}',0)={\langle} \mathbf{y},t|\mathbf{y}',0{\rangle}={\langle} \mathbf{y}'|\mathbf{y},t{\rangle}^{*}=\mathcal{K}^{*}(\mathbf{\mathbf{y}}'|\mathbf{\mathbf{y}},t), \end{equation} therefore, Feynman propagator is given by \begin{eqnarray}\label{38}
K(\mathbf{y},t;\mathbf{y}',0) &=& \frac{e^{-i\theta}}{\sqrt{|\det F(t)|}}\bigg(\frac{1}{2\pi i\hbar}\bigg)^{\frac{N+1}{2}}\,e^{\frac{i}{2\hbar}[\mathbf{y}\cdot \mathbf{F}^{-1} \dot{\mathbf{F}}\cdot \mathbf{y}+\mathbf{y}'\cdot \mathbf{F}^{-1} \dot{\mathbf{F}}\cdot \mathbf{y}'-2\mathbf{y}'\cdot \mathbf{F}^{-1}\cdot \mathbf{y}]}.\nonumber\\ \end{eqnarray} Now we set $y'=0$ and require that $K(\mathbf{y},t;0,0)$ satisfy the Schr\"{o}dinger equation \begin{equation}\label{38-1}
i\hbar\frac{{\partial} K(\mathbf{y},t;0,0)}{{\partial} t}=\bigg[\haf \sum_{\mu=0}^N \bigg(-\hbar^2\frac{{\partial}^2}{{\partial} y_\mu^2}+\omega_\mu^2 y_\mu^2\bigg)-
\haf\sum_{\mu,\nu=0}^N y_\mu \Omega_{\mu\nu}^2 y_\nu\bigg]\,K(\mathbf{y},t;0,0), \end{equation} after spatial differentiations we set $y=0$ and by comparing both sides of Eq.(\ref{38-1}) we find that $\theta$ is a constant. To find the constant $\theta$, we turn off the coupling constants, ($g_1=g_2=\cdots=g_N=0$), and from consistency condition we should recover the quantum propagator of $N$ noninteracting oscillators. When the coupling constants are turned off, We have \begin{eqnarray}\label{39}
\mathbf{F}^{-1} (t) &=& \mbox{diag}\bigg(\frac{\omega_0}{\sin(\omega_0 t)},\frac{\omega_1}{\sin(\omega_1 t)},\cdots,\frac{\omega_N}{\sin(\omega_N t)}\bigg),\nonumber\\
\dot{\mathbf{F}}(t) &=& \mbox{diag}(\cos(\omega_0 t),\cos(\omega_1 t),\cdots,\cos(\omega_N t)). \end{eqnarray} Inserting Eqs.(\ref{39}) into Eq.(\ref{38}) we find \begin{equation}\label{40}
K(\mathbf{y},t;\mathbf{y}',0)=e^{-i\theta}\prod_{\mu=0}^N \sqrt{\frac{\omega_\mu}{2\pi i\hbar \sin(\omega_\mu t)}}
\,e^{\frac{i \omega_\mu}{2\hbar\sin(\omega_\mu t)}\big[(y_\mu^2+{y'}^2_\mu)\cos(\omega_\mu t)-2y_\mu y'_\mu\big]}, \end{equation} which is the propagator of $N$ noninteracting oscillators if we set $\theta=0$. Finally, we find the quantum propagator of oscillator-bath system as \begin{eqnarray}\label{41}
K(\mathbf{y},t;\mathbf{y}',0) &=& \frac{1}{\sqrt{\det F(t)}}\bigg(\frac{1}{2\pi i\hbar}\bigg)^{\frac{N+1}{2}}\,e^{\frac{i}{2\hbar}[\mathbf{y}\cdot \mathbf{F}^{-1} \dot{\mathbf{F}}\cdot \mathbf{y}+\mathbf{y}'\cdot \mathbf{F}^{-1} \dot{\mathbf{F}}\cdot \mathbf{y}'-2\mathbf{y}'\cdot \mathbf{F}^{-1}\cdot \mathbf{y}]}.\nonumber\\ \end{eqnarray}
\section{Density matrix}
\noindent In this section we will find the density matrix for the oscillator-bath system using the explicit form of the quantum propagator Eq.(\ref{41}) of the combined system. If we denote the evolution operator by $\hat{U}(t)$ then the density matrix at time $t$ can be obtained from the initial density matrix at $t=0$ as \begin{equation}\label{42}
\hat{\rho}(t)=\hat{U}(t) \hat{\rho}(0) \hat{U}^{\dag}(t), \end{equation} in position representation we have \begin{eqnarray}\label{43}
\rho(\mathbf{y},\mathbf{y}';t) &=& {\langle} \mathbf{y}|\rho (t)|\mathbf{y}'{\rangle} \nonumber\\
&=& \int d\mathbf{y}_1 d\mathbf{y}_2\,{\langle} \mathbf{y}|\hat{U}(t)|\mathbf{y}_1{\rangle}{\langle} \mathbf{y}_1|\rho (0)|\mathbf{y}_2{\rangle}{\langle} \mathbf{y}_2 |\hat{U}^{\dag}(t)|\mathbf{y}'{\rangle},\nonumber \\
&=& \int d\mathbf{y}_1 d\mathbf{y}_2\, K(\mathbf{y},t;\mathbf{y}_1,0)\rho(\mathbf{y}_1,\mathbf{y}_2;0) K^{*}(\mathbf{y}',t;\mathbf{y}_2,0). \end{eqnarray} We can assume an arbitrary initial state for oscillator-bath system, but for simplicity we assume that the initial state is a product state as \begin{equation}\label{44}
\rho(\mathbf{y}_1,\mathbf{y}_2;0)=\rho_{red} (y_{10},y_{20};0)\otimes \rho_B (\vec{y}_1,\vec{y}_2;0), \end{equation} where $\mathbf{y}_1=(y_{10},\vec{y}_1)$ and $\mathbf{y}_2=(y_{20},\vec{y}_2)$. To find the reduced density matrix of the main oscillator, we should take trace over the degrees of freedom of the bath oscillators. Straightforward calculations lead to \begin{eqnarray}\label{46}
\rho_{red} (y_0,y'_0;t) &=& \frac{1}{|\det F|}\frac{1}{(2\pi\hbar)^{N+1}}\int dy_{01} dy_{02}\,e^{\frac{ia(y_0^2-{y'_0}^2)}{2\hbar}}\nonumber\\
&\cdot & e^{\frac{ia(y^2_{01}-y^2_{02})}{2\hbar}-\frac{ib(y_0 y_{01}-y_0' y_{02})}{\hbar}}\rho_{red}(y_{01},y_{02};0)\nonumber\\
&\cdot & \,I(y_0,y'_0;y_{01},y_{02}), \end{eqnarray} where \begin{eqnarray}\label{I}
I(y_0,y'_0;y_{01},y_{02}) &=& \int d\vec{y}\,e^{\frac{i}{\hbar}\sum\limits_{k=1}^N\big[(y_0-y'_0)B_k-(y_{01}-y_{02})C_k\big]y_k}\int d\vec{y}_1 d\vec{y}_2\,\rho_B (\vec{y}_1,\vec{y}_2;0)\nonumber\\
&\cdot & \,e^{\frac{i}{2\hbar}\sum\limits_{k,l=1}^N y_{1k}A_{kl}y_{1l}}\,e^{\frac{i}{\hbar}\sum\limits_{k=1}^N \big[y_{01}B_k-y_0 C_k -\sum\limits_{l=1}^N D_{kl}y_l\big] y_{1k}}\nonumber\\
&\cdot & e^{\frac{-i}{2\hbar}\sum\limits_{k,l=1}^N y_{2k}A_{kl}y_{2l}}e^{\frac{-i}{\hbar}\sum\limits_{k=1}^N
\big[y_{02}B_k-y'_0 C_k -\sum\limits_{l=1}^N D_{kl}y_l\big]y_{2k}}. \end{eqnarray} The Eq. (\ref{46}) can be rewritten as \begin{equation}\label{J1}
\rho_{red} (y_0,y'_0;t) = \int dy_{01} dy_{02}\,J(y_0,y'_0;t|y_{01},y_{02})\,\rho_{red}(y_{01},y_{02};0), \end{equation} where \begin{eqnarray}\label{J2}
J(y_0,y'_0;t|y_{01},y_{02}) &=& \frac{1}{|\det F|}\frac{1}{(2\pi\hbar)^{N+1}}\,e^{\frac{ia(y_0^2-{y'_0}^2)}{2\hbar}}\,e^{\frac{ia(y^2_{01}-y^2_{02})}{2\hbar}-\frac{ib(y_0 y_{01}-y_0' y_{02})}{\hbar}}\nonumber\\ & \times & I(y_0,y'_0;y_{01},y_{02}), \end{eqnarray}
The function $J(y_0,y'_0;t|y_{01},y_{02})$, which can be interpreted as a reduced kernel, has been expressed in path integral language in terms of the Feynman-Vernon influence functional \cite{Path-0,Path-1,Path-4}. Here we have obtained the reduced kernel in terms of the quadratic integrals. The time dependent functions ($a, b$), vectors ($C_k, B_k$) and matrices ($A_{kl}, D_{kl}$) are defined by \begin{eqnarray}\label{defs}
a(t) &=& (F^{-1}\dot{F})_{00},\nonumber\\
b(t) &=& (F^{-1})_{00},\nonumber\\
C_k (t) &=& (F^{-1})_{k0}=(F^{-1})_{0k},\nonumber\\
B_k (t) &=& (F^{-1}\dot{F})_{0k}=(F^{-1}\dot{F})_{k0},\nonumber\\
A_{kl} &=& (\mathbf{A})_{kl}=(F^{-1}\dot{F})_{kl},\nonumber\\
D_{kl} &=& (\mathbf{D})_{kl}=(F^{-1})_{kl}=(F^{-1})_{lk}, \end{eqnarray} which can be rewritten more compactly in matrix form as \begin{equation}\label{FF}
F^{-1}\dot{F}=\left(
\begin{array}{cc}
a & \mathbf{B}^T \\
\mathbf{B} & \mathbf{A} \\
\end{array}
\right),\,\,\,\, F^{-1}=\left(
\begin{array}{cc}
b & \mathbf{C}^T \\
\mathbf{C} & \mathbf{D} \\
\end{array}
\right), \end{equation} \begin{equation}\label{CB}
\mathbf{B}^T=[B_1, B_2,\cdots,B_N],\,\,\,\mathbf{C}^T=[C_1, C_2,\cdots,C_N]. \end{equation} Let the initial state of the bath be a thermal state given by \begin{eqnarray}\label{ex-1}
\rho_B (\vec{y}_1,\vec{y}_2;0) &=& \bigg(\prod_{k=1}^N\sqrt{\frac{\omega_k}{2\pi\hbar\sinh(\beta\hbar\omega_k)}}\bigg)\nonumber\\
&\times & e^{-\sum\limits_{k=1}^N \frac{\omega_k}{2\hbar\sinh(\beta\hbar\omega_k)}\big[(y_{1k}^2+y_{2k}^2)\cosh(\beta\hbar\omega_k)-2y_{1k}y_{2k}\big]}, \end{eqnarray} then, the integrals over $\vec{y}_1$, $\vec{y}_2$ and $\vec{y}$ in Eq.(\ref{I}), will be Gaussian type integrals and can be obtained using the generic formula \cite{Zin} \begin{equation}\label{zin}
\int d\vec{x}\,e^{-\frac{1}{2}\sum\limits_{k,l=1}^N x_k \Gamma_{kl} x_l+\sum\limits_{k=1}^N j_k x_k}=(2\pi)^{N/2}(\det \Gamma)^{-1/2}
e^{\frac{1}{2}\sum\limits_{k,l=1}^N j_k \Gamma^{-1}_{kl} j_l}, \end{equation} where $\Gamma$ is a positive, symmetric matrix. \subsection{Master equation} \noindent The main ingredient quantity in open quantum system theory is the master equation. To find the master equation satisfied by the reduced density matrix $\rho_{red}$, we insert the initial bath state Eq.(\ref{ex-1}) into Eq.(\ref{I}) and take the integrals over $\vec{y}$, $\vec{y}_1$ and $\vec{y}_2$, after straightforward but tedious calculations we will find the following expression for the reduced kernel defined in Eq.(\ref{J2}) \begin{eqnarray}\label{J}
J(y_0,y'_0;y_{01},y_{02}) &=& \frac{b_3}{2\pi}\,e^{ib_1 X\xi+ib_2 X_0\xi-ib_3 X\xi_0-ib_4 X_0 \xi_0}\nonumber\\
&\times & e^{-a_{11} \xi^2-a_{12}\xi\xi_0-a_{22}\xi_0^2}, \end{eqnarray} where for later convenience, we have chosen the same notation for the time-dependent coefficients $b_k (t)$ and $a_{ij} (t)$ introduced by Paz in \cite{Paz} following the path integral technique. These coefficients can be obtained in terms of the functions given by Eqs.(\ref{defs}) or in terms of the environment properties described in \cite{Paz}. Following the same process described by Paz in \cite{Paz}, we recover the master equation $(\hbar=1)$ for the reduced density matrix as \begin{eqnarray}\label{Master}
&& i\frac{\partial \rho_{red} (y_0,y'_0,t)}{\partial t}={\langle} y_0|[H_{ren},\rho_{red}]|y'_0{\rangle}-i\gamma(t)(y_0-y'_0)(\frac{\partial }{\partial y_0}-\frac{\partial}{\partial y'_0})\rho_{red}(y_0,y'_0,t),\nonumber\\
&& -i D(t) (y_0-y'_0)^2\,\rho_{red}(y_0,y'_0,t)+f(t) (y_0-y'_0) (\frac{\partial }{\partial y_0}+\frac{\partial}{\partial y'_0})\rho_{red}(y_0,y'_0,t),\nonumber \end{eqnarray} where $H_{ren}$ is the renormalized Hamiltonian of the main oscillator with the renormalized frequency $\omega_{ren} (t)$. To find the connection between the functions $\omega_{ren} (t), \gamma(t), D(t), f(t)$ and coefficients $b_k (t),\,a_{ij}(t)$, the interested reader is referred to \cite{Paz}.
\section{Thermal Equilibrium: fixed point}
\noindent In the equilibrium state, the density matrix of oscillator-bath system can be obtained from the quantum propagator using the correspondence between quantum propagator and partition function as \begin{equation}\label{P1}
\rho(y_0,\vec{y};y'_0,\vec{y}',\beta)=\frac{1}{Z(\beta)}\,K(y_0,\vec{y},-i\hbar\beta;y'_0,\vec{y}',0), \end{equation} where $\beta=1/\kappa_B T$ is the inverse temperature and $\kappa_B$ is the Boltzmann constant. The function $Z(\beta)$ is the total partition function \begin{eqnarray}\label{P2}
Z(\beta) &=& \int dy_0 d\vec{y}\,K(y_0,\vec{y},-i\hbar\beta;y_0,\vec{y},0),\nonumber\\
&=& \frac{1}{2^{\frac{N+1}{2}}}\frac{1}{\sqrt{\det (\dot{F}-\mathbb{I})}}\bigg|_{t=-i\hbar\beta}, \end{eqnarray} and $\mathbb{I}$ is an $(N+1)$-dimensional unit matrix.
The reduced density matrix of the oscillator is obtained by integrating out the bath degrees of freedom as \begin{eqnarray}\label{p3}
\rho_{red}(y_0,y'_0;\beta) &=& \int d\vec{y}\,K(y_0,\vec{y},-i\hbar\beta;y'_0,\vec{y},0),\nonumber\\
&=& \sqrt{\frac{\det (\dot{F}-\mathbb{I})}{i\pi\hbar\det F\det(\mathbf{A}-\mathbf{D})}}\,e^{\frac{i}{2\hbar}\big[({y_0}^2+{y'_0}^2)(a-\frac{\eta}{2})-2 y_0 y'_0 (b+\frac{\eta}{2})\big]},\nonumber\\ \end{eqnarray} where \begin{equation}\label{p4}
\eta=\sum_{k,l=1}^N(B_k-C_k)(\mathbf{A}-\mathbf{D})^{-1}_{kl} (B_l-C_l)|_{t=-i\hbar\beta}. \end{equation} From Eq.(\ref{FF}) we have \begin{equation}\label{p5}
F^{-1}(\dot{F}-\mathbb{I})=\left(
\begin{array}{cc}
a-b & \mathbf{B}^T-\mathbf{C}^T \\
\mathbf{B}-\mathbf{C} & \mathbf{A}-\mathbf{D} \\
\end{array}
\right), \end{equation} by making use of the identity \cite{Matrix} \begin{eqnarray}\label{p6}
\det[F^{-1}(\dot{F}-\mathbb{I})] &=& \det(\mathbf{A}-\mathbf{D})\det[a-b-\underbrace{(\mathbf{B}^T-\mathbf{C}^T)(\mathbf{A}-\mathbf{D})^{-1}(\mathbf{B}-\mathbf{C})}_{\eta}],\nonumber\\
&=& \det(\mathbf{A}-\mathbf{D})(a-b-\eta), \end{eqnarray} Eq.(\ref{p3}) can be rewritten as \begin{equation}\label{p7}
\rho_{red}(y_0,y'_0;\beta)=\sqrt{\frac{a-b-\eta}{i\hbar\pi}}\,e^{\frac{i}{2\hbar}\big[({y_0}^2+{y'_0}^2)(a-\frac{\eta}{2})-2 y_0 y'_0 (b+\frac{\eta}{2})\big]}. \end{equation} From Eq.(\ref{p7}) we find the thermal mean square of position and momentum as \begin{eqnarray}
{\langle} y_0^2 {\rangle} &=& \frac{i\hbar}{2(a-b-\eta)}\bigg|_{t=-i\hbar\beta},\nonumber \\
{\langle} p_0^2 {\rangle} &=& -i\hbar\frac{a+b}{2}\bigg|_{t=-i\hbar\beta}, \end{eqnarray} therefore, \begin{equation}\label{p8}
\rho_{red}(y_0,y'_0;\beta)=\frac{1}{\sqrt{2\pi{\langle} y_0^2 {\rangle}}}\,e^{-\frac{{\langle} p_0^2 {\rangle}}{2\hbar^2}(y_0-y_0')^2-\frac{1}{8{\langle} y_0^2 {\rangle}}(y_0+y_0')^2}, \end{equation} for another derivation, see \cite{Weiss}.
\section{Main oscillator interacts with an external field}
\noindent Now assume that the main oscillator is under the influence of an external classical field $f(t)$. In this case the total Lagrangian is written as \begin{equation}\label{47}
L=\haf \sum_{\mu=0}^N (\dot{Y}^2_\mu-\omega_\mu^2 Y_\mu^2)+\haf\sum_{\mu,\nu=0}^N Y_\mu \Omega_{\mu\nu}^2 Y_\nu-f(t) Y_0, \end{equation} and the corresponding Hamiltonian is \begin{equation}\label{48}
H=\haf \sum_{\mu=0}^N (P^2_\mu+\omega_\mu^2 Y_\mu^2)-\haf\sum_{\mu,\nu=0}^N Y_\mu \Omega_{\mu\nu}^2 Y_\nu+f(t) Y_0. \end{equation}
Note that the Hamiltonian is now time-dependent and we can not use Eqs.(\ref{24},\ref{25}). In this case, we can find another partial differential equation satisfied by $\mathcal{K}(y'|y,t)$ as follows. From Heisenberg equations of motion we find \begin{equation}\label{48-1}
\ddot{\hat{Y}}_\mu +\omega_\mu^2 \hat{Y}_\mu-\sum_\nu \Omega_{\mu\nu}^2 \hat{Y}_\nu=-f(t)\,\delta_{\mu 0}. \end{equation} The Green tensor corresponding to Eq.(\ref{48-1}) is defined by \begin{equation}\label{48-2}
\sum_\nu\bigg(\big[{\partial}^2_t +\omega_\mu^2\big]\delta_{\mu\nu}-\Omega^2_{\mu\nu}\bigg)G_{\nu\alpha} (t-t')=\delta_{\mu\alpha}\,\delta(t-t'). \end{equation} By making use of Laplace transform and definitions Eqs.(\ref{9},\ref{12}), we find the retarded Green tensor as \begin{equation}\label{48-3}
G_{\mu\nu} (t-t')=F_{\mu\nu} (t-t'), \end{equation} and the position and momentum operators are respectively given by \begin{eqnarray}\label{49} && \hat{Y}_\mu (t)=\sum_{\nu} \big[\dot{F}_{\mu\nu} (t) \hat{Y}_\nu (0)+F_{\mu\nu} (t) \hat{P}_\nu (0)\big]-R_\mu (t),\nonumber\\ && \hat{P}_\mu =\dot{\hat{Y}}_\mu=\sum_{\nu} \big[\ddot{F}_{\mu\nu} (t) \hat{Y}_\nu (0)+\dot{F}_{\mu\nu} (t) \hat{P}_\nu (0)\big]-\dot{R}_\mu (t), \end{eqnarray} where we defined \begin{equation}\label{50}
R_\mu (t)=\int_0^t dt'\,F_{\mu 0} (t-t') f(t'). \end{equation} We can rewrite the identity \begin{equation}\label{51}
\hat{P}_\mu (t)=\hat{U}^{\dag}(t) \hat{P}_\mu (0) \hat{U}(t), \end{equation} as \begin{equation}\label{52}
\hat{P}_\mu (t) \hat{U}^{\dag}(t)=\hat{U}^{\dag}(t) \hat{P}_\mu (0), \end{equation} then \begin{equation}\label{53}
{\langle} y'|\hat{P}_\mu (t) \hat{U}^{\dag}(t)|y{\rangle}={\langle} y'|\hat{U}^{\dag}(t) \hat{P}_\mu (0)|y{\rangle}. \end{equation} By inserting the momentum operator from the second line of Eqs.(\ref{49}) into Eq.(\ref{53}), we easily find \begin{equation}\label{54}
\bigg(\sum_{\nu}\Big[\ddot{F}_{\mu\nu} (t)\,y'_\nu-i\hbar\,\dot{F}_{\mu\nu} (t)\,\frac{{\partial}}{{\partial} y'_\nu}\Big]-\dot{R}_\mu (t) \bigg)
\,\mathcal{K}(\mathbf{y}'|\mathbf{y},t)=y_\mu\,\mathcal{K}(\mathbf{y}'|\mathbf{y},t). \end{equation} By making use of Eqs.(\ref{18},\ref{29},\ref{35},\ref{54}), and following the same process as we did in Sec.III, we will find \begin{eqnarray}\label{55}
K^{(f)}(\mathbf{y},t;\mathbf{y}',0) &=& \frac{e^{-i\zeta(t)}}{\sqrt{|\det F(t)|}}\bigg(\frac{1}{2\pi i\hbar}\bigg)^{\frac{N+1}{2}}\,e^{\frac{i}{2\hbar}\big[\mathbf{y}\cdot \mathbf{F}^{-1} \dot{\mathbf{F}}\cdot \mathbf{y}+\mathbf{y}'\cdot \mathbf{F}^{-1} \dot{\mathbf{F}}\cdot \mathbf{y}'-2\mathbf{y}'\cdot \mathbf{F}^{-1}\cdot \mathbf{y}\big]}\nonumber\\
&\times & e^{-\frac{i}{\hbar}\,\big[\mathbf{y}'\cdot \mathbf{F}^{-1}\cdot \mathbf{R} +\mathbf{y}\cdot \mathbf{F}^{-1}\cdot\check{\mathbf{R}}\big]}, \end{eqnarray} where we have defined $\check{\mathbf{R}}$ as \begin{equation}\label{56}
\check{R}_\mu (t)=\int_0^{t} dt'\,F_{\mu 0}(t') f(t'), \end{equation} and the function $\zeta(t)$ can be determined from the Schr\"{o}dinger equation \begin{eqnarray}\label{57}
&& i\hbar\frac{{\partial} K^{(f)}(\mathbf{y},t;0,0)}{{\partial} t}\bigg|_{y=0}=\nonumber\\
&& \bigg[\haf \sum_{\mu=0}^N \bigg(-\hbar^2\frac{{\partial}^2}{{\partial} y_\mu^2}+\omega_\mu^2 y_\mu^2\bigg)-
\haf\sum_{\mu,\nu=0}^N y_\mu \Omega_{\mu\nu}^2 y_\nu +f(t) y_0\bigg]\,K^{(f)}(\mathbf{y},t;0,0)\bigg|_{y=0},\nonumber\\ \end{eqnarray} as \begin{eqnarray}\label{58}
\zeta(t) &=& \frac{1}{2\hbar}\int_0^t ds\,\check{\mathbf{R}}(s)\cdot \mathbf{F}^{-2} (s)\cdot \check{\mathbf{R}}(s),\nonumber\\
&=& \frac{1}{\hbar}\int_0^t ds\,\int_0^{s} du\,f(s)\bigg[\frac{\sin(\sqrt{B}u)\sin[\sqrt{B}(t-s)]}{\sqrt{B}\sin(\sqrt{B}t)}\bigg]_{00}f(u). \end{eqnarray} Finally, the quantum propagator for oscillator-bath system under the influence of an external classical force on the main oscillator, is obtained as
\begin{eqnarray}\label{59}
K^{(f)}(\mathbf{y},t;\mathbf{y}',0) &=& \frac{1}{\sqrt{|\det F(t)|}}\bigg(\frac{1}{2\pi i\hbar}\bigg)^{\frac{N+1}{2}}\,e^{\frac{i}{2\hbar}\big[\mathbf{y}\cdot \mathbf{F}^{-1} \dot{\mathbf{F}}\cdot \mathbf{y}+\mathbf{y}'\cdot \mathbf{F}^{-1} \dot{\mathbf{F}}\cdot \mathbf{y}'-2\mathbf{y}'\cdot \mathbf{F}^{-1}\cdot \mathbf{y}\big]}\nonumber\\
&\times & e^{-\frac{i}{\hbar}\,\big[\mathbf{y}'\cdot \mathbf{F}^{-1}\cdot \mathbf{R} +\mathbf{y}\cdot \mathbf{F}^{-1}\cdot\check{\mathbf{R}}\big]}\,e^{-\frac{i}{2\hbar}\int_0^t ds\,\check{\mathbf{R}}(s)\cdot \mathbf{F}^{-2} (s)\cdot \check{\mathbf{R}}(s)}. \end{eqnarray}
\subsection{A generalization: generating function}
\noindent We can generalize the Lagrangian Eq.(\ref{47}) as \begin{equation}\label{60}
L=\haf \sum_{\mu=0}^N (\dot{Y}^2_\mu-\omega_\mu^2 Y_\mu^2)+\haf\sum_{\mu,\nu=0}^N Y_\mu \Omega_{\mu\nu}^2 Y_\nu-\sum_{\mu=0}^N f_{\mu}(t) Y_\mu, \end{equation} in this case the quantum propagator is given by Eq.(\ref{59}) but now the definitions Eqs.(\ref{50},\ref{56}) have to be replaced by the new definitions \begin{eqnarray}\label{61}
R_\mu (t) &=& \int_0^t dt'\,\sum_{\nu=0}^N F_{\mu\nu} (t-t') f_{\nu} (t'),\nonumber\\
\check{R}_\mu (t) &=& \int_0^t dt'\,\sum_{\nu=0}^N F_{\mu\nu} (t') f_{\nu} (t'). \end{eqnarray} The path integral representation of quantum propagator Eq.(\ref{59}) is \cite{Greiner} \begin{equation}\label{62}
K^{(f)}(\mathbf{y},t;\mathbf{y}',0)=\int d[\mathbf{x}]\,e^{\frac{i}{\hbar}\int_0^t d\tau\,L}, \end{equation} where $L$ is the Lagrangian Eq.(\ref{60}). Having the closed form expression Eq.(\ref{59}), we can find ordered correlation functions among position operators of the oscillator-bath system. In this case, the external source $f_\mu (t)$ is an auxiliary force that should be set zero at the end of functional derivatives \cite{Greiner}, we have \begin{eqnarray}\label{63}
&& {\langle} \mathbf{y},t|\hat{T}[\hat{Y}_{\mu_1}(t_1) \hat{Y}_{\mu_2}(t_2)\cdots \hat{Y}_{\mu_N}(t_N)]|\mathbf{y}',0 {\rangle} = \nonumber\\
&& \frac{\int D[\mathbf{x}]\,y_{\mu_1}(t_1) y_{\mu_2}(t_2)\cdots y_{\mu_N}(t_N)\,e^{\frac{i}{\hbar}\int_0^t d\tau\,L}}{\int D[\mathbf{x}]\,e^{\frac{i}{\hbar}\int_0^t d\tau\,L}}=
\nonumber \\
&& \frac{(i\hbar)^N}{K^{(0)}(\mathbf{y},t;\mathbf{y}',0)} \frac{\delta^{N}}{\delta f_{\mu_1} (t_1)\cdots \delta f_{\mu_N} (t_N)} K^{(f)}(\mathbf{y},t;\mathbf{y}',0)\bigg|_{f=0}, \end{eqnarray} where $\hat{T}$ is a time ordering operator acting on bosonic operators as \begin{equation}\label{64}
\hat{T}(\hat{A}(t)\hat{B}(t'))=\left\{
\begin{array}{ll}
\hat{A}(t)\hat{B}(t'), & \hbox{$t>t'$;} \\
\hat{B}(t')\hat{A}(t), & \hbox{$t'>t$.}
\end{array}
\right. \end{equation}
\section{Conclusions}
\noindent Using elementary quantum mechanical calculations and basic properties of quantum propagators, an alternative derivation of exact quantum propagator for the oscillator-bath system was introduced. The method compared to other methods to derive quantum propagator of an oscillator-bath system with linear interaction or generally quadratic Lagrangians, was easier to apply and in particular, compared to path integral approach, there was no need to introduce more advanced mathematical notions like infinite integrations, operator determinant and Weyl ordering. From quantum propagator, a closed form density matrix describing the combined oscillator-bath system was obtained from which reduced density matrix could be derived. The problem was generalised to the case where the main oscillator was under the influence of an external classical source. By introducing auxiliary classical fields the modified quantum propagator or generating functional of position correlation functions was found.
The basic ingredient of the approach was a symmetric time-independent matrix $B$, which was dependent on natural frequencies of the bath oscillators and coupling constants. Therefore, from numerical or simulation point of view, the only challenge was finding the inverse of the matrix $B$ or equivalently diagonalizing it.
The efficiency of the method in determining the exact form of the quantum propagator for quadratic Lagrangians, inspired the idea of developing a perturbative approach to include non-quadratic Lagrangians too.
\end{document} |
\begin{document}
\title{Maximal ideals of generalized summing linear operators} \author{Geraldo Botelho\thanks{Supported by CNPq Grant 304262/2018-8 and Fapemig Grant PPM-00450-17.}\,, Jamilson R. Campos and Lucas Nascimento\thanks{Supported by a CAPES scholarship. \newline 2020 Mathematics Subject Classification: 47L20, 46J20, 46B45, 47B37, 46B10.\newline Keywords: Tensor norms, operator ideals, summing operators, sequence spaces.}} \date{} \maketitle
\begin{abstract} We prove when a Banach ideal of linear operators defined, or characterized, by the transformation of vector-valued sequences is maximal. Known results are recovered as particular cases and new information is obtained. To accomplish this task we study a tensor quasi-norm determined by the underlying sequence classes. The duality theory for these tensor quasi-norms is also developed. \end{abstract}
\section{Introduction} The theory of operator ideals is central in modern mathematical analysis (see \cite{handbook}, \cite[6.3]{history}) and, in this context, maximal ideals play a key role. For recent developments on maximal operator ideals, see, e.g., \cite{samuel, kim, J. A. Lopez Molina.tres, turcovillafane}. A number of important operator ideals are defined, or characterized, by the transformation of vector-valued sequences; and some of these ideals are known to be maximal. A unifying approach to this kind of operator ideals was proposed in \cite{G. Botelho} using the concept of {\it sequence classes}. For sequence classes $X$ and $Y$, a linear operator $T \colon E \longrightarrow F$ is $(X;Y)$-summing if $T((x_j))_{j=1}^\infty \in Y(F)$ whenever $(x_j)_{j=1}^\infty \in X(E)$. The Banach operator ideal of such operators is denoted by ${\cal L}_{X;Y}$. This approach has proved to be quite fruitful, see \cite{achour2018, complut, Jamilson.dual, espaco.mid, G. Botelho and D. Freitas, raquel, J.R.Campos.J.Santos, J. Ribeiro and F. Santos, J. Ribeiro and F. Santos.dois}. The purpose of this paper is to study the maximality of these Banach operator ideals. Generalizing some well known cases, we find conditions on the sequence classes $X$ and $Y$ so that the Banach operator ideal ${\cal L}_{X;Y}$ is maximal.
Following the long tradition of the interplay between operator ideals and tensor norms, which comes from Grothendieck's seminal works and stands to the day (see recent developments in \cite{achour2018, D.Achour, sheldon, maite, maite2020, kim2020, J. A. Lopez Molina, J. A. Lopez Molina.tres, lopezmolina2019, miguel}), we prove our main results defining, developing and applying a tensor quasi-norm $\alpha_{X;Y}$ determined by the sequence classes $X$ and $Y$. The tensor quasi-norms $\alpha_{X;Y}$ can be regarded as generalizations of the classical Chevet-Saphar tensor norms (see \cite{A.Defant, R. Ryan}).
In Section 2 we define a tensor quasi-norm associated to the underlying sequence classes and apply it to give conditions so that the corresponding ideal of summing operators is maximal. Known results are recovered as particular instances and new concrete information is obtained. The duality theory of the tensor quasi-norm $\alpha_{X;Y}$ associated to the sequence classes $X$ and $Y$ is developed in Section 3. For Banach spaces $E$ and $F$ we describe the continuous linear functionals on $E \otimes_{\alpha_{X;Y}} F$ as linear operators from $E$ to $F^*$ and as continuous bilinear forms on $E \times F$. As a byproduct we show when the tensor quasi-norm $\alpha_{X;Y}$ satisfies a condition that is equivalent to maximality in the case of operator ideals associated to finitely generated tensor norms.
For operator ideals we refer to \cite{A.Defant, A.Pietsch}, for the interplay between tensor norms and operator ideals to \cite{A.Defant, R. Ryan}, for the theory of absolutely summing operators to \cite{J.Diestel} and for quasi-norms and quasi-normed spaces to \cite{N. J. Kalton 2}.
Banach spaces over $\mathbb{K} = \mathbb{R}$ or $\mathbb{C}$ shall be denoted by $E$ and $F$. The closed unit ball of $E$ is denoted by $B_E$ and its topological dual by $E^*$. The symbol $E \stackrel{1}{\hookrightarrow} F$ means that $E$ is a linear subspace of $F$ and $\|x\|_F \leq \|x\|_E$ for every $x \in E$; and $E\stackrel{1}{=}F$ means that $E$ is isometrically isomorphic to $F$.
By $L(E;F)$ we denote the space of all linear operators from $E$ to $F$ and by $\mathcal{L}(E;F)$ the Banach space of all continuous linear operators $T:E\longrightarrow F$ endowed with the usual sup norm. The same notation will be used if $E$ and $F$ are quasi-normed spaces.
For $x \in E$ and $j \in \mathbb{N}$, the symbol $x\cdot e_j$ denotes the sequence $(0,\ldots, 0,x,0, 0,\ldots ) \in E^\mathbb{N}$, where $x$ is placed at the $j$-th coordinate. The symbol $(x_{j})_{j=1}^{n}$, where $x_1, \ldots, x_n \in E$, stands for the sequence $(x_{1},x_{2},\ldots,x_{n},0,0,\ldots) \in E^\mathbb{N}$.
According to \cite{G. Botelho}, a {\it sequence class} is a rule $X$ that assigns to each Banach space $E$ a Banach space $X(E)$ of $E$-valued sequences, that is $X(E)$ is a vector subspace of $E^{\mathbb{N}}$ with the coordinatewise operations, such that:\\ (i) $c_{00}(E) \subseteq X(E) \stackrel{1}{\hookrightarrow} \ell_\infty(E)$ for every Banach space $E$,\\
(ii) $\|x \cdot e_j\|_{X(E)}= \|x\|_E$ for every Banach space $E$, every $x \in E$ and every $j \in \mathbb{N}$.
To avoid ambiguity, we shall eventually denote the sequence class $X$ by $X(\cdot)$.
Given sequence classes $X$ and $Y$, we say that an operator $T \in {\cal L}(E;F)$ is {\it $(X;Y)$-summing} if $T((x_j))_{j=1}^\infty \in Y(F)$ whenever $(x_j)_{j=1}^\infty \in X(E)$. In this case, the induced linear operator
$$\widehat{T} \colon X(E) \longrightarrow Y(F)~,~\widehat{T}\left( (x_j)_{j=1}^\infty \right) = \left(T(x_j)\right)_{j=1}^\infty ,$$ is continuous and
$$\|T \|_{X;Y} : = \|\widehat{T}\| $$
is a norm that makes the space ${\cal L}_{X;Y}(E;F)$ of $(X;Y)$-summing operators a Banach space. Whenever we refer to ${\cal L}_{X;Y}(E;F)$ we assume that it is endowed with the norm $\|\cdot \|_{X;Y}$.
A sequence class $X$ is {\it linearly stable} if, regardless of the Banach spaces $E$ and $F$, every operator $T \in {\cal L}(E;F)$ is $(X;X)$-summing and $\|T\|_{X;X} = \|T\|$, that is, $\mathcal{L}_{X;X}(E;F)\stackrel{1}{=} \mathcal{L}(E;F)$.
If the sequence classes $X$ and $Y$ are linearly stable and $X(\mathbb{K}) \stackrel{1}{\hookrightarrow} Y(\mathbb{K})$, then $\mathcal{L}_{X;Y}$ is a Banach operator ideal \cite[Theorem 3.6]{G. Botelho}.
\begin{example}\label{exsec}\rm Let $p \geq 1$. The following are well known linearly stable sequence classes, endowed with their usual norms: \\ $\bullet$ The class $E \mapsto c_0(E)$ of norm null sequences.\\ $\bullet$ The class $E \mapsto \ell_\infty(E)$ of bounded sequences.\\ $\bullet$ The class $E \mapsto \ell_p(E)$ of absolutely $p$-summable sequences.\\ $\bullet$ The class $E \mapsto \ell_p^w(E)$ of weakly $p$-summable sequences.\\ $\bullet$ The class $E \mapsto \ell_p\langle E \rangle$ of Cohen strongly $p$-summable sequences.\\ $\bullet$ The class $E \mapsto {\rm Rad}(E)$ of almost unconditionally summable sequences.
Consider
$${\rm RAD}(E) : = \left\{(x_{j})_{j=1}^{\infty} \in E^{\mathbb{N}} : \|(x_{j})_{j=1}^{\infty}\|_{{\rm RAD}(E)} := \sup\limits_{k} \|(x_{j})_{j=1}^{k}\|_{{\rm Rad}(E)} < \infty\right\}$$ (see \cite{J.Diestel, tarieladze}),
$$\ell_p^{\rm mid}(E) := \left\{(x_{j})_{j=1}^{\infty} \in E^{\mathbb{N}} : \|(x_{j})_{j=1}^{\infty}\|_{{\rm mid},p} := \sup\limits_{(\varphi_n)_{n=1}^\infty \in B_{\ell_p^w(E^*)}} \left(\sum_{j,n = 1}^\infty |\varphi_n(x_j)|^p \right)^{1/p} < \infty\right\},$$ and the closed subspace $\ell_p^u(E)$ of $\ell_p^w(E)$ formed by unconditionally $p$-summable sequences, that is
$$\ell_p^u(E) = \left\{(x_j)_{j=1}^\infty \in \ell_p^w(E) : \lim\limits_k \|(x_j)_{j=k}^\infty\|_{w,p} = 0\right\} $$ (see \cite{A.Defant}). Then the correspondences $E \mapsto {\rm RAD}(E)$, $E \mapsto \ell_p^{\rm mid}(E)$ and $E \mapsto \ell_p^u(E)$ are also linearly stable sequence classes. \end{example}
The dual of a sequence class $X$ was introduced in \cite{Jamilson.dual} in the following fashion: \begin{equation*}
X^{\rm dual}(E) = \left\{(x_j)_{j=1}^\infty\ \mathrm{in\ } E: \sum_{j=1}^\infty \varphi_j(x_j)\ \mathrm{converges\ } \text{for every}\ (\varphi_j)_{j=1}^\infty\ \mathrm{in\ } X(E^*)\right\}. \end{equation*}
A sequence class $X$ is {\it spherically complete} if $(\lambda_jx_j)_{j=1}^\infty \in X(E)$ and $\|(\lambda_jx_j)_{j=1}^\infty \|_{X(E)} = \|(x_j)_{j=1}^\infty\|_{X(E)}$ whenever $(x_j)_{j=1}^\infty \in X(E)$ and $(\lambda_j)_{j=1}^\infty \in \mathbb{K}^\mathbb{N}$ is such that $|\lambda_j| = 1$ for every $j$.
For example, the sequence classes $c_0(\cdot), \ell_\infty(\cdot), {\rm RAD}(\cdot), \ell_p(\cdot), \ell_p^w(\cdot), \ell_p\langle \cdot \rangle, \ell_p^u(\cdot), \ell_p^{\rm mid}(\cdot), 1 \leq p < \infty$, are spherically complete.
Let $X$ be a linearly stable and spherically complete sequence class. In \cite{Jamilson.dual} it is proved that the expression \begin{equation*}
\left\|(x_{j})_{j=1}^{\infty} \right\|_{X^{\rm dual}}:= \sup_{(\varphi_{j})_{j=1}^{\infty}\in B_{X(E^*)} }\sum_{j=1}^{\infty}\left|\varphi_{j}(x_{j}) \right| = \sup_{(\varphi_{j})_{j=1}^{\infty}\in B_{X(E^*)} }\left|\sum_{j=1}^{\infty}\varphi_{j}(x_{j}) \right| \end{equation*} makes $X^{\rm dual}(E)$ a Banach space and $X^{\rm dual}(\cdot)$ a linearly stable spherically complete sequence class. Conditions on $X$ so that $X^{\rm dual}(E^*)$ is canonically isometrically isomorphic to $X(E)^*$ are also given in \cite{Jamilson.dual}.
For example, for $1 \leq p \leq \infty$ and $\frac{1}{p} + \frac{1}{p^*} = 1$, $(\ell_p^w)^{\rm dual} = \ell_{p^*}\langle \cdot \rangle$ and $(\ell_p)^{\rm dual} = \ell_{p^*}( \cdot )$ (the case $p = \infty$ of this last equality is somewhat surprising).
\section{Maximal ideals} The purpose of this section is to find conditions on $X$ and $Y$ so that ${\cal L}_{X;Y}$ is a maximal Banach operator ideal. Oddly enough, we begin by giving plenty of counterexamples. \begin{example}\label{exnew}\rm For $1 \leq p < \infty$, by ${\cal C}_p$ we denote the ideal of $p$-converging operators (see, e.g., \cite{chen}), that is, the operators that send weakly $p$-summable sequences to norm null sequences. In our notation, ${\cal C}_p = {\cal L}_{\ell_p^w(\cdot); c_0(\cdot)}$. The case $p = 1$ recovers the ideal of unconditionally summing operators from \cite[1.7.1]{A.Pietsch}. All these ideals are not maximal, see, e.g., \cite[Theorem 2.7]{chen}. In the same fashion, the classical ideal ${\cal CC} : = {\cal L}_{c_0^w(\cdot); c_0(\cdot)}$ of completely continuous operators is not maximal. \end{example} Since $c_0$ is not a dual space, there is no sequence class $Y$ such that $Y^{\rm dual} = c_0(\cdot)$. Our guess is that this is the fact behind the non-maximality of the ideals in Example \ref{exnew}. So, the search for maximality should be restricted to operator ideals of the form ${\cal L}_{X;Y^{\rm dual}}$. As announced, we shall do that by considering tensor quasi-norms.
Of course we need ${\cal L}_{X;Y^{\rm dual}}$ to be a Banach operator ideal. So, according to the linear case of \cite[Theorem 3.6]{G. Botelho}, whenever we refer to ${\cal L}_{X;Y^{\rm dual}}$ in this section we assume that the sequence classes $X$ and $Y$ are linearly stable, $Y$ is spherically complete and $X(\mathbb{K}) \stackrel{1}{\hookrightarrow} Y^{\rm dual}(\mathbb{K})$. The linear cases of the characterizations proved in \cite[Proposition 2.4]{G. Botelho} shall be used making no explicit reference.
A careful look at the definition of reasonable tensor norm and at the proof of \cite[Proposition 6.1]{R. Ryan} makes the following definition quite natural.
\begin{definition}\rm Let $\varepsilon$ be the injective tensor norm. For Banach spaces $E$ and $F$, a quasi-norm $\alpha$ on $E\otimes F$ is said to be {\it reasonable} if $\varepsilon \leq \alpha$ and $ \alpha(x \otimes y) \le \|x\|\cdot\|y\|$ for all $x \in E$ and $y \in F$. \end{definition}
Let $X$ and $Y$ be sequence classes. For Banach spaces $E$ and $F$, consider the map $\alpha_{X,Y} \colon E\otimes F\longrightarrow \mathbb{R}$ given by
$$\alpha_{X,Y}^{}(u)= \inf\left\lbrace \left\|(x_{j})_{j=1}^{n} \right\|_{X(E)} \cdot \left\|(y_{j})_{j=1}^{n} \right\|_{Y(F)} : u=\sum_{j=1}^{n}x_{j}\otimes y_{j} \right\rbrace. $$
Only one condition on the sequence classes $X$ and $Y$ is needed for $\alpha_{X,Y}$ to be a reasonable tensor quasi-norm. The notion we define now is considerably weaker than the related ones that can be found in the literature (see, e.g., \cite{Botelhojlucas, argentinos}).
A sequence class $X$ is \emph{monotone} if for every Banach space $E$ and all $m,n \in \mathbb{N}$ and $x_1, \ldots, x_n \in E$, the following holds:
$$\|(\underbrace{0,0, \ldots, 0}_{m \rm{\,times}}, x_1, \ldots, x_n, 0,0, \ldots)\|_{X(E)} = \|(x_1, \ldots, x_n, 0,0,\ldots)\|_{X(E)}. $$
All sequence classes in Example \ref{exsec} are monotone.
\begin{proposition}\label{razoavel} If $X$ and $Y$ are monotone sequence classes and $\varepsilon\leq \alpha_{X,Y}$, then $\alpha_{X,Y}^{}$ is a reasonable quasi-norm on $E\otimes F$ for all $E$ and $F$. \end{proposition}
\begin{proof} Let us show that, for all Banach spaces $E$ and $F$ and all $u_{1},u_{2}\in E\otimes F$ $$\alpha_{X,Y}^{}(u_{1}+u_{2})\leq 2\left( \alpha_{X,Y}^ {}(u_{1}) + \alpha_{X,Y}^{}(u_{2})\right).$$
Given $\eta>0$, choose representations $u_{i}=\sum\limits_{j=1}^{n}x_{ij}\otimes y_{ij}$ such that $$\left\|(x_{ij})_{j=1}^{n} \right\|_{X(E)}\leq \alpha_{X,Y}^{}(u_{i} ) + \eta \ \text{ and } \ \left\|(y_{ij})_{j=1}^{n} \right\|_{Y(F)}\leq 1, \, i=1,2.$$
So, $\sum\limits_{j=1}^{n}x_{1j}\otimes y_{1j}+ \sum\limits_{j=1}^{n}x_{2j} \otimes y_{2j}$ is a representation of $u_{1}+ u_{2}$.
Using that $X$ and $Y$ are monotone, we get $\alpha_{X,Y}^{}(u_{1}+u_{2}) \leq$ \begin{align*}
&\leq\|(x_{11}, \ldots, x_{1n}, x_{21}, \ldots, x_{2n},0,0,\ldots) \|_{X(E)}\cdot \| (y_{11}, \ldots, y_{1n}, y_{21}, \ldots, y_{2n},0,0,\ldots) \|_{Y(F)}\\
& \leq \left(\|(x_{11}, \ldots, x_{1n}, 0,0,\ldots) \|_{X(E)}+ \|(0, \ldots, 0, x_{21}, \ldots, x_{2n},0,0,\ldots) \|_{X(E)}\right)\cdot \\
& ~~~\cdot \left(\|(y_{11}, \ldots, y_{1n}, 0,0,\ldots) \|_{Y(F)}+ \|(0, \ldots, 0, y_{21}, \ldots, y_{2n},0,0,\ldots) \|_{Y(F)}\right)\\
& = \left(\|(x_{11}, \ldots, x_{1n}, 0,0,\ldots) \|_{X(E)}+ \|(x_{21}, \ldots, x_{2n},0,0,\ldots) \|_{X(E)}\right)\cdot \\
& ~~~\cdot \left(\|(y_{11}, \ldots, y_{1n}, 0,0,\ldots) \|_{Y(F)}+ \|(y_{21}, \ldots, y_{2n},0,0,\ldots) \|_{Y(F)}\right)\\
&\leq 2\left(\alpha_{X,Y}^{}(u_{1}) + \alpha_{X,Y}^{}(u_{2}) + 2\eta \right).
\end{align*} The desired inequality follows by making $\eta \longrightarrow 0^+$.
The other facts either follow from the definition of sequence class or are straightforward. \end{proof}
Given a (not necessarily continuous) linear operator $T \colon E \longrightarrow F$, it is clear that $$A_{T} \colon E \times F^*\longrightarrow \mathbb{K}~,~A_{T}(x,\psi)=\psi (T(x)),$$ is a (not necessarily continuous) bilinear form. Calling $\varphi_T$ the linearization of $A_T$, we have a linear functional $\varphi_{T}\colon E\otimes F^*\longrightarrow \mathbb{K}$ satisfying $$\varphi_{T}\left(\sum_{j=1}^{n}x_{j}\otimes \psi_{j} \right)= \sum_{j=1}^{n}\psi_{j}(T(x_{j}))$$ for every $\sum\limits_{j=1}^{n}x_{j}\otimes \psi_{j} \in E \otimes F^*$. Of course, the map $T \mapsto \varphi_T$ is linear.
To proceed we need to recall one more definition from \cite{G. Botelho}. A sequence class $X$ is said to be:\\
$\bullet$ \emph{Finitely determined} if for every sequence $(x_j)_{j=1}^\infty \in E^{\mathbb{N}}$, $(x_j)_{j=1}^\infty \in X(E)$ if and only if $\displaystyle\sup_k \left\|(x_j)_{j=1}^k \right\|_{X(E)} < +\infty$ and, in this case, $\left\|(x_j)_{j=1}^\infty \right\|_{X(E)} = \sup_k \left\|(x_j)_{j=1}^k \right\|_{X(E)}. $\\ $\bullet$ {\it Finitely dominated} if there is a finitely determined sequence class $Y$ such that, for every Banach space $E$, $X(E)$ is a closed subspace of $Y(E)$ and one of the following conditions holds:\\
(i) For every sequence $(x_j)_{j=1}^\infty \in Y(E)$, $(x_j)_{j=1}^\infty \in X(E)$ if and only if $\displaystyle\lim_k \|(x_j)_{j=k}^\infty\|_{Y(E)} = 0$.\\
(ii) For every sequence $(x_j)_{j=1}^\infty \in Y(E)$, $(x_j)_{j=1}^\infty \in X(E)$ if and only if $\displaystyle\lim_{k,l} \|(x_j)_{j=k}^l\|_{Y(E)} = 0$.
For example, for $1 \leq p < \infty$, the classes $\ell_p(\cdot), \ell_p^w(\cdot), \ell_p\langle \cdot \rangle, \ell_p^{\rm mid}(\cdot), \ell_\infty(\cdot)$ and ${\rm RAD}(\cdot)$ are finitely determined; $c_0(\cdot)$ is finitely dominated by $\ell_\infty(\cdot)$, $\ell_p^u(\cdot)$ is finitely dominated by $\ell_p^w(\cdot)$ and ${\rm Rad}(\cdot)$ is finitely dominated by ${\rm RAD}(\cdot)$. Moreover, the dual $Y^{\rm dual}$ of a sequence class $Y$ is always finitely determined, even if $Y$ is not.
\begin{lemma}\label{1lema.ideal.maximal.qn}
Let $T\in \mathcal{L}(E;F)$ and suppose that $\alpha_{X,Y}^{}$ is a reasonable quasi-norm.\\
{\rm (a)} If $T\in \mathcal{L}_{X;Y^{\rm dual}}(E;F)$, then $\varphi_{T}\colon E\otimes_{\alpha_{X,Y}^{}} F^*\longrightarrow \mathbb{K}$ is continuous and $\left\|\varphi_{T} \right\| \leq \left\|T \right\|_{X;Y^{\rm dual}}$.\\
{\rm (b)} If, in addition, $X$ and $Y$ are finitely determined or finitely dominated, then the functional $\varphi_{T} \colon E\otimes_{\alpha_{X,Y}^{}} F^*\longrightarrow \mathbb{K}$ is continuous if and only if $T\in \mathcal{L}_{X;Y^{\rm dual}}(E;F)$; and, in this case, $\left\|T \right\|_{X;Y^{\rm dual}}= \left\|\varphi_{T} \right\|$. \end{lemma}
\begin{proof} (a) For every $u=\sum\limits_{j=1}^{n}x_{j}\otimes \psi_{j} \in E\otimes F^*$, we have
\begin{equation*}
\left|\varphi_{T}\left(u \right) \right|= \left|\sum_{j=1}^{n}\psi_{j}(T(x_{j})) \right| \leq \sum_{j=1}^{n}\left|\psi_{j}(T(x_{j})) \right|
\leq \left\|T \right\|_{X;Y^{\rm dual}} \cdot \left\|(x_{j})_{j=1}^{n} \right\|_{X(E)} \cdot \left\|(\psi_{j})_{j=1}^{n} \right\|_{Y(F^*)},
\end{equation*}
where the last inequality follows from a simple manipulation of the norm of $Y^{\rm dual}$. Taking the infimum over all representations of $u$ it follows that the functional $\varphi_{T}\colon E\otimes_{\alpha_{X,Y}^{}} F^*\longrightarrow \mathbb{K}$ is continuous with $\left\|\varphi_{T} \right\| \leq \left\|T \right\|_{X;Y^{\rm dual}}. $\\
(b) We prove the case that $X$ and $Y$ are finitely determined. Given $(x_j)_{j=1}^\infty \in X(E)$, for every $n \in \mathbb{N}$,
\begin{align*}
\left\| (T(x_{j}))_{j=1}^{n} \right\|_{Y^{\rm dual}(F)} &= \sup_{(\psi_{j})_{j=1}^{\infty}\in B_{Y(F^*)}}\left|\sum_{j=1}^{n}\psi_{j}(T(x_{j})) \right| = \sup_{(\psi_{j})_{j=1}^{\infty}\in B_{Y(F^*)}}\left|\varphi_{T}\left(\sum_{j=1}^{n}x_{j}\otimes \psi_{j} \right) \right| \\
&\leq \sup_{(\psi_{j})_{j=1}^{\infty}\in B_{Y(F^*)}}\left\|\varphi_{T} \right\|\cdot\alpha_{X,Y}^{}\left(\sum_{j=1}^{n}x_{j}\otimes \psi_{j} \right)\\
&\leq \left\|\varphi_{T} \right\| \cdot \left\|(x_{j})_{j=1}^{n} \right\|_{X(E)}\cdot \sup_{(\psi_{j})_{j=1}^{\infty}\in B_{Y(F^*)}}\left\|(\psi_{j})_{j=1}^{n} \right\|_{Y(F^*)}\\
&\leq \left\|\varphi_{T} \right\| \cdot \sup_k\left\|(x_{j})_{j=1}^{k} \right\|_{X(E)}\cdot \sup_{(\psi_{j})_{j=1}^{\infty}\in B_{Y(F^*)}}\sup_k\left\|(\psi_{j})_{j=1}^{k} \right\|_{Y(F^*)}\\
&= \left\|\varphi_{T} \right\| \cdot \left\|(x_{j})_{j=1}^{\infty} \right\|_{X(E)}\cdot \sup_{(\psi_{j})_{j=1}^{\infty}\in B_{Y(F^*)}}\left\|(\psi_{j})_{j=1}^{\infty} \right\|_{Y(F^*)}\\
&= \left\|\varphi_{T} \right\|\cdot \left\|(x_{j})_{j=1}^{\infty} \right\|_{X(E)}.
\end{align*} Since $Y^{\rm dual}$ is also finitely determined, taking the supremum over $n$ we get $(T(x_{j}))_{j=1}^{\infty} \in Y^{\rm dual}(F)$ and
$$\left\| (T(x_{j}))_{j=1}^{\infty} \right\|_{Y^{\rm dual}(F)} \leq \left\|\varphi_{T} \right\|\cdot \left\|(x_{j})_{j=1}^{\infty} \right\|_{X(E)}, $$ from which it follows that $T\in \mathcal{L}_{X;Y^{\rm dual}}(E;F)$ and $ \left\|T \right\|_{X;Y^{\rm dual}}\leq \left\|\varphi_{T} \right\|$. \end{proof}
In the same fashion as norms on tensor products (see \cite[Section 6.1]{R. Ryan}), we say that a reasonable quasi-norm $\alpha$ is a {\it tensor quasi-norm} if:\\ $\bullet$ $\alpha$ is uniform, that is, for all Banach spaces $E_1,E_2, F_1, F_2$ and all operators $T_i \in {\cal L}(E_i,F_i)$, $i = 1,2$,
$$\|T_1 \otimes T_2 \colon E_1 \otimes_\alpha E_2 \longrightarrow F_1 \otimes_\alpha F_2\| \leq \|T_1\|\cdot \|T_2\|.$$ $\bullet$ $\alpha$ is finitely generated, that is, for all Banach spaces $E,F$ and any $u \in E \otimes F$, $$\alpha(u; E \otimes F) = \inf\left\{\alpha(u; M \otimes N) : u \in M \otimes N, M \in {\cal F}(E),N \in {\cal F}(F) \right\}, $$ where ${\cal F}(E)$ is the collection of all finite dimensional subspaces of $E$.
Let us see that, in the environment of sequence classes, tensor quasi-norms are not rare. A sequence class $X$ is said to be {\it finitely injective} if $\|(x_j)_{j=1}^k\|_{X(E)} \leq \|(i(x_j))_{j=1}^k\|_{X(F)}$ whenever $i \colon E \longrightarrow F$ is a metric injection, $k \in \mathbb{N}$ and $x_1, \ldots, x_k \in E$. If $X$ is also linearly stable, then we actually have $\|(x_j)_{j=1}^k\|_{X(E)} = \|(i(x_j))_{j=1}^k\|_{X(F)}$.
All sequence classes listed in Example \ref{exsec}, but $\ell_p\langle \cdot \rangle$, are finitely injective.
\begin{proposition} \label{porpr}
Let $X$ and $Y$ be sequence classes such that $\alpha_{X,Y}^{}$ is a reasonable quasi-norm. If $X$ and $Y$ are linearly stable and finitely injective, then $\alpha_{X,Y}^{}$ is a tensor quasi-norm. \end{proposition}
\begin{proof} Given $T_{i}\in \mathcal{L}(E_{i};F_{i})$, $i=1,2$, $u= \sum\limits_{j=1}^{n}x_{j}\otimes y_{j}\in E_1\otimes_{\alpha_{X,Y}^{}}E_2$, the linear stability of $X$ and $Y$ gives
\begin{align*}
\alpha_{X,Y}^{}\left(T_{1}\otimes T_{2}(u) \right)& =\alpha_{X,Y} \left(\sum_{j=1}^n T_1(x_j) \otimes T_2(y_j) \right) \\
&\leq \left\|(T_{1}(x_{j}))_{j=1}^{n} \right\|_{X(F_{1})}\cdot \left\|(T_{2}(y_{j}))_{j=1}^{n} \right\|_{Y(F_{2})}\\
&\leq \left\|T_{1} \right\|\cdot \left\|T_{2} \right\| \cdot \left\|(x_{j})_{j=1}^{n} \right\|_{X(E_{1})}\cdot \left\|(y_{j})_{j=1}^{n} \right\|_{Y(E_{2})}.
\end{align*}
Since this holds for every representation of $u$, it follows that $\alpha_{X,Y}^{}\left(T_{1}\otimes T_{2}(u) \right)\leq \left\|T_{1} \right\|\cdot \left\|T_{2} \right\|\cdot \alpha_{X,Y}^{}(u)$, so $\left\|T_{1}\otimes T_{2} \right\| \leq \left\| T_{1}\right\| \cdot \left\| T_{2}\right\|.$ This proves that $\alpha_{X,Y}$ is uniform.
Let $u\in E\otimes F$ be given. Given $\eta>0$, we can take a representation $u = \sum\limits_{j=1}^{n}x_{j}\otimes y_{j}$ so that $$\left\|(x_{j})_{j=1}^{n} \right\|_{X(E)}\cdot\left\|(y_{j})_{j=1}^{n} \right\|_{Y(F)} \leq \alpha_{X,Y}^{}(u; E\otimes F) + \eta. $$ Taking $M= {\rm span}\{x_{1},\ldots,x_{n}\}$ and $N= {\rm span}\{y_{1},\ldots,y_{n}\}$, we have $u\in M\otimes N$ and $\alpha_{X,Y}^{}(u;E\otimes F)\leq \alpha_{X,Y}^{}(u;M\otimes N)$ because $\alpha_{X,Y}^{}$ is uniform. Moreover, the finite injectivity of $X$ and $Y$ yields
\begin{align*}
\alpha_{X,Y}^{}(u;M\otimes N)&\leq \left\|(x_{j})_{j=1}^{n} \right\|_{X(M)} \cdot \left\|(y_{j})_{j=1}^{n} \right\|_{Y(N)} \\
&\leq\left\|(x_{j})_{j=1}^{n} \right\|_{X(E)}\cdot \left\|(y_{j})_{j=1}^{n} \right\|_{Y(F)} \leq \alpha_{X,Y}^{}(u; E\otimes F) + \eta.
\end{align*} It follows that $\alpha_{X,Y}^{}(u;E\otimes F)= \inf\left\lbrace \alpha_{X,Y}^{}(u;M\otimes N); u\in M\otimes N, M \in {\cal F}(E), N \in {\cal F}(F) \right\rbrace$, proving that $\alpha_{X,Y}$ is finitely generated. \end{proof}
Recall that a Banach operator ideal $[{\cal I}, \|\cdot\|_{\cal I}]$ is {\it maximal} (see \cite[p.\,197]{R. Ryan}) if it is the only Banach operator ideal $[{\cal J}, \|\cdot\|_{\cal J}]$ satisfying:\\
(i) ${\cal I}(E;F) \subseteq {\cal J}(E;F)$ for all Banach spaces $E$ and $F$ and $\|u\|_{\cal J} \leq \|u\|_{\cal I}$ for every $u \in {\cal I}(E;F)$, and\\
(ii) $\|u\|_{\cal J} = \|u\|_{\cal I}$ for every finite rank operator.
We denote by $\mathcal{CF}(E)$ the collection of all finite codimensional subspaces of $E$. For $M \in \mathcal{F}(E)$ we denote by $I_M \colon M \longrightarrow E$ the inclusion operator and for $L \in \mathcal{CF}(F)$ we denote by $Q_L \colon F \longrightarrow F/L$ the quotient operator.
\begin{theorem}\label{maxim}
Suppose that $\alpha_{X,Y}$ is a tensor quasi-norm and that $X$ and $Y$ are finitely determined or finitely dominated. For an operator $T\in \mathcal{L}(E;F)$, $T\in \mathcal{L}_{X;Y^{\rm dual}}(E;F)$ if and only if
$$s: = \sup\left\{\left\| Q_{L}\circ T \circ I_{M}\right\|_{X;Y^{\rm dual}} : (M,L)\in \mathcal{F}(E)\times \mathcal{CF}(F)\right\} < \infty,$$
and, in this case, $\left\|T \right\|_{X;Y^{\rm dual}} = s$. In particular, the Banach operator ideal $\mathcal{L}_{X;Y^{\rm dual}}$ is maximal. \end{theorem}
\begin{proof} Suppose that $T\in \mathcal{L}_{X;Y^{\rm dual}}(E;F)$. Being a finite rank operator, each $Q_{L}\circ T \circ I_{M}$ belongs to $\mathcal{L}_{X;Y^{\rm dual}}(M;F/L)$. The ideal inequality of the norm of $\mathcal{L}_{X;Y^{\rm dual}}$ gives
\[\left\|Q_{L}\circ T \circ I_{M} \right\|_{X;Y^{\rm dual}}\leq \left\| Q_{L}\right\|\cdot \left\| T\right\|_{X;Y^{\rm dual}}\cdot \left\| I_{M}\right\| =\left\| T\right\|_{X;Y^{\rm dual}},\] which proves that $s\leq \left\|T \right\|_{X;Y^{\rm dual}} < \infty.$
Conversely, suppose that $s < \infty$. Let $u\in E\otimes F^*$ and $\eta>0$ be given. As $\alpha_{X,Y}^{}$ is finitely generated (Proposition \ref{porpr}), there are $M\in \mathcal{F}(E)$, $N\in \mathcal{F}(F^*)$ and a representation $u=\sum\limits_{j=1}^{n}x_{j}\otimes y_{j}^*\in M\otimes N$ such that \[\alpha_{X,Y}^{}\left(u; M\otimes N \right)\leq (1+\eta)\alpha_{X,Y}^{}\left(u ; E\otimes F^* \right).\]
Let $L\in \mathcal{CF}(F)$ be such that $\left(F/L \right)^* \stackrel{1}{=}N$ by means of the operator $Q_{L}^*\colon \left(F/L \right)^* \longrightarrow N$. Choose functionals $\psi_{j}\in \left( F/L\right)^* $ such that $Q_{L}^*(\psi_{j})= y_{j}^*, j= 1,\ldots,n$. In the chain $$M \otimes_{\alpha_{X;Y}} N \stackrel{Id_M \otimes (Q_L^*)^{-1}}{\xrightarrow{\hspace*{2cm}}} M \otimes_{\alpha_{X;Y}}(F/L)^* \stackrel{\varphi_{Q_L \circ T \circ I_M}}{\xrightarrow{\hspace*{2cm}}} \mathbb{K}, $$
the operator $Id_M \otimes (Q_L^*)^{-1}$ is continuous because $\alpha_{X;Y}$ is uniform (Proposition \ref{porpr}), and the functional $\varphi_{Q_L \circ T \circ I_M}$ is continuous with $\left\|\varphi_{Q_{L}\circ T \circ I_{M}} \right\|\le \left\| Q_{L}\circ T \circ I_{M}\right\|_{X;Y^{\rm dual}} $ by Lemma \ref{1lema.ideal.maximal.qn} because $Q_{L}\circ T \circ I_{M}$ belongs to $\mathcal{L}_{X;Y^{\rm dual}}(M;F/L)$. It follows that
\begin{align*} |\varphi_T(u)| & = \left|\sum_{j=1}^n y_j^* ( T(x_j)) \right| = \left|\sum_{j=1}^n Q_L^*(\psi_j)(T(x_j))\right| \\
&= \left|\sum_{j=1}^n \psi_j(Q_L(T(x_j))\right| = \left|\sum_{j=1}^n \varphi_{Q_L \circ T \circ I_M}\left(x_j \otimes \psi_j \right)\right| \\
& = \left|\sum_{j=1}^n [ \varphi_{Q_L \circ T \circ I_M} \circ (Id_M \otimes (Q_L^*)^{-1})]\left( x_j \otimes y_j^* \right)\right|\\
&= \left| [ \varphi_{Q_L \circ T \circ I_M} \circ (Id_M \otimes (Q_L^*)^{-1})]\left( \sum_{j=1}^n x_j \otimes y_j^* \right)\right| \\
&= \left| [ \varphi_{Q_L \circ T \circ I_M} \circ (Id_M \otimes (Q_L^*)^{-1})](u)\right|\\
&\leq \|\varphi_{Q_L \circ T \circ I_M}\|\cdot \|Id_M \otimes (Q_L^*)^{-1}) \| \cdot \alpha_{X,Y}(u ; M \otimes N)\\
& \leq \left\| Q_{L}\circ T \circ I_{M}\right\|_{X;Y^{\rm dual}} \cdot \|Id_M \|\cdot \|(Q_L^*)^{-1}) \| \cdot (1+\eta)\alpha_{X,Y}(u ; E \otimes F^*)\\ & \leq s \cdot (1+\eta)\alpha_{X,Y}(u ; E \otimes F^*). \end{align*}
Making $\eta \longrightarrow 0^+$ we get $\left|\varphi_{T}(u) \right|\leq s\cdot \alpha_{X,Y}^{}\left(u \right)$, which implies the continuity of the functional $\varphi_{T}\colon E\otimes_{\alpha_{X,Y}^{}}F^*\longrightarrow \mathbb{K}$ and $\|\varphi_T\| \leq s$. Calling Lemma \ref{1lema.ideal.maximal.qn} once again it follows that $T\in \mathcal{L}_{X;Y^{\rm dual}}(E;F)$ and $\left\|T \right\|_{X;Y^{\rm dual}}\le \left\|\varphi_{T} \right\|\leq s .$ This completes the proof of the first assertion.
The second assertion follows from the first one combined with \cite[8.11]{R. Ryan} (see also \cite[Theorem 8.7.5]{A.Pietsch}). \end{proof}
The next corollary is just a combination of the theorem above with \cite[Corollary 17.8(4)]{A.Defant}.
\begin{corollary} Let $u \in {\cal L}(E;F)$ be given. Under the assumptions of Theorem \ref{maxim} we have $u \in \mathcal{L}_{X;Y^{\rm dual}}(E;F)$ if and only if $u^{**} \in \mathcal{L}_{X;Y^{\rm dual}}(E^{**};F^{**})$ and, in this case, $\|u\|_{X;Y^{\rm dual}} = \|u^{**}\|_{X;Y^{\rm dual}}$. \end{corollary}
\begin{examples}\rm (a) Theorem \ref{maxim} recovers the following well-known facts.\\ $\bullet$ The Banach ideal of absolutely $(q,p)$-summing operators: $$\Pi_{q,p} := {\cal L}_{\ell_p^w(\cdot); \ell_q(\cdot)} = {\cal L}_{\ell_p^w(\cdot); [\ell_{q^*}(\cdot)]^{\rm dual}},$$ $1 \leq p \leq q < \infty$, is maximal \cite[Proposition 10.2]{J.Diestel}. In particular, the ideal $\Pi_p$ of absolutely $p$-summing operators is maximal.\\ $\bullet$ The Banach ideal of Cohen strongly $(q,p)$-summing operators: $${\cal D}_{q,p} := {\cal L}_{\ell_p(\cdot); \ell_q\langle\cdot \rangle} = {\cal L}_{\ell_p(\cdot); [\ell^w_{q^*}(\cdot )]^{\rm dual}},$$ $1 \leq p \leq q < \infty$, is maximal. Although we found no reference to quote, we believe this is a well-known fact. \\ $\bullet$ The Banach ideal of cotype $q$ operators: $$\mathfrak{C}_q := {\cal L}_{{\rm RAD}(\cdot); \ell_q(\cdot)} = {\cal L}_{{\rm RAD}(\cdot); [\ell_{q^*}(\cdot)]^{\rm dual}},$$ $2 \leq q < \infty$, is maximal \cite[17.4]{A.Defant}.\\ {\rm (b)} Just to illustrate the new information that can be obtained from Theorem \ref{maxim} we mention that the Banach ideals $${\cal L}_{\ell_p^{\rm mid}(\cdot); \ell_q\langle\cdot \rangle} = {\cal L}_{\ell_p^{\rm mid}(\cdot); [\ell^w_{q^*}(\cdot )]^{\rm dual}} {\rm ~~and~~} {\cal L}_{\ell_p^{\rm mid}(\cdot); \ell_q (\cdot )} = {\cal L}_{\ell_p^{\rm mid}(\cdot); [\ell_{q^*}(\cdot )]^{\rm dual}},$$ which were studied in \cite{espaco.mid, J.R.Campos.J.Santos}, are maximal. \end{examples}
\section{The dual of $E \otimes_{\alpha_{X,Y}}F$}
As is usual in the case of tensor norms (see \cite{A.Defant, R. Ryan}), for the tensor quasi-norm $\alpha_{X,Y}$ we describe the linear functionals on $E \otimes_{\alpha_{X,Y}}F $ as linear operators from $E$ to $F^*$ and as bilinear forms on $E \times F$. As a consequence we show when these tensor quasi-norms satisfy a condition that is equivalent to maximality of the corresponding operator ideal in the case of tensor norms.
For the first part of this section, which comprises Theorems \ref{dualB} and \ref{Teo.prin.1}, we want $\alpha_{X,Y}$ to be a reasonable quasi-norm, so we will suppose that $X$ and $Y$ are monotone sequence classes and $\varepsilon \leq \alpha_{X,Y}$.
To describe the linear functionals on $E \otimes_{\alpha_{X,Y}}F $ as bilinear forms we need one more concept introduced in \cite{G. Botelho}. A bilinear form $A \colon E \times F \longrightarrow \mathbb{K}$ is said to be $(X,Y;\ell_1)$-summing if $(A(x_j,y_j))_{j=1}^\infty \in \ell_1$ whenever $(x_j)_{j=1}^\infty \in X(E)$ and $(y_j)_{j=1}^\infty \in Y(F)$. The space ${\cal L}_{X,Y;\ell_1}(E,F;\mathbb{K})$ of all such bilinear forms is a Banach space under the norm
$$\|A\|_{X,Y;\ell_1} = \sup\left\{\| (A(x_j,y_j))_{j=1}^\infty \|_1 : (x_j)_{j=1}^\infty \in B_{X(E)}, (y_j)_{j=1}^\infty \in B_{Y(F)}\right\}.$$
We also need a property that is neither weaker nor stronger than being spherically complete: a sequence class $X$ is said to be {\it finitely boundedly complete}, {\it FBC} for short, if for any Banach space $E$, every $n \in \mathbb{N}$ and all $(x_{j})_{j=1}^{n}\in X(E)$ and $(\lambda_{j})_{j=1}^{n}\in \ell_{\infty}$, it holds $(\lambda_{j}x_{j})_{j=1}^{n}\in X(E)$ and
$\left\|(\lambda_{j}x_{j})_{j=1}^{n} \right\|_{X(E)}\leq \left\|(\lambda_{j})_{j=1}^{n} \right\|_\infty\cdot \left\|(x_{j})_{j=1}^{n} \right\|_{X(E)}. $
It is easy to check that the sequence classes $c_0(\cdot), \ell_\infty(\cdot), \ell_p(\cdot), \ell_p^w(\cdot), \ell_p^u(\cdot), \ell_p\langle \cdot \rangle, \ell_p^{\rm mid}(\cdot)$, $1 \leq p < \infty$, are FBC. Kahane's contraction principle \cite[12.2]{J.Diestel} guarantees that ${\rm Rad}(\cdot)$ and ${\rm RAD}(\cdot)$ are FBC in the real case $\mathbb{K} = \mathbb{R}$.
\begin{theorem}\label{dualB}
Suppose that $X$ and $Y$ are finitely determined or finitely dominated and that one of them is FBC. Then,
\[(E\otimes_{\alpha_{X,Y}^{}}F)^*\stackrel{1}{=} \mathcal{L}_{X,Y;\ell_{1}}(E,F;\mathbb{K}).\] \end{theorem}
\begin{proof} Let us see that the map
$\Psi \colon \mathcal{L}_{X,Y;\ell_{1}}(E,F;\mathbb{K})\longrightarrow (E\otimes_{\alpha_{X,Y}^{}}F)^*$ given by
$$\Psi(A)\left(\sum_{j=1}^{n}x_{j}\otimes y_{j} \right)= \sum_{j=1}^{n}A(x_{j},y_{j}), $$ is a well defined bounded linear operator. First note that $\Psi(A)$ is the linearization of the bilinear form $A$, so it is a well defined linear functional on $E \otimes F$. To check its continuity with respect to $\alpha_{X,Y}$, note that, for $u=\sum\limits_{j=1}^{n}x_{j}\otimes y_{j}\in E\otimes F$, since $A\in \mathcal{L}_{X,Y;\ell_{1}}(E,F;\mathbb{K})$ we have
\begin{align*}
\left|\Psi(A)(u) \right| = \left|\sum_{j=1}^{n}A(x_{j},y_{j}) \right|\leq \sum_{j=1}^{n}\left|A(x_{j},y_{j}) \right| \leq \left\|A \right\|_{X,Y;\ell_{1}}\cdot \left\|(x_{j})_{j=1}^{n} \right\|_{X(E)}\cdot \left\|(y_{j})_{j=1}^{n} \right\|_{Y(F)}.
\end{align*}
Taking the infimum over all representations of $u$ it follows that $\left|\Psi(A)(u) \right|\leq \left\|A \right\|_{X,Y;\ell_{1}}\alpha_{X,Y}^{}(u)$, proving that $\Psi(A)$ is a bounded linear functional and
$\left\|\Psi(A) \right\|\leq \left\|A \right\|_{X,Y;\ell_{1}}$.
Now it is enough to prove that $\Psi$ is a surjective isometry. Given $\varphi\in (E\otimes_{\alpha_{X,Y}^{}}F)^*$, it is clear that $$A_{\varphi} \colon E\times F\longrightarrow \mathbb{K} : A_{\varphi}(x,y)= \varphi(x\otimes y),$$ is a bilinear form. Taking $(x_{j})_{j=1}^{n}$ in $E$ and $(y_{j})_{j=1}^{n}$ in $F$, assuming $X$ is FBC (the other case is analogous), we get
\begin{align*}
\left\|(A_{\varphi}(x_{j},y_{j}))_{j=1}^{n} \right\|_{1} &= \sup_{(\lambda_{j})_{j=1}^{\infty}\in B_{\ell_{\infty}}}\left|\sum_{j=1}^{n}\lambda_{j} A_{\varphi}(x_{j},y_{j}) \right|
= \sup_{(\lambda_{j})_{j=1}^{\infty}\in B_{\ell_{\infty}}}\left|\varphi\left( \sum_{j=1}^{n}(\lambda_{j}x_{j})\otimes y_{j}\right) \right|\\
&\leq \sup_{(\lambda_{j})_{j=1}^{\infty}\in B_{\ell_{\infty}}}\left\|\varphi \right\|\cdot \left\|(\lambda_{j}x_{j})_{j=1}^{n} \right\|_{X(E)}\cdot \left\|(y_{j})_{j=1}^{n} \right\|_{Y(F)}\\
&\leq \left\|\varphi \right\|\cdot \left\|(x_{j})_{j=1}^{n} \right\|_{X(E)}\cdot \left\|(y_{j})_{j=1}^{n} \right\|_{Y(F)}.
\end{align*} Using that $X$ and $Y$ are finitely determined we conclude that $A_{\varphi}\in \mathcal{L}_{X,Y;\ell_{1}}(E,F;\mathbb{K})$ and
$\left\|A_{\varphi} \right\|_{X,Y;\ell_{1}}\leq \left\| \varphi \right\|$. A straightforward computation shows that $\Psi(A_{\varphi})= \varphi$ and completes the proof. \end{proof}
Now we represent linear functionals on $E\otimes_{\alpha_{X,Y}^{}}F$ as linear operators from $E$ to $F^*$.
\begin{theorem}\label{Teo.prin.1}
Suppose that $X$ and $Y$ are finitely determined and $Y$ is spherically complete. Then, \[(E\otimes_{\alpha_{X,Y}^{}} F )^*\stackrel{1}{=} \mathcal{L}_{X;Y^{\rm dual}}(E;F^*).\] \end{theorem}
\begin{proof} Given $T \in \mathcal{L}_{X;Y^{\rm dual}}(E;F^*)$, call $\Psi(T)$ the linearization of the bilinear form $$(x,y) \in E \times F \mapsto T(x)(y) \in \mathbb{K}. $$ So, $\Psi(T)$ is a linear functional on $E \otimes F$ such that \[\Psi\left( T\right) \left( \sum_{j=1}^{n}x_{j}\otimes y_{j}\right) = \sum_{j=1}^{n}T\left( x_{j}\right) \left( y_{j}\right).\] To check its continuity with respect to $\alpha_{X,Y}$, note that for $u=\sum\limits_{j=1}^{n}x_{j}\otimes y_{j}\in E\otimes F$, denoting by $J_F \colon F \longrightarrow F^{**}$ the canonical embedding,
\begin{align*}
\left|\Psi(T)(u) \right|&\leq \sum_{j=1}^{n}\left|T(x_{j})(y_{j}) \right|= \sum_{j=1}^{n} \left| J_{F}(y_{j})(T(x_{j})) \right|\\
& \leq \left\|T \right\|_{X;Y^{\rm dual}} \cdot \left\|(x_{j})_{j=1}^{n} \right\|_{X(E)}\cdot \left\|(J_F(y_{j}))_{j=1}^{n} \right\|_{Y(F^{**})}\\
&\leq \left\|T \right\|_{X;Y^{\rm dual}} \cdot \left\|(x_{j})_{j=1}^{n} \right\|_{X(E)}\cdot \left\|(y_{j})_{j=1}^{n} \right\|_{Y(F)},
\end{align*}
where the last inequality follows from the linear stability of $Y$. Since this estimate holds for every representation of $u$, it follows that $\left|\Psi(T)(u) \right|\leq \left\|T \right\|_{X;Y^{\rm dual}}\cdot \alpha_{X,Y}^{}(u) $. Hence, $\Psi(T) \in (E\otimes_{\alpha_{X,Y}^{}} F )^*$, proving that $\Psi \colon \mathcal{L}_{X;Y^{\rm dual}}(E;F^*)\longrightarrow (E\otimes_{\alpha_{X,Y}^{}} F )^*$ is an (obviously linear) bounded operator with $\left\|\Psi(T) \right\|\leq \left\|T \right\|_{X;Y^{\rm dual}}. $ Recall that $\mathcal{L}_{X;Y^{\rm dual}}(E;F^*)$ is a Banach space because $Y$ is spherically complete.
Now it is enough to show that $\Psi$ is a surjective isometry. For $\varphi\in (E\otimes_{\alpha_{X,Y}^{}}F)^*$, the map $$T_{\varphi}\colon E\longrightarrow F^*~,~T_{\varphi}(x)(y)= \varphi(x\otimes y),$$ is clearly a bounded linear operator. Given $(x_{j})_{j=1}^{\infty} \in X(E)$, for every $n \in \mathbb{N}$,
\begin{align*}
\left\|\left( T_{\varphi}(x_{j})\right)_{j=1}^{n} \right\|_{Y^{\rm dual}(F^*)} = \sup_{(y_{j})_{j=1}^{\infty}\in B_{Y(F)}}\left|\sum_{j=1}^{n}\varphi(x_{j}\otimes y_{j})\right|
\leq \left\|\varphi \right\|\cdot\left\|(x_{j})_{j=1}^{n} \right\|_{X(E)}.
\end{align*}
Since $X$ and $Y^{\rm dual}$ are finitely determined, taking the supremum over $n$ it follows that $T_{\varphi}\in \mathcal{L}_{X;Y^{\rm dual}}(E;F^*)$ and
$\left\|T_{\varphi} \right\|_{X;Y^{\rm dual}} \leq \left\|\varphi \right\|$. The easily checked equality $\Psi(T_{\varphi})=\varphi$ completes the proof. \end{proof}
It is well known (see, e.g., \cite[Ex. 17.2]{A.Defant}) that a normed operator ideal $\cal I$ is maximal if and only if there exists a finitely generated tensor norm $\alpha$ such that \begin{equation}\label{99999} {\cal I}(E;F)\stackrel{1}{=} (E \otimes_{\alpha}F^*)^*\cap \mathcal{L}(E,F) \end{equation} for all Banach spaces $E$ and $F$. We finish the paper by establishing conditions under which the tensor quasi-norm $\alpha_{X,Y}^{}$ satisfies \eqref{99999}. Therefore, according to Propositions \ref{razoavel} and \ref{porpr}, we will assume that $X$ and $Y$ are linearly stable, monotone and finitely injective sequence classes and $\varepsilon \leq \alpha_{X,Y}$.
\begin{theorem} Suppose that $X$ and $Y$ are finitely determined or finitely dominated and that $Y$ is spherically complete. Then, regardless of the Banach spaces $E$ and $F$,
$$\mathcal{L}_{X;Y^{\rm dual}}(E;F)\stackrel{1}{=}(E\otimes_{\alpha_{X,Y}^{}}F^*)^*\cap \mathcal{L}(E;F).$$ \end{theorem}
\begin{proof} The assumptions guarantee that $\mathcal{L}_{X;Y^{\rm dual}}(E;F)$ is a Banach space. The same arguments used before show that $\Phi \colon \mathcal{L}_{X;Y^{\rm dual}}(E,F)\longrightarrow (E\otimes_{\alpha_{X,Y}}F^*)^*\cap \mathcal{L}(E,F)$
given by
\[\Phi(T)\left(\sum_{j=1}^{n}x_{j}\otimes \psi_{j} \right) = \sum_{j=1}^{n} \psi_{j}(T(x_{j})),\] is a well defined linear operator. For $u=\sum\limits_{j=1}^{n}x_{j}\otimes \psi_{j}\in E\otimes F^*$,
\begin{align*}
\left|\Phi(T)(u) \right| \leq \sum_{j=1}^{n}\left|\psi_{j}(T(x_{j})) \right| \leq \left\|T \right\|_{X;Y^{\rm dual}}\cdot \left\|(x_{j})_{j=1}^{n} \right\|_{X(E)} \cdot \left\|(\psi_{j})_{j=1}^{n} \right\|_{Y(F^*)}.
\end{align*} Again, since this holds for any representation of $u$ we have $
\left|\Phi(T)(u) \right|\leq \left\|T \right\|_{X;Y^{\rm dual}} \cdot \alpha_{X,Y}^{}(u)
$, which proves, in particular, that $\Phi(T)$ is continuous and $\left\|\Phi(T) \right\|\leq \left\| T\right\|_{X;Y^{\rm dual}}.$
Once again, it is enough to show that $\Phi$ is a surjective isometry. To do so, given $\varphi\in (E\otimes_{\alpha_{X,Y}^{}}F^*)^*\cap \mathcal{L}(E,F)$ consider the continuous linear operator $T_{\varphi} \colon E\longrightarrow J_{F}(F) \subseteq F^{**}$ given by $T_{\varphi}(x)(\psi)= \varphi(x\otimes \psi)$. Let $M\in \mathcal{F}(E)$ and $L\in \mathcal{CF}(F)$ be given. Considering the isometric isomorphism $Q_{L}^* \colon (F/L)^{*}\longrightarrow L^{\perp}$, Theorem \ref{Teo.prin.1} gives
\begin{equation}\label{eq.iso.iso}
\left(M\otimes_{\alpha_{X,Y}^{}}\left(F/L\right)^* \right)^* \stackrel{1}{=} \mathcal{L}_{X;Y^{\rm dual}}\left(M; \left(F/L \right)^{**} \right) \stackrel{1}{=} \mathcal{L}_{X;Y^{\rm dual}}\left(M;F/L \right).
\end{equation} Considering the composition
$$M \otimes_{\alpha_{X;Y}} (F/L)^* \stackrel{Id_M \otimes Q_L^*}{\xrightarrow{\hspace*{2cm}}} M \otimes_{\alpha_{X;Y}}L^\perp \stackrel{\varphi|_{M \otimes_{\alpha_{X;Y}}L^\perp }}{\xrightarrow{\hspace*{2cm}}} \mathbb{K}, $$
we have $\varphi|_{M \otimes_{\alpha_{X;Y}}L^\perp }\circ (Id_M \otimes Q_L^*) \in \left(M\otimes_{\alpha_{X,Y}^{}}\left(F/L\right)^* \right)^*$. By (\ref{eq.iso.iso}) there is a unique $T \in \mathcal{L}_{X;Y^{\rm dual}}\left(M;F/L \right)$ such that $\Psi(T) = \varphi|_{M \otimes_{\alpha_{X;Y}}L^\perp }\circ (Id_M \otimes Q_L^*)$, where $\Psi$ is the isomorphism constructed in the proof of Theorem \ref{Teo.prin.1}. For every tensor $\sum\limits_{j=1}^{n}x_{j}\otimes \psi_{j}\in M\otimes L^{\perp}$, we have $\sum\limits_{j=1}^{n}x_{j}\otimes (Q^*_{L})^{-1}(\psi_{j})\in M\otimes \left( F/L\right)^* $, so
\begin{align*}\Psi(T)& \left(\sum_{j=1}^{n}x_{j}\otimes (Q_{L}^*)^{-1}(\psi_{j}) \right) = \varphi|_{M \otimes_{\alpha_{X;Y}}L^\perp }\circ (Id_{M}\otimes Q_{L}^*)\left(\sum_{j=1}^{n}x_{j}\otimes (Q_{L}^*)^{-1}(\psi_{j}) \right)\\
&= \varphi\left(\sum_{j=1}^{n}x_{j}\otimes \psi_{j} \right)= \sum_{j=1}^{n} T_{\varphi}(x_{j})(\psi_{j}) = \sum_{j=1}^{n}\psi_{j}(T_{\varphi}\circ Id_{M})(x_{j})\\
&= \sum_{j=1}^{n}(T_{\varphi}\circ Id_{M})^*(\psi_{j})(x_{j})= \sum_{j=1}^{n}(Id_{M}^*\circ T_{\varphi}^*)(\psi_{j})(x_{j})\\
&= \sum_{j=1}^{n}(Id_{M}^*\circ T_{\varphi}^*\circ Q_{L}^*)\left((Q_{L}^*)^{-1}(\psi_{j})\right)(x_{j})= \sum_{j=1}^{n}(Q_{L}^*)^{-1}(\psi_{j})(Q_{L}\circ T_{\varphi}\circ Id_{M})(x_{j})\\ & = \Psi(Q_L \circ T_\varphi \circ Id_M)\left(\sum_{j=1}^{n}x_{j}\otimes (Q_{L}^*)^{-1}(\psi_{j}) \right). \end{align*}
The injectivity of $\Psi$ gives $T= Q_{L}\circ T_{\varphi}\circ I_{M}$, and the fact that it is an isometry yields \begin{align*}
\left\|Q_{L}\circ T_{\varphi}\circ I_{M} \right\|_{X;Y^{\rm dual}}& = \|\Psi(T)\|= \left\|\varphi|_{M \otimes_{\alpha_{X;Y}}L^\perp }\circ (Id_M \otimes Q_L^*)\right\|\\
&\leq \left\| \varphi\right\|\cdot \left\| I_{M}\otimes Q_{L}^*\right\| \leq \left\| \varphi\right\|. \end{align*} It follows from Theorem \ref{maxim} that $T_{\varphi}\in \mathcal{L}_{X;Y^{\rm dual}}(E;F)$ and
$$\left\|T_{\varphi} \right\|_{X;Y^{\rm dual}}\leq \sup_{M,L}\left\|Q_{L}\circ T_{\varphi}\circ I_{M} \right\|_{X;Y^{\rm dual}} \leq \left\|\varphi \right\|.$$ Finally, it is not difficult to see that $\Phi(T_{\varphi})=\varphi$. \end{proof}
Plenty of concrete cases for which the theorem above applies can be provided just bearing in mind all that was said about the sequence classes listed in Example \ref{exsec}.
\noindent Faculdade de Matem\'atica\\ Universidade Federal de Uberl\^andia\\ 38.400-902 -- Uberl\^andia -- Brazil\\ e-mail: botelho@ufu.br\\
\noindent Departamento de Ci\^{e}ncias Exatas\\ Universidade Federal da Para\'iba\\ 58.297-000 -- Rio Tinto -- Brazil\\ \hspace*{1,7cm} and
\noindent Departamento de Matem\'atica\\ Universidade Federal da Para\'iba\\ 58.051-900 -- Jo\~ao Pessoa -- Brazil
\noindent e-mails: jamilson@dcx.ufpb.br \, and/or \, jamilsonrc@gmail.com\\
\noindent Departamento de Matem\'atica\\ Universidade Federal da Para\'iba\\ 58.051-900 -- Jo\~ao Pessoa -- Brazil\\ e-mail: llucascarvalho23@yahoo.com.br
\end{document} |
\begin{document}
\setcounter{page}{1}
\title[Domination number in the annihilating-submodule graph]{Domination number in the annihilating-submodule graph
of modules over commutative rings}
\author[H. Ansari-Toroghy and S. Habibi]{H. Ansari-Toroghy$^1$ and S. Habibi$^2$}
\authorsaddresses{$^1$ Department of pure Mathematics,\\ Faculty of mathematical Sciences,\\ University of Guilan, P. O. Box 41335-19141, Rasht, Iran.\\ e-mail: ansari@guilan.ac.ir\\
$^2$ School of Mathematics, Institute for Research in Fundamental Sciences (IPM), P.O. Box: 19395-5746, Tehran, Iran.\\ Department of pure Mathematics, Faculty of mathematical Sciences, University of Guilan, P. O. Box 41335-19141, Rasht, Iran. \\ e-mail: habibishk@gmail.com} \subjclass[2010]{13C13, 13C99, 05C75} \keywords{Commutative rings, annihilating-submodule graph, domination number.\\This research was in part supported by a grant from IPM (No. 96130028)}
\begin{abstract} Let $M$ be a module over a commutative ring $R$. The annihilating-submodule graph of $M$, denoted by $AG(M)$, is a simple graph in which a non-zero submodule $N$ of $M$ is a vertex if and only if there exists a non-zero proper submodule $K$ of $M$ such that $NK=(0)$, where $NK$, the product of $N$ and $K$, is denoted by $(N:M)(K:M)M$ and two distinct vertices $N$ and $K$ are adjacent if and only if $NK=(0)$. This graph is a submodule version of the annihilating-ideal graph and under some conditions, is isomorphic with an induced subgraph of the Zariski topology-graph $G(\tau_T)$ which was introduced in (The Zariski topology-graph of modules over commutative rings, Comm. Algebra., 42 (2014), 3283--3296). In this paper, we study the domination number of $AG(M)$ and some connections between the graph-theoretic properties of $AG(M)$ and algebraic properties of module $M$. \end{abstract} \maketitle
\section{Introduction}
Throughout this paper $R$ is a commutative ring with a non-zero identity and $M$ is a unital $R$-module. By $N\leq M$ (resp. $N< M$) we mean that $N$ is a submodule (resp. proper submodule) of $M$.
Define $(N:_{R}M)$ or simply $(N:M)=\{r\in R|$ $rM\subseteq N\}$ for any $N\leq M$. We denote $((0):M)$ by $Ann_{R}(M)$ or simply
$Ann(M)$. $M$ is said to be faithful if $Ann(M)=(0)$. Let $N, K\leq M$. Then the product of $N$ and $K$, denoted by $NK$, is defined by $(N:M)(K:M)M$ (see \cite{af07}). Define $ann(N)$ or simply $annN=\{m\in M|$ $m(K:M)=0\}$.
The prime spectrum of $M$, denoted by $Spec(M)$, is the set of all prime submodules of $M$; $Max(M)$ is the set of all maximal submodules of $M$; and $J(M)$, the Jacobson radical of $M$, is the intersection of all elements of $Max(M)$.
There are many papers on assigning graphs to rings or modules (see, for example, \cite{al99, ah14, b88, br11}). The annihilating-ideal graph $AG(R)$ was introduced and studied in \cite{br11}. $AG(R)$ is a graph whose vertices are ideals of $R$ with nonzero annihilators and in which two vertices $I$ and $J$ are adjacent if and only if $IJ=(0)$. Later, it was modified and further studied by many authors (see \cite{aa12, aa13, aa14, nmk, ts}).
In \cite{ah14}, the present authors introduced and studied the graph $G(\tau_T)$ (resp. $AG(M)$), called the \textit{Zariski topology-graph} (resp. \textit {the annihilating-submodule graph}), where $T$ is a non-empty subset of $Spec(M)$.
$AG(M)$ is an undirected graph with vertices $V(AG(M))$= $\{N \leq M |$ there exists $(0)\neq K<M$ with $NK=(0)$\}. In this graph, distinct vertices $N,L \in V(AG(M))$ are adjacent if and only if
$NL=(0)$ (see \cite{ah16, ah160}). Let $AG(M)^{*}$ be the subgraph of $AG(M)$ with vertices $V(AG(M)^{*})=\{ N<M$ with $(N:M)\neq Ann(M)|$ there exists a submodule $K<M$ with $(K:M)\neq Ann(M)$ and $NK=(0)\}$. By \cite[Theorem 3.4]{ah14}, one conclude that $AG(M)^{*}$ is a connected subgraph. Note that $M$ is a vertex of $AG(M)$ if and only if there exists a nonzero proper submodule $N$ of $M$ with $(N:M)=Ann(M)$ if and only if every nonzero submodule of $M$ is a vertex of $AG(M)$. Clearly, if $M$ is not a vertex of $AG(M)$, then $AG(M)=AG(M)^{*}$. In \cite[Lemma 2.8]{ah140}, we showed that under some conditions, $AG(M)$ is isomorphic with an induced subgraph of the Zariski topology-graph $G(\tau_T)$.
In this paper, we study the domination number of $AG(M)$ and some connections between the graph-theoretic properties of $AG(M)$ and algebraic properties of module $M$.
A prime submodule of $M$ is a submodule $P\neq M$ such that whenever $re\in P$ for some
$r\in R$ and $e \in M$, we have $r\in (P:M)$ or $e\in P$ \cite{lu84}.
The notations $Z(R)$ and $Nil(R)$ will denote the set of all zero-divisors and the set of all nilpotent elements of $R$, respectively. Also, $Z_{R}(M)$ or simply $Z(M)$, the set of zero divisors on $M$, is the set $\{r\in R|$ $rm=0$ for some $0\neq m\in M \}$. If $Z(M)=0$, then we say that $M$ is a domain. An ideal $I\leq R$ is said to be nil if $I$ consists of nilpotent elements.
Let us introduce some graphical notions and denotations that are used in what follows: A graph $G$ is an ordered triple $(V(G), E(G), \psi_G )$ consisting of a nonempty set of vertices,
$V(G)$, a set $E(G)$ of edges, and an incident function $\psi_G$ that associates an
unordered pair of distinct vertices with each edge. The edge $e$ joins $x$
and $y$ if $\psi_G(e)=\{x, y\}$, and we
say $x$ and $y$ are adjacent. The number of edges incident at $x$
in $G$ is called the degree of the vertex $x$ in $G$ and is
denoted by $d_G(x)$ or simply $d(x)$.
A path in graph $G$ is a finite sequence of vertices $\{x_0, x_1,\ldots ,x_n\}$, where $x_{i-1}$ and $x_i$ are adjacent for each $1\leq i\leq n$ and we write $x_{i-1} - x_i$ when there exists an edge between $x_{i-1}$ and $x_i$. The distance between two vertices $x$ and $y$, denoted $d(x, y)$, is the length of the shortest path from $x$ to $y$. The diameter of a connected graph $G$ is the maximum distance between two distinct vertices of $G$. For any vertex $x$ of a connected graph $G$, the eccentricity of $x$, denoted $e(x)$, is the maximum of the distances from $x$ to the other vertices of $G$. The set of vertices with minimum eccentricity is called the center of the graph $G$, and this minimum eccentricity value is the radius of $G$. For some $U\subseteq V(G)$, we denote by $N(U)$, the set of all vertices of $G\setminus U$ adjacent to at least one vertex of $U$ and $N[U]=N(U)\cup U$.
A graph $H$ is a subgraph of $G$, if $V(H)\subseteq V(G)$, $E(H)\subseteq E(G)$, and $\psi_H$ is the restriction of $\psi_G$ to $E(H)$. A subgraph $H$ of $G$ is a spanning subgraph of $G$ if $V(H)=V(G)$. A spanning subgraph $H$ of $G$ is called a perfect matching of $G$ if every vertex of $G$ has degree 1.
A clique of a graph is a complete subgraph and the supremum of the sizes of cliques in $G$, denoted by $cl(G)$, is called the clique number of $G$. Let $\chi(G)$ denote the chromatic number of the graph $G$, that is, the minimal number of colors needed to color the vertices of $G$ so that no two adjacent vertices have the same color. Obviously $\chi(G)\geq cl(G)$.
A subset $D$ of $V(G)$ is called a dominating set if every vertex of $G$ is either in $D$ or adjacent to at least one vertex in $D$. The domination number of $G$, denoted by $\gamma(G)$, is the number of vertices in a smallest dominating set of $G$. A total dominating set of a graph $G$ is a set $S$ of vertices of $G$ such that every vertex is adjacent to a vertex in $S$. The total domination number of $G$, denoted by $\gamma_t(G)$, is the minimum cardinality of a total dominating set. A dominating set of cardinality $\gamma(G)$ ($\gamma_t(G)$) is called a $\gamma$-set ($\gamma_t$-set). A dominating set $D$ is a connected dominating set if the subgraph $<D>$ induced by $D$ is a connected subgraph of $G$. The connected domination number of $G$, denoted by $\gamma_c(G)$, is the minimum cardinality of a connected dominating set of $G$. A dominating set $D$ is a clique dominating set if the subgraph $<D>$ induced by $D$ is complete in $G$. The clique domination number $\gamma_{cl}(G)$ of $G$ equals the minimum cardinality of a clique dominating set of $G$. A dominating set $D$ is a paired-dominating set if the subgraph
$<D>$ induced by $D$ has a perfect matching. The paired-domination number $\gamma_{pr}(G)$ of $G$ equals the minimum cardinality of a paired-dominating set of $G$.
A vertex $u$ is a neighbor of $v$ in $G$, if $uv$ is an edge of
$G$, and $u\neq v$. The set of all neighbors of $v$ is the open
neighborhood of $v$ or the neighbor set of $v$, and is denoted by
$N(v)$; the set $N[v]=N(v)\cup \{v\}$ is the closed neighborhood
of $v$ in $G$.
Let $S$ be a dominating set of a graph $G$, and $u\in S$. The
private neighborhood of $u$ relative to $S$ in $G$ is the set of
vertices which are in the closed neighborhood of $u$, but not in
the closed neighborhood of any vertex in $S\setminus
\{u\}$. Thus the private neighborhood $P_N(u, S)$ of $u$ with
respect to $S$ is given by $P_N(u, S)=N[u]\setminus (\cup_{v\in S\setminus \{u\}}
N[v])$. A set $S\subseteq V(G)$ is called irredundant if every
vertex $v$ of $S$ has at least one private neighbor. An irredundant set $S$ is a
maximal irredundant set if for every vertex $u \in V\setminus S$, the set $S\cup \{u\}$ is not irredundant. The irredundance number $ir(G)$ is the minimum cardinality of maximal irredundant sets. There are so many domination parameters in the literature and for more details one can refer \cite{hhs}.
A bipartite graph is a graph whose vertices can be divided into two disjoint sets $U$ and $V$ such that every edge connects a vertex in $U$ to one in $V$; that is, $U$ and $V$ are each independent sets and complete bipartite graph on $n$ and $m$ vertices, denoted by $K_{n, m}$, where $V$ and $U$ are of size $n$ and $m$, respectively, and $E(G)$ connects every vertex in $V$ with all vertices in $U$. Note that a graph $K_{1, m}$ is called a star graph and the vertex in the singleton partition is called the center of the graph. We denote by $P_{n}$ a path of order $n$ (see \cite{r05}).
In section 2, a dominating set of $AG(M)$ is constructed using elements of the center when $M$ is an Artinian module. Also we prove that the domination number of $AG(M)$ is equal to the number of factors in the Artinian decomposition of $M$ and we also find several domination parameters of $AG(M)$. In section 3, we study the domination number of the annihilating-submodule graphs for reduced rings with finitely many minimal primes and faithful modules. Also, some relations between the domination numbers and the total domination numbers of annihilating-submodule graphs are studied.
The following results are useful for further reference in this paper.
\begin{prop}\label{p1.1} Suppose that $e$ is an idempotent element of $R$. We have the following statements.
\begin {itemize} \item [(a)] $R=R_{1}\times R_{2}$, where $R_{1}=eR$ and $R_{2}=(1-e)R$. \item [(b)] $M=M_{1}\times M_{2}$, where $M_{1}=eM$ and $M_{2}=(1-e)M$. \item [(c)] For every submodule $N$ of $M$, $N=N_{1}\times N_{2}$ such that $N_{1}$ is an $R_{1}$-submodule $M_{1}$, $N_{2}$ is an $R_{2}$-submodule $M_{2}$, and $(N:_{R}M)=(N_{1}:_{R_{1}}M_{1})\times (N_{2}:_{R_{2}}M_{2})$. \item [(d)] For submodules $N$ and $K$ of $M$, $NK=N_{1}K_{1} \times N_{2}K_{2}$ such that $N=N_{1}\times N_{2}$ and $K=K_{1}\times K_{2}$. \item [(e)] Prime submodules of $M$ are $P\times M_{2}$ and $M_{1}\times Q$, where $P$ and $Q$ are prime submodules of $M_{1}$ and $M_{2}$, respectively. \end{itemize}
\end{prop}
\begin{proof} This is clear. \end{proof}
We need the following results.
\begin{lem}\label{l1.2} (See \cite[Proposition 7.6]{af74}.) Let $R_{1}, R_{2}, \ldots , R_{n}$ be non-zero ideals of $R$. Then the following statements are equivalent:
\begin{itemize} \item [(a)] $R= R_{1} \times \ldots \times R_{n}$; \item [(b)] As an abelian group $R$ is the direct sum of $ R_{1}, \ldots , R_{n}$; \item [(c)] There exist pairwise orthogonal idempotents $e_{1},\ldots, e_{n}$ with $1=e_{1}+ \ldots +e_{n}$, and $R_{i}=Re_{i}$, $i=1, \ldots ,n$. \end{itemize}
\end{lem}
\begin{lem}\label{l1.3} (See \cite[Theorem 21.28]{l91}.) Let $I$ be a nil ideal in $R$ and $u\in R$ be such that
$u+I$ is an idempotent in $R/I$. Then there exists an idempotent
$e$ in $uR$ such that $e-u\in I$. \end{lem}
\begin{lem}\label{l1.4} (See \cite[Lemma 2.4]{ah16}.) Let $N$ be a minimal submodule of $M$ and let $Ann(M)$ be a nil ideal. Then we have $N^{2}=(0)$ or $N=eM$ for some idempotent $e\in R$. \end{lem}
\begin{prop}\label{p1.5} Let $R/Ann(M)$ be an Artinian ring and let $M$ be
a finitely generated module. Then every nonzero proper submodule $N$ of $M$ is a vertex in $AG(M)$. \end{prop}
\begin{thm}\label{t1.6} (See \cite[Theorem 2.5]{ah16}.) Let $Ann(M)$ be a nil ideal. There exists a vertex of $AG(M)$ which is adjacent to every other vertex if and only if $M=eM\oplus (1-e)M$, where $eM$ is a simple module and $(1-e)M$ is a prime module for some idempotent $e\in R$, or $Z(M)=Ann((N:M)M)$, where $N$ is a nonzero proper submodule of $M$ or $M$ is a vertex of $AG(M)$. \end{thm}
\begin{thm}\label{t1.7} (See \cite[Theorem 3.3]{ah16}.) Let $M$ be a faithful module. Then the following statements are equivalent.
\begin{itemize} \item [(a)] $\chi(AG(M)^{*})=2$. \item [(b)] $AG(M)^{*}$ is a bipartite graph with two nonempty parts. \item [(c)] $AG(M)^{*}$ is a complete bipartite graph with two nonempty parts. \item [(d)] Either $R$ is a reduced ring with exactly two minimal prime ideals, or $AG(M)^{*}$ is a star graph with more than one vertex. \end{itemize}
\end{thm}
\begin{cor}\label{c1.8} (See \cite[Corollary 3.5]{ah16}.) Let $R$ be a reduced ring and assume
that $M$ is a faithful module. Then the following statements are equivalent.
\begin{itemize} \item [(a)] $\chi(AG(M)^{*})=2$. \item [(b)] $AG(M)^{*}$ is a bipartite graph with two nonempty parts. \item [(c)] $AG(M)^{*}$ is a complete bipartite graph with two nonempty parts. \item [(d)] $R$ has exactly two minimal prime ideals. \end{itemize}
\end{cor}
\begin{prop}\label{p1.9} (See \cite[Proposition 3.9]{hhs}.) Every minimal dominating set in a graph $G$ is a maximal irredundant set of $G$. \end{prop}
\section{Domination number in the annihilating-submodule graph for Artinian modules}
The main goal in this section is to obtain the values of certain domination parameters of the annihilating-submodule graph for Artinian modules.
Recall that $M$ is a vertex of $AG(M)$ if and only if there exists a nonzero proper submodule $N$ of $M$ with $(N:M)=Ann(M)$ if and only if every nonzero submodule of $M$ is a vertex of $AG(M)$. In this case, the vertex $N$ is adjacent to every other vertex. Hence $\gamma(AG(M))=1=\gamma_t((AG(M)))$. So we assume that \textbf{throughout this paper $M$ is not a vertex of $AG(M)$}. Clearly, if $M$ is not a vertex of $AG(M)$, \textbf{then $AG(M)=AG(M)^{*}$.}
We start with the following remark which completely characterizes all modules for which $\gamma((AG(M))) = 1$.
\begin{rem}\label{r2.1} Let $Ann(M)$ be a nil ideal. By Theorem \ref{t1.6}, there exists a vertex of $AG(M)$ which is adjacent to every other vertex if and only if $M=eM\oplus (1-e)M$, where $eM$ is a simple module and $(1-e)M$ is a prime module for some idempotent $e\in R$, or $Z(M)=Ann((N:M)M)$, where $N$ is a nonzero proper submodule of $M$ or $M$ is a vertex of $AG(M)$. Now, let $Ann(M)$ be a nil ideal and $M$ be a domain module. Then $\gamma((AG(M))) = 1$ if and only if $M=eM\oplus (1-e)M$, where $eM$ is a simple module and $(1-e)M$ is a prime module for some idempotent $e\in R$. \end{rem}
\begin{thm}\label{t2.2} Let $M$ be a f.g Artinian local module. Assume that $N$ is the unique maximal submodule of $M$. Then the radius of $AG(M)$ is $0$ or $1$ and the center of $AG(M)$ is
$\{K\subseteq ann(N)| K\neq (0)$ is a submodule in $M\}$. \end{thm}
\begin{proof}
If $N$ is the only non-zero proper submodule of $M$, then $AG(M)\cong K_1$, $e(N) = 0$ and the radius of $AG(M)$ is $0$. Assume that the number of non-zero proper submodules of $M$ is greater than $1$. Since $M$ is a f.g Artinian module, there exists $m\in \mathbb{N}$, $m > 1$ such that $N^m = (0)$ and $N^{m-1}\neq (0)$. For any non-zero submodule $K$ of $M$, $KN^{m-1}\subseteq NN^{m-1} = (0)$ and so $d(N^{m-1}, K) = 1$. Hence $e(N^{m-1}) = 1$ and so the radius of $AG(M)$ is $1$. Suppose $K$ and $L$ are arbitrary non-zero submodules of $M$ and $K\subseteq ann(N)$. Then $KL\subseteq KN = (0)$ and hence $e(K) = 1$. Suppose $(0)\neq K'
\nsubseteq ann(N)$. Then $K'N\neq (0)$ and so $e(K') > 1$. Hence the center of $AG(M)$ is $\{K\subseteq ann(N)| K\neq (0)$ is a submodule in $M\}$.
\end{proof}
\begin{cor}\label{c2.3} Let $M$ be a f.g Artinian local module and let $N$ be the unique maximal submodule of $M$. Then the following hold.
\begin{itemize} \item [(a)] $\gamma(AG(M))=1$.
\item [(b)] $D$ is a $\gamma$-set of $AG(M)$ if and only if $D\subseteq ann(N)$.
\end{itemize} \end{cor}
\begin{proof}
$(a)$ Trivial from Theorem \ref{t2.2}.\\
$(b)$ Let $D = \{K\}$ be a $\gamma$-set of $AG(M)$. Suppose $K\nsubseteq ann(N)$.
Then $KN\neq (0)$ and so $N$ is not dominated by $K$, a contradiction. Conversely, suppose $D\subseteq ann(N)$. Let $K$ be an arbitrary vertex in
$AG(M)$. Then $KL\subseteq NL = (0)$ for every $L\in D$. i.e., every vertex $K$ is adjacent to every $L\in D$. If $|D| > 1$, then $D\setminus \{L'\}$ is also a dominating set of $AG(M)$ for some
$L'\in D$ and so $D$ is not minimal. Thus $|D| = 1$ and so $D$ is a $\gamma$-set by $(a)$. \end{proof}
\begin{thm}\label{t2.4} Let $M=\oplus _{i=1}^n M_i$, where $M_i$ is a f.g Artinian local module for all $1\leq i\leq n$ and $n\geq 2$. Then the radius of
$AG(M)$ is $2$ and the center of $AG(M)$ is $\{K\subseteq J(M)| K\neq (0)$ is a submodule in $M \}$. \end{thm}
\begin{proof} Let $M=\oplus _{i=1}^n M_i$, where $M_i$ is a f.g Artinian local module for all $1\leq i\leq n$ and $n\geq 2$. Let $J_i$ be the unique maximal submodule in $M_i$ with nilpotency $n_i$. Note that
$Max(M) = \{N_1,\ldots ,N_n| N_i = M_1 \oplus \ldots \oplus M_{i-1} \oplus J_i\oplus M_{i+1}\oplus \ldots \oplus M_n, 1\leq i\leq n\}$ is the set of all maximal submodules in $M$. Consider $D_i = (0) \oplus \ldots \oplus (0) \oplus J_i^{n_i-1}\oplus (0)\oplus \ldots \oplus (0)$ for $1\leq i\leq n$. Note that $J(M) = J_1\oplus \ldots \oplus J_n$ is the Jacobson radical of $M$ and any non-zero submodule in $M$ is adjacent to $D_i$ for some $i$. Let $K$ be any non-zero submodule of $M$. Then $K=\oplus _{i=1}^n K_i$, where $K_i$ is a submodule of $M_i$.\\ \textbf{Case 1}. If $K = N_i$ for some $i$, then $KD_j\neq (0)$ and $KN_j\neq (0)$ for all $j\neq i$. Note that $N(K)=\{(0) \oplus
\ldots \oplus (0) \oplus L_i\oplus (0)\oplus \ldots \oplus (0)| J_iL_i= (0)$, $L_i$ is a nonzero submodule in $M_i \}$. Clearly $N(K)\cap N(N_j) = (0)$, $d(K,N_j)\neq 2$ for all $j\neq i$, and so $K- D_i - D_j
- N_j$ is a path in $AG(M)$. Therefore $e(K) = 3$ and so $e(N) = 3$ for all $N\in Max(M)$.\\ \textbf{Case 2}. If $K\neq D_i$ and $K_i \subseteq J_i$ for all $i$. Then $KD_i = (0)$ for all $i$. Let $L$ be any non-zero submodule of $M$ with $KL\neq (0)$. Then $LD_j = (0)$ for some $j$, $K - D_j - L$ is a path in $AG(M)$ and so $e(K) = 2$.\\ \textbf{Case 3}. If $K_i = M_i$ for some $i$, then $KD_i\neq (0)$, $KN_i \neq (0)$ and $KD_j = (0)$ for some $j\neq i$. Thus $K - D_j - D_i - N_i$ is a path in $AG(M)$, $d(K,N_i) = 3$ and so $e(K) =
3$. Thus $e(K) = 2$ for all $K\subseteq J(M)$. Further note that in all the cases center of $AG(M)$ is $\{K\subseteq J(M)| K\neq (0)$ is a submodule in $M \}$. \end{proof}
In view of Theorems \ref{t2.2} and \ref{t2.4}, we have the following corollary.
\begin{cor}\label{c2.5} Let $M=\oplus _{i=1}^n M_i$, where $M_i$ is a simple module for all $1\leq i\leq n$ and $n\geq 2$. Then the radius of $AG(M)$ is $1$ or $2$ and the center of $AG(M)$ is $\cup_{i=1}^n D_i$, where $D_i = (0) \oplus \ldots \oplus (0) \oplus M_i\oplus (0)\oplus \ldots \oplus (0)$ for $1\leq i\leq n$. \end{cor}
\begin{thm}\label{t2.6} Let $M=\oplus _{i=1}^n M_i$, where $M_i$ is a f.g Artinian local module for all $1\leq i\leq n$ and $n\geq 2$. Then $\gamma(AG(M))=n$. \end{thm}
\begin{proof} Let $J_i$ be the unique maximal submodule in $M_i$ with nilpotency $n_i$. Let $\Omega= \{D_1, D_2, \ldots ,D_n\}$, where $D_i = (0) \oplus \ldots \oplus (0) \oplus J_i^{n_i -1}\oplus (0)\oplus \ldots \oplus (0)$ for $1\leq i\leq n$. Note that any non-zero submodule in $M$ is adjacent to $D_i$ for some $i$. Therefore $N[\Omega] = V(AG(M))$, $\Omega$
is a dominating set of $AG(M)$ and so $\gamma(AG(M))\leq n$.
Suppose $S$ is a dominating set of $AG(M)$ with $|S| < n$. Then there exists $N\in Max(M)$ such that $NK\neq (0)$ for all $K\in S$, a contradiction. Hence $\gamma(AG(M))=n$. \end{proof}
In view of Theorem \ref{t2.6}, we have the following corollary.
\begin{cor}\label{c2.7} Let $M=\oplus _{i=1}^n M_i$, where $M_i$ is a f.g Artinian local module for all $1\leq i\leq n$ and $n\geq 2$. Then
\begin{itemize} \item [(a)] $ir(AG(M))=n$.
\item [(b)] $\gamma_c(AG(M))=n$.
\item [(c)] $\gamma_t(AG(M))=n$.
\item [(d)] $\gamma_{cl}(AG(M))=n$.
\item [(e)] $\gamma_{pr}(AG(M))=n$, if $n$ is even and $\gamma_{pr}(AG(M))=n+1$, if $n$ is odd.
\end{itemize} \end{cor}
\begin{proof}
Consider the $\gamma$-set of $AG(M)$ identified in the proof of Theorem \ref{t2.6}. By Proposition \ref{p1.9}, $\Omega$
is a maximal irredundant set with minimum cardinality and so $ir(AG(M))=n$. Clearly $<\Omega>$ is a complete subgraph of $AG(M)$. Hence $\gamma_c(AG(M))=\gamma_t(AG(M))=\gamma_{cl}(AG(M))=n$. If $n$ is even, then $<\Omega>$ has a perfect matching and so $\Omega$
is a paired dominating set of $AG(M)$. Thus $\gamma_{pr}(AG(M)) = n$. If $n$ is odd, then
$<\Omega \cup \{K\}>$ has a perfect matching for some $K\in V(AG(M))\setminus \Omega$,
and so $\Omega \cup \{K\}$ is a paired dominating set of $AG(M)$. Thus $\gamma_{pr}(AG(M))=n$ if $n$ is even and $\gamma_{pr}(AG(M))=n+1$ if $n$ is odd.
\end{proof}
Let $M=\oplus _{i=1}^n M_i$, where $M_i$ is a f.g Artinian local module for all $1\leq i\leq n$ and $n\geq 2$. Then by Theorem \ref{t2.4}, the radius of $AG(M)$ is $2$. Further, by Theorem \ref{t2.6}, the domination number of $AG(M)$ is equal to $n$, where $n$ is the number of distinct maximal submodules of $M$. However, this need not be true if the radius of $AG(M)$ is $1$. For, consider the module $M = M_1 \oplus M_2$, where $M_1$ and $M_2$ are simple modules. Then $AG(M)$ is a star graph and so has radius $1$, whereas $M$ has two distinct maximal submodules. The following corollary gives a more precise relationship between the domination number of $AG(M)$ and the number of maximal submodules of $M$, when $M$ is finite.
\begin{cor}\label{c2.8} Let $M$ be a finite module and $\gamma((AG(M))) = n$. Then either $M = M_1 \oplus M_2$, where $M_1$, $M_2$ are simple modules, or $M$ has $n$ maximal submodules. \end{cor}
\begin{proof} When $\gamma((AG(M))) = 1$, proof follows from \cite[Corollary 2.12]{ah16}. When $\gamma((AG(M))) = n$, then $M$ cannot be $M = M_1 \oplus M_2$, where $M_1$, $M_2$ are simple modules. Hence $M=\oplus _{i=1}^m M_i$, where $M_i$ is a f.g Artinian local module for all $1\leq i\leq m$ and $m\geq 2$. By Theorem \ref{t2.6}, $\gamma((AG(M))) = m$. Hence by assumption $m = n$. i.e., $M=\oplus _{i=1}^n M_i$, where $M_i$ is a f.g Artinian local module for all $1\leq i\leq n$ and $n\geq 2$. One can see now that $M$ has $n$ maximal submodules. \end{proof}
\section{The relationship between $\gamma_t((AG(M)))$ and $\gamma((AG(M)))$}
The main goal in this section is to study the relation between $\gamma_t((AG(M)))$ and $\gamma((AG(M))) $.
\begin{thm}\label{t3.1} Let $M$ be a module. Then
$\gamma_t((AG(M)))= \gamma((AG(M)))$ or $\gamma_t((AG(M)))= \gamma((AG(M)))+1$.
\end{thm}
\begin{proof} Let $\gamma_t((AG(M)))\neq \gamma((AG(M)))$ and $D$ be a $\gamma$-set of $AG(M)$. If $\gamma((AG(M)))=1$, then it is clear that $\gamma_t((AG(M)))=2$. So let $\gamma((AG(M)))> 1$ and put $k
= Max \{n|$there exist $L_1, \ldots , L_n \in D$ ; $\sqcap_{i=1} ^n L_i \neq 0 \}$. Since $\gamma_t((AG(M)))\neq \gamma((AG(M)))$, we have $k \geq 2$. Let $L_1, \ldots , L_k \in D$ be such that $\sqcap_{i=1} ^k L_i \neq 0$. Then $S = \{\sqcap_{i=1} ^k L_i, ann L_1, \ldots , ann L_k \}\cup D\setminus \{L_1, \ldots , L_k \}$ is a $\gamma_t$-set. Hence $\gamma_t((AG(M)))= \gamma((AG(M)))+1$.
\end{proof}
In the following result we find the total domination number of $AG(M)$.
\begin{thm}\label{t3.2} Let $S$ be the set of all maximal elements of the set $V(AG(M))$. If $|S| > 1$, then $\gamma_t((AG(M))) = |S|$. \end{thm}
\begin{proof} Let $S$ be the set of all maximal elements of the set $V(AG(M))$,
$K\in S$ and $|S| > 1$. First we show that $K = ann(ann K)$ and there exists $m\in M$ such that $K = ann(m)$. Let $K\in S$. Then $ann K\neq 0$ and so there exists $0\neq m\in ann K$. Hence $K\subseteq ann(ann K)\subseteq ann(m)$. Thus by the maximality of $K$, we have $K = ann(ann K) = ann(m)$. By Zorn's Lemma it is clear that if $V(AG(M))\neq \emptyset$, then $S\neq \emptyset$. For any
$K\in S$ choose $m_K \in M$ such that $K = ann(m_K)$. We assert that $D = \{Rm_K | K\in S\}$ is a total dominating set of $AG(M)$. Since for every $L\in V(AG(M))$ there exists $K\in S$ such that $L\subseteq K = ann(m_K)$, $L$ and $Rm_K$ are adjacent. Also for each pair $K, K'\in S$, we have $(Rm_K)(Rm_{K'}) = 0$. Namely, if there exists $m\in (Rm_K)(Rm_{K'})\setminus \{0\}$, then $K = K' =
ann(m)$. Thus $\gamma_t((AG(M)))\leq |S|$. To complete the proof, we show that each element of an arbitrary $\gamma_t$-set of
$AG(M)$ is adjacent to exactly one element of $S$. Assume to the contrary, that a vertex $L'$ of a $\gamma_t$-set of $AG(M)$ is adjacent to $K$ and $K'$, for $K, K' \in S$. Thus $K = K' = ann L'$, which is impossible. Therefore $\gamma_t((AG(M))) = |S|$. \end{proof}
\begin{thm}\label{t3.3} Let $R$ be a reduced ring, let $M$ be a faithful module,
and $|Min(R)| < \infty$. If $\gamma((AG(M)))> 1$, then $\gamma_t((AG(M)))= \gamma((AG(M)))= |Min(R)|$. \end{thm}
\begin{proof} Since $R$ is reduced, $M$ is a faithful module,
and $\gamma((AG(M)))> 1$, we have $|Min(R)| > 1$. Suppose that $Min(R) = \{p_1, \ldots , p_n \}$. If $n = 2$, the result follows from Corollary \ref{c1.8}. Therefore, suppose that $n \geq 3$. Define $\widehat{p_iM} = p_1 \ldots p_{i-1}p_{i+1} \ldots p_n M$, for every $i = 1, \ldots , n$. Clearly, $\widehat{p_iM}\neq 0$, for every $i = 1, \ldots , n$. Since $R$ is reduced, we deduce that $\widehat{p_iM} p_iM=0$. Therefore, every $p_i M$ is a vertex of $AG(M)$. If $K$ is a vertex of $AG(M)$, then by \cite[Corollary 3.5]{ati69}, $(K:M)\subseteq Z(R) = \cup_{i=1} ^n p_i$. It follows from the Prime Avoidance Theorem that $(K:M)\subseteq p_i$, for some $i$, $1\leq i \leq n$. Thus $p_iM$ is a maximal element of $V(AG(M))$, for every $i = 1, \ldots , n$. From Theorem
\ref{t3.2}, $\gamma_t((AG(M)))= |Min(R)|$. Now, we show that $ \gamma((AG(M)))= n$. Assume to the contrary, that $B = \{J_1, \ldots , J_{n-1}\}$ is a dominating set for $AG(M)$. Since $n \geq 3$, the submodules $p_iM$ and $p_jM$, for $i \neq j$, are not adjacent (from $p_ip_j = 0 \subseteq p_k$ it would follow that $p_i\subseteq p_k$, or $p_j\subseteq p_k$ which is not true). Because of that, we may assume that for some $k < n - 1$, $J_i = p_iM$ for $i = 1,\ldots, k$, but none of the other submodules from $B$ are equal to some $p_sM$ (if $B = \{p_1M, \ldots , p_{n-1}M\}$, then $p_nM$ would be adjacent to some $p_iM$, for $i\neq n$). So, every submodule in $\{p_{k+1}M, \ldots , p_nM\}$ is adjacent to a submodule in $\{J_{k+1}, \ldots , J_{n-1}\}$. It follows that for some $s\neq t$, there is an $l$ such that $(p_sM)J_l= 0 = (p_tM)J_l$. Since $p_s\nsubseteq p_t$, it follows that $J_l\subseteq p_tM$, so $J_l ^2 = 0$, which is impossible, since the ring $R$ is reduced. So $\gamma_t((AG(M)))=
\gamma((AG(M)))= |Min(R)|$. \end{proof}
Theorem \ref{t3.3} leads to the following corollary.
\begin{cor}\label{c3.4} Let $R$ be a reduced ring, let $M$ be a faithful module,
and let $|Min(R)| < \infty$. Then the following are equivalent.
\begin{itemize} \item [(a)] $\gamma(AG(M))=2$.
\item [(b)] $AG(M)$ is a bipartite graph with two nonempty parts.
\item [(c)] $AG(M)$ is a complete bipartite graph with two nonempty parts.
\item [(d)] $R$ has exactly two minimal primes.
\end{itemize} \end{cor}
\begin{proof} Follows from Theorem \ref{t3.3} and Corollary \ref{c1.8}. \end{proof}
In the following theorem the domination number of bipartite annihilating-submodule graphs is given.
\begin{thm}\label{t3.5} Let $M$ be a faithful module. If $AG(M)$ is a bipartite graph, then $\gamma((AG(M)))\leq 2$. \end{thm}
\begin{proof} Let $M$ be a faithful module. If $AG(M)$ is a bipartite graph, then from Theorem \ref{t1.7}, either $R$ is a reduced ring with exactly two minimal prime ideals, or $AG(M)$ is a star graph with more than one vertex. If $R$ is a reduced ring with exactly two minimal prime ideals, then the result follows by Corollary \ref{c3.4}. If $AG(M)$ is a star graph with more than one vertex, then we are done. \end{proof}
The next theorem is on the total domination number of the annihilating-submodule graphs of Artinian modules.
\begin{thm}\label{t3.6} Let $M=\oplus _{i=1}^n M_i$, where $M_i$ is a f.g Artinian local module for all $1\leq i\leq n$, $n\geq 2$, and $M \neq M_1 \oplus M_2$, where $M_1$, $M_2$ are simple modules. Then
$\gamma_t((AG(M)))= \gamma((AG(M)))= |Min(R)|$. \end{thm}
\begin{proof}
By Proposition \ref{p1.5}, every nonzero proper submodule of $M$ is a vertex in $AG(M)$. So, the set of maximal elements of $V(AG(M))$ and $Max(M)$ are equal. Let $M=\oplus _{i=1}^n M_i$, where $(M_i, J_i)$ is a f.g Artinian local module for all $1\leq i\leq n$ and $n\geq 2$. Let $Max(M) = \{N_i = M_1 \oplus \ldots
\oplus M_{i-1} \oplus J_i\oplus M_{i+1}\oplus \ldots \oplus M_n | 1 \leq i \leq n \}$. By Theorem \ref{t3.2}, $\gamma_t((AG(M)))=
|Max(M)|$. In the sequel, we prove that $\gamma((AG(M))) = n$. Assume to the contrary, the set $\{K_1, \ldots , K_{n-1}\}$ is a dominating set for $AG(M)$. Since $M \neq M_1 \oplus M_2$, where $M_1$, $M_2$ are simple modules, we find that $K_i N_s=K_i N_t=0$, for some $i, t, s$, where $1 \leq i \leq n-1$ and $1 \leq t, s \leq n$. This means that $K_i = 0$, a contradiction. \end{proof}
The following theorem provides an upper bound for the domination number of the annihilating-submodule graph of a Noetherian module.
\begin{thm}\label{t3.7} If $R$ is a Noetherian ring and $M$ is a f.g module, then
$\gamma((AG(M)))\leq |Ass(M)|< \infty$. \end{thm}
\begin{proof}
Since $R$ is a Noetherian ring and $M$ is a f.g module, by \cite{s}, $|Ass(M)|< \infty$.
Let $Ass(M) = \{p_1, \ldots , p_n\}$, where
$p_i = ann(m_i)$ for some $m_i \in M$ for every $i = 1, \ldots , n$. Set $A = \{Rm_i | 1 \leq i \leq n \}$. We show that $A$ is a dominating set of $AG(M)$. Clearly, every $Rm_i$ is a vertex of $AG(M)$, for $i = 1, \ldots , n$ $( (p_iM)(m_iR)=0)$. If $K$ is a vertex of $AG(M)$, then \cite[Corollary 9.36]{s} implies that $(K:M)\subseteq Z(M) = \cup_{i=1} ^n p_i$. It follows from the Prime Avoidance Theorem that $(K:M) \subseteq p_i$, for some $i$, $1 \leq i \leq n$. Thus $K(Rm_i) = 0$, as desired. \end{proof}
The remaining result of this paper provides the domination number of the annihilating-submodule graph of a finite direct product of modules.
\begin{thm}\label{c3.8} For a module $M$, which is a product of two $($nonzero$)$ modules, one of the following holds:
\begin{itemize} \item [(a)] If $M \cong F \times D$, where $F$ is a simple module and $D$ is a prime module, then $\gamma(AG(M))=1$. \item [(b)] If $M \cong D_1 \times D_2$, where $D_1$ and $D_2$ are prime modules which are not simple, then $\gamma(AG(M))=2$.
\item [(c)] If $M \cong M_1 \times D$, where $M_1$ is a module which is not prime and $D$ is a prime module, then $\gamma(AG(M)) = \gamma(AG(M_1)) + 1$.
\item [(d)] If $M \cong M_1 \times M_2$, where $M_1$ and $M_2$ are two modules which are not prime, then $\gamma(AG(M)) = \gamma(AG(M_1)) + \gamma(AG(M_2))$.
\end{itemize} \end{thm}
\begin{proof}
Parts $(a)$ and $(b)$ are trivial.
$(c)$ With no loss of generality, one can assume that $\gamma(AG(M_1)) < \infty$. Suppose that $\gamma(AG(M_1)) =n$ and $\{K_1, \ldots , K_n \}$ is a minimal dominating set of $AG(M_1)$. It is not hard to see that $\{K_1 \times 0, \ldots , K_n \times 0, 0\times D\}$ is the smallest dominating set of $AG(M)$.
$(d)$ We may assume that $\gamma(AG(M_1)) =m$ and $\gamma(AG(M_2)) =n$, for some positive integers $m$ and $n$. Let $\{K_1, \ldots , K_m\}$ and $\{L_1, \ldots , L_n\}$ be two minimal dominating sets in $AG(M_1)$ and $AG(M_2)$, respectively. It is easy to see that $\{K_1 \times 0, \ldots , K_m \times 0, 0 \times L_1, \ldots , 0 \times L_n\}$ is the smallest dominating set in $AG(M)$. \end{proof}
\end{document} |
\begin{document}
\title{Applications of Kronecker's limit formula for elliptic Eisenstein series} \author{Jay Jorgenson \and Anna-Maria von Pippich \and Lejla Smajlovi\'{c} \footnote{\noindent The first named author acknowledges grant support from the NSF and PSC-CUNY. We thank Professor Floyd Williams for making available to us the unpublished dissertation \cite{Vassileva96} which was written by his student I. N. Vassileva. The results in the manuscript were of great interest to us, and we hope the document will become available to the mathematical community.}} \maketitle
\begin{abstract}\noindent We develop two applications of the Kronecker's limit formula associated to elliptic Eisenstein series: A factorization theorem for holomorphic modular forms, and a proof of Weil's reciprocity law. Several examples of the general factorization results are computed, specifically for certain moonshine groups, congruence subgroups, and, more generally, non-compact subgroups with one cusp. In particular, we explicitly compute the Kronecker limit function associated to certain elliptic points for a few small level moonshine groups. \end{abstract}
\vskip .15in \section{Introduction and statement of results}
\subsection{Non-holomorphic Eisenstein series.} Let $\Gamma$ be a Fuchsian group of the first kind which acts on the hyperbolic space $\mathbb H$ by fractional linear transformations, and let $M = \Gamma \backslash \mathbb H$ be the finite volume quotient. One can view $M$ as a finite volume hyperbolic Riemann surface, possibly with cusps and elliptic fixed points. In a slight abuse of notation, we will use $M$ to denote both the Riemann surface as well as a (Ford) fundamental domain of $\Gamma$ acting on $\mathbb H$.
The abelian subgroups of $\Gamma$ are classified as three distinct types: Parabolic, hyperbolic and elliptic. Accordingly, there are three types of scalar-valued non-holomorphic Eisenstein series, whose definitions we now recall.
Parabolic subgroups are characterized by having a unique fixed point $P$ on the extended upper-half plane $\widehat{\mathbb H}$. The fixed point $P$ is known as a cusp of $M$, and the associated parabolic subgroup is denoted by $\Gamma_{P}$. The parabolic Eisenstein series ${\cal E}^{\mathrm{par}}_{P}(z,s)$ associated to $P$ is defined for $z\in M$ and $s \in \mathbb{C}$ with $\textrm{Re}(s) > 1$, by the series \begin{equation*} {\cal E}^{\mathrm{par}}_{P}(z,s) = \sum\limits_{\eta \in \Gamma_{P}\backslash \Gamma}\textrm{Im}(\sigma_{P}^{-1}\eta z)^{s}, \end{equation*} where $\sigma_{P}$ is the scaling matrix for the cusp $P$, i.e. the element of $\mathrm{PSL}_2(\mathbb{R})$ such that, when extending the action of $\sigma_{P}$ to $\widehat{\mathbb H}$, we have that $\sigma_{P}\infty = P$.
Hyperbolic subgroups have two fixed points on the extended upper-half plane $\widehat{\mathbb H}$. Let us denote a hyperbolic subgroup by $\Gamma_{\gamma}$ for $\gamma \in \Gamma$, and let $\mathcal{L}_{\gamma}$ signify the geodesic path in $\mathbb H$ connecting the two fixed points of hyperbolic element $\gamma$. Following Kudla and Millson from \cite{KM79}, one defines a scalar-valued hyperbolic Eisenstein series for $z\in M$ and $s \in \mathbb{C}$ with $\textrm{Re}(s) > 1$ by the series \begin{equation}\label{hyp_eisen} {\cal E}^{\mathrm{hyp}}_{\gamma}(z,s) =\sum\limits_{\eta \in \Gamma_{\gamma}\backslash \Gamma} \cosh(d_{\mathrm{hyp}}(\eta z, \mathcal{L}_{\gamma}))^{-s}, \end{equation} where $d_{\mathrm{hyp}}(\eta z, \mathcal{L}_{\gamma})$ is the hyperbolic distance from the point $\eta z$ to $\mathcal{L}_{\gamma}$.
Elliptic subgroups have finite order and have a unique fixed point within $\mathbb H$. In fact, for any point $w \in M$, there is an elliptic subgroup $\Gamma_{w}$ which fixes $w$, where in all but a finite number of cases $\Gamma_{w}$ is the identity element. Elliptic Eisenstein series were defined in an unpublished manuscript from 2004 by Jorgenson and Kramer and were studied in depth in the 2010 dissertation \cite{vP10} by von Pippich. Specifically, for $z\in M$, $z\not=w$, and $s \in \mathbb{C}$ with $\textrm{Re}(s) > 1$, the elliptic Eisenstein series is defined by \begin{equation}\label{ell_eisen} {\cal E}^{\textrm{ell}}_{w}(z,s) =\sum\limits_{\eta \in \Gamma_{w}\backslash \Gamma} \sinh(d_{\mathrm{hyp}}(\eta z, w))^{-s} \end{equation} where $d_{\mathrm{hyp}}(\eta z, w)$ denotes the hyperbolic distance from $\eta z$ to $w$.
\subsection{Known properties and relations}
There are some fundamental differences between the three types of Eisenstein series defined above. Hyperbolic Eisenstein series are in $L^{2}(M)$, whereas parabolic and elliptic series are not. Elliptic Eisenstein series are defined as a sum over a finite index subset of $\Gamma$, and indeed the series (\ref{ell_eisen}) can be extended to all $\Gamma$ which would introduce a multiplicative factor equal to the order of $\Gamma_{w}$. However, hyperbolic and parabolic series are necessarily formed by sums over infinite index subsets of $\Gamma$. Parabolic Eisenstein series are eigenfunctions of the hyperbolic Laplacian; however, elliptic and hyperbolic Eisenstein series satisfy a differential-difference equation which involves the value of the series at $s+2$.
Despite their differences, there are several intriguing ways in which the Eisenstein series interact. Since the hyperbolic Eisenstein series are in $L^{2}(M)$, the expression (\ref{hyp_eisen}) admits a spectral expansion which involves the parabolic Eisenstein series; see \cite{JKvP10} and \cite{KM79}. If one considers a degenerating sequence of Riemann surfaces obtained by pinching a geodesic, then the associated hyperbolic Eisenstein series converges to parabolic Eisenstein series on the limit surface; see \cite{Fa07} and \cite{GJM08}. If one studies a family of elliptically degenerating surfaces obtained by re-uniformizing at a point with increasing order, then the corresponding elliptic Eisenstein series converge to parabolic Eisenstein series on the limit surface; see \cite{GvP09}.
Finally, there are some basic similarities amongst the series. Each series admits a meromorphic continuation to all $s\in\mathbb{C}$. The poles of the meromorphic continuations have been identified and are closely related, in all cases involving data associated to the continuous and non-cuspidal discrete spectrum of the hyperbolic Laplacian and, for hyperbolic and elliptic series, involving data associated to the cuspidal spectrum as well. Finally, and most importantly for this article, the hyperbolic and elliptic Eisenstein series are holomorphic at $s=0$, and for all known instances, the parabolic Eisenstein series also is holomorphic at $s=0$. In all these cases, the value of each Eisenstein series at $s=0$ is a constant as a function of $z$. The coefficient of $s$ in the Taylor series expansion about $s=0$ shall be called the Kronecker limit function.
\subsection{Kronecker limit functions}
The classical Kronecker's limit formula is the following statement, which we quote from \cite{Siegel80}. If we consider the case when $\Gamma = \textrm{PSL}_2(\mathbb{Z})$, then \begin{align*} \mathcal{E}^{\mathrm{par}}_{\infty}(z,s)= \frac{3}{\pi(s-1)}
-\frac{1}{2\pi}\log\bigl(|\Delta(z)|\Im(z)^{6}\bigr)+C+O(s-1) \,\,\,\,\,\textrm{as $s \rightarrow 1$,} \end{align*} where $C=6(1-12\,\zeta'(-1)-\log(4\pi))/\pi$, and with Dedekind's delta function $\Delta(z)$ given by $$ \Delta(z) = \left[q_{z}^{1/24}\prod\limits_{n=1}^{\infty}\left(1 - q_{z}^{n}\right)\right]^{24} = \eta(z)^{24} \,\,\,\,\,\textrm{with $q_{z} = e^{2\pi i z}$.} $$ By employing the well-known functional equation for $\mathcal{E}^{\mathrm{par}}_{\infty}(z,s)$, Kronecker's limit formula can be reformulated as \begin{equation*} \mathcal{E}^{\mathrm{par}}_{\infty}(z,s)=
1+ \log\bigl(|\Delta(z)|^{1/6}\Im(z)\bigr)s+O(s^2) \,\,\,\,\,\textrm{as $s \rightarrow 0$.} \end{equation*}
For general Fuchsian groups of the first kind, Goldstein \cite{Go73} studied the analogue of Kronecker's limit formula associated to parabolic Eisenstein series. We will use the results from \cite{Go73} throughout this article.
The hyperbolic Eisenstein series in \cite{KM79} are form-valued, and the series are defined by an infinite sum which converges for $\textrm{Re}(s) > 0$. The main result in \cite{KM79} is that the form-valued hyperbolic Eisenstein series is holomorphic at $s=0$, and the value is equal to the harmonic form that is the Poincar\'e dual to the one-cycle in the homology group $H^{1}(M,\mathbb R)$ corresponding to the hyperbolic geodesic $\gamma$ fixed by $\Gamma_{\gamma}$.
The analogue of Kronecker's limit formula for elliptic Eisenstein series was first proved in \cite{vP10} and \cite{vP15}. Specifically, it is shown that at $s=0$, the series (\ref{ell_eisen}) admits the Laurent expansion \begin{align}\label{Kronecker_elliptic} \mathrm{ord}(w)\,\mathcal{E}^{\mathrm{ell}}_{w}(z,s)&- \frac{2^{s}\sqrt{\pi}\,\Gamma(s-\frac{1}{2})}{\Gamma(s)} \sum\limits_{k=1}^{p_{\Gamma}}\mathcal{E}^{\mathrm{par}}_{p_{k}}(w,1-s) \,\mathcal{E}^{\mathrm{par}}_{p_{k}}(z,s)= \notag \\ &-c
-\log\bigl(|H_{\Gamma}(z,w)|^{\mathrm{ord}(w)}(\Im(z))^{c}\bigr)\cdot s+O(s^2)\,\,\,\,\,\textrm{as $s \rightarrow 0$,} \end{align} where $p_k$, $k=1,\ldots, p_{\Gamma}$, are cusps of $M$, $c=2\pi/\vol_{\hyp}(M)$, and $H_{\Gamma}(z,w)$ is a holomorphic automorphic function with respect to $\Gamma$ and which vanishes only when $z=\eta w$ for some $\eta\in\Gamma$. Two explicit computations are given in \cite{vP10} and \cite{vP15} for $\Gamma = \PSL_2(\mathbb{Z})$ when considering the elliptic Eisenstein series $E^{\mathrm{ell}}_{w}(z,s)$ associated to the points $w=i$ and $w=\rho = (1+i\sqrt{3})/2$. In these cases, the elliptic Kronecker limit function $H_{\Gamma}(z,w)$ at points $w=i$ and $w=\rho$ is such that
\begin{equation}\label{elliptic_at_i} \abs{H_{\Gamma}(z,i)}= \exp(-B_i)\abs{E_6(z)}, \text{ where } B_i=-3(24\zeta'(-1)-\log(2\pi)+4\log\Gamma(1/4)) \end{equation} and \begin{equation}\label{elliptic_at_rho} \abs{H_{\Gamma}(z,\rho)}=\exp(-B_{\rho})\abs{E_4(z)}, \text{ where } B_{\rho}=-2(24\zeta'(-1)-2\log(2\pi/\sqrt{3})+6\log\Gamma(1/3)). \end{equation}
The Kronecker limit formula for elliptic Eisenstein series became the asymptotic formulas \begin{equation}\label{elliptic Eis_at_i} \mathcal{E}^{\mathrm{ell}}_{i}(z,s)= -\log(\vert E_{6}(z)\vert \vert\Delta(z)\vert^{-1/2})\cdot s + O(s^{2}) \,\,\,\,\,\text{\rm as $s\rightarrow 0$,} \end{equation} and \begin{equation}\label{elliptic Eis_at_rho} \mathcal{E}^{\mathrm{ell}}_{\rho}(z,s)= -\log(\vert E_{4}(z)\vert \vert\Delta(z)\vert^{-1/3})\cdot s + O(s^{2}) \,\,\,\,\,\text{\rm as $s\rightarrow 0$,} \end{equation} where $E_{4}$ and $E_{6}$ are classical holomorphic Eisenstein series on $\PSL_2(\mathbb{Z})$ of weight four and six, respectively.
Before continuing, let us state what we believe to be an interesting side comment. The Kronecker limit function associated to elliptic Eisenstein series is naturally defined as the coefficient of $s$ in the Laurent expansion of the elliptic Eisenstein series near $s=0$. As we show below, one can realize the Kronecker limit function for parabolic Eisenstein series for groups with one cusp as the coefficient of $s$ in the Laurent expansion of the parabolic Eisenstein series at $s=0$. One has yet to study the Laurent expansion near $s=0$, in particular the coefficient of $s$, for the scalar-valued hyperbolic Eisenstein series; for that matter, we have not fully understood the analogous question for the vector of parabolic Eisenstein series for general groups. We expect that one can develop a systematic theory by focusing on coefficients of $s$ in all cases.
\subsection{Important comment and assumption}\label{subsection_assumption}
At this time, we do not have a complete understanding of the behavior of the parabolic Eisenstein series ${\cal E}^{\mathrm{par}}_{P}(z,s)$ near $s=0$. If the group has one cusp, the functional equation of the Eisenstein series shows that ${\cal E}^{\mathrm{par}}_{P}(z,0)=1$. In notation to be set below, its scattering determinant is zero at $s=0$. However, this is not true when there is more than one cusp. For example, on page 536 of \cite{He83}, the author computes the scattering matrix for $\Gamma_{0}(N)$ for square-free $N$, from which it is clear that $\Phi(s)$ is holomorphic but not zero at $s=0$. Specifically, it remains to determine if the parabolic Eisenstein series is holomorphic at $s=0$, which is a question we were unable to answer in complete generality.
\vskip .10in \it Throughout this article, we assume that ${\cal E}^{\mathrm{par}}_{P}(z,s)$ is holomorphic at $s=0$. \rm
\vskip .10in \noindent The assumption is true in all the instances where specific examples are developed.
\subsection{Main results} The purpose of the present paper is to further study the Kronecker limit function associated to elliptic Eisenstein series. We develop two applications. To begin, we examine the relation (\ref{Kronecker_elliptic}) and study the contribution near $s=0$ of the term involving the parabolic Eisenstein series. As with the parabolic Eisenstein series, the resulting expression is particularly simple in the case when the group $\Gamma$ has one cusp. However, in all cases, we obtain an asymptotic formula for $\mathcal{E}^{\mathrm{ell}}_{w}(z,s)$ near $s=0$ which allows us to prove asymptotic bounds for the elliptic Kronecker limit function in any parabolic cusp associated to $\Gamma$. As a consequence, we are able to prove the main result of this article, namely a factorization theorem which expresses holomorphic forms on $M$ of arbitrary weight as products of the elliptic Kronecker limit functions.
The product formulas are developed in detail in the case of so-called moonshine groups, which are discrete groups obtained by adding the Fricke involutions to the congruence subgroups $\Gamma_{0}(N)$. As an application of the factorization theorem, we establish further examples of relations similar to (\ref{elliptic_at_i}), (\ref{elliptic_at_rho}), \eqref{elliptic Eis_at_i} and \eqref{elliptic Eis_at_rho}. For example, the moonshine group $\Gamma = \overline{\Gamma_0(2)^+} = \Gamma_0(2)^+/\{\pm \textrm{Id}\}$ has $e_{2}=1/2 + i/2$ as a fixed point of order four. In section 6.2, we prove that the elliptic Kronecker limit function $H_2(z,e_2)$ associated to the point $e_2$ is such that $$ \abs{H_2(z,e_2)} = \exp(-B_{2,e_2})\abs{E_{4}^{(2)}(z)}^{1/2}, $$ where $E_{4}^{(2)}(z)$ is the weight four holomorphic Eisenstein series associated to $\Gamma_{0}(2)^{+}$ and $$ B_{2,e_2}=- \left( 24\zeta'(-1) + \log(8\pi^2)
- \frac{11}{6} \log 2 +\frac{1}{12} \log\left( \left| \Delta(1/2 + i/2) \cdot \Delta(1+i) \right| \right)\right). $$ In this case, the Kronecker limit formula for the elliptic Eisenstein series $\mathcal{E}^{\mathrm{ell}}_{e_{2}}(z,s)$ reads as \begin{equation*} \mathcal{E}^{\mathrm{ell}}_{e_{2}}(z,s)= -\log\left(\vert E_{4}^{(2)}(z)\vert^{1/2} \vert \Delta(z)\Delta(2z) \vert ^{-1/12}\right)\cdot s + O(s^{2}) \,\,\,\,\,\text{\rm as $s\rightarrow 0$,} \end{equation*} or, equivalently, as \begin{equation} \label{ell Eis at e_2} \mathcal{E}^{\mathrm{ell}}_{e_{2}}(z,s)= -\log\left(\frac{1}{\sqrt{5}}\vert E_{4}(z) + 4E_4(2z)\vert^{1/2} \vert \Delta(z)\Delta(2z) \vert ^{-1/12}\right)\cdot s + O(s^{2}) \,\,\,\,\,\text{\rm as $s\rightarrow 0$.} \end{equation}
The factorization theorem allows one to formulate numerous examples of this type, of which we develop a few for certain moonshine and congruence subgroups.
Second, we use the elliptic Kronecker limit formula to give a new proof of Weil's reciprocity formula. A number of authors have obtained generalizations of Weil's reciprocity law; see, for example, the elegant presentation in \cite{Kh08} which discusses various reciprocity laws over $\mathbb C$ as well as Deligne's article \cite{De91} where the author re-interprets Tate's local symbol and obtains a number of generalizations and applications. It would be interesting to study the possible connection between the functional analytic method of the present and companion article \cite{JvPS14} with the algebraic ideas in \cite{De91} and results surveyed in \cite{Kh08}.
An outline of this article is as follows. In section 2 we establish notation and cite various results from the literature. In section 3, we reformulate Kronecker's limit formula for parabolic Eisenstein series as an asymptotic statement near $s=0$. From the results in section 3, we then prove, in section 4, the asymptotic behavior in the cusps of the elliptic Kronecker limit function. Specific examples are given for moonshine groups $\overline{\Gamma_{0}(N)^{+}}$ with square-free level $N$ and congruence subgroups $\overline{\Gamma_{0}(p)}$ with prime level $p$. In section 5 we prove the factorization theorem which states, in somewhat vague terms, that any holomorphic form on $M$ can be written as a product of elliptic Kronecker limit functions, up to a multiplicative constant. In addition, from the asymptotic formula from section 4, one is able to obtain specific information associated to the multiplicative constant in the aforementioned description of the factorization theorem. In section 6 we give examples of the factorization theorem for holomorphic Eisenstein series for the modular group, for moonshine groups of levels $2$ and $5$, for general moonshine groups, and for congruence subgroups $\overline{\Gamma_{0}(p)}$ of prime level. Finally, in section 7, we present our proof of Weil's reciprocity using the elliptic Kronecker limit functions and state a few concluding remarks.
\section{Background material}
\subsection{Basic notation} \label{notation} Let $\Gamma\subseteq\mathrm{PSL}_{2}(\mathbb{R})$ denote a Fuchsian group of the first kind acting by fractional linear transformations on the hyperbolic upper half-plane $\mathbb{H}:=\{z=x+iy\in\mathbb{C}\,
|\,x,y\in\mathbb{R};\,y>0\}$. We let $M:=\Gamma\backslash\mathbb{H}$, which is a finite volume hyperbolic Riemann surface, and denote by $p:\mathbb{H}\longrightarrow M$ the natural projection. We assume that $M$ has $e_{\Gamma}$ elliptic fixed points and $p_{\Gamma}$ cusps. We identify $M$ locally with its universal cover $\mathbb{H}$.
We let $\mu_{\mathrm{hyp}}$ denote the hyperbolic metric on $M$, which is compatible with the complex structure of $M$, and has constant negative curvature equal to minus one. The hyperbolic line element $ds^{2}_{\hyp}$, resp.~the hyperbolic Laplacian $\Delta_{\hyp}$, are given as \begin{align*} ds^{2}_{\hyp}:=\frac{dx^{2}+dy^{2}}{y^{2}},\quad\textrm{resp.} \quad\Delta_{\hyp}:=-y^{2}\left(\frac{\partial^{2}}{\partial x^{2}}+\frac{\partial^{2}}{\partial y^{2}}\right). \end{align*} By $d_{\mathrm{hyp}}(z,w)$ we denote the hyperbolic distance from $z\in\mathbb{H}$ to $w\in\mathbb{H}$.
\subsection{Moonshine groups}
Let $N=p_1\cdots p_r$ be a square-free, non-negative integer. The subset of $\SL_2(\mathbb{R})$, defined by \begin{align*}
\Gamma_0(N)^+:=\left\{ e^{-1/2}\begin{pmatrix}a&b\\c&d\end{pmatrix}\in
\SL_2(\mathbb{R}): \,\,\, ad-bc=e, \,\,\, a,b,c,d,e\in\mathbb{Z}, \,\,\, e\mid N,\ e\mid a,
\ e\mid d,\ N\mid c \right\} \end{align*} is an arithmetic subgroup of $\SL_2(\mathbb{R})$. We use the terminology ``moonshine group'' of level $N$ to describe $\Gamma_0(N)^+$ because of the important role these groups play in ``monstrous moonshine''. Previously, the groups $\Gamma_0(N)^+$ were studied in \cite{Hel66} where it was proved that if a subgroup $G\subseteq\SL_2(\mathbb{R})$ is commensurable with $\SL_2(\mathbb{Z})$, then there exists a square-free, non-negative integer $N$ such that $G$ is a subgroup of $\Gamma_0(N)^+$. We also refer to page 27 of \cite{Sh71} where the groups $\Gamma_0(N)^+$ are cited as examples of groups which are commensurable with $\SL_2(\mathbb{Z})$ but not necessarily conjugate to a subgroup of $\SL_2(\mathbb{Z})$.
Let $\{\pm \textrm{Id}\}$ denote the set of two elements consisting of the identity matrix $\textrm{Id}$ and its product with $-1$. In general, if $\Gamma$ is a subgroup of $\SL_2(\mathbb{R})$, we let $\overline{\Gamma} := \Gamma /\{\pm \textrm{Id}\}$ denote its projection into $\textrm{PSL}_2(\mathbb{R})$.
\subsection{Holomorphic Eisenstein series}
Following \cite{Se73}, we define a weakly modular form $f$ of weight $2k$ for $k \geq 1$ associated to $\Gamma$ to be a function $f$ which is meromorphic on $\mathbb H$ and satisfies the transformation property $$ f\left(\frac{az+b}{cz+d}\right) = (cz+d)^{-2k}f(z) \,\,\,\,\,\textrm{for all $\begin{pmatrix}a&b\\c&d\end{pmatrix} \in \Gamma$.} $$
Let $\Gamma$ be a Fuchsian group of the first kind that has at least one class of parabolic elements. By rescaling, if necessary, we may always assume that the parabolic subgroup of $\Gamma$ has a fixed point at $\infty$, with identity scaling matrix. In this situation, any weakly modular form $f$ will satisfy the relation $f(z+1)=f(z)$, so we can write $$ f(z) = \sum\limits_{n=-\infty}^{\infty}a_{n}q_z^{n} \,\,\,\,\,\textrm{where $q_z =e(z)= e^{2\pi iz}$.} $$ If $a_{n} = 0$ for all $n < 0$, then $f$ is said to be holomorphic at the cusp at $\infty$.
A holomorphic modular form with respect to $\Gamma$ is a weakly modular form which is holomorphic on $\mathbb H$ and in all of the cusps of $\Gamma$. Examples of holomorphic modular forms are the holomorphic Eisenstein series, which are defined as follows. Let $\Gamma_{\infty}$ denote the subgroup of $\Gamma$ which stabilizes the cusp at $\infty$. For $k \geq 2$, let
\begin{equation} \label{E_2k, Gamma} E_{2k,\Gamma}(z) := \sum_{\left(
\begin{smallmatrix}
* & * \\
c & d \\
\end{smallmatrix}
\right) \in \Gamma_{\infty} \setminus \Gamma } (cz + d)^{-2k}. \end{equation} It is elementary to show that the series on the right-hand side of \eqref{E_2k, Gamma} is absolutely convergent for all integers $k \geq 2$ and defines a holomorphic modular form of weight $2k$ with respect to $\Gamma$. Furthermore, the series $E_{2k, \Gamma}$ is bounded and non-vanishing at cusps and such that \begin{equation*} E_{2k, \Gamma} (z) = 1 + O (\exp(-2\pi \Im (z))), \text{ as } \Im (z) \to \infty. \end{equation*}
When $\Gamma=\mathrm{PSL}_2(\mathbb{Z})$, we denote $E_{2k, \mathrm{PSL}_2(\mathbb{Z})}$ by $E_{2k}$. The holomorphic forms $E_{2k}(z)$ have the $q-$expansions $$ E_{2k}(z) = 1- \frac{4k}{B_{2k}} \sum_{n=1}^{\infty} \sigma_{2k-1}(n) q_z^n, $$ where $B_{2k}$ denotes the $2k-$th Bernoulli number and $\sigma_l$ is the generalized divisor function, which is defined by $\sigma_l(m) = \sum\limits_{d \mid m} d^l$. By convention, we set $\sigma(m)=\sigma_1(m)$.
On the full modular surface, there is no weight $2$ holomorphic modular form. Consider, however, the function $E_2(z)$ defined by its $q$-expansion $$ E_2(z) = 1-24 \sum_{n=1}^{\infty} \sigma(n) q_z^n $$ which transforms according to the formula $$ E_2(\gamma z) = (cz+d)^2 E_2(z) + \frac{6}{\pi i}c (cz+d), $$ for $\left(
\begin{smallmatrix}
* & * \\
c & d \\
\end{smallmatrix}
\right) \in \textrm{PSL}_2(\mathbb{Z})$. It is elementary to show that for a prime $p$, the function \begin{equation} \label{E_2,p} E_{2,p}(z) := E_2(z)-pE_2(pz) \end{equation} is a weight 2 holomorphic form associated to the congruence subgroup $\overline{\Gamma_0(p)}$ of $\textrm{PSL}_2(\mathbb{Z})$. The $q-$expansion of $E_{2,p}$ is \begin{equation}\label{q-exp E_2,p} E_{2,p}(z)= (1-p) - 24\sum_{n=1}^{\infty}\sigma(n) (q_z^n - pq_z^{pn}). \end{equation}
When $\Gamma = \overline{\Gamma_{0}^+(N)}$, we denote the forms $E_{2k, \overline{\Gamma_{0}^+(N)}}$ by $E_{2k}^{(N)}$. In \cite{JST13} it is proved that $E_{2k}^{(N)}(z)$ may be expressed as a linear combination of forms $E_{2k}(z)$, with dilated arguments, namely \begin{align}\label{E_k, p proposit fla} E_{2k}^{(N)}(z)= \frac1{\sigma_k(N)} \sum_{v \mid N}v^k E_{2k}(vz). \end{align}
\subsection{Scattering matrices}
Assume that the surface $M$ has $p_{\Gamma}$ cusps, we let $P_{j}$ with $j=1,\ldots, p_{\Gamma}$ denote the individual cusps. Denote by $\phi_{jk}$, with $j,k=1, \ldots, p_{\Gamma}$, the entries of the hyperbolic scattering matrix $\Phi_M(s)$ which are computed from the constant terms in the Fourier expansion of the parabolic Eisenstein series $\mathcal{E}^{\mathrm{par}}_{P_j}(z,s)$ associated to cusp $P_{j}$ in an expansion in the cusp $P_{k}$. For all $j,k = 1,\ldots, p_{\Gamma}$, each function $\phi_{jk}$ has a simple pole at $s=1$ with residue equal to $1/\vol_{\hyp}(M)$. Furthermore, $\phi_{jk}$ has a Laurent series expansion at $s=1$ which we write as \begin{equation}\label{phi exp at s=1} \phi_{jk}(s)= \frac{1}{\vol_{\hyp}(M) (s-1)} + \beta_{jk} + \gamma_{jk}(s-1) + O((s-1)^2), \text{ as } s\to 1. \end{equation} After a slight renormalization and trivial generalization, Theorem 3-1 from \cite{Go73} asserts that the parabolic Eisenstein series $\mathcal{E}^{\mathrm{par}}_{P_j}(z,s)$ admits the Laurent expansion \begin{equation} \label{KronLimitPArGen} \mathcal{E}^{\mathrm{par}}_{P_j}(z,s)= \frac{1}{\vol_{\hyp}(M) (s-1)} + \beta_{jj} - \frac{1}{\vol_{\hyp}(M)} \log \abs{\eta_{P_j}^4(z) \Im(z)} + f_j(z) (s-1) + O((s-1)^2), \end{equation} as $s \to 1$, for $j=1,\ldots, p_{\Gamma}$.
As the notation suggests, the function $\eta_{P_j}(z)$ is a holomorphic form for $\Gamma$ and is a generalization of the classical eta function for the full modular group. To be precise, $\eta_{P_j}(z)$ is an automorphic form corresponding to the multiplier system $v(\sigma)= \exp(i\pi S_{\Gamma,j}(\sigma))$, where $S_{\Gamma,j}(\sigma)$ is a generalization of a Dedekind sum attached to a cusp $P_j$ for each $j=1,\ldots,p_{\Gamma}$ of $M$, meaning a real number uniquely determined for every $\sigma = \left(
\begin{smallmatrix}
\ast & \ast \\
c & d \\
\end{smallmatrix}
\right)\in \Gamma $ which satisfies the relation $$ \log\eta_{P_j}(\sigma(z))=\log\eta_{P_j}(z) + \frac{1}{2} \log (cz+d) + \pi i S_{\Gamma,j}(\sigma). $$
The coefficient $f_j(z)$ multiplying $(s-1)$ in formula \eqref{KronLimitPArGen} is a certain function, whose behavior is not of interest to us in this paper. This term would probably yield a definition of generalized Dedekind sums; see, for example, \cite{Ta86}.
Finally, let us set the notation \begin{equation}\label{phi exp at s=0} \phi_{jk}(s)= a_{jk} + b_{jk}s + c_{jk}s^2 + O(s^3) \,\,\,\,\,\textrm{as $s \rightarrow 0$} \end{equation} for the coefficients in the Laurent expansion of $\phi_{jk}$ near $s=0$. Note that the form of this expansion is justified by the assumption made in subsection \ref{subsection_assumption}.
\section{Kronecker's limit formula for parabolic Eisenstein series}\label{sec: Kron limir parabolic}
\vskip .10in In this section we will re-write the Kronecker limit formula for the parabolic Eisenstein series as an expression involving the Laurent expansion near $s=0$. We begin with the following lemma which states certain relations amongst coefficients appearing in \eqref{phi exp at s=1} and \eqref{phi exp at s=0}. \it To repeat, we assume that each parabolic Eisenstein series ${\cal E}^{\mathrm{par}}_{P_j}(z,s)$ is holomorphic at $s=0$. \rm
\begin{lemma} With the notation in \eqref{phi exp at s=1} and \eqref{phi exp at s=0}, we have, for each $k, l = 1,\ldots,p_{\Gamma}$, the following relations: \begin{equation} \label{sum a_jk} \sum_{j=1}^{p_{\Gamma}} a_{jk} = 0, \end{equation} \begin{equation} \label{sum with b_jk} \sum_{j=1}^{p_{\Gamma}}\left( - \frac{b_{jk}}{\vol_{\hyp}(M)} + a_{jk}\beta_{jl}\right) = \delta_{kl}, \end{equation} \begin{equation}\label{sum with c_jk} \sum_{j=1}^{p_{\Gamma}}\left(- \frac{c_{jk}}{\vol_{\hyp}(M)} +b_{jk}\beta_{jl}\right) = \sum_{j=1}^{p_{\Gamma}} a_{jk}\gamma_{jl}, \end{equation} where $\delta_{kl}$ is the Kronecker symbol. \end{lemma} \begin{proof} The relations \eqref{sum a_jk} through \eqref{sum with c_jk} are immediate consequences of the functional equation for the scattering determinant, namely the formula $\Phi_M(s)\Phi_M(1-s) = \textrm{Id}$. In particular, the formulae are obtained by computing the coefficients of $s^{-1}$, $1$, and $s$ in the Laurent expansion near $s=0$. \end{proof}
\vskip .10in \begin{proposition}\label{prop: Kronecker limit as s to 0} With the notation in \eqref{phi exp at s=1} and \eqref{phi exp at s=0}, the parabolic Eisenstein series $\mathcal{E}^{\mathrm{par}}_{P_j}(z,s)$ has a Taylor series expansion at $s=0$ which can be written as \begin{multline} \label{parabolic Kron limit as s to 0} \mathcal{E}^{\mathrm{par}}_{P_j}(z,s) = \sum_{k=1}^{p_{\Gamma}} \left[ - \frac{b_{jk}}{\vol_{\hyp}(M)} + a_{jk}\left( \beta_{kk} - \frac{1}{\vol_{\hyp}(M)}
\log \left|\eta_{P_k}^4(z)\Im z \right|\right) \right] +\\
+ s \cdot \sum_{k=1}^{p_{\Gamma}} \left[ - \frac{c_{jk}}{\vol_{\hyp}(M)} + b_{jk}\left( \beta_{kk} - \frac{1}{\vol_{\hyp}(M)} \log \left|\eta_{P_k}^4(z)\Im z \right|\right) +a_{jk}f_k(z) \right] + O(s^2). \end{multline} \end{proposition} \begin{proof} The result is a straightforward computation based on the functional equation $$ (\mathcal{E}^{\mathrm{par}}_{1}(z,s)\,\, ....\,\, \mathcal{E}^{\mathrm{par}}_{p}(z,s))^{T} = \Phi_M(s) (\mathcal{E}^{\mathrm{par}}_{1}(z,1-s)\,\, ....\,\, \mathcal{E}^{\mathrm{par}}_{p}(z,1-s))^{T} $$ together with the expansions \eqref{KronLimitPArGen} and \eqref{phi exp at s=0}. \end{proof}
In the case when $p_{\Gamma}=1$, the relations \eqref{sum a_jk} through \eqref{sum with c_jk} and Proposition \ref{prop: Kronecker limit as s to 0} become particularly simple and yield an elegant statement. As is standard, the cusp is normalized to be at $\infty$, and the associated Eisenstein series, eta function, scattering coefficients, etc.~are written with the subscript $\infty$.
\begin{corollary} \label{Kron limit as s to 0, one cusp} The Kronecker limit formula for parabolic Eisenstein series $\mathcal{E}^{\mathrm{par}}_{\infty}$ on a finite volume Riemann surface with one cusp at $\infty$ can be written as \begin{equation} \label{KronLimas s to 0} \mathcal{E}^{\mathrm{par}}_{\infty}(z,s)= 1+ \log (\abs{\eta_{\infty}^4(z)} \Im(z))s + O(s^2), \text{ as } s \to 0. \end{equation} \end{corollary}
\vskip .06in \begin{example}\rm \label{ex: moonshine groups} In the case when $\Gamma=\overline{\Gamma_0^+(N)}$, for a square-free, positive integer $N$, the quotient space $X_N:=\overline{\Gamma_0^+(N)} \backslash \mathbb{H}$ has one cusp. The automorphic form $\eta_{\infty}$ is explicitly computed in \cite{JST13}, where it is proved that \begin{align*} \eta_{\infty}(z) = \sqrt[2^r]{\prod_{v \mid N} \eta(vz)}. \end{align*} \end{example}
\vskip .06in \begin{example}\rm \label{ex: congruence subgr} In the case when $\Gamma$ is the group $\overline{\Gamma_0(N)}$, for a positive integer $N$, the corresponding quotient space $M_N:=\overline{\Gamma_{0}(N)}\backslash \mathbb{H}$ has many cusps. Using a standard fundamental domain, $M_{N}$ has cusps at $\infty$, at $0$ and, in the case when $N$ is not prime, at the rational points $1/v$, where $v \mid N$ is such that $(v, \frac{N}{v}) =1$, where $(\cdot,\, \cdot)$ stands for the greatest common divisor. As in the above example, let us use the subscript $\infty$ to denote data associated to the cusp at $\infty$. In particular, the automorphic form $\eta_{\infty}$ in the example under consideration was explicitly computed in \cite{Vassileva96}, where it is proved that $$ \eta_{\infty}(z)= \sqrt[\varphi(N)]{\prod_{v \mid N} \eta(vz) ^{v \mu(N/v)}}, $$ where $\varphi(N)$ is the Euler $\varphi-$function and $\mu$ denotes the M\"obius function. In the case of other cusps $P_k$, the automorphic form $\eta_{P_k}$ was also computed in \cite{Vassileva96}, but the expressions are more involved so we omit repeating the formulas here.
Also, for the cusp at $\infty$ and the principal congruence subgroup $\Gamma(N)$, the eta-function is computed in Theorem 1, page 405 of \cite{Ta86}. \end{example}
\vskip .10in \section{Kronecker's limit formula for elliptic Eisenstein series}
The function $H_{\Gamma}(z,w)$, defined in (\ref{Kronecker_elliptic}) is called the \textit{elliptic Kronecker limit function at $w$}. It satisfies the transformation rule \begin{align}\label{H e j transf. rule} H_{\Gamma}(\gamma z, w) = \varepsilon_{w}(\gamma) (cz + d)^{2C_{w}} H_{\Gamma}(z,w), \text{ for any } \gamma = \begin{pmatrix} * & * \\ c & d \end{pmatrix} \in \Gamma, \end{align} where $\varepsilon_{w}(\gamma) \in \mathbb{C}$ is a constant of absolute value $1$, independent of $z$ and \begin{equation}\label{C_w} C_w= 2\pi /(\mathrm{ord}(w) \vol_{\hyp}(M)), \end{equation} see \cite{vP10}, Proposition 6.1.2., or \cite{vP15}. Since $H_{\Gamma}(z,w)$, as a function of $z$, is finite and non-zero at the cusp $P_{1} = \infty$, we may re-scale the function and assume, without loss of generality, that $H_{\Gamma}(z,w)$ is real at the cusp $\infty$.
\vskip .10in We begin by studying the asymptotic behavior of $H_{\Gamma}(\sigma_{P_l}z,w)$ as $y=\Im(z) \to\infty$, for $l=1, \ldots, p_{\Gamma}$.
\begin{proposition} \label{prop: behavior of H(z,w)} For any cusp $P_l$, with $l=1,\ldots,p_{\Gamma}$, let \begin{equation} \label{B_e_j}
B_{w,P_l}=-C_{w} \left( 2-\log 2 + \log \left| \eta_{P_l}^4(w) \Im(w) \right| - \beta_{ll}\vol_{\hyp}(M)\right). \end{equation} Then there exists a constant $a_{w, P_l} \in \mathbb{C}$ of modulus one such that \begin{equation*}
H_{\Gamma}(\sigma_{P_l}z, w) = a_{w,P_l} \exp(-B_{w,P_l}) |c_l z + d_l|^{2C_w} + O(\exp(-2\pi \Im(z))), \text{ as } \Im(z)\rightarrow \infty\,, \end{equation*} where $\sigma_{P_l} =\left( \begin{smallmatrix} * & * \\ c_l & d_l \end{smallmatrix}\right)$ is the scaling matrix for a cusp $P_l$ and $C_w$ is defined by \eqref{C_w}. \end{proposition}
\begin{proof} The proof closely follows the proof of \cite{vP10}, Proposition 6.2.2. when combined with the Taylor series expansion \eqref{parabolic Kron limit as s to 0} of the parabolic Eisenstein series at $s=0$. For the convenience of the reader, we now present the complete argument.
Combining the equation \eqref{Kronecker_elliptic} with the proof of Proposition 6.1.1 from \cite{vP10}, taking $e_j=w$, we can write $$ -\log(\vert{H_{\Gamma}(z, w)} \vert \Im(z)^{C_{w}}) = \mathcal{K}_{w} (z), $$ where the function $\mathcal{K}_{w} (z)$ can be expressed as the sum of two terms: A term $\mathcal{F}_{w} (z)$ arising from the spectral expansion and a term $\mathcal{G}_{w} (z)$ which can be expressed as the sum over the group. Furthermore, for $z\in\mathbb{H}$ such that $\Im z > \Im (\gamma w)$ for all $\gamma \in \Gamma$, the parabolic Fourier expansion of $\mathcal{K}_{w}(\sigma_{P_l} z)$ is given by $$ \mathcal{K}_{w}(\sigma_{P_l} z) = \sum_{m\in\mathbb{Z}} b_{m,w,P_l}(y)e(mx) $$ with coefficients $b_{m,w,P_l}(y)$ given by $$ b_{m,w,P_l}(y)=\int\limits_{0}^{1}\mathcal{K}_{w}(\sigma_{P_l} z)e(-mx)\,dx. $$ Since the hyperbolic Laplacian is $\mathrm{SL}_2$-invariant, we easily generalize computations from p. 128 of \cite{vP10} to deduce that $$ \mathcal{K}_{w} (\sigma_{P_l}z) = -C_{w}\log y + A_{w,P_l}y + B_{w,P_l} + \sum_{m=1}^\infty (A_{m; w,P_l} e(mz) + \overline{A}_{m; w,P_l} e(-m\overline{z})), $$ for some constants $A_{w, P_l}, B_{w,P_l} \in \mathbb{R}$ and complex constants $A_{m;w,P_l}$.
Let us introduce the notation \begin{align}\label{f e j} f_{w,P_l}(z):=\exp\left(-2\sum_{m=1}^\infty A_{m; w,P_l} e(mz) \right), \end{align} from which one immediately can write \begin{align}\label{K e j main formula} \mathcal{K}_{w} (\sigma_{P_l} z) = A_{w,P_l}y + B_{w,P_l} - \log(\abs{f_{w,P_l}(z)} \Im(z)^{C_{w}}). \end{align} When employing \eqref{K e j main formula}, we can re-write \eqref{Kronecker_elliptic} as \begin{align}\label{Fla for comparison} \mathcal{E}^{\mathrm{ell}}_{w}(\sigma_{P_l} z,s) &- h_{w}(s)\sum_{j=1}^{p_{\Gamma}} \mathcal{E}^{\mathrm{par}}_{P_j}(w,1-s) \mathcal{E}^{\mathrm{par}}_{P_j}(\sigma_{P_l} z,s) = \\ &-C_{w} + (A_{w,P_l}y + B_{w,P_l} - \log(\vert{f_{w, P_l}(z)}\vert \Im(z)^{C_{w}})) \cdot s + O(s^2) \nonumber, \end{align} as $s \rightarrow 0$, where \begin{equation} \label{h_w} h_{w}(s):= \frac{2^s \sqrt{\pi} \,\Gamma(s-1/2)}{\mathrm{ord}(w)\Gamma(s)}. \end{equation} As in \cite{vP10}, pp. 129--130, we use the functional equation of the parabolic Eisenstein series and consider the constant term in the Fourier series expansion, as a function of $z$, of the function \begin{equation} \label{const term} \mathcal{E}^{\mathrm{ell}}_{w}(\sigma_{P_l} z,s) - h_{w}(s) \sum_{j=1}^{p_{\Gamma}} \mathcal{E}^{\mathrm{par}}_{P_j}(w,1-s) \mathcal{E}^{\mathrm{par}}_{P_j}(\sigma_{P_l} z,s)=\mathcal{E}^{\mathrm{ell}}_{w}(\sigma_{P_l} z,s) - h_{w}(s) \sum_{j=1}^{p_{\Gamma}} \mathcal{E}^{\mathrm{par}}_{P_j}(w,s) \mathcal{E}^{\mathrm{par}}_{P_j}(\sigma_{P_l} z,1-s). \end{equation} The constant term is given by $$ -h_{w}(s)\sum_{j=1}^{p_{\Gamma}} \phi_{jl}(1-s)y^s\mathcal{E}^{\mathrm{par}}_{P_j}(w,s)= -\frac{\sqrt{\pi}}{\mathrm{ord}(w)} \frac{\Gamma(s-1/2)}{\Gamma(s)}(2y)^s \sum_{j=1}^{p_{\Gamma}} \phi_{jl}(1-s)\mathcal{E}^{\mathrm{par}}_{P_j}(w,s). 
$$ Recall the expansions \begin{equation} \label{gamma s-1/2} \Gamma(s-1/2)= -2\sqrt{\pi}\left(1+(2-\gamma-2\log 2) s + O(s^2)\right), \end{equation} \begin{equation} \label{gamma s} \frac{1}{\Gamma(s)}= s \left(1+ \gamma s + O(s^2)\right), \text{ and }(2y)^s= 1+ s\log(2y) + O(s^2), \end{equation} which hold when $s\to 0$, where, as usual, $\gamma$ denotes the Euler constant. When combining these expressions with \eqref{phi exp at s=1}, we can write the asymptotic expansions near $s=0$ of the constant term in the Fourier series expansion of \eqref{const term} as \begin{equation}\label{F.series const intermediate} \frac{2\pi}{\mathrm{ord}(w)} \left(1+ (2+\log y -\log 2)s + O(s^2)\right) \cdot \sum_{j=1}^{p_{\Gamma}} \left( -\frac{1}{\vol_{\hyp}(M)} + \beta_{jl}s + O(s^2) \right)\mathcal{E}^{\mathrm{par}}_{P_j}(w,s). \end{equation} Let us now compute the first two terms in the Taylor series expansion at $s=0$ of the expression \begin{equation}\label{F.series const intermediate 2} \sum_{j=1}^{p_{\Gamma}} \left( -\frac{1}{\vol_{\hyp}(M)}+ \beta_{jl}s + O(s^2) \right)\mathcal{E}^{\mathrm{par}}_{P_j}(w,s). \end{equation} By applying \eqref{parabolic Kron limit as s to 0}, we conclude that the constant term in the Taylor series expansion of (\ref{F.series const intermediate 2}) is $$
\sum_{j=1}^{p_{\Gamma}} \sum_{k=1}^{p_{\Gamma}} \frac{-1}{\vol_{\hyp}(M)} \left( -\frac{b_{jk}}{\vol_{\hyp}(M)} +a_{jk} \beta_{kk} -\frac{a_{jk}}{\vol_{\hyp}(M)} \log \left| \eta_{P_k}^4(w) \Im(w) \right|\right). $$ Applying relations \eqref{sum a_jk} and \eqref{sum with b_jk} we then obtain, by manipulation of the sums, that the constant term in (\ref{F.series const intermediate 2}) is equal to $\displaystyle -1/\vol_{\hyp}(M)$. The factor multiplying $s$ is equal to \begin{multline*}
\sum_{j=1}^{p_{\Gamma}} \sum_{k=1}^{p_{\Gamma}} \frac{-1}{\vol_{\hyp}(M)} \left( -\frac{c_{jk}}{\vol_{\hyp}(M)} +b_{jk} \beta_{kk} -\frac{b_{jk}}{\vol_{\hyp}(M)} \log \left| \eta_{P_k}^4(w) \Im(w) \right| + a_{jk} f_k(w)\right) \\
+\sum_{j=1}^{p_{\Gamma}} \sum_{k=1}^{p_{\Gamma}} \beta_{jl} \left( -\frac{b_{jk}}{\vol_{\hyp}(M)} +a_{jk} \beta_{kk} -\frac{a_{jk}}{\vol_{\hyp}(M)} \log \left| \eta_{P_k}^4(w) \Im(w) \right|\right). \end{multline*} Applying relations \eqref{sum a_jk} to \eqref{sum with c_jk} we get that $$ \sum_{j=1}^{p_{\Gamma}} \sum_{k=1}^{p_{\Gamma}}a_{jk}f_k(w)=0 $$ and \begin{align*}
\sum_{k=1}^{p_{\Gamma}}&\left( \frac{-1}{\vol_{\hyp}(M)} \log \left| \eta_{P_k}^4(w) \Im(w) \right| + \beta_{kk} \right) \sum_{j=1}^{p_{\Gamma}} \left( -\frac{b_{jk}}{\vol_{\hyp}(M)} + a_{jk} \beta_{jl}\right)
\\&= \frac{-1}{\vol_{\hyp}(M)} \log \left| \eta_{P_l}^4(w) \Im(w) \right| + \beta_{ll} \end{align*} as well as $$ \sum_{j=1}^{p_{\Gamma}} \sum_{k=1}^{p_{\Gamma}} \left( \frac{-c_{jk}}{\vol_{\hyp}(M)} + b_{jk} \beta_{jl}\right)= \sum_{j=1}^{p_{\Gamma}} \sum_{k=1}^{p_{\Gamma}} a_{jk}\gamma_{jl} =0. $$ Therefore, the factor multiplying $s$ in the Taylor series expansion of \eqref{F.series const intermediate 2} is equal to $$
\frac{-1}{\vol_{\hyp}(M)} \log \left| \eta_{P_l}^4(w) \Im(w) \right| + \beta_{ll}. $$ Inserting this into \eqref{F.series const intermediate} we see that the constant term in the Fourier series expansion of \eqref{const term} is given by $$
-C_w-C_w\left( 2-\log 2 + \log y + \log \left| \eta_{P_l}^4(w) \Im(w) \right| - \beta_{ll}\vol_{\hyp}(M) \right)s + O(s^2), $$ as $s \to 0$. Comparing this result with the right-hand side of formula \eqref{Fla for comparison}, having in mind the definition of the number $C_{w}$, we immediately deduce that $A_{w, P_l}=0$, $$
B_{w,P_l}=-C_{w} \left( 2-\log 2 + \log \left| \eta_{P_l}^4(w) \Im(w) \right| - \beta_{ll}\vol_{\hyp}(M)\right) $$ and $$
\mathcal{K}_{w} (\sigma_{P_l} z) = -\log(\vert{H_{\Gamma}(\sigma_{P_l} z, w)}\vert |c_l z +d_l |^{-2C_w} \Im(z)^{C_{w}}) = B_{w,P_l} - \log(\vert{f_{w,P_l}(z)}\vert \Im(z)^{C_{w}} ), $$ where the function $f_{w,P_l}$ is defined by \eqref{f e j}. From \eqref{f e j} we deduce that $$ \abs{f_{w,P_l}(z)} = \exp \left( -2 \Re \left( \sum_{m=1}^{\infty} A_{m;w, P_l} e(mz) \right)\right) = 1 + O(\exp(-2\pi \Im (z))), $$ as $\Im (z) \to \infty$. Therefore, $$
\abs{H_{\Gamma}(\sigma_{P_l} z, w)} = \exp(-B_{w,P_l})|c_l z +d_l |^{2C_w} + O(\exp(-2\pi \Im (z))), \text{ as } \Im(z) \to \infty\,, $$ and the proof is complete. \end{proof}
\vskip .06in \begin{example}{\bf Moonshine groups.}\rm \label{ex: constants B_N} Let $N=p_1 \cdot \ldots \cdot p_r$ be a squarefree number. Let $X_N= \overline{\Gamma_0(N)^+} \setminus \mathbb{H}$. The surface $X_N$ possesses one cusp at $\infty$ with identity scaling matrix. The scattering determinant $\varphi_N$ associated to the only cusp of $X_N$ at $\infty$ is computed in \cite{JST12}, where it was shown that $$ \varphi_N(s)=\sqrt{\pi}\frac{\Gamma(s-1/2)}{\Gamma(s)}\frac{\zeta(2s-1)}{\zeta(2s)}\cdot D_N(s), $$ where $\zeta(s)$ is the Riemann zeta function and $$ D_N(s)=\prod_{j=1}^r\frac{p_j^{1-s}+1}{p_j^s+1}= \frac{1}{N^{s-1}}\prod_{j=1}^r\frac{p_j^{s-1}+1}{p_j^s+1}. $$ Let $b_{N}$ denote the constant term in the Laurent series expansion of $\varphi_N(s)$ at $s=1$. One can compute $b_N$ by expanding functions $D_N(s)$, $\Gamma(s)$ and $\zeta(s)$ in their Laurent expansions at $s=1$, which would yield the expressions $$ D_N(s)= \frac{2^r}{\sigma(N)}\left(1 + (s-1) \left(\sum_{j=1}^{r} \frac{(1-p_j)\log p_j}{2(p_j+1)} - \log N\right) + O((s-1)^2) \right), $$ and \begin{align} \sqrt{\pi}\frac{\Gamma(s-1/2)}{\Gamma(s)}
&= \pi \left(1-2\log 2 (s-1) + O((s-1)^2)\right), \label{gamma exp} \end{align} as well as \begin{align} \frac{\zeta(2s-1)}{\zeta(2s)}
&= \frac{6}{\pi^2} \left( \frac{1}{2(s-1)} - \log (2\pi) + 1-12\zeta'(-1) + O(s-1)\right). \label{zeta exp} \end{align} Multiplying expansions \eqref{gamma exp} and \eqref{zeta exp} and using that $$ \frac{1}{\vol_{\hyp} (X_N)} = \frac{3 \cdot 2^r}{\pi \,\sigma(N)}, $$ which was proved in \cite{JST13}, we arrive at the expression \begin{equation} \label{b_N} b_N= - \frac{1}{\vol_{\hyp} (X_N) }\left( \sum_{j=1}^{r} \frac{(p_j -1)\log p_j}{2(p_j+1)}- \log N + 2\log (4\pi) + 24\zeta'(-1) - 2\right). \end{equation} With this formula, Proposition \ref{prop: behavior of H(z,w)}, and Example \ref{ex: moonshine groups} we conclude that the elliptic Kronecker limit function $H_N(z,w) := H_{\overline{\Gamma_0^+(N)}} (z,w)$ associated to the point $w \in X_N$ may be written as $$ H_N(z,w)= a_{N,w}\exp(-B_{N,w}) + O(\exp(-2\pi\Im (z))), \text{ as } \Im (z) \to \infty, $$ where $a_{N,w}$ is a complex constant of modulus one and \begin{align*} B_{N,w} &= - \frac{2\pi}{\mathrm{ord}(w)\vol_{\hyp} (X_N)}\left( \sum_{j=1}^{r} \frac{(p_j -1)\log p_j}{2(p_j+1)}-\log N + C+ \log \left(\sqrt[2^r]{\prod_{v \mid N} \abs{\eta(v w)}^4} \cdot \Im (w)\right) \right)\notag \end{align*} with $C:=\log (8\pi^2) + 24\zeta'(-1)$. \end{example}
\vskip .06in \begin{example}{\bf Congruence subgroups of prime level.} \rm Let $M_p= \overline{\Gamma_0(p)}\setminus \mathbb{H}$, where $p$ is a prime. The surface $M_p$ has two cusps, at $\infty$ and $0$. The scaling matrix for the cusp at $\infty$ is the identity matrix. The scattering matrix in this setting is computed in \cite{He83} and is given by $$ \Phi_{M_p}(s)= \sqrt{\pi} \frac{\Gamma(s-1/2)}{\Gamma(s)} \frac{\zeta(2s-1)}{\zeta(2s)} \cdot \frac{1}{p^{2s}-1} \left(
\begin{array}{cc}
p-1 & p^s-p^{1-s} \\
p^s-p^{1-s} & p-1 \\
\end{array}
\right). $$ Using the expansions \eqref{gamma exp} and \eqref{zeta exp}, together with $\vol_{\hyp}(M_p)=\pi(p+1)/3$ and the expansion $$ \frac{p-1}{p^{2s}-1}= \frac{1}{p+1}-\frac{2p^2 \log p}{(p-1)(p+1)^2} (s-1) + O((s-1)^2) \text{ as } s \to 1\,, $$ we conclude that the coefficients $\beta_{11}$ and $\beta_{22}$ in the Laurent series expansion \eqref{phi exp at s=1} are given by $$ \beta_{11}=\beta_{22}= -\frac{2}{\vol_{\hyp} (M_p)}\left( \log (4\pi p) + 12\zeta'(-1) -1 + \frac{\log p}{p^2 -1} \right). $$ Therefore, from Proposition \ref{prop: behavior of H(z,w)}, when applied to the cusp at $\infty$, and Example \ref{ex: congruence subgr}, we conclude that the elliptic Kronecker limit function $\widetilde{H}_p(z,w) := H_{\overline{\Gamma_0(p)}} (z,w)$ associated to the point $w \in M_p$ can be written as $$ \widetilde{H}_p(z,w)= \widetilde{a}_{p,w}\exp(-\widetilde{B}_{p,w}) + O(\exp(-2\pi\Im (z))), \text{ as } \Im (z) \to \infty, $$ where $\widetilde{a}_{p,w}$ is a complex constant of modulus one and \begin{align*} \widetilde{B}_{p,w} &= - \frac{2\pi}{\mathrm{ord}(w)\vol_{\hyp} (M_p)}\left( \frac{2 p^2 \log p }{p^2-1} +C
+\log \left(\abs{\sqrt[p-1]{\frac{\eta(p w) ^p}{\eta(w)}} \cdot \Im (w)}\right) \right) \end{align*} with $C:=\log (8\pi^2) + 24\zeta'(-1)$. \end{example}
\vskip .10in
\section{A factorization theorem}
In (\ref{elliptic_at_i}) and (\ref{elliptic_at_rho}) one has an evaluation of the elliptic Kronecker limit function in the special case when $\Gamma = \mathrm{PSL}_2(\mathbb{Z})$ and $w=i$ or $w=\rho= \exp(2\pi i /3)$ are the elliptic fixed points of $\mathrm{PSL}_2(\mathbb{Z})$. The following theorem generalizes these results.
\begin{theorem} \label{thm: factorization} Let $M = \Gamma\setminus \mathbb{H}$ be a finite volume Riemann surface with at least one cusp, which we assume to be at $\infty$ with identity scaling matrix. Let $k$ be a fixed positive integer such that there exists a weight $2k$ holomorphic form $f_{2k}$ on $M$ which is non-vanishing in all cusps and with $q-$expansion at $\infty$ given by \begin{equation} \label{q exp. of f_2k} f_{2k}(z)= b_{f_{2k}} + \sum_{n=1}^{\infty}b_{f_{2k}}(n)q_z^n. \end{equation} Let $Z(f_{2k})$ denote the set of all zeros $f_{2k}$ counted according to their multiplicities and let us define the function $$ H_{f_{2k}}(z):= \prod_{w \in Z(f_{2k})} H_{\Gamma}(z,w), $$ where, as above, $H_{\Gamma}(z,w)$ is the elliptic Kronecker limit function. Then there exists a complex constant $c_{f_{2k}}$ such that \begin{equation} \label{factorization fla} f_{2k}(z) = c_{f_{2k}}H_{f_{2k}}(z) \end{equation} and $$ \abs{c_{f_{2k}}} =\abs{b_{f_{2k}}} \exp \left( \sum_{w\in Z(f_{2k})} B_{w, \infty} \right ), $$ where $B_{w,\infty}$ is defined in \eqref{B_e_j}. \end{theorem} \begin{proof} Assume that $f_{2k}$ possesses $m+l\geq 1$ zeroes on $M$, where $m$ zeros are at the elliptic points $e_j$ of $M$, $j=1,\ldots,m$, and $l$ zeroes are at the non-elliptic points $w_i \in M$; of course, all zeroes are counted with multiplicities. Then $H_{f_{2k}}(z)$ is a holomorphic function on $M$ which is vanishing if and only if $z \in Z(f_{2k})$ and which according to \eqref{H e j transf. rule} satisfies the transformation rule $$ H_{f_{2k}}(\gamma z) = \varepsilon_{f_{2k}}(\gamma)(cz+d)^{C_{f_{2k}}} H_{f_{2k}}(z), \text{ for any } \gamma = \begin{pmatrix} * & * \\ c & d \end{pmatrix} \in \Gamma, $$ where $\varepsilon_{f_{2k}}(\gamma)$ is a constant of modulus one and $$ C_{f_{2k}} = \frac{4\pi}{\vol_{\hyp} (M)} \left(\sum_{j=1}^{m} \frac{1}{n_{e_j}} + l \right). $$
The classical Riemann-Roch theorem relates the number of zeros of a holomorphic form to its weight and the genus of $M$ in the case $M$ is smooth and compact. A generalization of the relation follows from Proposition 7, page II-7, of \cite{SCM66} which, in the case under consideration, yields the formula \begin{align} \label{zeros f-la} k \cdot \frac{\vol_{\hyp}(M)}{2\pi}= \sum_{e \in \mathcal{E}_N} \frac{1}{n_e} v_{e}(f) + \sum_{z\in M \setminus \mathcal{E}_N} v_{z}(f), \end{align} where $\mathcal{E}_N$ denotes the set of elliptic points in $M$, $n_e$ is the order of the elliptic point $e\in \mathcal{E}_N$ and $v_z(f)$ denotes the order of the zero $z$ of $f$.
Since $Z(f_{2k})$ is the set of all vanishing points of $f_{2k}$, formula \eqref{zeros f-la} implies that $$ 2k \cdot \frac{\vol_{\hyp} (M)}{4\pi} = \sum_{j=1}^{m} \frac{1}{n_{e_j}} + l, $$ hence $C_{f_{2k}} = 2k$. In other words, $H_{f_{2k}}(z)$ is a holomorphic function on $M$, vanishing if and only if $z \in Z(f_{2k})$ and satisfying transformation rule $$ H_{f_{2k}}(\gamma z) = \varepsilon_{f_{2k}}(\gamma)(cz+d)^{2k} H_{f_{2k}}(z), \text{ for any } \gamma = \begin{pmatrix} * & * \\ c & d \end{pmatrix} \in \Gamma. $$ By Proposition \ref{prop: behavior of H(z,w)}, we have that for any $w \in Z(f_{2k})$ and any cusp $P_l$ of $M$, with $l=1,\ldots,p_{\Gamma}$, the function $$ F_{f_{2k}}(z):= \frac{H_{f_{2k}}(z)}{f_{2k}(z)} $$ is a non-vanishing holomorphic function on $M$, bounded and non-zero at the cusp at $\infty$ and has at most polynomial growth in any other cusp of $M$. Therefore, the function $\log \vert F_{f_{2k}}(z)\vert$ is harmonic on $M$ whose growth in any cusp is such that $\log \vert F_{f_{2k}}(z)\vert$ is $L^{2}$ on $M$. As a result, $\log \vert F_{f_{2k}}(z)\vert$ admits a spectral expansion; see \cite{He83} or \cite{Iwa02}. Since $\log \vert F_{f_{2k}}(z)\vert$ is harmonic, one can use integration by parts to show that $\log \vert F_{f_{2k}}(z)\vert$ is orthogonal to any eigenfunction of the Laplacian. Therefore, from the spectral expansion, one concludes that $\log \vert F_{f_{2k}}(z)\vert$ is constant, hence so is $F_{f_{2k}}(z)$. The evaluation of the constant is obtained by considering the limiting behavior as $z$ approaches $\infty$. With all this, the proof of \eqref{factorization fla} is complete. \end{proof}
\section{Examples of factorization}
\subsection{An arbitrary surface with one cusp}
In the case when a surface $M$ has one cusp, we get the following special case of Theorem \ref{thm: factorization}.
\begin{corollary} \label{cor:factorization, one cusp} Let $M = \Gamma \setminus \mathbb{H}$ be a finite volume Riemann surface with one cusp, which we assume to be at $\infty$ with identity scaling matrix. Then the weight $2k$ holomorphic Eisenstein series $E_{2k, \Gamma}$ defined in \eqref{E_2k, Gamma} can be represented as $$ E_{2k, \Gamma}(z) = a_{E_{2k, \Gamma}} B_{E_{2k, \Gamma}}\prod_{w \in Z(E_{2k, \Gamma})}H_{\Gamma}(z,w), $$ where $a_{E_{2k, \Gamma}}$ is a complex constant of modulus one and $$
B_{E_{2k, \Gamma}} = \prod_{w \in Z(E_{2k, \Gamma})} \exp \left(C_w \left(\log 2 -2 + \beta_M \vol_{\hyp}(M) \right)\right) \cdot \left|\eta_{\infty}^4(w) \Im (w) \right|^{-C_w} . $$ As before, $\eta_{\infty}$ is the parabolic Kronecker limit function defined in section \ref{sec: Kron limir parabolic}, formula \eqref{KronLimitPArGen}, and $\beta_M$ is the constant term in the Laurent series expansion of the scattering determinant on $M$. \end{corollary}
In this case, due to a very simple form of the Kronecker limit formula for parabolic Eisenstein series as $s\to 0$, the factorization theorem yields an interesting form of the Kronecker limit formula for elliptic Eisenstein series, which we state as the following proposition. \begin{proposition} \label{prop:Ell Kron limit one cusp} Let $M = \Gamma \setminus \mathbb{H}$ be a finite volume Riemann surface with one cusp, which we assume to be at $\infty$ with identity scaling matrix. Let $k$ be a fixed positive integer such that there exists a weight $2k$ holomorphic form $f_{2k}$ on $M$ with $q-$expansion at $\infty$ given by \eqref{q exp. of f_2k}. Then \begin{equation} \label{ell kroneck limit one cusp} \sum_{w\in Z(f_{2k})} \mathcal{E}^{\mathrm{ell}}_{w}(z,s)=
-s\log\left( |f_{2k}(z)| |\eta_{\infty} ^4(z)|^{-k}\right) + s\log|b_{f_{2k}}| + O(s^2) \end{equation} as $s\to 0$, where $Z(f_{2k})$ denotes the set of all zeros of $f_{2k}$ counted with multiplicities. \end{proposition}
\begin{proof} We start with formula \eqref{Kronecker_elliptic}, which we divide by $\mathrm{ord}(w)$, and take the sum over all $w \in Z(f_{2k})$ to get \begin{align} \label{Kronecker limit ell 2} \sum_{w\in Z(f_{2k})} \mathcal{E}^{\mathrm{ell}}_{w}(z,s)&- \mathcal{E}^{\mathrm{par}}_{\infty}(z,s) \sum_{w\in Z(f_{2k})} h_w(s) \mathcal{E}^{\mathrm{par}}_{\infty}(w,1-s) = \notag \\
&-\sum_{w\in Z(f_{2k})} C_w \left( 1 + s \log(\Im z)\right) - \log\left( \prod_{w\in Z(f_{2k})} |H_{\Gamma}(z,w)| \right)\cdot s +O(s^2) \end{align} as $s\to 0$, where $C_w$ and $h_w$ are defined by \eqref{C_w} and \eqref{h_w} respectively. One now expands the second term on the left hand side of \eqref{Kronecker limit ell 2} into a Taylor series at $s=0$ by applying formulas \eqref{gamma s-1/2}, \eqref{gamma s}, \eqref{KronLimas s to 0} and \eqref{KronLimitPArGen}. After multiplication, we get, as $s \to 0$, the expression \begin{multline} \label{parabolic sum}
\mathcal{E}^{\mathrm{par}}_{\infty}(z,s) \sum_{w\in Z(f_{2k})} h_w(s) \mathcal{E}^{\mathrm{par}}_{\infty}(w,1-s)
\\ = \sum_{w\in Z(f_{2k})} C_w \left( 1 + s \left[2-\log2 - \beta_M \vol_{\hyp}(M) + \log |\eta_{\infty}^4(w) \Im (w) | +
\log |\eta_{\infty}^4(z) \Im (z) | \right]\right)+O(s^2) \end{multline} as $s \to 0$. Theorem \ref{thm: factorization} yields that \begin{equation} \label{log of prod}
\log\left( \prod_{w\in Z(f_{2k})} |H_{\Gamma}(z,w)| \right) = \log |f_{2k}(z)| - \sum_{w\in Z(f_{2k})} B_{w,\infty} - \log |b_{f_{2k}}|, \end{equation} where $B_{w,\infty}$ is defined by \eqref{B_e_j} for the cusp $P_l=\infty$. Finally, from formula \eqref{zeros f-la}, we get that $$ \sum_{w\in Z(f_{2k})} C_w =k. $$ Therefore, by inserting \eqref{B_e_j}, \eqref{log of prod} and \eqref{parabolic sum} into \eqref{Kronecker limit ell 2}, we immediately deduce \eqref{ell kroneck limit one cusp}. The proof is complete. \end{proof}
\begin{remark}\rm In the case $\Gamma=\mathrm{PSL}_2(\mathbb{Z})$, the parabolic Kronecker limit function is given by $\eta_{\infty}(z)= \eta(z)=\Delta(z)^{1/24}$. Then, for $k=3$ and $f_{2k}=E_6$, we have $b_{E_6}=1$ and $Z(E_6)= \{i\}$, hence Proposition \ref{prop:Ell Kron limit one cusp} yields \eqref{elliptic Eis_at_i}. Analogously, for $k=2$ and $f_{2k}=E_4$, we have $b_{E_4}=1$ and $Z(E_4)= \{\rho\}$, and Proposition \ref{prop:Ell Kron limit one cusp} gives \eqref{elliptic Eis_at_rho}. Furthermore (see \cite{vP10}, p.~131), we have $B_{E_6,\Gamma}=\exp(B_i)$ and $B_{E_4,\Gamma}=\exp(B_{\rho})$, where $B_i$ and $B_{\rho}$ are given by \eqref{elliptic_at_i} and \eqref{elliptic_at_rho} respectively. \end{remark}
Let us now develop further examples of surfaces with one cusp and explicitly compute the constant $B_{E_{2k, \Gamma}}$ in these special cases.
\begin{comment} \subsection{The modular group} \mathrm{PSL}_2(\mathbb{Z}) When $\Gamma=\mathrm{PSL}_2(\mathbb{Z})= \overline{\Gamma_0(1)^+}$ the corresponding surface has one cusp at $\infty$ with identity scaling matrix and two elliptic points; an order two point at $w=i$ and an order three point at $w=\rho =\exp(2\pi i /3)$. Let us denote the elliptic Kronecker limit function by $H_1(z,w)$.
Classically, it is known that $w=i$ is the only vanishing point of the holomorphic form $E_6(z)$, and $w=\rho$ is the only vanishing point of $E_4(z)$. The volume of the surface is $\pi/3$, hence $C_i= 3$ and $C_{\rho}= 2$. Furthermore, from \eqref{b_N} with $N=1$, we see that the constant term in the Laurent series expansion of the scattering determinant at $s=1$ is $\beta_1 =-\frac{3}{\pi} (2 \log (4\pi) + 24\zeta'(-1) -2)$. Therefore, Corollary \ref{cor:factorization, one cusp} yields that $$ E_6(z)=a_6 B_{E_6}H_1(z,i)\,\,\, \text{and} \,\,\, E_4(z)=a_4 B_{E_4}H_1(z,\rho), $$ where $a_6$ and $a_4$ are constants of modulus one and $$
\log (B_{E_6})=-3[(\log(8\pi^2) + 24\zeta'(-1)) +4 \log |\eta(i)|], $$ $$
\log (B_{E_4})=-2[(\log(8\pi^2) + 24\zeta'(-1)) +4 \log |\eta(\rho)| +\log(\sqrt{3}/2)]. $$ The computations on p.131 of \cite{vP10} imply that $$
4 \log |\eta(i)| = 4\log \Gamma(1/4) - \log 2 - 3 \log (2\pi) $$ and $$
4 \log |\eta(\rho)| + \log(\sqrt{3}/2) = 6\log \Gamma(1/3) - \log 2 + \log 3- 4 \log (2\pi). $$ As a result, we have that $\log (B_{E_6})=B_i$ and $\log (B_{E_4})=B_{\rho}$, where $B_i$ and $B_{\rho}$ are given by \eqref{elliptic_at_i} and \eqref{elliptic_at_rho} respectively.
This shows that Proposition 6.2.2. from \cite{vP10} is corollary of the special case of Theorem \ref{thm: factorization}.
Furthermore, the parabolic Kronecker limit function for the full modular group is $\eta_{\infty}(z)= \eta(z)$, therefore, taking $f_{2k}=E_6$ and $Z(f_{2k})= \{i\}$ in proposition \ref{prop:Ell Kron limit one cusp} we immediately deduce \eqref{elliptic Eis_at_i}. Analogously, taking $f_{2k}=E_4$ and $Z(f_{2k})= \{\rho\}$ in proposition \ref{prop:Ell Kron limit one cusp} we deduce \eqref{elliptic Eis_at_rho}. \end{comment}
\subsection{Moonshine groups of square-free level}
\begin{example}\rm Consider the surface $X_2$. There exists one elliptic point of order two, $e_1=i/\sqrt{2}$, and one elliptic point of order four, $e_2=1/2 + i/2$. The surface $X_2$ has genus zero and one cusp, hence $\vol_{\hyp}(X_2)=\pi/2$. The transformation rule for $E_6^{(2)}$ implies that the form must vanish at the points $e_1$ and $e_2$. Furthermore, formula \eqref{zeros f-la} when applied to $X_{2}$ becomes \begin{align} \label{zeros f-la N=2} \frac{2k}{8}= v_{\infty}(f) + \frac{1}{4}v_{e_2}(f)+ \frac{1}{2} v_{e_1}(f) + \sum_{z\in X_2 \setminus \{e_1,e_2\}} v_{z}(f). \end{align} Taking $k=3$, we conclude that $e_1$ and $e_2$ are the only vanishing points of $E_6^{(2)}$ and the order of vanishing is one at each point. Therefore, in the notation of Theorem \ref{thm: factorization} and Example \ref{ex: constants B_N}, we have that the form $H_6^{(2)}(z)= H_{E_6^{(2)}}(z)$ is given by $H_6^{(2)} (z) := H_2(z, e_1)H_2(z, e_2)$. Assuming that the phase of $H_6^{(2)} (z)$ is such that it attains real values at the cusp $\infty$, we have that \begin{equation} \label{E 6,2} E_6^{(2)}(z) = C_{2,6} H_6^{(2)} (z), \end{equation} where the absolute value of the constant $C_{2,6}$ is given by
$|C_{2,6}|=e^{B_{2,e_1}+B_{2,e_2}}$ with $$ B_{2,e_1}=-2 \left( 24\zeta'(-1) + \log(8\pi^2)
- \frac{4}{3} \log 2 +\frac{1}{12} \log\left( \left| \Delta(i\sqrt{2}) \cdot \Delta(i/\sqrt{2}) \right| \right)\right) $$ and $$ B_{2,e_2}=- \left( 24\zeta'(-1) + \log(8\pi^2)
- \frac{11}{6} \log 2 +\frac{1}{12} \log\left( \left| \Delta(1/2 + i/2) \cdot \Delta(1+i) \right| \right)\right). $$
Let us now consider the case when $k=2$. From \eqref{zeros f-la N=2}, we have that only $e_1$ and $e_2$ can be vanishing points of $E_4^{(2)}$. However, there are two possibilities: Either $e_2$ is an order two vanishing point, and $E_4^{(2)}(z)\neq 0$ for all $z\neq e_2$ in a fundamental domain $\mathcal{F}_2$ of $X_2$, or $e_1$ is an order one vanishing point and $E_4^{(2)}(z)\neq 0$ for all points $z\neq e_1$ in $\mathcal{F}_2$. If the latter possibility is true, then $E_6^{(2)}(z) /E_4^{(2)}(z)$ would be a weight $2$ holomorphic modular form which vanishes only at $e_2$, which is not possible since there is no weight two modular form on $X_N$ for any squarefree $N$ such that the surface $X_N$ has genus zero; see \cite{JST14}. Therefore, $E_4^{(2)}$ vanishes at $e_{2}$ of order two, and there are no other vanishing points of $E_4^{(2)}$ on $X_2$.
Hence, in the notation of Theorem \ref{thm: factorization}, we have
$ H_4^{(2)} (z):= H_{E_4^{(2)}}(z) = H_2(z, e_2)^2$, implying that \begin{equation} \label{E 4,2}
E_4^{(2)}(z) =C_{2,4}H_2(z, e_2)^2, \end{equation}
where $|C_{2,4}|=e^{2B_{2,e_2}}$. This proves that $H_2(z, e_2)^2$ is a weight four holomorphic modular function on $\overline{\Gamma_0(2)^+}$. If we combine \eqref{E 6,2} with \eqref{E 4,2} we get $$ H_2(z, e_1)^2= \frac{C_{2,4}}{C_{2,6}^2} \cdot\frac{( E_6^{(2)}(z))^2}{ E_4^{(2)}(z)}; $$ in other words, $H_2(z, e_1)^2$ is a weight eight holomorphic modular function on $\overline{\Gamma_0(2)^+}$.
Furthermore, application of Proposition \ref{prop:Ell Kron limit one cusp} with $f_{2k} = E_4^{(2)}$ and $Z_{f_{2k}}=\{ e_2\}$ (with multiplicity two) together with Example \ref{ex: moonshine groups} and the representation formula \eqref{E_k, p proposit fla} yield \eqref{ell Eis at e_2}.
By applying Proposition \ref{prop:Ell Kron limit one cusp} with $f_{2k} = E_6^{(2)}$ and $Z_{f_{2k}}=\{e_1, e_2\}$ together with formula \eqref{ell Eis at e_2} we get the elliptic Kronecker limit formula for $ \mathcal{E}^{\mathrm{ell}}_{e_1}(z,s)$ $$
\mathcal{E}^{\mathrm{ell}}_{e_1}(z,s)=-s\log\left( |E_6^{(2)}(z)| |E_4^{(2)}(z)|^{-1/2} |\Delta(z) \Delta(2z)|^{-1/6}\right) + O(s^2) \text{ as } s \to 0. $$ \end{example}
\vskip .06in \begin{example} \rm Consider the surface $X_5$. There exist three order two elliptic elements, namely $e_1=i/\sqrt{5}$, $e_2=2/5 + i/5$, and $e_3=1/2 + i/(2\sqrt{5})$. The surface $X_{5}$ has genus zero and one cusp, hence $\vol_{\hyp}(X_5)=\pi$.
Using the transformation rule for $E_6^{(5)}$, one concludes that the holomorphic form $E_6^{(5)}$ must vanish at $e_1$, $e_2$ and $e_3$. By the dimension formula \eqref{zeros f-la}, one sees that $e_1$, $e_2$ and $e_3$ are the only zeros of $E_6^{(5)}$. Theorem \ref{thm: factorization} then implies that \begin{equation}\label{E_6_5} E_6^{(5)}(z)= C_{5,6}H_{6}^{(5)}(z), \end{equation} where the absolute value of the constant $C_{5,6}$ is given by
$|C_{5,6}|=e^{B_{5,e_1}+B_{5,e_2}+B_{5,e_3}}$ and \begin{multline*} B_{5,e_1}+B_{5,e_2}+B_{5,e_3}= -3\left(24\zeta'(-1) + \log (8\pi^2)\right) - \log 50 \\
+\frac{1}{12}\log \left(\abs{\Delta(i/\sqrt{5})\Delta(i\sqrt{5}) \Delta(2/5 + i/5) \Delta(2+i) \Delta(1/2 + i/(2\sqrt{5})) \Delta(5/2 + i\sqrt{5}/2)}\right) . \end{multline*} One can view \eqref{E_6_5} as an analogue of the Jacobi triple product formula. \end{example}
\vskip .06in \begin{remark} \rm Let $N=p_1 \cdot \ldots \cdot p_r$ be a squarefree number. Then the surface $X_{N}$ has one cusp. Numerous results are known concerning the topological structure of $X_{N}$; see, for example, \cite{Cum04} and references therein. As a consequence, one can develop a number of results similar to the above examples when $N=2$ or $N=5$. In particular, Theorem \ref{thm: factorization} holds, so one can factor any holomorphic Eisenstein series $E_{2k}^{(N)}$ of weight $2k$ into a product of elliptic Kronecker limit functions, up to a factor of modulus one. \end{remark}
\subsection{Congruence subgroups of prime level}
Consider the surface $M_p$ for a prime $p$. The smallest positive integer $k$ such that there exists a weight $2k$ holomorphic form is $k=1$. As a result, we have the following corollary of Theorem \ref{thm: factorization}.
\begin{corollary} Let $f_{2k,p}$ denote a weight $2k\geq 2$ holomorphic form on the surface $M_p = \overline{\Gamma_0(p)}\setminus \mathbb{H}$ bounded at cusps and such that the constant term in its $q-$expansion is equal to $b_{f_{2k},p}$. Then, $$ f_{2k,p}(z)= a_{f_{2k},p} \widetilde{B}_{f_{2k},p} \prod_{w \in Z(f_{2k,p})} \widetilde{H}_p(z,w), $$ where $a_{f_{2k},p}$ is a complex constant of modulus one and $$ \widetilde{B}_{f_{2k},p}=\abs{b_{f_{2k},p}} \prod_{w \in Z(f_{2k,p})} \left(\exp \left[-C_w \left( \frac{2p^2 \log p}{p^2-1} + C \right) \right]
\abs{\sqrt[p-1]{\frac{\eta(p w) ^p}{\eta(w)}} \, \Im (w)}^{-C_w}\right) $$ with $C:=\log (8\pi ^2) +24\zeta'(-1)$. \end{corollary}
Let us now compute the constants $\widetilde{B}_{f_{2k},p}$ for two cases.
\begin{example}\rm If $p=2$, then the surface $M_2$ has only one elliptic point, $e=1/2 + i/2$, which has order two. Furthermore, $\vol_{\hyp}(M_p) = \pi$, hence formula \eqref{zeros f-la} with $k=1$ implies that the holomorphic form $E_{2,2}$ defined by \eqref{E_2,p} with $p=2$ vanishes only at $e$, and the vanishing is to order one. From the $q-$expansion \eqref{q-exp E_2,p} we have that $\abs{b_{E_{2,2},2}}=2-1=1$. Since $C_e = 1$, we get $$ E_{2,2}(z)=a_2 \cdot \frac{1}{16 \sqrt[3]{4}\,\pi^2} \exp(- 24\zeta'(-1)) \abs{\frac{\eta (1/2 + i/2)}{\eta(1+i)^2}} \widetilde{H}_2(z,e), $$ for some complex constant $a_2$ of modulus one. In other words, the elliptic Kronecker limit function $\widetilde{H}_2(z,e)$ is a weight two modular form on $\overline{\Gamma_0(2)}$. \end{example}
\begin{example}\rm If $p=3$, then the surface $M_3$ has only one elliptic point $e=1/2 + \sqrt{3}i/6$, which has order three. The volume of the surface $M_3$ is $4 \pi/3$, hence formula \eqref{zeros f-la} with $k=1$ implies that the holomorphic form $E_{2,3}$ vanishes only at $e$, of order two. Furthermore, $\abs{b_{E_{2,3},3}}=3-1=2$ and $C_e =1/2$, so then $$ E_{2,3}(z)= a_3 \cdot \frac{1}{12 \sqrt[4]{27}\, \pi^2} \exp(-24 \zeta'(-1)) \abs{\sqrt{\frac{\eta\left( 1/2 + i\sqrt{3}/6\right)}{\eta\left( 3/2 + i\sqrt{3}/2\right)^3}}}\widetilde{H}_3(z,e)^2, $$ for some complex constant $a_3$ of modulus one. \end{example}
\section{Additional considerations}
In this section, we use the elliptic Kronecker's limit function to prove Weil's reciprocity law. In addition, we state various concluding remarks.
\subsection{Weil reciprocity}
To conclude this article, we will use equation \eqref{Kronecker_elliptic} to prove Weil's reciprocity law which, for the convenience of the reader, we now state.
\vskip .10in \begin{theorem}{\bf [Weil Reciprocity]}\label{Weil_reciprocity} Let $f$ and $g$ be meromorphic functions on the smooth, compact Riemann surface $M$. Let $D_{f}$ and $D_{g}$ denote the divisors of $f$ and $g$, respectively, which we write as $$ D_{f} = \sum m_{f}(P)P \,\,\,\,\,\textrm{and} \,\,\,\,\,D_{g} = \sum m_{g}(P)P. $$ Then $$ \prod\limits_{w_{j}\in D_{g}}f(w_{j})^{m_{g}(w_{j})} = \prod\limits_{z_{i}\in D_{f}} g(z_{i})^{m_{f}(z_{i})}. $$ \end{theorem}
\vskip .10in \begin{proof} Consider the function $$ I(s;f,g) = \sum\limits_{z_{i}\in D_{f}}\sum\limits_{w_{j}\in D_{g}}m_{f}(z_{i})m_{g}(w_{j}) {\cal E}^{\textrm{ell}}_{w_{j}}(z_{i},s). $$ We shall compute the asymptotic expansion of $I(s;f,g)$ near $s=0$. Since both $D_{f}$ and $D_{g}$ have degree zero, we immediately have the equations $$ \sum\limits_{z_{i}\in D_{f}}\sum\limits_{w_{j}\in D_{g}}m_{f}(z_{i})m_{g}(w_{j}) c = 0 $$ and $$ \sum\limits_{z_{i}\in D_{f}}\sum\limits_{w_{j}\in D_{g}}m_{f}(z_{i})m_{g}(w_{j}) \log\left((\textrm{\rm Im}(z_{i}))^{c}\right)=0. $$ Since $M$ is assumed to be smooth and compact, the terms in \eqref{Kronecker_elliptic} involving the parabolic Eisenstein series do not appear. Hence, we have the asymptotic expansion \begin{align}\label{exp_I} I(s;f,g) = -\sum\limits_{z_{i}\in D_{f}}\sum\limits_{w_{j}\in D_{g}}m_{f}(z_{i})m_{g}(w_{j}) \log\left(\vert H(z_i,w_j)\vert\right)\cdot s+ O(s^{2}) \,\,\,\,\,\textrm{\rm as $s \rightarrow 0$.} \end{align} Weil's reciprocity formula will be proved by evaluating $$ \lim\limits_{s \rightarrow 0}s^{-1}I(s;f,g) $$ in two different ways, one by first summing over the points in $D_{f}$ the sum over the points in $D_{g}$, and the second way obtained by interchanging the order of summation.
To begin, we claim there exist constants $a_{f}$ and $a_{g}$ such that $$ f(w) = a_{f}\prod\limits_{z_{i}\in D_{f}}H(z_{i},w)^{m_{f}(z_{i})} \,\,\,\,\,\textrm{and}\,\,\,\,\, g(z) = a_{g}\prod\limits_{w_{j}\in D_{g}}H(z,w_{j})^{m_{g}(w_{j})}. $$ Indeed, both sides of each proposed equality are meromorphic functions with the same divisors, hence, differ by a multiplicative constant. Since both $D_{f}$ and $D_{g}$ have degree zero, one has that $$ \prod\limits_{z_{i}\in D_{f}}\vert a_{g}\vert^{m_{f}(z_{i})} =\prod\limits_{w_{j}\in D_{g}}\vert a_{f}\vert^{m_{g}(w_{j})} = 1. $$ Therefore, we can write the lead term in \eqref{exp_I} in two ways, yielding the identity \begin{equation}\label{Weilabs} \prod\limits_{w_{j}\in D_{g}}\vert f(w_{j})\vert^{m_{g}(w_{j})} = \prod\limits_{z_{i}\in D_{f}}\vert g(z_{i})\vert^{m_{f}(z_{i})}. \end{equation} It remains to argue that (\ref{Weilabs}) holds without the absolute value signs, which can be completed as follows. First, apply the above arguments in a fundamental domain $\cal F$ of $M$ whose interior contains the support of $D_{f}$ and $D_{g}$. On such a domain, one can choose a well-defined branch of $H(z,w)$, hence we arrive at the equality \begin{equation}\label{Weil} \prod\limits_{w_{j}\in D_{g}}f(w_{j})^{m_{g}(w_{j})} = \prod\limits_{z_{i}\in D_{f}} g(z_{i})^{m_{f}(z_{i})} \end{equation} viewing all points $z_{i}$ and $w_{j}$ as lying in $\cal F$. Now, when tessellating by $\eta \in \Gamma$, one introduces multiplicative factors of the form \begin{equation}\label{multfactor} \prod\limits_{w_{j}\in D_{g}}\epsilon_{\Gamma}(\eta)^{m_{g}(w_{j})} \,\,\,\,\,\textrm{and}\,\,\,\,\, \prod\limits_{z_{i}\in D_{f}} \epsilon_{\Gamma}(\eta)^{m_{f}(z_{i})}. \end{equation} Since $D_{f}$ and $D_{g}$ are degree zero, each term in (\ref{multfactor}) is equal to one. Therefore, one gets a well-defined extension of (\ref{Weil}) to all $z, w \in \mathbb{H} $, which completes the proof of Theorem \ref{Weil_reciprocity}. \end{proof}
\subsection{Unitary characters and Artin formalism}
As with parabolic Eisenstein series, one can extend the study of elliptic Eisenstein series to include the presence of a unitary character. More precisely, let $\pi: \Gamma \rightarrow U(n)$ denote an $n$-dimensional unitary representation of the group $\Gamma$ with associated character $\chi_{\pi}$. Let us define \begin{equation}\label{ell_eisen_pi} {\cal E}^{\textrm{ell}}_{w}(z,s;\pi) =\sum\limits_{\eta \in \Gamma} \chi_{\pi}(\eta)\sinh(d_{\mathrm{hyp}}(\eta z, w))^{-s} \end{equation} to be the elliptic Eisenstein series twisted by $\chi_{\pi}$. Note that if $n=1$ and $\pi$ is trivial, then the above definition is equal to $\textrm{ord}(w)$ times the series in (\ref{ell_eisen}). (Again, we kept the definition (\ref{ell_eisen}) in order to be consistent with the notation in \cite{JvPS14}). In general terms, the meromorphic continuation of (\ref{ell_eisen_pi}) can be studied using the methodology of \cite{JvPS14}, which depended on the spectral expansion and small time asymptotics of the associated heat kernel. As a result, we feel it is safe to say that one subsequently can prove the continuation of (\ref{ell_eisen_pi}).
Having established the meromorphic continuation of (\ref{ell_eisen_pi}), one then can study the elliptic Kronecker limit functions. It would be interesting to place the study in the context of the Artin formalism relations (see \cite{JLa94} and references therein). The system of elliptic Eisenstein series associated to the representations $\pi$ will satisfy additive Artin formalism relations, and, through exponentiation, the corresponding elliptic Kronecker limit functions will satisfy multiplicative Artin formalism relations. It would be interesting to carry out these computations in the setting of the congruence groups $\Gamma_{0}(N)$ as subgroups of the moonshine groups $\Gamma_{0}(N)^{+}$, for instance, in order to relate the above-mentioned computations for parabolic Kronecker limit functions. It is possible that a similar approach could yield further relations amongst the elliptic Kronecker limit functions.
\subsection{The factorization theorem in other cases}
\subsubsection{\bf Factorization for compact surfaces} \rm If $M$ is compact then, in a sense, Theorem \ref{thm: factorization} becomes the following. In the notation of the proof of Theorem \ref{thm: factorization}, the quotient $$ F_{f_{2k}}(z):= \frac{H_{f_{2k}}(z)}{f_{2k}(z)} $$ is a non-vanishing, bounded, holomorphic function on $M$, hence is constant, thus $$ f_{2k}(z) = c_{f_{2k}} H_{f_{2k}}(z):= c_{f_{2k}} \prod_{w \in Z(f_{2k})} H_{\Gamma}(z,w) $$ for some constant $c_{f_{2k}}$. The point now is to develop a strategy by which one can evaluate $c_{f_{2k}}$. Perhaps the most natural approach would be to study the limiting value of $$ \widetilde{H}_{\Gamma}(z) := \lim\limits_{w \rightarrow z} \frac{H_{\Gamma}(z,w)}{z-w}, $$ which needs to be considered in the correct sense as a holomorphic form on $M$. One can then express $c_{f_{2k}}$ in terms of the first non-zero coefficient of $f_{2k}$ about a point $z \in Z(f_{2k})$, a product of the forms $H_{f_{2k}}(z,w)$ for two different points in $Z(f_{2k})$ and $\widetilde{H}_{\Gamma}(z)$. Such formulae could be quite interesting in various cases of arithmetic interest. We will leave the development of such identities for future investigation.
\subsubsection{\bf Factorization for surfaces with more than one cusp} \rm It is evident that one can generalize Theorem \ref{thm: factorization} to the case when the holomorphic form $f_{2k}$ vanishes in a cusp, or several cusps. In such an instance, one includes factors of the parabolic Kronecker limit function in the construction of $H_{f_{2k}}$. The parabolic Kronecker limit function is bounded and non-vanishing in any cusp other than the one to which it is associated, and the (fractional) order to which it vanishes follows from Theorem 1 of \cite{Ta86}. As with Theorem \ref{thm: factorization}, one can express any holomorphic modular form as a product of parabolic and elliptic Kronecker limit functions, up to a multiplicative constant. Furthermore, the multiplicative constant can be computed, up to a factor of modulus one, from the value of the various functions at a cusp.
\noindent
\noindent Jay Jorgenson \\ Department of Mathematics \\ The City College of New York \\ Convent Avenue at 138th Street \\ New York, NY 10031 U.S.A. \\ e-mail: jjorgenson@mindspring.com
\noindent Anna-Maria von Pippich \\ Fachbereich Mathematik \\ Technische Universit\"at Darmstadt \\ Schlo{\ss}gartenstr. 7 \\ D-64289 Darmstadt \\ Germany \\ e-mail: pippich@mathematik.tu-darmstadt.de
\noindent Lejla Smajlovi\'c \\ Department of Mathematics \\ University of Sarajevo\\ Zmaja od Bosne 35, 71 000 Sarajevo\\ Bosnia and Herzegovina\\ e-mail: lejlas@pmf.unsa.ba \end{document}
% stray duplicate \end{document} removed; the document already ends on the preceding line
\begin{document}
\title{Harnack Inequality for Semilinear SPDE with Multiplicative Noise}
\begin{abstract} By a new approximate method, dimensional free Harnack inequalities are established for a class of semilinear stochastic differential equations in Hilbert space with multiplicative noise. These inequalities are applied to study the strong Feller property for the semigroup and some properties of invariant measure. \end{abstract}\noindent
AMS subject Classification (2000):\ 60J60. \noindent
Keywords: Harnack inequality, log-Harnack inequality, multiplicative noise, stochastic partial differential equation.
\vskip 2cm
\section{Introduction and main results} The main aim of this paper is to prove a Harnack inequality for semilinear stochastic equations on Hilbert spaces with multiplicative noise. This type of inequality, which was proved for the first time in \cite{Wang97}, has become a powerful tool in infinite-dimensional stochastic analysis. There are many papers proving this type of inequality for SPDE with additive noise; see \cite{DPRWang2009,Liu09,LiuW08,Ouyang2009a,Ouyang2009b,Ouyang2011a,OuyangRW2012,Wang2007,Wang2011,WangWX2011,WangX2011} and the references therein. In \cite{RoWang2010}, the log-Harnack inequality for semilinear SPDE with non-additive noise was proved for the first time, but by the gradient estimate method used there, only deterministic and time-independent coefficients were treated. A new method to deal with the case of general coefficients for SDE was introduced in \cite{Wang2011}. This method has been generalized to functional stochastic differential equations; see \cite{WangY2011}. In this paper, we generalize this method to the case of semilinear SPDE. There are some disadvantages of the finite-dimensional approximation method here, see Remark \ref{remark2}; therefore we use the coupling argument again as in \cite{Wang2011}, with a slight modification. Since it seems unclear how to solve the analogue of the equation for the process $Y_{t}$ (see equation (2.3) in \cite{Wang2011}) in infinite dimensions, we turn to a new process which plays the role of the difference of the coupling processes; we obtain it as a local strong solution of an SPDE and solve the equation by truncation, in the same spirit as in \cite{Brzez97}. By this process and the Girsanov theorem, we get a coupling in a new probability space. On the other hand, we obtain the Harnack inequality by another type of approximation: we perturb the linear term by a suitable linear operator which is closely related to the diffusion term.
This differs from the finite-dimensional approximation and the Yosida approximation: by this perturbation, we obtain a stronger linear term, which enables us to prove the inequality for the perturbed equation more easily.
Let $H$ be a separable Hilbert space with inner product $\langle\cdot,\cdot\rangle$ and norm $\|\cdot\|$, consider the following stochastic differential equation on $H$: \begin{equation}\label{equ1}
\mathrm{d} x_{t} = -Ax_{t}\mathrm{d} t + F(t,x_{t})\mathrm{d} t+B(t,x_{t})\mathrm{d} W_{t} \end{equation} $W=W(t),t\geq0$ is a cylindrical Brownian motion on $H$ with covariance operator $I$ on filtered probability space $(\Omega, \mathcal{F}, \mathbb{P},(\mathcal{F}_{t})_{t\geq0})$, and the coefficients satisfy the following hypotheses: \begin{enumerate}[({H}1)]
\item $A$ is a negative self adjoint operator with discrete spectrum: \begin{equation} 0\leq\lambda_{1} \leq \lambda_{2} \leq \cdots \leq \lambda_{n} \rightarrow \infty, \end{equation} $\{\lambda_{n},n\in\mathbb{N}\}$ are the eigenvalues of $A$, and $\{e_{n}\}_{n=1}^{+\infty}$ are the corresponding eigenvectors, the compact $C_{0}$ semigroup generated by $-A$ denoted by $S(t)$.\label{itemH1}
\item $F:[0,\infty)\times\Omega\times H \rightarrow H$ and $B: [0,\infty)\times\Omega\times H \rightarrow L(H)$ are $\mathscr{P}_{\infty}\times \mathscr{B}(H)$ measurable, here $\mathscr{P}_{\infty}$ is predictable $\sigma$-algebra on $[0,\infty)\times\Omega$ and $L(H)$ is all the bounded operators on $H$, and there exists an increasing function $K_{1}:[0,+\infty)\rightarrow [0,\infty)$, such that \begin{equation}
||F(t,x)-F(t,y)||+||B(t,x)-B(t,y)||_{HS}\leq K_{1}(t)||x-y||, \end{equation}
for all $t\geq 0$, $x, y \in H$, $\mathbb{P}$-a.s., where $||\cdot||_{HS}$ denotes the Hilbert--Schmidt norm, and there exists $r>1$ such that for all $t>0$, \begin{equation}
\mathbb{E}\left(\int_{0}^{t}\left|\left|F(s,0)\right|\right|\mathrm{d} s\right)^{r} < \infty, \end{equation} \begin{equation}
\sup_{u\in [0,t]}\int_{0}^{u}\left(\mathbb{E}\left|\left|S(u-s)B(s,0)\right|\right|_{HS}^{2r}\right)^{\frac{1}{r}}\mathrm{d} s < \infty, \end{equation} \label{itemH2}
\item There exist a decreasing function $\rho:[0,\infty)\rightarrow(0,\infty)$, and a bounded self adjoint operator $B_{0}$ satisfying that there exists $\{b_{n}>0|n\in\mathbb{N}\}$ such that $B_{0}e_{n}=b_{n}e_{n}$ and \begin{equation} B(t,x)B(t,x)^{*} \geq \rho(t)^{2}B_{0}^{2},\ \forall x\in H, t\geq 0,\ \mathbb{P}\mbox{-a.s.}, \end{equation} \label{itemH3}
\item $\textrm{Ran}(B(t,x)-B(t,y))\subset \mathscr{D}(B_{0}^{-1})$ holds for all $(t,x) \in [0,\infty)\times H, \mathbb{P}$-a.s., and there exists an increasing function $K_{2}:[0,\infty)\rightarrow\mathbb{R}$ such that
\begin{align*}
2\<F(t,x)-F(t,y),B_{0}^{-2}(x-y)\rangle+&||B_{0}^{-1}(B(t,x)-B(t,y))||_{HS}^{2}\\
&\leq K_{2}(t)||B_{0}^{-1}(x-y)||^{2}
\end{align*} holds for all $x,y \in \mathscr{D}(B_{0}^{-2})$ and all $t\geq 0$, $\mathbb{P}$-a.s.,\label{itemH4}
\item There exists an increasing function $K_{3}:[0,\infty)\rightarrow (0,\infty)$, such that $||(B(t,x)^{*}-B(t,y)^{*})B_{0}^{-2}(x-y)||\leq K_{3}(t)||x-y||_{H_{0}}$ holds for all $x,y\in H$, $t\geq 0$ and $x-y\in\mathscr{D}(B_{0}^{-1})$ almost surely.\label{itemH5} \end{enumerate}
\begin{remark}\label{remark1} \begin{enumerate}[(1)] \item Under (H\ref{itemH1}), we can replace $\mathscr{D}(B_{0}^{-2})$ in (H\ref{itemH4}) by $\bigcup_{n} H_{n}$, where $H_{n}= \mathrm{span}\{e_{1},\cdots,e_{n}\}$.
\item (H\ref{itemH3}) is equivalent to saying that $\mathrm{Ran}(B(t,x)) \supset \mathrm{Ran} B_{0}$ and $||B(t,x)^{-1}z||\leq \rho(t)^{-1}||B_{0}^{-1}z||$, for all $z \in \mathscr{D}(B_{0}^{-1})$, $t\geq 0$, $\mathbb{P}$\mbox{-a.s.}, \item (H\ref{itemH5}) will be used as an additional condition to obtain the Harnack inequality, and by (H\ref{itemH4}), $B_{0}^{-1}(B(t,x)-B(t,y))$ is a bounded operator, so in (H\ref{itemH5}) we only require $x-y\in \mathscr{D}(B_{0}^{-1})$. \end{enumerate} \end{remark} For the proof of Remark \ref{remark1}, see the Appendix. We now state the main result of this paper. \begin{theorem}\label{mainthm} If (H\ref{itemH1})-(H\ref{itemH4}) hold, then \begin{equation}
P_{T}\log{f}(y)\leq\log{P_{T}f(x)}+\frac{K_{2}(T)||x-y||_{H_{0}}}{2(1-e^{K_{2}T})},\ \forall f\in \mathscr{B}_{b}(H),f\geq 1, x,y\in H,T>0. \end{equation} If, in addition, (H\ref{itemH5}) holds, then for $p>(1+\frac{K_{3}(T)}{\rho(T)})^{2}$, $\delta_{p,T} = K_{3}\vee \frac{\rho(T)}{2}(\sqrt{p}-1)$, the Harnack inequality \begin{equation}\label{Harineq} (P_{T}f(y))^{p}\leq (P_{T}f^{p}(x))
\exp{\left[\frac{K_{2}(T)\sqrt{p}(\sqrt{p}-1)||x-y||_{H_{0}}^{2}}{4\delta_{p,T}[(\sqrt{p}-1)\rho(T)-\delta_{p,T}](1-e^{K_{2}T})}\right]}, \end{equation}
holds for all $T>0$, $x,y\in H$ and $f\in\mathscr{B}_{b}^{+}(H)$, where $||x||^{2}_{H_{0}}=\sum_{n=0}^{+\infty}{b_{n}^{-1}}\<x,e_{n}\rangle^{2}$, $H_{0}=\mathscr{D}(B_{0}^{-1})$. \end{theorem}
\begin{remark}\label{remark2} One may use the finite-dimensional approximation method to get the Harnack inequalities, but here we mention that there are difficulties to overcome and it may not be better than the method used here. Let $\pi_{n}$ be the projection from $H$ to $H_{n}$; then we get the following equation on $H_{n}$ \begin{equation} \mathrm{d} x^{n}_{t} = -A_{n}x^{n}_{t}\mathrm{d} t + F_{n}(t,x^{n}_{t})\mathrm{d} t+B_{n}(t,x^{n}_{t})\mathrm{d} W^{n}_{t}, \end{equation} where \begin{equation}
A_{n}=\pi_{n}A,\ F_{n}=\pi_{n}F|_{H_{n}},\ B_{n}=\pi_{n}B|_{H_{n}},\ W^{n}=\pi_{n}W, \end{equation} one may find that after projecting to lower dimension, an invertible operator may become degenerate, for example, an operator has the matrix form, $ \left(
\begin{array}{cc}
0 & 1 \\
1 & 0 \\
\end{array}
\right) $ , under the orthonormal basis $\{e_{1},\ e_{2}\}$. It is easy to see that it is degenerate after projecting to the subspace generated by $e_{1}$. By (H\ref{itemH3}), one may replace $B$ by its symmetrization $\sqrt{BB^{*}}$, but the constants may become worse in (H\ref{itemH2}) and (H\ref{itemH4}), see the remark after Theorem 1 in \cite{ArY81}, and it seems not easy to get a similar estimate for $\sqrt{BB^{*}}$ as in (H\ref{itemH4}). \end{remark}
\section{Proof of Theorem \ref{mainthm}} Fix a time $T>0$; we focus our discussion on the interval $[0,T]$. In order to prove the main theorem, we need some lemmas, and we denote $K_{i}(T)$ by $K_{i}$, $i=1,2,3$, for simplicity's sake. The first lemma proves the existence and uniqueness of the mild solution of equation (\ref{equ1}), and gives some estimates. \begin{lemma} Under conditions (\textrm{H}\ref{itemH1}) and (\textrm{H}\ref{itemH2}), equation (\ref{equ1}) has a pathwise unique mild solution and \begin{equation}\label{solution_estimate_1}
\sup_{t\in[0,T]}\mathbb{E}{||x_{t}||^{r}}\leq C(r,T)(1+\mathbb{E}||x_{0}||^{r}). \end{equation} \end{lemma} \noindent\emph{Proof.} The existence part goes along the same lines as that of Theorem 7.4 in \cite{DPZ1992}, if we can prove that there exists $p\geq2$, such that \begin{equation}
\sup_{t\in[0,T]}\mathbb{E}\left|\left|\int_{0}^{t}{e^{-(t-s)A}F(s,x_{s})\mathrm{d} s}\right|\right|^{p}<\infty, \end{equation} and \begin{equation}
\sup_{t\in[0,T]}\mathbb{E}{\left|\left|\int_{0}^{t}{e^{-(t-s)A}B(s,x_{s})\mathrm{d} W_{s}}\right|\right|^{p}}<\infty \end{equation} for all $H$-valued predictable processes $x$ defined on $[0,T]$ satisfying \begin{equation}
\sup_{t\in[0,T]}\mathbb{E}{||x_{t}||^{p}}<\infty. \end{equation} In fact, for $r$ in (H\ref{itemH2}), \begin{equation*} \begin{split}
&\sup_{t\in[0,T]}\mathbb{E}{\left|\left|\int_{0}^{t}{e^{-(t-s)A}B(s,x_{s})\mathrm{d} W_{s}}\right|\right|^{r}}\\
\leq &\sup_{t\in[0,T]}{\mathbb{E}{\left|\left|\int_{0}^{t}{e^{-(t-s)A}(B(s,x_{s})-B(s,0))\mathrm{d} W_{s}}\right|\right|^{r}}}+\sup_{t\in[0,T]}{\mathbb{E}{\left|\left|\int_{0}^{t}{e^{-(t-s)A}B(s,0)\mathrm{d} W_{s}}\right|\right|^{r}}}\\
\leq & C(r,T)(1+\mathbb{E}{||x_{t}||^{r}})+\left(\frac{r}{2}(r-1)\right)^{\frac{r}{2}}
\sup_{t\in[0,T]}\left(\int_{0}^{t}\left(\mathbb{E}||S(t-s)B(s,0)||_{HS}^{r}\right)^{\frac{2}{r}}\right)^{r} <\infty. \end{split} \end{equation*} $F$ is treated similarly; we omit it. Estimate (\ref{solution_estimate_1}) follows from Gronwall's lemma. For the uniqueness part, if $x_{t}^{1},x_{t}^{2}$ are mild solutions of equation (\ref{equ1}), then \begin{equation} \begin{split}
\mathbb{E}\sup_{u\in[0,t]}{||x_{u}^{1}-x_{u}^{2}||^{r}}\leq& 2^{r}T\mathbb{E}{\sup_{u\in[0,t]}{\int_{0}^{u}{||S(u-s)(F(s,x_{s}^{1})-F(s,x_{s}^{2}))||^{r}\mathrm{d} s}}}\\
&+ 2^{r}\mathbb{E}\sup_{u\in[0,t]}{||\int_{0}^{u}{S(u-s)(B(t,x^{1}_{s})-B(t,x^{2}_{s}))\mathrm{d} W_{s}}||^{r}}\\
\leq& 2^{r}T\int_{0}^{t}{\mathbb{E}{||x_{u}^{1}-x_{u}^{2}||^{r}}\mathrm{d} s}+C(r,T)\mathbb{E}\int_{0}^{t}{||x_{s}^{1}-x_{s}^{2}||^{r}\mathrm{d} s}\\
\leq& C(r,T)\int_{0}^{t}{\mathbb{E}\sup_{u\in[0,s]}{||x_{u}^{1}-x_{u}^{2}||^{r}}\mathrm{d} s}, \end{split} \end{equation}
by the second inequality, $\mathbb{E}\sup_{u\in[0,t]}{||x_{u}^{1}-x_{u}^{2}||^{r}}<\infty$, then by Gronwall's lemma, $x_{t}^{1}=x_{t}^{2},\ \forall t\in[0,T]$, $\mathbb{P}$-a.s.
\qed
Denote $A_{\epsilon} = A + \epsilon B_{0}^{-2}$, $\mathscr{D}(A_{\epsilon}) = \mathscr{D}(A) \bigcap \mathscr{D}(B_{0}^{-2}) \subset \mathscr{D}(B_{0}^{-2})$, it is a self adjoint operator, the eigenvalues of $A_{\epsilon}$ are $\{\lambda_{n,\epsilon}:=\lambda_{n}+\epsilon b_{n}^{-2}\ | n\in \mathbb{N}\}$ and the eigenvectors remain $\{e_{n}|n\in\mathbb{N}\}$. In fact, one can define a self adjoint operator $\tilde{A}$ by \begin{align}
\mathscr{D}(\tilde{A})&=\left\{x\in H \ |\ \sum_{n=0}^{+\infty}{(\lambda_{n}+\epsilon b_{n}^{-2})^{2}\<x,e_{n}\rangle^{2}}<+\infty \right\},\\ \tilde{A}x&=\sum_{n=0}^{+\infty}{(\lambda_{n}+\epsilon b_{n}^{-2})\<x,e_{n}\>e_{n}}, \end{align} then by basic inequality and spectral decomposition of $A$ and $B_{0}^{-2}$, it is easy to see that $\tilde{A}=A_{\epsilon}$. \begin{lemma} For the mild solution of equation \begin{equation}\label{equ2} \mathrm{d} x_{t}^{\epsilon}= -(A+\epsilon B_{0}^{-2})x_{t}^{\epsilon}\mathrm{d} t + F(t,x_{t}^{\epsilon})\mathrm{d} t + B(t,x_{t}^{\epsilon})\mathrm{d} W_{t},\ x_{0}^{\epsilon}= x, \end{equation} we have \begin{equation}
\lim_{\epsilon \rightarrow 0^{+}}\mathbb{E}||x_{t}-x_{t}^{\epsilon}||^{2}=0,\ \forall t\in [0,T]. \end{equation} \end{lemma} \noindent\emph{Proof.} Since \begin{align} x_{t} &= e^{-tA}x + \int_{0}^{t}{e^{-(t-s)A}F(s,x_{s})\mathrm{d} s} + \int_{0}^{t}{e^{-(t-s)A}B(s,x_{s})\mathrm{d} W_{s}},\\ x^{\epsilon}_{t} &= e^{-t(A+\epsilon B_{0}^{2})}x + \int_{0}^{t}{e^{-(t-s)(A+\epsilon B_{0}^{2})}F(s,x^{\epsilon}_{s})\mathrm{d} s} + \int_{0}^{t}{e^{-(t-s)(A+\epsilon B_{0}^{2})}B(s,x^{\epsilon}_{s})\mathrm{d} W_{s}}, \end{align} then \begin{equation} \begin{split}
||x_{t}-x_{t}^{\epsilon}||^{2}\leq& 3||(e^{-t\epsilon B_{0}^{-2}}-1)e^{(-tA)}x||^{2}\\
&+3||\int_{0}^{t}{(e^{-(t-s)A}F(s,x_{s})-e^{-(t-s)(A+\epsilon B_{0}^{-2})}F(s,x_{s}^{\epsilon}))\mathrm{d} s}||^{2}\\
&+3||\int_{0}^{t}{(e^{-(t-s)A}B(s,x_{s})-e^{-(t-s)(A+\epsilon B_{0}^{-2})}B(s,x_{s}^{\epsilon}))\mathrm{d} W_{s}}||^{2}\\ =:& I_{1}+I_{2}+I_{3}. \end{split} \end{equation} It's clear that $\lim_{\epsilon \rightarrow 0^{+}}I_{1}=0$. For $I_{2}$, we have \begin{equation} \begin{split}
I_{2}&\leq 6T\int_{0}^{t}{||(e^{-(t-s)A}-e^{-(t-s)(A+\epsilon B^{-2}_{0})})F(s,x_{s})||^{2}\mathrm{d} s}\\
&+6T\int_{0}^{t}{||e^{-(t-s)(A+\epsilon B^{-2}_{0})}(F(s,x_{s})-F(s,x_{s}^{\epsilon}))||^{2}\mathrm{d} s} =: I_{2,1}+I_{2,2}, \end{split} \end{equation} Since \begin{align}
&||(e^{-(t-s)A}-e^{-(t-s)(A+\epsilon B_{0}^{-2})})F(s,x_{s})||\leq C(1+||x_{s}||),\\
&\lim_{\epsilon \rightarrow 0^{+}}{||(e^{-(t-s)A}-e^{-(t-s)(A+\epsilon B_{0}^{-2})})F(s,x_{s})||}=0. \end{align} By the dominated convergence theorem, $\lim_{\epsilon \rightarrow 0^{+}}{\mathbb{E} I_{2,1}}=0$. On the other hand, \begin{equation} \begin{split}
I_{2,2}&\leq 6T\int_{0}^{t}{||e^{-(t-s)(A+\epsilon B_{0}^{2})}(F(s,x_{s})-F(s,x_{s}^{\epsilon}))||^{2}\mathrm{d} s}\\
&\leq 6T\int_{0}^{t}{||F(s,x_{s})-F(s,x_{s}^{\epsilon})||^{2}\mathrm{d} s}\leq 6TK_{1}\int_{0}^{t}{||x_{s}-x_{s}^{\epsilon}||^{2}\mathrm{d} s}. \end{split} \end{equation} For $I_{3}$, \begin{equation} \begin{split}
\mathbb{E}{I_{3}}&\leq 6\mathbb{E}{||\int_{0}^{t}{(e^{-(t-s)A}-e^{-(t-s)(A+\epsilon B_{0}^{-2})})B(s,x_{s})\mathrm{d} W_{s}}||^{2}}\\
&+6\mathbb{E}{||\int_{0}^{t}{e^{-(t-s)(A+\epsilon B_{0}^{-2})}(B(s,x_{s})-B(s,x_{s}^{\epsilon}))\mathrm{d} W_{s}}||^{2}}=I_{3,1}+I_{3,2}, \end{split} \end{equation} and \begin{equation} \begin{split}
\mathbb{E} I_{3,1} \leq &12T \mathbb{E} ||\int_{0}^{t}{(I-e^{-(t-s)\epsilon B_{0}^{-2}})(e^{-(t-s)A}B(s,0))\mathrm{d} W_{s}}||^{2}\\
&+ 12T \mathbb{E} ||\int_{0}^{t}{(e^{-(t-s)A}-e^{-(t-s)(A+\epsilon B_{0}^{-2})})(B(s,x_{s})-B(s,0))\mathrm{d} W_{s}}||^{2}\\
\leq &12T \mathbb{E} \int_{0}^{t}{||(I-e^{-(t-s)\epsilon B_{0}^{-2}})(e^{-(t-s)A}B(s,0))||_{HS}^{2}\mathrm{d} s}\\
&+12T \mathbb{E} \int_{0}^{t}{||(I-e^{-(t-s)\epsilon B_{0}^{-2}})(e^{-(t-s)A}(B(s,x_{s})-B(s,0)))||_{HS}^{2}\mathrm{d} s}\\ =:&I_{3,1,1}+I_{3,1,2}, \end{split} \end{equation} since \begin{align}
||(I-e^{-(t-s)\epsilon B_{0}^{-2}})e^{-(t-s)A}B(s,0)||^{2}=\sum_{n=1}^{+\infty}||(e^{-(t-s)\epsilon B_{0}^{-2}}-I)e^{-(t-s)A}B(s,0)e_{n}||^{2} \end{align} and \begin{align}
&\lim_{\epsilon \rightarrow 0}||(e^{-(t-s)\epsilon B_{0}^{-2}}-1)e^{-(t-s)A}B(s,0)e_{n}||=0\\
&||(e^{-(t-s)\epsilon B_{0}^{-2}}-I)e^{-(t-s)A}B(s,0)e_{n}||\leq||e^{-(t-s)A}B(s,0)e_{n}||\\ \end{align} and by (H\ref{itemH2}) \begin{equation}
\mathbb{E} \int_{0}^{t}{\sum_{n=1}^{+\infty}||e^{-(t-s)A}B(s,0)e_{n}||^{2}\mathrm{d} s}=\mathbb{E} \int_{0}^{t}{||e^{-(t-s)A}B(s,0)||_{HS}^{2}\mathrm{d} s}< \infty. \end{equation} By the dominated convergence theorem, $\lim_{\epsilon \rightarrow 0}I_{3,1,1}=0$. Note that $B(s,x_{s})-B(s,0)\in L_{HS}(H)$, and \begin{equation} \begin{split}
&||(I-e^{-(t-s)\epsilon B_{0}^{2}})e^{-(t-s)A}(B(s,x_{s})-B(s,0))||_{HS}^{2}\\
=&\sum_{n=1}^{+\infty}{||(I-e^{-(t-s)\epsilon B_{0}^{2}})e^{-(t-s)A}(B(s,x_{s})-B(s,0))e_{n}||^{2}} \end{split} \end{equation} and \begin{align}
||(I-e^{-(t-s)\epsilon B_{0}^{-2}})(e^{-(t-s)A}(B(s,x_{s})-B(s,0)))e_{n}||^{2}&\leq ||(B(s,x_{s})-B(s,0))e_{n}||^{2}\\
\mathbb{E} \int_{0}^{t}{\sum_{n=1}^{+\infty}||(B(s,x_{s})-B(s,0))e_{n}||^{2}\mathrm{d} s}&\leq \mathbb{E}\int_{0}^{t}{||x_{s}||^{2}\mathrm{d} s}<\infty, \end{align} by the dominated convergence theorem, $\lim_{\epsilon \rightarrow 0}{\mathbb{E} I_{3,1}}=0$. Finally, \begin{equation}
\mathbb{E} I_{3,2} \leq 6T \mathbb{E}\int_{0}^{t}{||B(s,x_{s})-B(s,x_{s}^{\epsilon})||_{HS}^{2}\mathrm{d} s}\leq 6TK_{2}\mathbb{E}\int_{0}^{t}{||x_{s}-x_{s}^{\epsilon}||^{2}\mathrm{d} s}. \end{equation} Now, we have \begin{equation}
\mathbb{E}||x_{t}-x_{t}^{\epsilon}||^{2}\leq \psi_{\epsilon}(t) + C(T,K_{2})\mathbb{E}\int_{0}^{t}{||x_{s}-x_{s}^{\epsilon}||^{2}\mathrm{d} s} \end{equation} for some $\psi_{\epsilon}(t)$, which satisfies $\lim_{\epsilon \rightarrow 0}{\psi_{\epsilon}(t)}=0$, then by Gronwall's lemma, \begin{equation}
\lim_{\epsilon \rightarrow 0}{\mathbb{E}||x_{t}-x_{t}^{\epsilon}||^{2}}=0,\ \forall t\in [0,T]. \end{equation} \qed
Firstly, we shall consider the following equation, $\xi_{t}=\frac{2-\theta}{K_{2}}(1-\operatorname{e}^{K_{2}(t-T)})$, \begin{equation}\label{equ3} \begin{split} \mathrm{d} z_{t}=&-A_{\epsilon}z_{t}\mathrm{d} t+(F(t,x_{t})-F(t,x_{t}-z_{t}))\mathrm{d} t + (B(t,x_{t})-B(t,x_{t}-z_{t}))\mathrm{d} W_{t}\\ &-\frac{1}{\xi_{t}}(B(t,x_{t}-z_{t})-B(t,x_{t}))B(t,x_{t})^{-1}z_{t}\mathrm{d} t - \frac{1}{\xi_{t}}z_{t}\mathrm{d} t,\ z_{0}=z. \end{split} \end{equation} Note that, by (H\ref{itemH2})--(H\ref{itemH4}), \begin{align} &F(t,x_{t}) - F(t,x_{t}-z_{t}) \in H,\ (B(t,x_{t})-B(t,x_{t}-z_{t}))\in L_{HS}(H,H_{0}),\\ &(B(t,x_{t}-z_{t})-B(t,x_{t}))B(t,x_{t})^{-1}\in L(H_{0},H_{0}), \end{align} it's natural to solve the equation in $H_{0}$, we shall search a suitable Gelfand triple. To this end, we should restrict the operator $A_{\epsilon}$ to $H_{0}$.
\begin{lemma} Define $A_{0,\epsilon}$ as follows \begin{align} \mathscr{D}(A_{0,\epsilon}) = B_{0}(\mathscr{D}(A_{\epsilon})),\ A_{0,\epsilon}x=A_{\epsilon}x, \forall x\in B_{0}(\mathscr{D}(A_{\epsilon})), \end{align} then, $A_{0,\epsilon}$ is well defined and $(A_{0,\epsilon}, B_{0}(\mathscr{D}(A_{\epsilon})))= (B_{0}A_{\epsilon}B_{0}^{-1},B_{0}(\mathscr{D}(A_{\epsilon})))$. \end{lemma} \noindent\emph{Proof.} It's well defined. In fact for all $x\in B_{0}(\mathscr{D}(A_{\epsilon}))$, \begin{equation} \sum_{n=1}^{+\infty}{\lambda_{n,\epsilon}\<x,e_{n}\rangle^{2}} =\sum_{n=0}^{+\infty}{\lambda^{2}_{n,\epsilon}b_{n}^{2}\<B_{0}^{-1}x,e_{n}\rangle^{2}}
\leq ||B||_{H}^{2}\sum_{n=1}^{+\infty}(\lambda_{n,\epsilon}^{2})\<B_{0}^{-1}x,e_{n}\rangle^{2}<+\infty, \end{equation} then $x\in\mathscr{D}(A_{\epsilon})$, and \begin{align} \sum_{n=1}^{+\infty}b_{n}^{-2}\<A_{\epsilon}x,e_{n}\rangle^{2}=\sum_{n=1}^{+\infty}{\lambda^{2}_{n,\epsilon}\<B_{0}^{-1}x,e_{n}\rangle^{2}}<+\infty, \end{align} then $A_{\epsilon}x\in\mathscr{D}(B_{0}^{-1}),\ \forall x\in B_{0}(\mathscr{D}(A_{\epsilon}))$, i.e. $A_{\epsilon}x\in H_{0}$. Finally, for all $x\in B_{0}(\mathscr{D}(A))$, \begin{equation} B_{0}A_{\epsilon}B_{0}^{-1}x=A_{\epsilon}B_{0}B_{0}^{-1}x=A_{\epsilon}x=A_{0,\epsilon}x. \end{equation} \qed
Now, we can define our Gelfand triple. Let \begin{equation}
(V,||\cdot||_{V} )= (\mathscr{D}(A_{0,\epsilon}^{\frac{1}{2}}),||A_{0,\epsilon}^{\frac{1}{2}}\cdot||_{H_{0}}), \end{equation}
then $(V^{*},||\cdot||_{V^{*}})$ is the completion of $(H_{0}, ||A_{0,\epsilon}^{-\frac{1}{2}}\cdot||_{H_{0}})$, and $V^{*}\supset H_{0}\supset V$ is the triple we need. Since $\mathscr{D}(A_{\epsilon})\subset\mathscr{D}(B_{0}^{-2})$, $\mathscr{D}(A_{0,\epsilon})\subset \mathscr{D}(B_{0}^{-3})$, we moreover have the following relationship \begin{equation} V^{*}\supset H \supset H_{0} \supset \mathscr{D}(B_{0}^{-2}) \supset V. \end{equation}
\begin{lemma}\label{lemma_strongsolution} If conditions (H\ref{itemH1})-(H\ref{itemH4}) hold, equation (\ref{equ3}) has a unique strong solution up to the explosion time $\tau$. \end{lemma} \noindent\emph{Proof.} Let \begin{equation} G_{n}(t,v)= \left\{ \begin{array}{ll}
B(t,x_{t})^{-1}v,& \ ||v||_{H_{0}}\leq n,\\
B(t,x_{t})^{-1}\frac{nv}{\ ||v||_{H_{0}}},& \ ||v||_{H_{0}}>n,\\ \end{array} \right. \end{equation} and for simplicity's sake, we denote $$F(t,x_{t}-v_{1})-F(t,x_{t}-v_{2}),\ G_{n}(t,v_{1})-G_{n}(t,v_{2}),\ B(t,x_{t})-B(t,x_{t}-z_{t})$$ by $F(t,v_{2},v_{1})$, $G_{n}(t,v_{1},v_{2})$, $\hat{B}(t,z_{t})$ respectively. We first consider the following equation, \begin{equation}\label{equ4} \begin{split} \mathrm{d} z_{t}=&-A_{0,\epsilon}z_{t}\mathrm{d} t + F(t,z_{t},0)\mathrm{d} t- \frac{1}{\xi_{t}}z_{t}\mathrm{d} t+\frac{1}{\xi_{t}}\hat{B}(t,z_{t})G_{n}(t,z_{t})\mathrm{d} t + \hat{B}(t,z_{t})\mathrm{d} W_{t}\\ =:&A_{n,\epsilon}(t,z_{t})\mathrm{d} t + \hat{B}(t,z_{t})\mathrm{d} W_{t} \end{split} \end{equation} It is clear that hemicontinuity holds, since $G_{n}(t,\cdot)$ remains a Lipschitz mapping from $H_{0}$ to $H$. By a direct calculation, see Appendix, we get that, for all $v,v_{1},v_{2} \in V$, \begin{enumerate}[({A}1)] \item Local monotonicity \begin{equation*} \begin{split} &2 _{V^{*}}\<A_{n,\epsilon}(t,v_{1})-A_{n,\epsilon}(t,v_{2}),v_{1}-v_{2}\rangle_{V}
+||\hat{B}(t,v_{2})-\hat{B}(t,v_{1})||_{L_{HS}(H,H_{0})}^{2}\\ \leq&\left[K_{2}+\frac{2n\sqrt{K_{2}}-2}{\xi_{t}}
+\frac{n^{2}K_{1}||B_{0}||^{2}}{\epsilon^{2}\xi_{t}^{2}\delta^{2}}
+\frac{2}{\xi_{t}}(\sqrt{K_{2}}||v_{2}||_{H_{0}}^{2}
+\sqrt{\frac{2K_{1}}{\epsilon}}||B_{0}||\cdot||v_{2}||_{V}^{2})\right]\times\\
&\times||v_{1}-v_{2}||_{H_{0}}^{2}-2(1-\delta^{2})||v_{1}-v_{2}||_{V}^{2},\ \forall \delta\in(0,1). \end{split} \end{equation*}\label{itemA1}
\item Coercivity \begin{equation*} \begin{split}
&2 _{V^{*}}\<A_{n,\epsilon}(t,v),v\rangle_{V} + ||\hat{B}(t,v)||_{L_{HS}(H,H_{0})}^{2}\\
\leq& -2(1-\delta^{2})||v||_{V}^{2}+(\frac{n\sqrt{K_{2}}-2}{\xi_{t}}+\frac{n^{2}K_{1}}{\epsilon^{2}\xi_{t}^{2}\delta^{2}})||v||_{H_{0}}^{2}, \forall \delta\in(0,1). \end{split} \end{equation*}\label{itemA2}
\item Growth \begin{equation*}
||A_{n,\epsilon}(t,v)||_{V^{*}}^{2}\leq \left(\frac{||B_{0}||^{2}}{\epsilon\xi_{t}}K_{2}
+\left(1+\frac{||B_{0}||^{4}K_{1}}{\epsilon\xi_{t}^{2}}\right)||v||_{V}^{2}\right)(1+||v||_{H_{0}}^{4}). \end{equation*}\label{itemA3} \end{enumerate} Since \begin{equation}\label{inequ1} \begin{split}
||\hat{B}(t,v)||^{2}_{L_{HS}}=||B_{0}^{-1}\hat{B}(t,v)||^{2}_{HS}
\leq K_{2}||v||_{H_{0}}^{2}+\frac{2K_{1}}{\epsilon}||B_{0}||^{3}||v||_{V}||v||_{H_{0}}. \end{split} \end{equation} does not satisfy condition (1.2) in \cite{LiuR10}, one can nevertheless check, using the basic inequality, that the proof of Lemma 2.2 there still goes through, see Appendix B. By the estimates above and Theorem 1.1 in \cite{LiuR10}, for any $T_{0}<T$, equation (\ref{equ4}) has a unique strong solution $(z_{t}^{n})_{t\in[0,T_{0}]}$; one can extend the solution to the interval $[0,T)$ by pathwise uniqueness and continuity. Next we shall let $n$ go to infinity. Let, for $m>n$, \begin{equation}
\tau_{m}^{n} = \inf\{t\in[0,T)\ |\ ||z_{t}^{m}||_{H_{0}}>n\}, \end{equation} where we set $\inf\emptyset=T$; then \begin{equation} \begin{split} z_{t}^{m}=&z_{0} + \int_{0}^{t}{(-A_{0,\epsilon}z_{s}^{m} + F(s,z_{s}^{m},0)-\frac{1}{\xi_{s}}z_{s}^{m})\mathrm{d} s}\\ &-\int_{0}^{t}{\frac{1}{\xi_{s}}\hat{B}(s,z_{s}^{m})B(s,x_{s})^{-1}z_{s}^{m}\mathrm{d} s} +\int_{0}^{t}{\hat{B}(s,z_{s}^{m})\mathrm{d} W_{s}},\ t<\tau_{m}^{n}, \end{split} \end{equation} by It\^{o}'s formula and (A\ref{itemA1}), for $t<\tau_{n}^{n}\wedge\tau_{m}^{n}$, we have \begin{equation*} \begin{split}
&\mathrm{d} ||z_{t}^{n}-z_{t}^{m}||_{H_{0}}^{2}-2\langle\hat{B}(t,z_{t}^{n})-\hat{B}(t,z_{t}^{m}))\mathrm{d} W_{t}, z_{t}^{n}-z_{t}^{m}\rangle_{H_{0}}\\ =&\ 2 _{V^{*}}\<A_{n,\epsilon}(t,z_{t}^{n})-A_{n,\epsilon}(t,z_{t}^{m}),z_{t}^{n}-z_{t}^{m}\rangle_{V}
+||\hat{B}(t,z_{t}^{n})-\hat{B}(t,z_{t}^{m})||_{L_{HS}(H,H_{0})}^{2}\mathrm{d} t\\
\leq &\left(K_{2}+\frac{2}{\xi_{t}}(n\sqrt{K_{1}}+\sqrt{K_{2}}||z_{t}^{n}||_{H_{0}}^{2}
+\sqrt{\frac{2K_{1}}{\epsilon}}||B_{0}||\cdot||z_{t}^{n}||_{V}^{2})
+\frac{n^{2}K_{1}}{\epsilon^{2}\xi_{t}^{2}\delta^{2}}||B_{0}||^{2}\right)||z_{t}^{n}-z_{t}^{m}||_{H_{0}}^{2}\\ \end{split} \end{equation*} define \begin{equation} \begin{split}
&\Psi_{s}=K_{2}+\frac{2}{\xi_{s}}(\sqrt{K_{2}}||z_{s}^{n}||_{H_{0}}^{2}+n\sqrt{K_{1}}
+\sqrt{\frac{2K_{1}}{\epsilon}}||B_{0}||^{2}||z_{s}^{n}||_{V}^{2})
+\frac{n^{2}K_{1}||B_{0}||^{2}}{\epsilon^{2}\xi_{s}^{2}\delta^{2}}, \end{split} \end{equation} then \begin{equation} \begin{split}
&\exp{\left[-\int_{0}^{t}{\Psi_{s}\mathrm{d} s}\right]}||z_{t}^{n}-z_{t}^{m}||_{H_{0}}^{2}\\ \leq &\int_{0}^{t}{2\exp{\left[-\int_{0}^{r}{\Psi_{s}\mathrm{d} s}\right]}\langle(\hat{B}(r,z_{r}^{n})-\hat{B}(t,z_{r}^{m}))\mathrm{d} W_{r},z_{r}^{n}-z_{r}^{m}\rangle_{H_{0}}}, \end{split} \end{equation} therefore \begin{equation}
\mathbb{E}\left\{\exp{\left[-\int_{0}^{t\wedge\tau_{n}^{n}\wedge\tau_{m}^{n}}{\Psi_{s}\mathrm{d} s}\right]}||z_{t\wedge\tau_{n}^{n}\wedge\tau_{m}^{n}}^{n}-z_{t\wedge\tau_{n}^{n}\wedge\tau_{m}^{n}}^{m}||_{H_{0}}^{2}\right\}=0. \end{equation} Note that \begin{equation}
\mathbb{E}\int_{0}^{t}{||z_{s}^{n}||^{2}_{V}\mathrm{d} s}<\infty,\ \forall t<T \end{equation} implies \begin{equation}
\int_{0}^{t}{||z_{s}^{n}||_{V}^{2}\mathrm{d} s}<\infty,\ \forall t\in[0,T),\ \mathbb{P}\mbox{-a.s.}, \end{equation} then \begin{equation} z_{t\wedge\tau_{n}^{n}\wedge\tau_{m}^{n}}^{n}=z_{t\wedge\tau_{n}^{n}\wedge\tau_{m}^{n}}^{m},\ \forall t\in[0,T),\ \mathbb{P}\mbox{-a.s.}, \end{equation} letting $t\uparrow T$, by continuity we have \begin{equation} z_{\tau_{n}^{n}\wedge\tau_{m}^{n}}^{n}=z_{\tau_{n}^{n}\wedge\tau_{m}^{n}}^{m},\ \mathbb{P}\mbox{-a.s.} \end{equation} If $\tau_{n}^{n}<\tau_{m}^{n}$, then $z_{\tau_{n}^{n}}^{n}=z_{\tau_{n}^{n}}^{m}\in \partial B_{n}^{H_{0}}(0)$, which contradicts the definition of $\tau_{m}^{n}$. Thus $\tau_{n}^{n}\geq\tau_{m}^{n}$; similarly, $\tau_{n}^{n}\leq\tau_{m}^{n}$, so $\tau_{n}^{n}=\tau_{m}^{n}$, $\mathbb{P}$-a.s. and $z_{\tau_{n}^{n}}^{n}=z_{\tau_{m}^{n}}^{m}$. Therefore, we can define \begin{equation} z_{t}=z_{t}^{n},\ t<\tau_{n}^{n};\ \tau=\sup_{n}{\tau_{n}^{n}}, \end{equation} $(z,\tau)$ is a strong solution of equation (\ref{equ3}). By the same method, we can prove the uniqueness easily. \qed
\\\emph{Proof of Theorem \ref{mainthm}}. Let \begin{align*} \mathrm{d} \tilde{W}_{s} &= \mathrm{d} W_{s} + \frac{1}{\xi_{s}}B(s,x_{s})^{-1}z_{s}\mathrm{d} s,\ s<T\wedge\tau\\
R_{s}&=\exp{\left[-\int_{0}^{s}\xi_{t}^{-1}\<B(t,x_{t})^{-1}z_{t},\mathrm{d} W_{t}\rangle-\frac{1}{2}\int_{0}^{s}{\frac{||B(t,x_{t})^{-1}z_{t}||^{2}}{\xi_{t}}\mathrm{d} t}\right]},\ s<T\wedge\tau,\\
\tau_{n}&=\inf\{t\in[0,T)\ |\ ||z_{t}||_{H_{0}}>n\},\ \mathbb{Q}:= R_{T\wedge\tau}\mathbb{P}, \end{align*} and write the equation of $z$ in terms of $\tilde{W}$: \begin{equation}\label{equ5} \mathrm{d} z_{t} = -A_{0,\epsilon}z_{t}\mathrm{d} t+F(t,z_{t},0)\mathrm{d} t + \hat{B}(t,z_{t})\mathrm{d} \tilde{W}_{t}- \frac{1}{\xi_{t}}z_{t}\mathrm{d} t. \end{equation} By It\^{o}'s formula and (H\ref{itemH4}), for $s\in[0,T)$ and for $t<\tau_{n}\wedge s$, \begin{equation} \begin{split}
\mathrm{d} ||z_{t}||_{H_{0}}^{2} = &-2||z_{t}||_{V}^{2}\mathrm{d} t + 2 _{V^{*}}\<F(t,z_{t},0),z_{t}\rangle_{V}\mathrm{d} t-\frac{2||z_{t}||_{H_{0}}^{2}}{\xi_{t}}\mathrm{d} t\\
&+||\hat{B}(t,z_{t})||_{L_{HS}(H,H_{0})}^{2}\mathrm{d} t+2\langle\hat{B}(t,z_{t})\mathrm{d} \tilde{W},z_{t}\rangle_{H_{0}}\\
\leq&\ 2 \<F(t,z_{t},0),B_{0}^{-2}z_{t}\rangle\mathrm{d} t + ||\hat{B}(t,z_{t})||_{L_{HS}(H,H_{0})}^{2}\mathrm{d} t \\
&-\frac{2||z_{t}||_{H_{0}}^{2}}{\xi_{t}}\mathrm{d} t +2\langle\hat{B}(t,z_{t})\mathrm{d} \tilde{W},z_{t}\rangle_{H_{0}} \\
\leq& -\frac{2||z_{t}||_{H_{0}}^{2}}{\xi_{t}}\mathrm{d} t +2\langle\hat{B}(t,z_{t})\mathrm{d} \tilde{W},z_{t}\rangle_{H_{0}}+ K_{2}||z_{t}||_{H_{0}}^{2}\mathrm{d} t, \end{split} \end{equation} and \begin{equation} \begin{split}
\mathrm{d} \frac{||z_{t}||_{H_{0}}^{2}}{\xi_{t}} \leq & -\frac{2||z_{t}||_{H_{0}}^{2}}{\xi_{t}^{2}}\mathrm{d} t + \frac{K_{2}}{\xi_{t}}||z_{t}||_{H_{0}}^{2}\mathrm{d} t - \frac{\xi_{t}^{'}}{\xi_{t}^{2}}||z_{t}||_{H_{0}}^{2}\mathrm{d} t+\frac{2}{\xi_{t}}\langle\hat{B}(t,z_{t})\mathrm{d} \tilde{W},z_{t}\rangle_{H_{0}}\\
=&\ \frac{2-K_{2}\xi_{t}+\xi_{t}^{'}}{\xi_{t}^{2}}||z_{t}||_{H_{0}}^{2}\mathrm{d} t+ \frac{2}{\xi_{t}}\langle\hat{B}(t,z_{t})\mathrm{d} \tilde{W},z_{t}\rangle_{H_{0}}\\
=&\ \frac{\theta}{\xi_{t}^{2}}||z_{t}||_{H_{0}}^{2}\mathrm{d} t+ \frac{2}{\xi_{t}}\langle\hat{B}(t,z_{t})\mathrm{d} \tilde{W},z_{t}\rangle_{H_{0}}, \end{split} \end{equation} by Girsanov theorem, $(\tilde{W})_{t<s\wedge\tau_{n}}$ is a Wiener process under the probability $\mathbb{Q}_{s,n} := R_{s\wedge\tau_{n}}\mathbb{P}$, and \begin{equation}\label{inequ2}
\int_{0}^{s\wedge\tau_{n}}{\frac{||z_{t}||^{2}}{\xi_{t}^{2}}\mathrm{d} t}\leq \frac{||z_{0}||_{H_{0}}^{2}}{\theta\xi_{0}}+\int_{0}^{s\wedge\tau_{n}}{\frac{2}{\theta\xi_{t}}\langle\hat{B}(t,z_{t})\mathrm{d} \tilde{W},z_{t}\rangle_{H_{0}}}, \end{equation} then \begin{equation}
\mathbb{E}_{\mathbb{Q}_{s,n}}{\int_{0}^{s\wedge\tau_{n}}{\frac{||z_{t}||^{2}}{\xi_{t}^{2}}\mathrm{d} t}}\leq \frac{||z_{0}||_{H_{0}}^{2}}{\theta\xi_{0}}, \end{equation} Since, by (H\ref{itemH3}) \begin{equation} \begin{split}
\log{R_{u}}&= -\int_{0}^{u}\xi_{t}^{-1}\<B(t,x_{t})^{-1}z_{t},\mathrm{d} \tilde{W}_{t}\rangle+\frac{1}{2}\int_{0}^{u}{\frac{||B(t,x_{t})^{-1}z_{t}||^{2}}{\xi_{t}}\mathrm{d} t}\\
&\leq -\int_{0}^{u}\xi_{t}^{-1}\<B(t,x_{t})^{-1}z_{t},\mathrm{d} \tilde{W}_{t}\rangle+\frac{1}{2\rho(T)^{2}}\int_{0}^{u}{\frac{||z_{t}||_{H_{0}}^{2}}{\xi_{t}}\mathrm{d} t},\ u\leq s\wedge\tau_{n}, \end{split} \end{equation}
\begin{equation}
\mathbb{E}{R_{s\wedge\tau_{n}}}\log{R_{s\wedge\tau_{n}}}\leq \frac{||z_{0}||_{H_{0}}^{2}}{2\theta\xi_{0}\rho(T)^{2}},\ \forall s\in[0,T),n\geq1. \end{equation}
As in \cite{Wang2011}, we can prove that $\{R_{s\wedge\tau}\ |\ s\in[0,T]\}$ is a martingale. Since \begin{equation}
\mathbb{E}_{\mathbb{Q}}1_{[\tau_{n}\leq t]}\frac{||z_{t\wedge\tau_{n}}||_{H_{0}}^{2}}{\xi_{t\wedge\tau_{n}}}
\leq\mathbb{E}_{\mathbb{Q}}\frac{||z_{t\wedge\tau_{n}}||_{H_{0}}^{2}}{{\xi_{t\wedge\tau_{n}}}}
\leq\frac{||z_{0}||^{2}_{H_{0}}}{\xi_{0}}, \end{equation} and \begin{equation}
\mathbb{E}_{\mathbb{Q}}1_{[\tau_{n}\leq t]}\frac{||z_{t\wedge\tau_{n}}||_{H_{0}}^{2}}{\xi_{t\wedge\tau_{n}}}\geq\frac{n\mathbb{Q}(\tau_{n}\leq t)}{\xi_{0}} \end{equation} letting $n$ go to infinity, we have $\mathbb{Q}(\tau_{n}\leq t)\rightarrow 0$, $\forall t\in [0,T)$, and then $\mathbb{Q}(\tau=T)=1$. Now, since $\tau = T$, $\mathbb{Q}$-a.s., equation (\ref{equ5}) can be solved up to time $T$. Let \begin{equation}
\zeta=\inf\{t\in[0,T]\ |\ ||z_{t}||_{H_{0}}=0\}, \end{equation} we shall prove that $\zeta \leq T$, here we assume $\inf\emptyset = +\infty$. Otherwise, there exists a set $\Omega_{0}$, such that $\mathbb{P}(\Omega_{0})>0$, and for any $\omega \in \Omega_{0}$, $\zeta(\omega)>T$, then by the continuity of path, we have \begin{equation}
\inf_{t\in[0,T]}{||z_{t}(\omega)||_{H_{0}}}>0, \end{equation} so \begin{equation}
\int_{0}^{T}{\frac{||z_{t}||^{2}_{H_{0}}}{\xi_{t}^{2}}\mathrm{d} t}=+\infty, \end{equation} but \begin{equation}
\mathbb{E}_{\mathbb{Q}}\int_{0}^{T}{\frac{||z_{t}||^{2}_{H_{0}}}{\xi_{t}^{2}}\mathrm{d} t}\leq \frac{||z_{0}||^{2}_{H_{0}}}{2\rho(T)^{2}\theta\xi_{0}}<+\infty, \end{equation} hence, $\zeta\leq T$, $\mathbb{Q}$-a.s., by the uniqueness of solution of equation (\ref{equ5}), we have \begin{equation} z_{t}\equiv 0,\ t>\zeta,\ \mathbb{Q}\mbox{-a.s.} \end{equation} Thus, $z_{T}=0$, $\mathbb{Q}$-a.s.
Next, we shall construct the coupling. Since under the probability space $(\Omega,\mathscr{F},R_{\tau\wedge T}\mathbb{P})$, $(\tilde{W}_{t})_{t\in[0,T]}$ is a Wiener process, let $y$ be the unique mild solution of the following equation \begin{equation} \mathrm{d} y_{t}=-A_{\epsilon}y_{t}\mathrm{d} t + F(t,y_{t})\mathrm{d} t + B(t,y_{t})\mathrm{d} \tilde{W}_{t},\ y_{0}=y, \end{equation} while $x_{t}$ is the unique solution of the following equation \begin{equation} \mathrm{d} x_{t}=-A_{\epsilon}x_{t}\mathrm{d} t + F(t,x_{t})\mathrm{d} t -\frac{z_{t}}{\xi_{t}}\mathrm{d} t+ B(t,x_{t})\mathrm{d} \tilde{W}_{t},\ x_{0}=x. \end{equation} The process $x_{t}-y_{t}$ is the mild solution of the following equation \begin{equation}\label{equ6} \mathrm{d} u_{t}=-A_{\epsilon}u_{t}\mathrm{d} t+F(t,u_{t},0)\mathrm{d} t+\hat{B}(t,u_{t})\mathrm{d} \tilde{W}_{t} -\frac{z_{t}}{\xi_{t}}\mathrm{d} t, \end{equation} note that $z_{t}$ is a solution of equation \begin{equation} \mathrm{d} z_{t}=-A_{0,\epsilon}z_{t}\mathrm{d} t+F(t,z_{t},0)\mathrm{d} t+\hat{B}(t,z_{t})\mathrm{d} \tilde{W}_{t} -\frac{z_{t}}{\xi_{t}}\mathrm{d} t. \end{equation} Similar to equation (1.41), one can prove that equation (\ref{equ6}) has a strong solution in $H_{0}$; since $V^{*} \supset H\supset H_{0}$ and $A_{0,\epsilon}$ is the restriction of $A_{\epsilon}$ to $H_{0}$, by the relationship between variational solutions and mild solutions and the pathwise uniqueness, we conclude $z_{t}=x_{t}-y_{t},\ \forall t\in[0,T]$, $\mathbb{Q}$-a.s.
By the method used in \cite{Wang2011}, we have log-Harnack inequality for equation (\ref{equ2}) : \begin{equation} \begin{split} P_{T}^{\epsilon}\log{f}(y)&=\mathbb{E}_{\mathbb{Q}}\log{f(y_{T}^{\epsilon})}=\mathbb{E}{R_{T\wedge\tau}\log{f(x_{T}^{\epsilon})}}\leq\mathbb{E}{R_{T\wedge\tau}\log{R_{T\wedge\tau}}}+\log{\mathbb{E}{f(x_{T}^{\epsilon})}}\\ &\leq\log{P_{T}^{\epsilon}f(x)}
+\frac{||x-y||_{H_{0}}}{2\rho(T)^{2}\theta\xi_{0}}=\log{P_{T}^{\epsilon}f(x)}+\frac{K_{2}||x-y||_{H_{0}}}{2\rho(T)^{2}\theta(2-\theta)(1-e^{K_{2}T})}\ , \end{split} \end{equation} then by lemma 1.2, let $\epsilon\rightarrow 0$, and choose $\theta = 1$, for $f\in\mathscr{B}_{b}^{+}(H)$ and $f\geq 1$, \begin{equation}
P_{T}\log{f}(y)\leq\log{P_{T}f(x)}+\frac{K_{2}||x-y||_{H_{0}}}{2\rho(T)^{2}(1-e^{K_{2}T})}. \end{equation} If (H\ref{itemH5}) holds in addition, by inequality (\ref{inequ2}), we have \begin{equation} \begin{split}
&\mathbb{E}_{s,n}{\exp{\left[h\int_{0}^{s\wedge\tau_{n}}{\frac{||z_{t}||^{2}_{H_{0}}}{\xi_{t}^{2}}\mathrm{d} t}\right]}}\\
\leq& \exp{\left[\frac{h||x-y||_{H_{0}}^{2}}{\theta\xi_{0}}\right]} \mathbb{E}_{s,n}{\exp{\left[\frac{2h}{\theta}\int_{0}^{s\wedge\tau_{n}}{\frac{1}{\xi_{t}}\langle\hat{B}(t,z_{t})\mathrm{d} \tilde{W},z_{t}\rangle}\right]}}\\
\leq&\exp{\left[\frac{h||x-y||_{H_{0}}^{2}}{\theta\xi_{0}}\right]} \mathbb{E}_{s,n}{\left(\exp{\left[\frac{8h^{2}K_{3}^{2}}{\theta^{2}}
\int_{0}^{s\wedge\tau_{n}}{\frac{||z_{t}||^{2}_{H_{0}}}{\xi_{t}^{2}}\mathrm{d} t}\right]}\right)^{\frac{1}{2}}}, \end{split} \end{equation} for $h=\frac{\theta^{2}}{8K_{3}^{2}}$, and \begin{equation}
\mathbb{E}_{s,n}{\exp{\left[\frac{\theta^{2}}{8K_{3}^{2}}\int_{0}^{s\wedge\tau_{n}}{\frac{||z_{t}||^{2}_{H_{0}}}{\xi_{t}^{2}}\mathrm{d} t}\right]}}
\leq \exp{\left[\frac{\theta K_{2}||x-y||_{H_{0}}^{2}}{4K_{3}^{2}(2-\theta)(1-e^{-K_{2}T})}\right]}, \end{equation} Similar to \cite{Wang2011}, we get that \begin{equation}
\sup_{s\in[0,T]}{\mathbb{E}{R_{s\wedge\tau}^{1+r}}\leq \exp{\left[\frac{\theta K_{2}(2K_{3}+\theta\rho(T))||x-y||_{H_{0}}}{8K_{3}^{2}(2-\theta)(K_{3}+\theta\rho(T))(1-e^{-K_{2}T})}\right]}} \end{equation} and for $p>(1+K_{3})^{2}$, $\delta_{p,T} = K_{3}\vee \frac{\rho(T)}{2}(\sqrt{p}-1)$,$f\in\mathscr{B}_{b}^{+}(H)$, choose $\theta = \frac{2K_{3}\rho(T)}{\sqrt{p}-1}$, \begin{equation} (P_{T}^{\epsilon}f(y))^{p}\leq (P_{T}^{\epsilon}f^{p}(x))
\exp{\left[\frac{K_{2}(T)\sqrt{p}(\sqrt{p}-1)||x-y||_{H_{0}}^{2}}{4\delta_{p,T}[(\sqrt{p}-1)\rho(T)-\delta_{p,T}](1-e^{K_{2}T})}\right]}, \end{equation} by lemma 1.2, let $\epsilon\downarrow 0$, we have \begin{equation} (P_{T}f(y))^{p}\leq (P_{T}f^{p}(x))
\exp{\left[\frac{K_{2}(T)\sqrt{p}(\sqrt{p}-1)||x-y||_{H_{0}}^{2}}{4\delta_{p,T}[(\sqrt{p}-1)\rho(T)-\delta_{p,T}](1-e^{K_{2}T})}\right]}, \end{equation} for $x,y\in H$,$x-y\in\mathscr{D}(B_{0}^{-1})$.
\qed
\section{Application} In this section, we give some simple applications of Theorem \ref{mainthm}. \begin{corollary} Assume that $F$, $B$ are deterministic and independent of $t$ and (H\ref{itemH1}) to (H\ref{itemH5}) hold. If $\lambda_{0}>0$, $\lambda_{0}>K_{1}^{2}+2K_{1}$ and $B(0)\in L_{HS}(H)$, then \begin{enumerate}[(1)] \item $P_{t}$ has a unique invariant measure $\mu$, which has full support on $H$, $\mu(V)=1$.\label{itemc1}
\item If $\sup_{x}||B(x)||<\infty$, then $\mu(\operatorname{e}^{\epsilon_{0}||\cdot||_{H}^{2}})<\infty$ for some $\epsilon_{0}>0$.\label{itemc2}
\item If there exists $q>0$ such that $\inf_{n}b_{n}^{2q}\lambda_{n}^{q-1}>0$, then $\mu$ has full support on $H_{0}$.\label{itemc3} \end{enumerate} \end{corollary}
\noindent\emph{Proof.} Let $(V,\ ||\cdot||_{V})=(\mathscr{D}(A^{\frac{1}{2}}),\ ||A^{\frac{1}{2}}\cdot||)$. Since $\lambda_{0}>0$ and $B(0)\in L_{HS}(H)$, by (H\ref{itemH1}), equation (\ref{equ1}) has a strong solution and $P_{t}$ is a Feller semigroup. By It\^{o}'s formula and $\lambda_{0}>K^{2}_{1}+2K_{1}$, there exists a constant $c>0$ such that \begin{equation*}
\mathrm{d} ||x_{t}||^{2} \leq \left( c -2(1-\frac{K^{2}_{1}+2K_{1}}{\lambda_{0}})||x_{t}||^{2}_{V} + 2||F(0)||\cdot||x_{t}||\right)\mathrm{d} t + 2\<B(x_{t})\mathrm{d} W_{t},x_{t}\rangle \end{equation*} and \begin{equation*} \begin{split}
\mathrm{d} \operatorname{e}^{\epsilon ||x_{t}||^{2}}\leq &\ \epsilon\operatorname{e}^{\epsilon ||x_{t}||^{2}}\left(c-2(1-\frac{K^{2}_{1}+2K_{1}}{\lambda_{0}})||x_{t}||^{2}_{V}
+\frac{\epsilon^{2}}{4}||B^{*}(x_{t})x_{t}||^{2}+2||F(0)||\cdot||x_{t}||\right)\mathrm{d} t \\
&\ + 2\epsilon\operatorname{e}^{\epsilon ||x_{t}||^{2}}\<B(x_{t})\mathrm{d} W_{t},x_{t}\rangle, \end{split} \end{equation*}
for sufficiently small $\epsilon$, by H\"{o}lder's inequality, noting that $||\cdot||_{V}$ is a compact function on $H$, and by the standard argument in Theorem 1.2 in \cite{Wang2007}, one can prove (\ref{itemc1}) and (\ref{itemc2}). For (\ref{itemc3}), $\inf_{n}b_{n}^{2q}\lambda_{n}^{q-1}>0$ implies that there exists a constant $c(m)>0$ such that \begin{equation}
||\cdot||^{2}_{H_{0}} \leq c(m)||\cdot||^{2}+ \frac{1}{m}||\cdot||_{V}^{2},\ \forall m\geq1, \end{equation} by Ito's formula, one can get following inequality, \begin{equation}
\mathrm{d} ||x_{t}(x)-x||^{2} \leq -||x_{t}(x)-x||_{V}^{2}\mathrm{d} t + (c_{1}+c_{2}||x_{t}(x)||^{2})\mathrm{d} t + 2\<B(x_{t})\mathrm{d} W_{t},x_{t}-x\rangle \end{equation} here we denote $x_{t}(x)$ for the process starts from $x$, $c_{1}$, $c_{2}$ are constants depend on $x$. Using Harnack inequality (\ref{Harineq}), (\ref{itemc3}) can be proved following the line of \cite{WangX2011}. \qed \begin{corollary} Assume (H\ref{itemH1}) to (H\ref{itemH5}) hold, $F$ and $B$ are determined and time independent, then for any $t>0$, $P_{t}$ is $H_{0}$-strong Feller. Let $\mu$ be the $P_{t}$-subinvariant probability with full support on $H_{0}$ as in \cite{RoWang2010}, then the transition density $p_{t}(x,y)$ w.r.t. $\mu$ satisfies \begin{equation}
||p_{t}(x,\cdot)||_{L^{p}(\mu)}\leq\ \left\{ \int_{H_{0}}{\exp{\left[-\frac{K_{2}\sqrt{q}(\sqrt{q}-1)||x-y||_{H_{0}}^{2}} {4\delta_{q}[(\sqrt{q}-1)\rho-\delta_{q}](1-e^{K_{2}t})}\right]}\mu(dy)}\right\}^{-\frac{1}{q}} \end{equation} for all $1<p<\frac{(K_{3}+\rho)^{2}}{(K_{3}+\rho)^{2}-1}$, here $q=\frac{p}{p-1}$. \end{corollary} \noindent\emph{Proof.} It follows the proof of \cite{Wang2007,RoWang2010,WangX2011}.
\textbf{Acknowledgement} ~The author would like to thank Professor Zdzislaw Brzezniak for providing him with the article \cite{Brzez97}, and Professor Feng-Yu Wang for his useful comments.
\paragraph{\Large{Appendix}} \paragraph{A. Proof of Remark \ref{remark1}}\
\\ \emph{Proof of (1):} Since $\bigcup_{n} H_{n}$ is a core of $B_{0}^{-2}$, for any $x\in \mathscr{D}(B_{0}^{-2})$, choose $\{x_{n}\}$ such that $x_{n}\rightarrow x$ and $B_{0}^{-2}x_{n}\rightarrow B_{0}^{-2}x$, hence $B_{0}^{-1}x_{n}\rightarrow x$, as $n \rightarrow +\infty$. Similarly, choose a sequence $\{y_{n}\}$ with the same property. Therefore \begin{align*}
&||B_{0}^{-1}[(B(t,x_{n})-B(t,y_{n}))-(B(t,x_{m})-B(t,y_{m})]||_{HS}^{2}\\
\leq &2K_{2}(||B_{0}^{-1}(x_{n}-x_{m})||^{2}
+||B_{0}^{-1}(y_{n}-y_{m})||^{2}) -4\<F(t,x_{n})-F(t,x_{m}),B^{-2}_{0}(x_{n}-x_{m})\rangle\\ &-4\<F(t,y_{n})-F(t,y_{m}),B^{-2}_{0}(y_{n}-y_{m})\rangle, \end{align*} by the continuity of $F$, we have that $\{B(t,x_{n})-B(t,y_{n})\}$ forms a Cauchy sequence in $L_{HS}(H,H_{0})$. Noting that $B(t,x_{n})-B(t,y_{n})$ converges to $B(t,x)-B(t,y)$ in $L_{HS}(H)$ and that $B_{0}^{-1}$ is closed, we have $B(t,x)-B(t,y)\in L_{HS}(H,H_{0})$, \begin{equation*}
2\<F(t,x)-F(t,y),B_{0}^{-2}(x-y)\rangle+||B_{0}^{-1}(B(t,x)-B(t,y))||_{HS}^{2}\leq K_{2}||B_{0}^{-1}(x-y)||^{2}. \end{equation*}
\\\emph{Proof of (2):} we assume $\rho(t)=1$, by definition, it's clear that $B_{0}$ is one to one and has dense range. $$B(t,x)B(t,x)^{*}\geq B_{0}^{2} \Leftrightarrow ||B(t,x)^{*}y||\geq||B_{0}y||, \forall y\in H, $$ implies that $\mathrm{Ran}B(t,x) \supset \mathrm{Ran}B_{0}\mbox{ by Proposition B.1 in \cite{DPZ1992}, and }$ $$||z||\geq||B_{0}(B(t,x)^{*})^{-1}z||, \forall z \in \mathrm{Ran}(B(t,x)^{*}).$$ Since for any $z\in \mathrm{Ran}(B(t,x)^{*})$, $y \in \mathrm{Ran}(B(t,x))$, we have \begin{equation} \<B(t,x)^{-1}y,z\rangle=\<B(t,x)B(t,x)^{-1}y,\ (B(t,x)^{*})^{-1}z\rangle=\<y,(B(t,x)^{*})^{-1}z\rangle, \end{equation} then \begin{equation} z\in\mathscr{D}((B(t,x)^{-1})^{*}),\ (B(t,x)^{-1})^{*}z=(B(t,x)^{*})^{-1}z. \end{equation} On the other hand, for any $z\in \mathscr{D}((B(t,x)^{-1})^{*})$, there exists $z^{*}$ such that \begin{equation} \<B(t,x)^{-1}y,z\rangle=\<y,z^{*}\rangle,\ \forall y \in \mathscr{D}((B(t,x)^{-1})), \end{equation}
let $u=B(t,x)^{-1}y$, then $\<u,z\rangle=\<B(t,x)u,z^{*}\rangle$, we have $z=B(t,x)^{*}z^{*}$ and $$(B(t,x)^{*})^{-1}z=z^{*}=(B(t,x)^{-1})^{*}z,$$ hence $\mathscr{D}((B(t,x)^{-1})^{*})=\mathscr{D}((B(t,x)^{*})^{-1})$. Therefore, $||z||\geq ||B_{0}(B(t,x)^{-1})^{*}z||$, for all $z\in\mathrm{Ran} B(t,x)^{*}$. Since $\mathrm{Ran}(B(t,x)^{*})$ is dense in $H$, $B_{0}(B(t,x)^{-1})^{*}$ can be extended to be a bounded operator on $H$, and for all $z\in H,\ y\in H$, there is $\{z_{n}\}_{n=1}^{+\infty}$, $\lim_{n}z_{n}=z$, such that $\lim_{n}B_{0}(B(t,x)^{-1})^{*}z_{n}=B_{0}(B(t,x)^{-1})^{*}z$, then \begin{equation} \begin{split} &\<B_{0}(B(t,x)^{-1})^{*}z,y\rangle=\lim_{n}\<B_{0}(B(t,x)^{-1})^{*}z_{n},y\rangle\\ =&\lim_{n}\<z_{n},(B(t,x)^{-1})B_{0}y\rangle=\<z,(B(t,x)^{-1})B_{0}y\rangle, \end{split} \end{equation}
hence $||(B(t,x)^{-1})B_{0}y||\leq ||y||$, for all $y\in H$, let $z=B_{0}y$, then $||(B(t,x)^{-1})z||\leq ||B_{0}^{-1}z||$, for all $z\in \mathscr{D}(B_{0}^{-1})$. By Proposition B.1 in \cite{DPZ1992}, and the proof above, the converse is easy.
\qed
\paragraph{B. For Lemma \ref{lemma_strongsolution}}\
\\(1) \emph{For local monotonicity}. For any $v_{1},v_{2}\in V$, \begin{equation}
-2 _{V^{*}}\<A_{0,\epsilon}(v_{1}-v_{2}),v_{1}-v_{2}\rangle_{V}= -2||\sqrt{A_{0,\epsilon}}(v_{1}-v_{2})||_{H_{0}}^{2}=-2||v_{1}-v_{2}||_{V}^{2}, \end{equation} \begin{equation} \begin{split} &2_{V^{*}}\<F(t,v_{1},v_{2}),v_{1}-v_{2}\rangle_{V}
+||\hat{B}(t,v_{1})-\hat{B}(t,v_{2})||_{L_{HS}(H,H_{0})}^{2}\\ =&2\<F(t,v_{1},v_{2}),B_{0}^{-2}(v_{1}-v_{2})\rangle
+||B_{0}^{-1}(\hat{B}(t,v_{1})-\hat{B}(t,v_{2}))||_{HS}^{2}\\
\leq & K_{2}||v_{1}-v_{2}||_{H_{0}}^{2} \end{split} \end{equation} and \begin{equation} \begin{split} &\frac{1}{\xi_{t}}\ _{V^{*}}\langle\hat{B}(t,v_{1})G_{n}(t,v_{1})-\hat{B}(t,v_{2})G_{n}(t,v_{2}),v_{1}-v_{2}\rangle_{V}\\ =&\frac{1}{\xi_{t}}\ \langle(\hat{B}(t,v_{1})-\hat{B}(t,v_{2}))G_{n}(t,v_{1})-\hat{B}(t,v_{2})G_{n}(t,v_{1},v_{2}),B_{0}^{-2}(v_{1}-v_{2})\rangle\\
\leq& \frac{1}{\xi_{t}}||B_{0}^{-1}(\hat{B}(t,v_{1})-\hat{B}(t,v_{2}))||\cdot||G_{n}(t,v_{1})||\cdot||v_{1}-v_{2}||_{H_{0}}\\
&+\frac{1}{\xi_{t}}||B_{0}^{-1}\hat{B}(t,v_{2})||\cdot||G_{n}(t,v_{1},v_{2})||\cdot||v_{1}-v_{2}||_{H_{0}}, \end{split} \end{equation} note that, by (H\ref{itemH1}), \begin{equation*} \begin{split}
&||B_{0}^{-1}(\hat{B}(t,v_{1})-\hat{B}(t,v_{2}))||_{HS}^{2}\leq K_{2}||v_{1}-v_{2}||_{H_{0}}^{2}-2\<F(t,v_{1},v_{2}),B_{0}^{-2}(v_{1}-v_{2})\rangle\\
\leq &K_{2}||v_{1}-v_{2}||_{H_{0}}^{2}+
2K_{1}||B_{0}A_{0,\epsilon}^{-\frac{1}{2}}A_{0,\epsilon}^{\frac{1}{2}}(v_{1}-v_{2})||_{H_{0}}
\cdot||B_{0}^{-1}A_{0,\epsilon}^{-\frac{1}{2}}A_{0,\epsilon}^{\frac{1}{2}}(v_{1}-v_{2})||_{H_{0}}\\
\leq &K_{2}||v_{1}-v_{2}||_{H_{0}}^{2}+2K_{1}\left(\sup_{n}\frac{b_{n}}{\sqrt{\lambda_{n}+\epsilon b_{n}^{-2}}}\right)\left(\sup_{n}\frac{1}{b_{n}\sqrt{\lambda_{n}+\epsilon b_{n}^{-2}}}\right)||v_{1}-v_{2}||^{2}_{V}\\
\leq &K_{2}||v_{1}-v_{2}||_{H_{0}}^{2} + \frac{2}{\epsilon}K_{1}||B_{0}||^{2}||v_{1}-v_{2}||_{V}^{2}, \end{split} \end{equation*} hence \begin{equation} \begin{split}
&\frac{1}{\xi_{t}}||B_{0}^{-1}(\hat{B}(t,v_{1})-\hat{B}(t,v_{2}))||\cdot||G_{n}(t,v_{1})||\cdot||v_{1}-v_{2}||_{H_{0}}\\
\leq & \frac{n}{\xi_{t}}(\sqrt{K_{2}}||v_{1}-v_{2}||_{H_{0}}+\sqrt{\frac{2}{\epsilon}K_{1}}||B_{0}||\cdot||v_{1}-v_{2}||_{V})||v_{1}-v_{2}||_{H_{0}}\\
\leq &(\frac{n}{\xi_{t}}\sqrt{K_{2}}+\frac{n^{2}K_{1}||B_{0}||^{2}}{\epsilon \xi_{t}^{2}\delta^{2}})||v_{1}-v_{2}||_{H_{0}}^{2}+\delta^{2}||v_{1}-v_{2}||_{V}^{2}, \end{split} \end{equation} and \begin{equation} \begin{split}
&\frac{1}{\xi_{t}}||B_{0}^{-1}\hat{B}(t,v_{2})||\cdot||G_{n}(t,v_{1},v_{2})||\cdot||v_{1}-v_{2}||_{H_{0}}\\
\leq &\frac{1}{\xi_{t}}(\sqrt{K_{2}}||v_{2}||_{H_{0}}
+\sqrt{\frac{2K_{1}}{\epsilon}}||B_{0}||\cdot||v_{2}||_{V})||v_{1}-v_{2}||^{2}_{H_{0}}, \end{split} \end{equation} therefore, we have \begin{equation*} \begin{split} &2 _{V^{*}}\<A_{n,\epsilon}(t,v_{1})-A_{n,\epsilon}(t,v_{2}),v_{1}-v_{2}\rangle_{V}
+||\hat{B}(t,x_{t}-v_{2})-\hat{B}(t,x_{t}-v_{1})||_{L_{HS}(H,H_{0})}^{2}\\ \leq&\left[K_{2}+\frac{2n\sqrt{K_{2}}-2}{\xi_{t}}
+\frac{n^{2}K_{1}||B_{0}||^{2}}{\epsilon^{2}\xi_{t}^{2}\delta^{2}}
+\frac{2}{\xi_{t}}(\sqrt{K_{2}}||v_{2}||_{H_{0}}^{2}
+\sqrt{\frac{2K_{1}}{\epsilon}}||B_{0}||\cdot||v_{2}||_{V}^{2})\right]\times\\
&\times||v_{1}-v_{2}||_{H_{0}}^{2}-2(1-\delta^{2})||v_{1}-v_{2}||_{V}^{2}. \end{split} \end{equation*} \\(2) \emph{For coercivity:} \begin{equation}
-2 _{V^{*}}\<A_{0,\epsilon}v,v\rangle_{V}=-2||v||_{V}^{2},\ ||B_{0}^{-1}\hat{B}(t,v)||_{HS}^{2} + 2\<F(t,v,0),B_{0}^{-2}v\rangle\leq K_{2}||v||^{2}, \end{equation} \begin{equation} \begin{split} \frac{2}{\xi_{t}}\ _{V^{*}}\langle\hat{B}(t,v)G_{n}(t,v),v\rangle_{V}
\leq &\frac{2}{\xi_{t}}||B_{0}^{-1}\hat{B}(t,v)||\cdot||G_{n}(t,v)||\cdot||v||_{H_{0}}\\
\leq&\frac{2n}{\xi_{t}}(K_{2}||v||^{2}+\frac{2K_{1}}{\epsilon}||B_{0}||^{2}||v||_{V}^{2})^{\frac{1}{2}}||v||_{H_{0}}\\
\leq&\frac{2n}{\xi_{t}}(\sqrt{K_{2}}||v||_{H_{0}}+\sqrt{\frac{2K_{1}}{\epsilon}}||B_{0}||\cdot||v||_{V})||v||_{H_{0}}\\
\leq&(\frac{2n\sqrt{K_{2}}}{\xi_{t}}+\frac{2n^{2}K_{1}||B_{0}||^{2}}{\epsilon \xi_{t}^{2}\delta^{2}})||v||_{H_{0}}^{2}+\delta^{2}||v||_{V}^{2}, \end{split} \end{equation} hence \begin{equation} \begin{split}
&2 _{V^{*}}\<A_{n,\epsilon}(t,v),v\rangle_{V} + ||\hat{B}(t,v)||_{L_{HS}(H,H_{0})}^{2}\\
\leq& -2(1-\delta^{2})||v||_{V}^{2}+(\frac{n\sqrt{K_{2}}-2}{\xi_{t}}+\frac{n^{2}K_{1}}{\epsilon^{2}\xi_{t}^{2}\delta^{2}})||v||_{H_{0}}^{2}. \end{split} \end{equation} \\(3) \emph{For Growth:} \begin{equation}
||A_{0,\epsilon}v||^{2}_{V^{*}}=||v||_{V}^{2},\ ||\frac{1}{\xi_{t}}v||_{V^{*}}=\frac{1}{\xi_{t}}||v||_{V^{*}},\ ||F(t,v,0)||_{V^{*}} \leq \frac{K_{1}}{\sqrt{\epsilon}}||v||, \end{equation} since, by (H\ref{itemH1}), \begin{equation} \begin{split}
| _{V^{*}}\<F(t,v,0),z\rangle_{V}|=|\<F(t,v,0),B_{0}^{-2}z\rangle|
\leq K_{1}||v||\cdot||B_{0}^{-2}z||\leq \frac{K_{1}}{\sqrt{\epsilon}}||v||\cdot||z||_{V}. \end{split} \end{equation} And \begin{equation} \begin{split}
||\frac{1}{\xi_{t}}\hat{B}(t,v)G_{n}(t,v)||_{V^{*}}
&\leq\frac{||B_{0}||}{\sqrt{\epsilon}\xi_{t}}||\hat{B}(t,v)G_{n}(t,v)||_{H_{0}}\\
&\leq\frac{||B_{0}||}{\sqrt{\epsilon}\xi_{t}}||B_{0}^{-1}\hat{B}(t,v)||\cdot||G_{n}(t,v)||_{L(H_{0},H)}\\
&\leq\frac{||B_{0}||}{\sqrt{\epsilon}\xi_{t}}||(\sqrt{K_{2}}||v||_{H_{0}}
+\sqrt{\frac{2K_{1}}{\epsilon}}||v||_{V}||B_{0}||_{H_{0}})||v||_{H_{0}}, \end{split} \end{equation} we have \begin{equation}
||A_{n,\epsilon}(t,v)||_{V^{*}}^{2}\leq \left(\frac{||B_{0}||^{2}}{\epsilon\xi_{t}}K_{2}
+\left(1+\frac{||B_{0}||^{4}K_{1}}{\epsilon\xi_{t}^{2}}\right)||v||_{V}^{2}\right)(1+||v||_{H_{0}}^{4}). \end{equation}
\\(4) \emph{For the Lemma 2.2 of \cite{LiuR10}: } We give new estimates to replace inequalities (2.3) and (2.4) there. For convenience, we use the notations there. In (2.3), we only have to replace $f_{s}\cdot||X_{s}^{(n)}||_{H}^{p-2}$ by $||X_{s}^{(n)}||_{V}||X_{s}^{(n)}||_{H}\cdot||X_{s}^{(n)}||_{H}^{p-2}$ and use the basic inequality \begin{equation} a\cdot b \leq \frac{a^{2}}{2\delta}+ \frac{\delta }{2}b^{2}, \forall \delta>0, \end{equation} and note that in our case $\alpha = 2$. For (2.4), one can use the following estimate, \begin{equation*} \begin{split}
&\mathbb{E}\left(\int_{0}^{\tau_{R}^{n}}{||X_{s}^{(n)}||_{H}^{2p-2}||B(s,X_{s}^{(n)})||_{2}^{2}}\mathrm{d} s\right)^{\frac{1}{2}}\\
\leq&\ \mathbb{E}\left(\int_{0}^{\tau_{R}^{n}}{C||X_{s}^{(n)}
||_{H}^{2p-2}(||X_{s}^{(n)}||_{V}||X_{s}^{(n)}||_{H}+||X_{s}^{(n)}||_{H}^{2})\mathrm{d} s}\right)^{\frac{1}{2}}\\
\leq&\ C(\delta_{1})\mathbb{E}\left(\int_{0}^{\tau_{R}^{n}}{||X_{s}^{(n)}||_{H}^{2p-2}||X_{s}^{(n)}||_{H}^{2}\mathrm{d} s}\right)^{\frac{1}{2}}
+\sqrt{\delta_{1}}\mathbb{E}\left(\int_{0}^{\tau_{R}^{n}}{||X_{s}^{(n)}||_{H}^{2p-2}||X_{s}^{(n)}||_{V}^{2}\mathrm{d} s}\right)^{\frac{1}{2}}\\
\leq&\ \delta_{2}\mathbb{E}\sup_{s\in[0,\tau_{R}^{n}]}{||X_{s}^{(n)}||_{H}^{p}}
+C(\delta_{1},\delta_{2})\mathbb{E}{\int_{0}^{\tau_{R}^{n}}{||X_{s}^{(n)}||_{H}^{p}\mathrm{d} s}}\\
&+\sqrt{\delta_{1}}\mathbb{E}\sup_{s\in[0,\tau_{R}^{n}]}{||X_{s}^{(n)}||_{H}^{\frac{p}{2}}}
\left(\int_{0}^{\tau_{R}^{n}}{||X_{s}^{(n)}||_{H}^{p-2}||X_{s}^{(n)}||_{V}^{2}\mathrm{d} s}\right)^{\frac{1}{2}}\\
\leq&\ (\delta_{2}+\delta_{3})\mathbb{E}\sup_{s\in[0,\tau_{R}^{n}]}{||X_{s}^{(n)}||_{H}^{p}}
+\frac{\delta_{1}}{4\delta_{3}}\mathbb{E}{\int_{0}^{\tau_{R}^{n}}{||X_{s}^{(n)}||_{H}^{p-2}||X_{s}^{(n)}||_{V}^{2}\mathrm{d} s}} +C(\delta_{1},\delta_{2})\mathbb{E}{\int_{0}^{\tau_{R}^{n}}{||X_{s}^{(n)}||_{H}^{p}\mathrm{d} s}}, \end{split} \end{equation*} choose $\delta_{2}$, $\delta_{3}$ small enough and $\delta_{1}$ such that $\frac{\delta_{1}}{4\delta_{3}}$ small enough, using $\alpha = 2$ again, then Gronwall's lemma can be applied as in \cite{LiuR10}.
\end{document} |
\begin{document}
\title{Rank-one transformations, odometers, and finite factors} \author[M. Foreman, S. Gao, A. Hill, C.E. Silva, B. Weiss]{Matthew Foreman, Su Gao, Aaron Hill, Cesar E. Silva, Benjamin Weiss} \address{Mathematics Department, UC Irvine, Irvine, CA 92697, USA} \email{mforeman@math.uci.edu} \address{Department of Mathematics, University of North Texas, 1155 Union Circle \#311430, Denton, TX 76203, USA} \email{sgao@unt.edu}
\address{Proof School, 973 Mission Street, San Francisco, CA, 94103, USA} \email{ahill@proofschool.org} \address{Department of Mathematics and Statistics, Williams College, Williamstown, MA 01267, USA} \email{csilva@williams.edu} \address{Institute of Mathematics, Hebrew University of Jerusalem, Jerusalem, Israel} \email{weiss@math.huji.ac.il}
\date{\today} \subjclass[2010]{Primary 37A05, 37A35} \keywords{rank-one transformation, odometer, factor, isomorphism, totally ergodic}
\begin{abstract} In this paper we give explicit characterizations, based on the cutting and spacer parameters, of (a) which rank-one transformations factor onto a given finite cyclic permutation, (b) which rank-one transformations factor onto a given odometer, and (c) which rank-one transformations are isomorphic to a given odometer. These naturally yield characterizations of (d) which rank-one transformations factor onto some (unspecified) finite cyclic permutation, (d$^\prime$) which rank-one transformations are totally ergodic, (e) which rank-one transformations factor onto some (unspecified) odometer, and (f) which rank-one transformations are isomorphic to some (unspecified) odometer. \end{abstract}
\maketitle \thispagestyle{empty}
\section{Introduction}
The ultimate motivation of the work done in this paper is the isomorphism problem in ergodic theory as formulated by von Neumann in his seminal paper \cite{vN} of 1932. There he asked for an explicit process to determine when two measure-preserving transformations are measure-theoretically isomorphic. Two important theorems in this direction are von Neumann's theorem classifying discrete spectrum transformations by their eigenvalues, and Ornstein's theorem classifying Bernoulli transformations by their entropy. To our knowledge, no other complete isomorphism invariants that classify a class of transformations have been found, though of course notions such as mixing, weak mixing, etc., are invariant under isomorphism. In \cite{FRW}, Foreman, Rudolph, and Weiss showed that the isomorphism relation on the class of all ergodic transformations is complete analytic, in particular not Borel. In some sense, this brings a negative conclusion to the von Neumann program. However, in \cite{FRW} the authors also showed that the isomorphism problem is Borel on the generic class of (finite measure-preserving) rank-one transformations. Thus this provides hope that there should exist some explicit method for determining whether two rank-one transformations are isomorphic. In particular, if one is given a specific rank-one transformation, there should be an explicit description of all rank-one transformations that are isomorphic to it. In this paper we give such explicit descriptions, provided that the given rank-one transformation is an odometer. All the transformations we consider in this paper are invertible finite measure-preserving transformations.
Another reason for considering odometers is the role they played in a question of Ferenczi. In his survey article \cite{Fe}, Ferenczi asked whether every odometer is isomorphic to a symbolic rank-one transformation. This question is connected to whether two common definitions of rank-one---the constructive geometric definition and the constructive symbolic definition---are equivalent. As noted by the referee, in the Introduction to Adams--Ferenczi--Petersen \cite{AFP}, the authors mention how one can use Remark 2.10 in Danilenko \cite{D16} to answer this question in the affirmative, and also show how to construct a symbolic rank-one transformation that is isomorphic to any given odometer. The results in this paper can be thought of as a continuation of work in \cite{AFP}, \cite{D16}. Namely, we explicitly describe {\em all} rank-one transformations that are isomorphic to any given odometer (Theorem~\ref{isomorphictothisodometer}). In addition, we also explicitly describe all rank-one transformations that are isomorphic to some (unspecified) odometer (Theorem~\ref{isomorphictosomeodometer}).
Rank-one transformations are determined by two sequences of parameters, known as the cutting parameter and spacer parameter (see Section~\ref{Pre} for the precise definitions). In this paper we give explicit descriptions, in terms of the cutting parameter and spacer parameter, of when a rank-one transformation factors onto a given finite cyclic transformation, or factors onto an (infinite) odometer, or is isomorphic to a given odometer.
Note that a measure-preserving transformation factors onto a non-trivial finite cyclic transformation if and only if it is not totally ergodic. Thus the results in this paper give an explicit description of when an arbitrary rank-one transformation is totally ergodic. This generalizes a result of \cite{GH}, where Gao and Hill gave an explicit description of which rank-one transformations with bounded cutting parameter are totally ergodic.
The rest of the paper is organized as follows. In Section~\ref{Pre} we recall the constructive geometric definition and the constructive symbolic definition of rank-one transformations. We also explicitly define odometers and finite cyclic transformations. In Section~\ref{Fin} we give an explicit description of all rank-one transformations that factor onto a given finite cyclic transformation, as well as a description of rank-one transformations that admit a finite factor. In Section~\ref{Odo} we describe all rank-one transformations that factor onto a given odometer. As a corollary, we get a description of all rank-one transformations that factor onto some odometer. Finally, in Section~\ref{Iso} we describe all rank-one transformations that are isomorphic to a given odometer. Again, this gives rise to a description of all rank-one transformations that are isomorphic to some odometer.
\vskip 12pt {\em Acknowledgment.} The research in this paper was done at the AIM SQuaRE titled {\it The isomorphism problem for rank-one transformations}. The authors would like to acknowledge the American Institute of Mathematics for the support on this research. M.F. acknowledges the US NSF grant DMS-1700143 for support for this research. S.G. acknowledges the US NSF grants DMS-1201290 and DMS-1800323 for the support of his research. Since August 2019, C.S. has been serving as a Program Director in the Division of Mathematical Sciences at the National Science Foundation (NSF), USA, and as a component of this job, he received support from NSF for research, which included work on this paper. Any opinions, findings, and conclusions or recommendations expressed in this material are those of the authors and do not necessarily reflect the views of the National Science Foundation. We would like to thank the referee for a careful reading and suggestions that shortened our proofs.
\section{Preliminaries}\label{Pre}
\subsection{Measure-preserving transformations} We will be concerned with Lebesgue spaces, which we shall denote by $(X,\mu)$ or $(Y,\nu)$, and typically not mention the $\sigma$-algebra. We shall assume that the measure of the space is 1 and in most cases, and unless we explicitly specify to the contrary, we will assume our measures to be nonatomic and call the spaces standard Lebesgue spaces. A map $\phi:(X,\mu)\to (Y,\nu)$ is {\it measure-preserving} if for all measurable sets $A$, $\phi^{-1}(A)$ is measurable and $\mu(\phi^{-1}(A))=\nu(A)$. A {\it transformation} $T:(X,\mu)\to (X,\mu)$ is a measure-preserving map that is invertible on a set of full measure and whose inverse is measure-preserving. We will call $(X,\mu,T)$ a measure-preserving system and, by abuse of notation, also a measure-preserving transformation.
If $(X, \mu, T)$ and $(Y, \nu, S)$ are measure-preserving transformations, then a {\em factor} map from $T$ to $S$ is a measure-preserving map $\phi: (X, \mu) \to (Y, \nu)$ such that for $\mu$-almost every $x \in X$, $\phi \circ T (x) = S \circ \phi (x)$. We say that $T$ {\em factors onto} $S$ if there exists a factor map $\phi$ from $(X,\mu,T)$ onto $(Y,\nu,S)$. If $(X, \mu, T)$ and $(Y, \nu, S)$ are measure-preserving transformations, then an {\em isomorphism} between $T$ and $S$ is a factor map $\phi$ from $(X, \mu,T)$ to $(Y, \nu,S)$
that is invertible a.e. We note here that neither factor maps nor isomorphisms need to be defined on the entire underlying space $(X, \mu)$, only on a subset of $X$ of full measure, and that two measure isomorphisms are considered the same if they agree on a set of full measure.
\subsection{Rank-one transformations} The constructive geometric definition of a rank-one transformation is given below (see e.g., \cite{Fe}). It describes a recursive cutting and stacking process that produces infinitely many Rokhlin towers (or columns) to approximate the transformation. \begin{definition} A measure-preserving transformation $T$ on a standard Lebesgue space $(X, \mu)$ is {\it rank-one} if there exist sequences of positive integers $r_n > 1$, for $n\in\N=\{0, 1, 2, \dots\}$, and nonnegative integers $s_{n,i}$, for $n\in\N$ and $0 < i \leq r_n$, such that, if $h_n$ is defined by $$ h_0 = 1; h_{n+1} = r_nh_n +\sum_{0<i\leq r_n}s_{n,i}, $$ then \begin{equation}\label{r1} \sum^{+\infty}_{n=0} \frac{h_{n+1}-r_nh_n}{h_{n+1}}< +\infty; \end{equation} and there are subsets of $X$, denoted by $B_n$ for $n\in\N$, by $B_{n,i}$ for $n\in \N$ and $0<i\leq r_n$, and by $C_{n,i,j}$ for $n\in\N$, $0<i\leq r_n$ and $0<j\leq s_{n,i}$ (if $s_{n,i}= 0$ then there are no $C_{n,i,j}$), such that for all $n\in\N$: \begin{itemize} \item $\{B_{n,i}\,:\, 0 < i \leq r_n\}$ is a partition of $B_n$, \item the $T^k(B_n)$, $0\leq k < h_n$, are disjoint, \item $T^{h_n}(B_{n,i}) = C_{n,i,1}$ if $s_{n,i} \neq 0$ and $i \leq r_n$, \item $T^{h_n}(B_{n,i}) = B_{n,i+1}$ if $s_{n,i} = 0$ and $i < r_n$, \item $T(C_{n,i,j}) = C_{n,i,j+1}$ if $j < s_{n,i}$, \item $T(C_{n,i,s_{n,i}}) = B_{n,i+1}$ if $i < r_n$, \item $B_{n+1} = B_{n,1}$, \end{itemize} and the collection $\bigcup_{n=0}^\infty\{B_n, T(B_n), \dots, T^{h_n-1}(B_n)\}$ is dense in the $\sigma$-algebra of all $\mu$-measurable subsets of $X$. \end{definition}
Assumption (\ref{r1}) of this definition is equivalent to the finiteness of the measure $\mu$. In this definition the sequence $(r_n)$ is called the {\em cutting parameter}, the sets $C_{n,i,j}$ are called the {\em spacers}, and the doubly-indexed sequence $(s_{n,i})$ is called the {\em spacer parameter}. For each $n\in\N$, the collection $\{B_n, T(B_n), \dots, T^{h_n-1}(B_n)\}$ gives the {\em stage-$n$ tower}, with $B_n$ as the {\em base} of the tower, and each $T^k(B_n)$, where $0 \leq k < h_n$, a {\em level} of the tower. The stage-$n$ tower has height $h_n$. At stage $n+1$, the stage-$n$ tower is cut into $r_n$ many $n$-blocks of equal measure. Each block has a base $B_{n,i}$ for some $0 < i\leq r_n$ and has height $h_n$. These $n$-blocks are then stacked up, with spacers inserted in between. At future stages, these $n$-blocks are further cut into thinner blocks, but they always have height $h_n$.
Note that the base of the stage-$m$ tower, $B_m$, is partitioned into $\{ B_{m,i}\,:\, 0<i\leq r_m\}$, where each $B_{m,i}$ is now a level of the stage-$(m+1)$ tower, with $B_{m,1}=B_{m+1}$ being the base of the stage-$(m+1)$ tower. It is clear by induction that for any $n\geq m$, $B_m$ is partitioned into various levels of the stage-$n$ tower.
We let $I_{m,n}$, for $n\geq m$, denote the set of indices for all levels of the stage-$n$ tower that form a partition of $B_m$, i.e., $$ I_{m,n}=\{ i\, :\, T^i(B_n)\subseteq B_m, 0\leq i<h_n\}. $$ Note that $B_m=\bigcup_{i\in I_{m,n}}T^i(B_n)$. $I_{m,n}$ is a finite set of natural numbers that can be inductively computed from the cutting and spacer parameters. For example, $$ I_{m,m+1}=\{0, h_m+s_{m,1}, 2h_m+s_{m,1}+s_{m,2}, \dots, (r_m-1)h_m+\sum_{0<i<r_m}s_{m,i}\}. $$
We next turn to the constructive symbolic definition of rank-one transformations. This often gives a succinct way to describe a concrete rank-one transformation. We will be talking about finite words over the alphabet $\{0,1\}$. Let $F$ be the set of all finite words over the alphabet $\{0,1\}$ that start with 0. A {\em generating rank-one sequence} is an infinite sequence $(v_n)$ of finite words in $F$ defined by induction on $n\in\N$: $$v_0 = 0; v_{n+1} = v_n1^{s_{n,1}}v_n1^{s_{n,2}}\cdots v_n1^{s_{n,r_n}}$$ for some integers $r_n>1$ and non-negative integers $s_{n,i}$ for $0 < i\leq r_n$. We continue to refer to the sequence $(r_n)$ as the cutting parameter and the doubly-indexed sequence $(s_{n,i})$ as the spacer parameter. Note that the cutting and spacer parameters uniquely determine a generating rank-one sequence. A generating rank-one sequence converges to an infinite rank-one word $V\in \{0,1\}^{\N}$. We write $V = \lim_{n}v_n$.
\begin{definition} Given an infinite rank-one word $V$, the {\em symbolic rank-one system} induced by $V$ is a pair $(X, \sigma)$, where $$ X = X_V = \{x \in\{0,1\}^\Z\,:\, \mbox{every finite subword of $x$ is a subword of $V$}\}$$ and $\sigma: X \to X$ is the shift map defined by $$\sigma(x)(k) = x(k + 1)\ \mbox{for all $k\in\Z$}. $$ \end{definition}
Under the same assumption (\ref{r1}) as in the constructive geometric definition, the symbolic rank-one system will carry a unique non-atomic, invariant probability measure. In this case the symbolic rank-one system will be isomorphic to the rank-one transformation that is constructed with the same cutting and spacer parameters.
The symbolic definition does not explicitly describe odometers (see Subsection \ref{cyclic} below for definitions), which are considered rank-one transformations. This was the motivation of Ferenczi's question in \cite{Fe} as discussed in the introduction.
In contrast, we note that in the topological setting, Gao and Ziegler have recently proved in \cite{GZ} that (infinite) odometers are not topologically isomorphic to symbolic rank-one systems (which are called rank-one subshifts in \cite{GZ}).
When we work with a rank-one transformation we will use both the terminology and the notation in this subsection.
\subsection{Finite cyclic permutations and odometers\label{cyclic}}
Here we precisely describe what we mean by ``finite cyclic permutation'' in the context of measure-preserving transformations. If $k\in\N$ with $k>1$ and $n\in\N$, we denote by $[n]_k$ the unique $m\in\N$ with $m<k$ and $n\equiv m\mod k$. For each $k \in \N$ with $k>1$, let $X_k = \{0, 1, \ldots, k-1\}$, let $\mu_k$ be the measure on $X_k$ where each point has measure $1/k$, and let $f_k: X_k \rightarrow X_k$ be given by $f_k (i) = [i+1]_k$. We let $\Z/k\Z$ denote the transformation $(X_k, \mu_k, f_k)$ and refer to such a transformation as a finite cyclic permutation. These are the sole cases we consider where the measure is atomic, so the measures are defined on atomic Lebesgue probability spaces, and we will still refer to $(X_k, \mu_k, f_k)$ as a transformation, though it should be clear from the context, such as when we denote a transformation by $T$, when a transformation is defined on a non-atomic space. It is natural to speak of a factor map from a measure-preserving transformation $T$ to $(X_k, \mu_k, f_k)$, but since $T$ is implicitly defined on a non-atomic space, it is not possible for such a factor map to be an isomorphism.
Now we describe what we mean by an odometer (see \cite{Do}). Loosely it can be described as an inverse limit of a coherent sequence of finite cyclic permutations. To be more precise, suppose we have a sequence $(k_n : n \in \N)$ of positive integers greater than 1 such that for all $n \in \N$, $k_n | k_{n+1}$. We now define $X$ as the collection of sequences $\alpha = (\alpha_n : n\in \N) \in \Pi_{n \in \N} \Z / k_n\Z$ such that for all $m,n \in \N$ with $m \leq n$, $[\alpha_n]_{k_m} = \alpha_m$. There is a natural measure $\mu$ on $X$ satisfying the following: for all $n \in \N$ and all $i \in \{0, 1, \ldots, k_n-1\}$ the set $\{\alpha \in X : \alpha_n = i \}$ has measure $1/k_n$. There is also a natural bijection $f: X \rightarrow X$ defined by $$f(\alpha) = (f_1(\alpha_1), f_2(\alpha_2), \ldots ) = ([\alpha_1 + 1]_{k_1}, [\alpha_2 + 1]_{k_2}, \dots ).$$ A transformation $(X, \mu, f)$ obtained in this way is called an {\it odometer}. For example, if $k_n=2^n$, one obtains the standard dyadic odometer.
The following characterization of when two such odometers are isomorphic is well known. Suppose $(k_n : n \in \N)$ and $(k_n^\prime : n \in \N)$ are sequences of positive integers greater than 1 such that for all $n \in \N$, $k_n | k_{n+1}$ and $k_n^\prime | k_{n+1}^\prime$. Then the odometers corresponding to these two sequences are isomorphic if and only if
$$ \{m\in \N\,:\, \exists n \in \N\ (m | k_n)\}=\{m\in\N\,:\, \exists n \in \N\ (m | k_n^\prime)\}.$$
Because of this characterization we often describe an odometer by an infinite collection $K$ of natural numbers that is closed under taking factors. If one has such a set $K$, then it is easy to produce a sequence $(k_n: n \in \N)$ of integers $>1$ such that $k_n | k_{n+1}$, for all $n \in \N$, and for which $$K = \bigcup_{n \in \N} \{m \in \N : m | k_n\}.$$ Moreover, any choice of such a sequence $(k_n : n \in \N)$ will give rise to the same odometer, up to isomorphism. We can now let $\mathcal{O}_K$ denote (any) one of the odometers produced by choosing such a sequence $(k_n: n \in \N)$. There are canonical ways to choose $\mathcal{O}_K$ based on the maximum power of each prime that occurs in $K$, but we will not go into the details of this canonical choice in this paper. It is worth noting that the characterization in the preceding paragraph guarantees that if $K \neq K^\prime$ are infinite collections of natural numbers that are closed under factors, then $\mathcal{O}_K \not\cong \mathcal{O}_{K^\prime}.$
Here we collect the important facts about $\mathcal{O}_K$ that we will use in this paper. \begin{enumerate} \item For each $k \in K$, there is a canonical factor map $\pi_k$ from $\mathcal{O}_K$ to $\Z/k\Z$.
\item For all $k, k^\prime \in K$ with $k | k^\prime$, and for all $x$ in the underlying set of $\mathcal{O}_K$, $\pi_k (x) = [\pi_{k^\prime} (x)]_k$. \item The collection of sets $\{ \pi_k^{-1} (i): k \in K, 0 \leq i < k \}$ generates the $\sigma$-algebra on $\mathcal{O}_K$. \item If a measure-preserving transformation factors onto $\Z/k\Z$ for all $k \in K$, then it also factors onto $\mathcal{O}_K$. If, moreover, the fibers of these maps generate the $\sigma$-algebra on $(X, \mu)$, then that factor map is an isomorphism. The argument for this is similar to the construction of the Kronecker factor of a transformation, see e.g. \cite{Qu}. \end{enumerate}
\subsection{The notion of $\epsilon$-containment}
In this subsection we define a precise notion of almost containment and briefly describe some of its properties; this is a standard notion in measure theory also called $(1-\epsilon)$-full.
\begin{definition} Let $A$ and $B$ be measurable subsets of positive measure of a measure space $(X, \mu)$ and let $\epsilon >0$. We say that $A$ is {\em $\epsilon$-contained} in $B$, and write $A \subseteq_{\epsilon} B$, provided that $$\frac{\mu (A \setminus B)}{\mu (A)} < \epsilon.$$ Equivalently, we say that $A$ is {\it $(1-\epsilon)$-full} of $B$ if $\mu(A\cap B)>(1-\epsilon)\mu(A)$. \end{definition}
Here are the basic facts we will need; the reader may refer to e.g. \cite{Si}. \begin{enumerate} \item If $A \subseteq_\epsilon B$ and $A$ is partitioned into sets $A_1, A_2, \ldots, A_r$, there is some $i \leq r$ such that $A_i \subseteq_\epsilon B$. \item If $A$ is partitioned into sets $A_1, A_2, \ldots, A_r$ and for all $i \leq r$, $A_i \subseteq_\epsilon B$, then $A \subseteq_\epsilon B$.
\item Let $(X, \mu, T)$ be a measure-preserving transformation. If $A \subseteq_\epsilon B$ and $z \in \Z$, then $T^z (A) \subseteq_\epsilon T^z(B)$. \item Let $(X, \mu, T)$ be a rank-one transformation. If $B \subseteq X$ has positive measure, then there is some $n \in \N$ and some $0 \leq i < h_n$ such that $T^i(B_n) \subseteq_{\epsilon} B$.
\end{enumerate}
\section{Factoring onto a finite cyclic permutation}\label{Fin}
It is quite easy to build a rank-one transformation that factors onto a cyclic permutation of $k$ elements. Simply ensure that for some $N \in \N$, the height of the stage-$N$ tower is a multiple of $k$ and furthermore insist that every time spacers are inserted after stage-$N$ the number of spacers inserted is a multiple of $k$. If a rank-one transformation is constructed in this way, then one can define, for all $m \geq N$, a function $\pi_m$ which goes from the stage-$m$ tower to $\Z / k\Z$ defined by $\pi_m (x) = [i]_k$, where $x$ belongs to level $i$ of the stage-$m$ tower. The method of construction guarantees that if $x$ belongs to the stage-$m$ tower and $n \geq m$, then $\pi_m (x) = \pi_n (x)$. The domains of the functions $\pi_m$ are increasing and their measure goes to one. Thus, we can define $\pi$ from a full-measure subset of $X$ to $\Z / k\Z$ by $$\pi (x) = \lim_{m \rightarrow \infty} \pi_m (x).$$ This map $\pi$ is clearly a factor map.
The theorem below gives a full characterization of which transformations factor onto a cyclic permutation of $k$ elements. \begin{theorem} \label{finitefactor1} Let $(X, \mu, T)$ be a rank-one measure-preserving transformation and let $1 < k \in \N$. The following are equivalent. \begin{enumerate} \item[\rm (i)] $(X, \mu, T)$ factors onto $\Z / k\Z$.
\item[\rm (ii)] $\forall \eta > 0, \exists N \in \N, \forall n \geq m \geq N, \exists j \in \Z/k\Z$ such that $$\frac{ |\{i \in I_{m,n} : [i]_k \neq j \}|}{| I_{m,n}|} < \eta.$$ \end{enumerate} \end{theorem}
\begin{proof}
First we will show that (i) implies (ii). Suppose that $\pi : X \rightarrow \Z/k\Z$ is a factor map. The fibers $\pi^{-1} (0), \pi^{-1} (1), \pi^{-1} (2), \ldots, \pi^{-1} (k-1)$ are a partition of $X$ into sets of measure $1/k$ such that $T(\pi^{-1} (j)) = \pi^{-1} ([j+1]_k)$, for all $j \in \Z/k\Z$. Let $\eta >0$ and choose $\epsilon$ smaller than both $\eta/2$ and $1/2$.
Since the levels of the towers generate the $\sigma$-algebra of $X$, there exists $N\in\N$ such that for all $n>m\geq N$, every level of the stage-$n$ tower is $\epsilon$-contained in $\pi^{-1}(j)$ for some $j\in \Z/k\Z$. Fix $j_0 \in \Z/k\Z$ such that $B_m \subseteq_\epsilon \pi^{-1}(j_0)$. We claim that among the levels of the stage-$n$ tower that comprise the base of the stage-$m$ tower, the fraction of those that are $\epsilon$-contained in $ \pi^{-1}(j_0)$ must be at least $1-2\epsilon$. In other words, letting $I^\prime = \{i \in I_{m,n}: T^i(B_n) \not\subseteq_\epsilon \pi^{-1} (j_0)\}$, we claim that
\begin{equation}\label{fraction}\frac{|I^\prime|}{|I_{m,n}|} < 2\epsilon. \end{equation} Suppose this is not the case. Since $$B_m \setminus \pi^{-1}(j_0) \supseteq \bigcup_{i \in I^\prime} \left( T^i(B_n) \setminus \pi^{-1}(j_0) \right), \text{we have that} $$
$$\mu \left( B_m \setminus \pi^{-1}(j_0) \right) \geq |I^\prime| \cdot \mu(B_n) \cdot (1 - \epsilon) = \frac{ |I^\prime|}{|I_{m,n}|} \cdot \mu(B_m) \cdot (1 - \epsilon).$$
Therefore, $$\frac{\mu \left( B_m \setminus \pi^{-1}(j_0) \right) }{ \mu(B_m)} \geq \frac{ |I^\prime|}{|I_{m,n}|} \cdot (1 - \epsilon) \geq (2 \epsilon) \cdot (1-\epsilon) > \epsilon,$$ since $\epsilon < 1/2$. This contradicts the fact that $B_m$ is $\epsilon$-contained in $\pi^{-1}(j_0)$ and completes the proof of \eqref{fraction}.
Since the levels of the stage-$n$ tower that are $\epsilon$-contained in $\pi^{-1}(j_0)$ are all in the same congruence class mod $k$, there is some $j \in \Z / k\Z$ such that $$\frac{ |\{i \in I_{m,n} : [i]_k \neq j \}|}{| I_{m,n}|} < 2 \epsilon < \eta,$$ completing the proof that (i) implies (ii).
Next we will show that (ii) implies (i). Assuming (ii) we construct a factor map $\pi : X \rightarrow \Z/ k\Z$.
For all $\alpha \in \N$, let $\eta_\alpha = \frac{1}{2^{\alpha+2}}$ and use (ii) to produce $N_\alpha \in \N$. We may assume that the sequence $(N_\alpha : \alpha \in \N)$ is increasing and that for each $\alpha$, $N_\alpha$ is large enough that the measure of the stage-$N_\alpha$ tower is at least $1 - \frac{1}{2^{\alpha +1}}$. Now, for each $\alpha \in \N$ we also choose $j_\alpha \in \Z/k\Z$ such that
$$\frac{ |\{i \in I_{N_\alpha,N_{\alpha+1}} : [i]_k \neq j_\alpha \}|}{| I_{N_\alpha,N_{\alpha+1}}|} < \eta_\alpha .$$
For all $\alpha \in \N$, define a function $\phi_{\alpha}$ from the stage-$N_\alpha$ tower to $\Z/k\Z$ as follows: If $x$ belongs to level $i$ of the stage-$N_\alpha$ tower, then $\phi_\alpha (x) = [i]_k$. Since for most $x$ in the base of the $N_\alpha$-tower, $\phi_{\alpha+1} (x) = j_\alpha$, the reader can verify that for all $\alpha \in \N$, $$\mu \left( \{x \in \textnormal{dom}(\phi_\alpha): \phi_{\alpha+1} (x) \neq [\phi_\alpha (x) + j_\alpha]_k \} \right) < \eta_\alpha.$$
Now, for each $\alpha \in \N$, we let $J_\alpha = \sum_{\beta < \alpha} j_\beta$. Also, for each $\alpha \in \N$ we define a function $\pi_\alpha$ from the stage-$N_\alpha$ tower to $\Z/k\Z$ by $\pi_\alpha (x) = [\phi_\alpha (x) - J_\alpha]_k$. Since $\phi_\alpha$ and $\pi_\alpha$ have the same domain for all $\alpha \in \N$, and in addition, if $x \in \textnormal{dom} (\pi_\alpha)$, then $\pi_{\alpha+1} (x) = \pi_{\alpha} (x)$ if and only if $\phi_{\alpha+1} (x) = [\phi_{\alpha} (x) + j_\alpha]_k$, and we already know that $\mu \left( \{x \in \textnormal{dom}(\phi_\alpha): \phi_{\alpha+1} (x) \neq [\phi_\alpha (x) + j_\alpha]_k \} \right) < \eta_\alpha,$ then one can verify that for all $\alpha \in \N$, $$\mu \left( \{x \in \textnormal{dom}(\pi_\alpha): \textnormal{ for all $\beta \geq \alpha$, } \pi_\alpha (x) = \pi_{\beta} (x) \} \right) \geq 1 - \frac{1}{2^\alpha }.$$
It follows that for $\mu$-almost every $x \in X$, the sequence $(\pi_\alpha (x) : \alpha \in \N)$ eventually stabilizes and we can define $$\pi (x) = \lim_{\alpha \rightarrow \infty} \pi_\alpha (x).$$
Choose $\alpha$ sufficiently large so that $\pi_\alpha (x) = \pi(x)$, $\pi_\alpha(T(x)) = \pi(T(x))$ and $x$ belongs to a non-top level of the stage-$N_\alpha$ tower. If $x$ belongs to level $i$ of the stage $N_\alpha$ tower, then $T(x)$ belongs to level $i+1$ of the stage-$N_\alpha$ tower which implies that $\phi_\alpha (T(x)) = [\phi_\alpha (x) + 1]_k$. Now, $$\pi (T(x)) = \pi_\alpha (T(x)) = [\phi_{\alpha} (T(x)) - J_\alpha]_k = [\phi_{\alpha} (x) + 1 - J_\alpha]_k = [\pi (x) +1]_k. $$ Therefore, $\pi: X \rightarrow \Z/k\Z$ is a factor map. \end{proof}
As a corollary, we obtain a characterization of the rank-one transformations that factor onto some (unspecified) non-trivial finite cyclic permutation, a condition that is well-known to be equivalent to the transformation not being totally ergodic.
\begin{corollary}\label{cortoterg} Let $(X, \mu, T)$ be a rank-one measure-preserving transformation. The following are equivalent. \begin{enumerate} \item $T$ factors onto some finite cyclic permutation.
\item $\exists k \in \N$ with $k>1$, $\forall \eta > 0, \exists N \in \N, \forall n \geq m \geq N, \exists j \in \Z/k\Z$ such that $$\frac{ |\{i \in I_{m,n} : [i]_k \neq j \}|}{| I_{m,n}|} < \eta.$$
\end{enumerate} \end{corollary}
We end with an equivalent characterization as suggested by the referee. The proof is similar to that of Theorem \ref{finitefactor1}.
\begin{theorem} \label{finitefactor2} Let $(X, \mu, T)$ be a rank-one measure-preserving transformation and let $1 < k \in \N$. The following are equivalent. \begin{enumerate} \item[\rm (i)] $(X, \mu, T)$ factors onto $\Z / k\Z$. \item[\rm (ii)] There is an increasing sequence $(q_n)$
such that $$\sum_{n=1}^\infty \frac{ |\{i \in I_{q_n,q_{n+1}} : i \not\equiv 0 \mod k \}|}{| I_{q_n,q_{n+1}}|} < \infty.$$ \end{enumerate} \end{theorem}
\section{Factoring onto an odometer}\label{Odo}
We now give characterizations of which rank-one transformations factor onto a given odometer, and which rank-one transformations factor onto some (unspecified) odometer. These characterizations are essentially corollaries of Theorem \ref{finitefactor1}.
\begin{theorem}\label{T:factortoodometer} Let $(X, \mu, T)$ be a rank-one measure-preserving transformation and let $\mathcal{O}_K$ be an odometer. The following are equivalent. \begin{enumerate} \item[\rm (i)] $(X, \mu, T)$ factors onto $\mathcal{O}_K$.
\item[\rm (ii)] $\forall k \in K, \forall \eta > 0, \exists N \in \N, \forall n \geq m \geq N, \exists j \in \Z/k\Z$ such that $$\frac{ |\{i \in I_{m,n} : [i]_k \neq j \}|}{| I_{m,n}|} < \eta.$$ \end{enumerate} \end{theorem}
\begin{proof} Suppose $(X, \mu, T)$ factors onto $\mathcal{O}_K$. Then for each $k \in K$, one can compose this factor map with a factor map from $\mathcal{O}_K$ to $\Z/ k\Z$ to get a factor map from $(X, \mu, T)$ to $\Z/ k\Z$. Together with Theorem \ref{finitefactor1}, this implies condition (ii).
Now suppose that condition (ii) holds. By Theorem \ref{finitefactor1} we know that $(X, \mu, T)$ factors onto $\Z/ k\Z$ for every $k \in K$. Therefore, $(X, \mu, T)$ factors onto $\mathcal{O}_K$. \end{proof}
By a proof similar to that of Theorem~\ref{T:factortoodometer} we obtain the following corollary.
\begin{corollary} Let $(X, \mu, T)$ be a rank-one measure-preserving transformation. The following are equivalent. \begin{enumerate} \item[\rm (i)] $(X, \mu, T)$ factors onto some odometer $\mathcal{O}$.
\item[\rm (ii)] $\forall M \in \N, \exists k \geq M, \forall \eta > 0, \exists N \in \N, \forall n \geq m \geq N, \exists j \in \Z/k\Z$ such that $$\frac{ |\{i \in I_{m,n} : [i]_k \neq j \}|}{| I_{m,n}|} < \eta.$$ \end{enumerate} \end{corollary}
\section{Being isomorphic to a given odometer}\label{Iso}
It turns out that it is not too hard to construct a rank-one transformation that is isomorphic to a given odometer. Let $K$ be an infinite set of natural numbers that is closed under factors. First choose a sequence $(k_n: n \in \N)$ of natural numbers such that the factors of the partial products $\prod_{m<n}k_m$ are precisely the set $K$ and for which $$\sum_{n \in \N} \frac{1}{k_n}< \infty. $$ Then build a rank-one transformation by a symbolic construction as follows. For $n \in \N$, let $v_0 = 0$ and let $v_{n+1} = (v_n)^{k_n-1} 1^{|v_n|}$. Then the resulting transformation $T$ is what is called {\em essentially $0$-expansive} by Adams, Ferenczi, and Petersen in \cite{AFP}, and their method shows that $T$ is isomorphic to the odometer $\mathcal{O}_K$. A definition of an isomorphism is also implicit in our results below.
In this section we characterize in general when a rank-one transformation is isomorphic to a given odometer. The idea is to build on our characterization for rank-one transformations which factor onto a given odometer, and then to examine when a factor map turns out to be an isomorphism. The following result gives the explicit details.
\begin{theorem} \label{isomorphictothisodometer} Let $(X, \mu, T)$ be a rank-one measure-preserving transformation and let $\mathcal{O}_K$ be an odometer. The following are equivalent. \begin{enumerate} \item[\rm (I)] $T$ is isomorphic to $\mathcal{O}_K$. \item[\rm (II)] Both of the following hold. \begin{enumerate}
\item[\rm (IIa)] $\forall k \in K, \forall \eta > 0, \exists N \in \N, \forall n \geq m \geq N, \exists j \in \Z/k\Z$ such that $$\frac{ |\{i \in I_{m,n} : [i]_k \neq j \}|}{| I_{m,n}|} < \eta.$$
\item[\rm (IIb)] $\forall l \in \N, \forall \epsilon>0, \exists k \in K, \exists N \in \N, \forall m \geq N, \exists D \subseteq \Z / k\Z$ such that $$\frac{|\{i < h_m: [i]_k \in D\} \Delta I_{l,m} |}{|I_{l,m}|} < \epsilon$$ \end{enumerate} \end{enumerate} \end{theorem} \begin{proof} First assume (II). Using condition (IIa) and the proof of Theorem \ref{finitefactor1} we construct, for each $k\in K$, a factor map $\pi_k: X \rightarrow \Z/k\Z$. Recall that $\pi_k$ is built using a series of approximating maps $(\pi_{k, \alpha}: \alpha \in \N)$.
It suffices to show that for every $l \in \N$ and every $\delta > 0$, there is some $k \in K$ and some $E \subseteq \Z/ k\Z$ such that $$\mu (B_l \Delta \pi_k^{-1} [ E] ) < \delta.$$
Let $l \in \N$ and $\delta >0$. Let $\epsilon = \delta/2$. First, we use condition (IIb) above to produce $k \in K$ and $N >l$ such that for all $m \geq N$, there exists some $D \subseteq \Z/k\Z$ such that
$$\frac{|\{i < h_m: [i]_k \in D\} \Delta I_{l,m} |}{|I_{l,m}|} < \epsilon.$$
Since $k \in K$, we have a factor map $\pi_k : X \rightarrow \Z/k\Z$ that is built using the approximating maps $\pi_{k, \alpha}$. Choose a specific $\alpha \in \N$ so that $\frac{1}{2^{\alpha}} < \delta/2$ and such that $N_\alpha$ is greater than the $N$ produced in the preceding paragraph. Using the fact that $N_\alpha>N$ and using features of the approximating maps $\pi_{k, \alpha}$ we get the following.
\begin{enumerate} \item [(i)] There exists some $D \subseteq \Z/k\Z$ such that
$$\frac{|\{i \leq h_{N_\alpha}: [i]_k \in D\} \Delta I_{l,N_\alpha} |}{|I_{l,N_\alpha}|} < \epsilon.$$
\item [(ii)] There exists $E \subseteq \Z/k\Z$ such that $$\bigcup_{d \in D} ( \bigcup_{\substack{0 \leq i < h_{N_\alpha}\\ [i]_k =d}} T^i (B_{N_\alpha})) = \bigcup_{e \in E} \pi_{k, \alpha}^{-1} (e). $$
\item [(iii)] $\mu (\{x \in \textnormal{dom}(\pi_{k, \alpha}): \pi_{k, \alpha} (x) = \pi_k(x)\}) \geq 1 - \frac{1}{2^{\alpha}}$. \end{enumerate}
Using these properties one can show that $$\mu (B_l \Delta \pi_k^{-1} [ E] ) < \delta,$$
completing the proof that $(X, \mu, T)$ is isomorphic to $\mathcal{O}_K$.
Now we assume that $(X, \mu, T)$ is isomorphic to $\mathcal{O}_K$ and let $\phi$ be an isomorphism between $T$ and $\mathcal{O}_K$. For each $k \in K$ we can compose $\phi$ with the canonical factor map of $\mathcal{O}_K$ onto $\Z/k\Z$ to get a factor map $\pi_{k}$ from $X$ to $\Z/k\Z$. For such a $k\in K$, Theorem \ref{finitefactor1} guarantees that $\forall \eta > 0, \exists N \in \N, \forall n \geq m \geq N, \exists j \in \Z/k\Z$ such that $$\frac{ |\{i \in I_{m,n} : [i]_k \neq j \}|}{| I_{m,n}|} < \eta.$$ Thus we have condition (IIa).
Next, exchanging the variable $\epsilon$ for $\delta$ in condition (IIb), we will prove that $\forall l \in \N, \forall \delta>0, \exists k \in K, \exists N \in \N, \forall m \geq N, \exists D \subseteq \Z / k\Z$ such that $$\frac{|\{i < h_m: [i]_k \in D\} \Delta I_{l,m} |}{|I_{l,m}|} < \delta.$$
Let $l \in \N$ and $\delta>0$. Let $\epsilon = \delta \cdot \mu(B_l)/4$. The reader can verify that there exists some $k \in K$ and $E \subseteq \Z/k\Z$ such that \begin{equation} \mu (B_l \Delta \pi_k^{-1} (E)) < \epsilon. \tag{*} \end{equation}
We next claim that there exists $N \in \N$ such that for all $m \geq N$ there exists some $j \in \Z/k\Z$ such that for all $0 \leq i < h_m$, $T^{i}(B_m) \subseteq_\epsilon \pi_k^{-1} ([i +j]_k)$. We can prove this with similar methods.
Fix such an $N \in \N$ that also satisfies $\mu\left(\bigcup_{0\leq i<h_N}T^i(B_N)\right) >1-\epsilon$ and let $m \geq N$. We now claim that there exists $D \subseteq \Z/k\Z$ such that \begin{equation} \mu ( \bigcup_{\substack{0 \leq i < h_m\\ [i]_k \in D}} T^i(B_m) \Delta \ \pi_k^{-1} (E) ) < 3 \epsilon. \tag{**} \end{equation}
Combining equations (*) and (**) we now have that $$\mu ( \bigcup_{\substack{0 \leq i < h_m\\ [i]_k \in D} }T^i(B_m) \Delta \ B_l) < 4 \epsilon.$$ To finish the proof of the theorem,
note that
$$\begin{array}{l} \displaystyle\frac{|\{i < h_m: [i]_k \in D\} \Delta I_{l,m} |}{|I_{l,m}|} =\frac{\displaystyle \mu ( \bigcup_{\substack{0 \leq i < h_m\\ [i]_k \in D}} T^i(B_m) \Delta \bigcup_{i \in I_{l,m} } T^i(B_m) ) }{\displaystyle \mu ( \bigcup_{i \in I_{l,m} } T^i(B_m) )} \\ =\frac{\displaystyle \mu ( \bigcup_{\substack{0 \leq i < h_m\\ [i]_k \in D}} T^i(B_m) \Delta B_l ) }{\displaystyle \mu \left( B_l \right)} <\displaystyle\frac{4\epsilon}{\mu(B_l)} = \delta.\end{array}$$ \end{proof}
Next we characterize when a rank-one transformation is isomorphic to some (unspecified) odometer.
\begin{theorem} \label{isomorphictosomeodometer} Let $(X, \mu, T)$ be a rank-one measure-preserving transformation. The following are equivalent. \begin{enumerate} \item[\rm (I)] $T$ is isomorphic to an odometer. \item[\rm (II)] For all $l \in \N$ and all $\epsilon>0$, there is some $k \in \N$ such that for all $\eta >0$ there exists an $N \in \N$ such that for all $n > m \geq N$, \begin{enumerate}
\item[\rm (IIa)] There is some $j \in \Z / k\Z$ such that $$\frac{|\{i \in I_{m,n}: [i]_k \neq j\}|}{|I_{m,n}|} < \eta$$
\item[\rm (IIb)] There is some $D \subseteq \Z / k\Z$ such that $$\frac{|\{i \leq |h_m|: [i]_k \in D\} \Delta I_{l,m} |}{|I_{l,m}|} < \epsilon$$ \end{enumerate} \end{enumerate} \end{theorem}
\begin{proof}
Suppose $T$ is isomorphic to an odometer. Let $K$ be the finite factors of that odometer. Let $l \in \N$ and $\epsilon >0$. Using condition (IIb) of Theorem \ref{isomorphictothisodometer} we can find some $k \in K$ and some $N_1 \in \N$, such that $\forall m \geq N_1, \exists D \subseteq \Z / k\Z$ such that $$\frac{|\{i \leq |h_m|: [i]_k \in D\} \Delta I_{l,m} |}{|I_{l,m}|} < \epsilon$$
For any $\eta >0$ we can use that specific $k\in K$ and condition (IIa) of Theorem \ref{isomorphictothisodometer} to find $N_2 \in \N$ such that $\forall n \geq m \geq N_2, \exists j \in \Z/k\Z$ such that $$\frac{ |\{i \in I_{m,n} : [i]_k \neq j \}|}{| I_{m,n}|} < \eta.$$ Letting $N = \max\{N_1, N_2\}$ we complete condition (II) of the theorem.
Suppose now that condition (II) holds. For all $l \in \N$ and all $\epsilon >0$, produce $k_{l, \epsilon}$, and $N_{l, \epsilon}$ according to condition (II).
Let $$K = \{k \in \N : k | k_{l, \epsilon} \textnormal{ for some $l \in \N$ and $\epsilon>0$}\}.$$
It is clear that $K$ is closed under factors. We leave it to the reader to show that $K$ is infinite by showing that if $l \in \N$ and $\epsilon<1$, then $k_{l, \epsilon} \geq h_l$.
Now, consider $\mathcal{O}_K$. We will prove that $T$ is isomorphic to $\mathcal{O}_K$ by showing that conditions (IIa) and (IIb) of Theorem \ref{isomorphictothisodometer} hold. First, let $k \in K$. Choose $l \in \N$ and $\epsilon>0$ such that $k | k_{l, \epsilon}$. We chose $k_{l, \epsilon}$ using condition (II) of this theorem. Theorem \ref{finitefactor1} guarantees that $T$ factors onto $\Z / k_{l, \epsilon}\Z$. Therefore, $T$ must also factor onto $\Z/k\Z$. Now Theorem \ref{finitefactor1} guarantees that condition (IIa) of Theorem \ref{isomorphictothisodometer} holds. Condition (IIb) of Theorem \ref{isomorphictothisodometer} follows immediately from our assumption that condition (II) of this theorem holds and our choice of $K$. \end{proof}
Before closing we consider an example of a rank-one transformation that factors onto an odometer but is not isomorphic to any odometer.
\noindent {\bf Example.} Let $T$ be the rank-one transformation corresponding to the symbolic definition $v_0=0$ and $$ v_{n+1}=v_nv_n1^{2^{n+1}}v_nv_n. $$ Then the length of $v_n$, or equivalently the height $h_n$ of the stage-$n$ tower, is $2^n(2^{n+1}-1)$. Using Theorem~\ref{finitefactor1} it is easy to verify that $T$ has all powers of $2$ as finite factors. Thus $T$ factors onto the dyadic odometer. As noted by the referee, ergodicity of the dyadic powers and non-ergodicity of the odd powers follow from \cite[Theorem H]{D19}. An argument using Theorem~\ref{finitefactor1} also shows that $T$ does not have any other factors. Indeed, suppose $T$ has an odd finite factor $a$. If no multiples of $a$ are of the form $2^k-1$ for any $k$, then the condition in Theorem~\ref{finitefactor1} fails, since the elements of $I_{m,n}$ come in pairs, with a difference $h_m=2^m(2^{m+1}-1)$ between them. On the other hand, suppose $a$ has a multiple of the form $2^{m+1}-1$ for some $m$. Then note that the elements of $I_{m,n}$ come in quadruples, with the sequence of differences $h_m, 2^{m+1}, h_m$ in between them. This implies also that at least half of the indices of $I_{m,n}$ disagree on the congruence class mod $a$, and thus the condition in Theorem~\ref{finitefactor1} fails. Therefore the maximal odometer factor of $T$ is the dyadic odometer. Finally, a similar argument shows that condition (IIb) of Theorem~\ref{isomorphictothisodometer} fails. Consequently $T$ is not isomorphic to the dyadic odometer. In conclusion, $T$ is not isomorphic to any odometer.
\thebibliography{999}
\bibitem{AFP} T. Adams, S. Ferenczi, K. Petersen, \textit{Constructive symbolic presentations of rank one measure-preserving systems,} {Colloq. Math.} 150 (2017), no. 2, 243--255.
\bibitem{D16} A.I. Danilenko, \textit{Actions of finite rank: weak rational ergodicity and partial rigidity}, Ergodic Theory Dynam. Systems 36 (2016), no. 7, 2138--2171.
\bibitem{D19} A.I. Danilenko, \textit{Rank-one actions, their (C,F)-models and constructions with bounded parameters}, J. Anal. Math. 139 (2019), no. 2, 697--749.
\bibitem{Do} T. Downarowicz, \textit{Survey of odometers and Toeplitz flows.} {Algebraic and topological dynamics}, 7--37, \textit{Contemp. Math.}, 385, Amer. Math. Soc., Providence, RI, 2005.
\bibitem{Fe} S. Ferenczi, \textit{Systems of finite rank}, {Colloq. Math.} 73:1 (1997), 35--65.
\bibitem{FRW} M. Foreman, D. J. Rudolph, B. Weiss, \textit{The conjugacy problem in ergodic theory}, {Ann. of Math.} 173 (2011), 1529--1586.
\bibitem{GH} S. Gao, A. Hill, \textit{Bounded rank-one transformations}, {J. Anal. Math.} 129 (2016), 341--365.
\bibitem{GZ} S. Gao, C. Ziegler, \textit{Topological factors of rank-one subshifts}, {Proc. Amer. Math. Soc. Ser. B} 7 (2020), 118--126.
\bibitem{Qu} M. Queffelec, \textit{Substitution dynamical systems and spectral analysis}, LNM 1294, Springer, NY, 2010.
\bibitem{Si} C.E. Silva, \textit{Invitation to ergodic theory}, SML 42. American Mathematical Society, Providence, RI, 2008.
\bibitem{vN} J. von Neumann, \textit{Zur Operatorenmethode in der klassischen Mechanik}, {Ann. of
Math.} 33 (1932), 587--642.
\end{document}
\section{Conclusions and questions}
The work in this paper gives a fairly complete understanding of which rank-one transformations are isomorphic to (or factor onto) a given element (or some element) of the class of odometers (or the class of finite cyclic permutations). There are several natural questions that arise if one moves beyond odometers and finite cyclic permutations. Here are a few such questions.
\begin{enumerate} \item For a specific rank-one transformation that is not an odometer, can one explicitly describe which rank-one transformations are isomorphic to that given transformation? For example, can one explicitly describe which rank-one transformations are isomorphic to Chacon's transformation? Can one explicitly describe which rank-one transformations are isomorphic to a given irrational rotation?
\item For a natural class of transformations, can one explicitly describe which rank-one transformations are isomorphic to some element of that class? For example, can one explicitly describe which rank-one transformations are isomorphic to some (unspecified) irrational rotation? Can one explicitly describe which rank-one transformations are isomorphic to some (unspecified) compact group rotation?
\item For a specific class of transformations that is closed under factors, can one describe explicitly which rank-one transformations factor onto some element of that class? For example, can one explicitly describe which rank-one transformations have some (unspecified) compact group rotation as a factor? If the answer to this last question is yes, then one can also characterize which rank-one transformations are weakly mixing. \end{enumerate}
\end{document} |
\begin{document}
\author{Pablo Ramacher} \email{ramacher@mathematik.uni-marburg.de} \address{Philipps-Universit\"at Marburg, FB 12 Mathematik und Informatik, Hans-Meerwein-Str., 35032 Marburg} \title{Addendum to ``The equivariant spectral function of an invariant elliptic operator''}
\begin{abstract} Let $M$ be a compact boundaryless Riemannian manifold, carrying an effective and isometric action of a torus $T$, and $P_0$ an invariant elliptic classical pseudodifferential operator on $M$. In this note, we strengthen the asymptotics for the equivariant (or reduced) spectral function of $P_0$ derived in \cite{ramacher16}, which are already sharp in the eigenvalue aspect, to become almost sharp in the isotypic aspect. In particular, this leads to hybrid equivariant ${\rm L}^p$-bounds for eigenfunctions that are almost sharp in the eigenvalue and isotypic aspect. \end{abstract}
\maketitle
\setcounter{tocdepth}{1} \tableofcontents{}
\section{Introduction}
Let $M$ be a closed $n$-dimensional Riemannian manifold with an effective and isometric action of a compact Lie group $G$. In this paper, we strengthen the asymptotics derived in \cite{ramacher16} for the equivariant (or reduced) spectral function of an invariant elliptic operator on $M$, which are already sharp in the eigenvalue aspect, to become also almost sharp in the isotypic aspect in case that $G=T$ is a torus, that is, a compact connected Abelian Lie group. In particular, if $T$ acts on $M$ with orbits of the same dimension, we obtain hybrid equivariant ${\rm L}^p$-bounds for eigenfunctions that are almost sharp up to a logarithmic factor.
To explain our results, consider an elliptic classical pseudodifferential operator \begin{equation*} P_0:{\rm C^{\infty}}(M) \, \longrightarrow \, {\rm L}^2(M) \end{equation*} of degree $m$ on $M$ acting on the Hilbert space of square integrable functions on $M$ with the space of smooth functions on $M$ as domain. We assume that $P_0$ is positive and symmetric, so that it has a unique self-adjoint extension $P$, which has discrete spectrum. Let $\mklm{E_\lambda}$ be a spectral resolution of $P$, and denote by $e(x,y,\lambda)$ the \emph{spectral function} of $P$ which is given by the Schwartz kernel of $E_\lambda$.
Further, assume that $M$ carries an effective and isometric action of a compact Lie group $G$ with Lie algebra ${\bf \mathfrak g}$ and orbits of dimension less or equal $n-1$. Suppose that $P$ commutes with the {left-regular representation} $(\pi,{\rm L}^2(M))$ of $G$ so that each eigenspace of $P$ becomes a unitary $G$-module. If $\widehat G$ denotes the set of equivalence classes of irreducible unitary representations of $G$, the Peter-Weyl theorem asserts that
\begin{equation} \label{eq:PW} {\rm L}^2(M)=\bigoplus_{\gamma \in \widehat G} {\rm L}^2_\gamma(M), \end{equation} a Hilbert sum decomposition, where ${\rm L}^2_\gamma(M):=\Pi_\gamma {\rm L}^2(M)$ denotes the $\gamma$-isotypic component, and $\Pi_\gamma$ the corresponding projection. Let $e_\gamma(x,y,\lambda)$ be the spectral function of the operator $P_\gamma:=\Pi_\gamma \circ P\circ \Pi_\gamma$, which is also called the \emph{reduced spectral function} of $P$. Further, let ${\mathbb J }:T^\ast M \to {\bf \mathfrak g}^\ast$ denote the momentum map of the Hamiltonian $G$-action on $T^\ast M$, induced by the action of $G$ on $M$, and write $\Omega:={\mathbb J }^{-1}(\mklm{0})$. In \cite[Theorem 4.3]{ramacher16}, the \emph{equivariant local Weyl law}
\begin{equation*}
\left |e_\gamma(x,x,\lambda)- \lambda^{\frac{n-\kappa_x}{m}} \frac{d_\gamma [\pi_{\gamma|G_x}:{\bf 1}]}{(2\pi)^{n-\kappa_x}} \int_{\{ (x,\xi) \in \Omega, \, p(x,\xi)< 1\}} \frac{ \,d \xi}{\text{vol}\, {\mathcal O}_{(x,\xi)}} \right | \leq C_{x,\gamma} \, \lambda^{\frac{n-\kappa_x-1}{m}}, \quad x \in M, \end{equation*}
was shown as $\lambda \to +\infty $, where $\kappa_x:=\dim {\mathcal O}_x$ is the dimension of the $G$-orbit through $x$, $d_\gamma$ denotes the dimension of an irreducible $G$-representation $\pi_\gamma$ belonging to $\gamma$ and $ [\pi_{\gamma|G_x}:{\bf 1}]$ the multiplicity of the trivial representation in the restriction of $\pi_\gamma$ to the isotropy group $G_x$ of $x$, while $C_{x,\gamma}>0$ is a constant satisfying
\begin{equation}
\label{eq:25.5.2018a}
C_{x,\gamma} =O_x\big ( d_\gamma \sup_{l \leq \lfloor \kappa_x/2+3 \rfloor} \norm{{\mathcal D}^l \gamma}_\infty\big ),
\end{equation}
and $D^l$ are differential operators on $G$ of order $l$. Both the leading term and the constant $C_{x,\gamma}$ in general depend in a highly non-uniform way on $x\in M$, exhibiting a caustic behaviour in the neighborhood of singular orbits.
A precise description of this caustic behaviour was achieved in \cite{ramacher16} by relying on the results \cite{ramacher10} on singular equivariant asymptotics obtained via resolution of singularities. More precisely, consider the stratification $M=M(H_1) \, \dot \cup \dots \dot \cup \, M(H_L)$ of $M$ into orbit types, arranged in such a way that $(H_i) \leq (H_j)$ implies $i \geq j$, and let $\Lambda$ be the maximal length that a maximal totally ordered subset of isotropy types can have. Write $M_\mathrm{prin}:=M(H_L)$, $M_\mathrm{except}$, and $M_\mathrm{sing}$ for the union of all orbits of principal, exceptional, and singular type, respectively, so that \begin{equation*}
M= M_\mathrm{prin}\, \dot \cup \, M_\mathrm{except}\, \dot \cup \, M_\mathrm{sing}, \end{equation*}
and denote by $\kappa:=\dim G/H_L$ the dimension of an orbit of principal type. Then, by \cite[Theorem 7.7]{ramacher16} one has for $x \in M_\mathrm{prin}\cup M_\mathrm{except}$ and $\lambda \to +\infty $ the \emph{singular equivariant local Weyl law}
\begin{align*}
\begin{split}
\Big |e_\gamma(x,x,\lambda)&- \frac{d_\gamma \lambda^{\frac{n-\kappa}{m}}}{(2\pi)^{n-\kappa}} \sum_{N=1}^{\Lambda-1} \, \sum_{{i_1<\dots< i_{N} }} \, \prod_{l=1}^{N} |\tau_{i_l}|^{\dim G- \dim H_{i_l}-\kappa} \mathcal{L}^{0,0}_{i_1\dots i_{N} }(x,\gamma) \Big |\\
&\leq \widetilde C_\gamma \lambda^{\frac{n-\kappa-1}m} \sum_{N=1}^{\Lambda-1}\, \sum_{{i_1<\dots< i_{N}}} \prod_{l=1}^N |\tau_{i_l}|^{\dim G- \dim H_{i_l}-\kappa-1}, \end{split} \end{align*}
where the multiple sums run over all possible maximal totally ordered subsets $\mklm{(H_{i_1}),\dots, (H_{i_N})}$ of singular isotropy types, the coefficients $\mathcal{L}^{0,0}_{i_1\dots i_{N}}$ are explicitly given and bounded functions in $x$, and $\tau_{i_j} =\tau_{i_j}(x)\in (-1,1)$ are desingularization parameters that arise in the resolution process satisfying $|\tau_{i_j}|\approx \text{dist}\, (x, M(H_{i_j}))$, while $\widetilde C_\gamma>0$ is a constant independent of $x$ and $\lambda$ that fulfills
\begin{equation}
\label{eq:25.5.2018b} \widetilde C_{\gamma} =O\big ( d_\gamma \sup_{l \leq \lfloor \kappa/2+3 \rfloor} \norm{{\mathcal D}^l \gamma}_\infty\big ).
\end{equation}
As a major consequence, the above expansions lead to equivariant bounds for eigenfunctions. In the non-singular case, that is, when only principal and exceptional orbits are present, and consequently all $G$-orbits have the same dimension $\kappa$, the hybrid ${\rm L}^q$-estimates \begin{equation} \label{eq:Lqbound} \norm{u}_{{\rm L}^q(M)} \leq \begin{cases} C_\gamma \, \lambda^{\frac{\delta_{n-\kappa}(q)}{m}} \norm{u}_{{\rm L}^2}, & \frac{2(n-\kappa+1)}{n-\kappa-1} \leq q \leq \infty,
\\ C_\gamma \, \lambda^{\frac{(n-\kappa-1)(2-q')}{4m q'}} \norm{u}_{{\rm L}^2}, & 2 \leq q \leq \frac{2(n-\kappa+1)}{n-\kappa-1}, \end{cases}
\end{equation} were shown in \cite[Theorem 5.4]{ramacher16} for any eigenfunction $u \in {\rm L}^2_\gamma(M)$ of $P$ with eigenvalue $\lambda$, where $\frac 1q+\frac 1{q'}=1$, $\delta_n(p):=\max \left ( n \left |1/ 2 - 1/p \right | -1/2,0 \right )$, and $C_{\gamma}>0$ is a constant independent of $\lambda$ satisfying the estimate
\begin{equation}
\label{eq:24.07.2017}
C_\gamma \ll \sqrt{d_\gamma \sup_{l \leq \lfloor \kappa/2+1\rfloor} \norm{D^l\gamma}_\infty},
\end{equation} provided that the co-spheres $S_x^\ast M$ are strictly convex. Note that for the proof of ${\rm L}^p$-bounds it is necessary to describe the caustic behaviour of the relevant spectral kernels as $\mu\to +\infty $ in a neighborhood of the diagonal, which makes things considerably more involved.
In case that singular orbits are present, one has the pointwise bound \begin{equation} \label{eq:4.12.2015}
\sum_{\stackrel{\lambda_j \in (\lambda,\lambda+1],}{ e_j \in {\rm L}^2_\gamma(M)}} |e_j(x)|^2 \leq \begin{cases} C \, \lambda^{\frac{n-1}m}, & \hspace{-.0cm} x\in M_\mathrm{sing}, \\ & \\
\widetilde C_\gamma \, \lambda^{\frac{n-\kappa-1}m} \sum\limits_{N=1}^{\Lambda-1}\, \sum\limits_{{i_1<\dots< i_{N}}} \prod\limits_{l=1}^N |\tau_{i_l}|^{\dim G- \dim H_{i_l}-\kappa-1}, & x\in M- M_\mathrm{sing}, \end{cases} \end{equation} for a constant $C>0$ independent of $\gamma$, where $\mklm{e_j}_{j \geq 0}$ is an orthonormal basis of ${\rm L}^2(M)$ compatible with the decomposition \eqref{eq:PW},
showing that eigenfunctions tend to concentrate along lower dimensional orbits.
The aim of this note is to sharpen the above results in the isotypic aspect in case that $G=T$ is a torus, and show that instead of the bounds \eqref{eq:25.5.2018a} and \eqref{eq:25.5.2018b} one has the better estimates \begin{equation*} C_{x,\gamma}=O_x\Big (\sup_{l \leq 1 }\norm{D^l \gamma}_\infty\Big ), \qquad \widetilde C_{\gamma}=O\Big (\sup_{l \leq 1 }\norm{D^l \gamma}_\infty\Big ), \qquad \gamma \in {\mathcal W}_\lambda, \end{equation*} where ${\mathcal W}_\lambda$ denotes the set of representations \begin{equation*}
{\mathcal W}_\lambda:=\mklm{\gamma \in \widehat T' \mid |\gamma | \leq \frac{\lambda^{1/m}}{\log \lambda}}. \end{equation*} Here $\widehat T'\subset \widehat T$ stands for the subset of representations occurring in the Peter-Weyl decomposition \eqref{eq:PW}, and we denoted the differential of a character $\gamma\in \widehat T$, which corresponds to an integral linear form $\gamma:{\bf \mathfrak t} \rightarrow i{\mathbb R}$, by the same letter. Similarly, it will be shown that the constant $C_\gamma$ in \eqref{eq:24.07.2017} actually satisfies the bound
\begin{equation*}
C_\gamma \ll 1, \qquad \gamma \in {\mathcal W}_\lambda.
\end{equation*}
By the equivariant Weyl law \cite{ramacher10} and Gauss' law, $|\gamma|$ can grow at most of rate $\lambda^{1/m}$.
Thus, the bounds \eqref{eq:Lqbound} hold for \emph{almost any} eigenfunction $u\in {\rm L}^2(M)$ with $C_\gamma$ independent of $\gamma$, which is consistent with recent results of Tacy \cite{tacy18}.
As will be discussed, the improved bounds are almost sharp in this sense, being already attained for $\mathrm{SO}(2)$-actions on the $2$-sphere and the $2$-torus. For their proof, a careful examination of the remainder in the stationary phase expansion of the relevant spectral kernels is necessary. These bounds are crucial for deriving hybrid subconvex bounds for Hecke-Maass forms on compact arithmetic quotients of semisimple Lie groups in the eigenvalue and isotypic aspect \cite{ramacher-wakatsuki17}.
Throughout this document, the notation $O(\mu^{k}), k \in {\mathbb R} \cup \mklm{\pm \infty},$ will mean an upper bound of the form $C \mu^k$ with a constant $C>0$ that is uniform in all relevant variables, while $O_\aleph(\mu^{k})$ will denote an upper bound of the form $C_\aleph \, \mu^k$ with a constant $C_\aleph> 0$ that depends on the indicated variable $\aleph$. In the same way, we shall write $a\ll_\aleph b$ for two real numbers $a$ and $b$, if there exists a constant $C_\aleph>0$ depending only on $\aleph$ such that $|a| \leq C_\aleph b$, and similarly $a \ll b$, if the bound is uniform in all relevant variables. Finally, ${\mathbb N}$ will denote the set of natural numbers $0,1,2,3,\dots$. \\
\section{The reduced spectral function of an invariant elliptic operator} \label{sec:RSF}
Let $M$ be a closed connected Riemannian manifold of dimension $n$ with Riemannian volume density $dM$, and $P_0$ an elliptic classical pseudodifferential operator on $M$ of degree $m$ which is positive and symmetric. The principal symbol $p(x,\xi)$ of $P_0$ constitutes a strictly positive function on $T^\ast M\setminus\mklm{0}$, where $T^\ast M$ denotes the cotangent bundle of $M$. The operator $P_0$ has a unique self-adjoint extension $P$, its domain being the $m$-th Sobolev space $H^m(M)$. It is well known that there exists an orthonormal basis $\mklm{e_j}_{j\geq 0}$ of ${\rm L}^2(M)$ consisting of eigenfunctions of $P$ with eigenvalues $\mklm{\lambda_j}_{j \geq 0}$ repeated according to their multiplicity, and that $Q:=\sqrt[m]{P}$ constitutes a classical pseudodifferential operator of order $1$ with principal symbol $q(x,\xi):=\sqrt[m]{p(x,\xi)}$ and domain $H^1(M)$. Again, $Q$ has discrete spectrum, and its eigenvalues are given by $\mu_j:=\sqrt[m]{\lambda_j}$. The spectral function $e(x,y,\lambda)$ of $P$ can then be described by studying the spectral function of $Q$, which in terms of the basis $\mklm{e_j}$ is given by \begin{equation*} e(x,y,\mu):=\sum_{\mu_j\leq \mu} e_j(x) \overline{e_j(y)}, \qquad \mu\in {\mathbb R}, \end{equation*} and belongs to ${\rm C^{\infty}}(M \times M)$ as a function of $x$ and $y$. Let $\chi_\mu$ be the spectral projection onto the sum of eigenspaces of $Q$ with eigenvalues in the interval $(\mu, \mu+1]$, and denote its Schwartz kernel by $\chi_\mu(x,y):=e(x,y,\mu+1) - e(x,y,\mu)$. 
To obtain an asymptotic description of the spectral function of $Q$ let $\varrho \in {\mathcal S}({\mathbb R},{\mathbb R}_+)$ be such that $\varrho(0)=1$ and $\supp \hat \varrho\in (-\delta/2,\delta/2)$ for a given $\delta>0$, and define the {approximate spectral projection operator} \begin{equation} \label{eq:2.1} \widetilde \chi_\mu u := \sum_{j=0}^\infty \varrho(\mu-\mu_j) E_{j} u, \qquad u \in {\rm L}^2(M), \end{equation} where $E_j$ denotes the orthogonal projection onto the subspace spanned by $e_j$. Clearly, $K_{\widetilde \chi_\mu}(x,y):=\sum_{j=0}^\infty \varrho(\mu-\mu_j) e_j(x) \overline{e_j(y)}\in {\rm C^{\infty}}(M\times M)$ constitutes the kernel of $\widetilde \chi_\mu$.
As H\"ormander \cite{hoermander68} showed, $\widetilde \chi_\mu$ can be approximated by Fourier integral operators yielding an asymptotic formula for the kernels of $\widetilde \chi_\mu$ and $\chi_\mu$, and finally for the spectral function of $Q$ and $P$.
Now, assume that $M$ carries an effective and isometric action of a compact Lie group $G$. Let $P$ commute with the left-regular representation $(\pi,{\rm L}^2(M))$ of $G$. Consider the Peter-Weyl decomposition of ${\rm L}^2(M)$, and let $\Pi_\gamma$ be the projection onto the isotypic component belonging to $\gamma \in \widehat G$, which is given by the Bochner integral \begin{equation*} \Pi_\gamma=d_\gamma \intop_G \overline{\gamma(g)} \pi(g) \,d_G(g), \end{equation*} where $d_\gamma$ is the dimension of an unitary irreducible representation of class $\gamma$, and $d_G(g) \equiv dg$ Haar measure on $G$, which we assume to be normalized such that $\text{vol}\, G=1$. If $G$ is finite, $d_G$ is simply the counting measure. In addition, let us suppose that the orthonormal basis $\mklm{e_j}_{j\geq 0}$ is compatible with the Peter-Weyl decomposition in the sense that each vector $e_j$ is contained in some isotypic component ${\rm L}^2_\gamma(M)$. In order to describe the spectral function of the operator $Q_\gamma:=\Pi_\gamma \circ Q\circ \Pi_\gamma=Q\circ \Pi_\gamma=\Pi_\gamma \circ Q$ given by \begin{equation} \label{eq:24.09.2015} e_\gamma (x,y,\mu):=\sum_{\mu_j\leq \mu,\, e_j \in {\rm L}^2_\gamma(M)} e_j(x) \overline{e_j(y)}, \end{equation} we consider the composition $ \chi_\mu\circ \Pi_\gamma$ with kernel $ K_{\chi_\mu \circ \Pi_\gamma}(x,y)=e_\gamma(x,y,\lambda+1)-e_\gamma(x,y,\lambda) $, together with the corresponding equivariant approximate spectral projection \begin{align} \label{eq:1004} (\widetilde \chi_\mu \circ \Pi_\gamma) u = \sum_{j\geq 0,\, e_j \in {\rm L}^2_\gamma(M)} \varrho(\mu-\mu_j) E_{j} u. \end{align} Its kernel can be written as \begin{equation*} K_{\widetilde \chi_\mu \circ \Pi_\gamma}(x,y):=\sum_{j\geq 0, e_j \in {\rm L}^2_\gamma(M)} \varrho(\mu-\mu_j) e_j(x) \overline{e_j(y)}\in {\rm C^{\infty}}(M\times M). 
\end{equation*} By using Fourier integral operator methods, it was shown in \cite{ramacher16} that the kernel of $\widetilde \chi_\mu \circ \Pi_\gamma$ can be expressed as follows. Let $\mklm{(\kappa_\iota, Y_\iota)}_{\iota \in I}$, $\kappa_\iota:Y_\iota \stackrel{\simeq}\to \widetilde Y_\iota \subset {\mathbb R}^n$, be an atlas for $M$, $\mklm{f_\iota}$ a corresponding partition of unity, and $\mklm{\bar f_\iota}$ a set of test functions with compact support in $Y_\iota$ satisfying $\bar f_\iota \equiv 1$ on $\supp f_\iota$. Consider further a test function $0 \leq \alpha \in {\rm C^{\infty}_c}(1/2, 3/2)$ such that $\alpha \equiv 1$ in a neighborhood of $1$, and set \begin{align} \begin{split} \label{eq:02.05.2015}
I^\gamma_\iota(\mu, R, s, x,y):= & \int _G \int_{\Sigma^{R,s}_{\iota,x}} e^{i{ \mu} \Phi_{\iota,x,y}(\omega,g)} \hat \varrho(s) \overline{\gamma(g)} f_\iota( x) \\ &\cdot a_\iota(s, \kappa_\iota( x) , \mu \omega) \bar f _\iota (g \cdot y) \alpha(q( x, \omega)) J_\iota(g,y) {\,d\Sigma^{R,s}_{\iota,x}(\omega) \,d g},
\end{split} \end{align} where $\Phi_{\iota,x,y}(\omega,g):=\eklm{\kappa_\iota( x) - \kappa_\iota(g \cdot y),\omega}$, $a_\iota \in S^0_{\mathrm{phg}}$ is a suitable classical polyhomogeneous symbol satisfying $a_\iota(0,\tilde x, \eta)=1$, $J_\iota(g,y)$ a Jacobian, and \begin{equation} \label{eq:20.04.2015} \Sigma^{R,s}_{\iota,x}:=\mklm{\omega \in {\mathbb R}^n \mid \zeta_\iota (s, \kappa_\iota (x),\omega) = R} \end{equation} is a smooth compact hypersurface given in terms of a smooth function $\zeta_\iota$ which is homogeneous in $\eta$ of degree $1$ and satisfies $\zeta_\iota(0, \tilde x, \eta) = q(\kappa_\iota^{-1}(\tilde x), \eta)$.
Then, by \cite[Corollary 2.2]{ramacher16} one has for $\mu \geq 1$, $x,y \in M$, and each $\tilde N \in {\mathbb N}$ the asymptotic expansion \begin{align} \label{eq:13.06.2016} K_{\widetilde \chi_\mu \circ \Pi_\gamma}(x,y)
= &\Big(\frac{\mu}{2\pi}\Big )^{n-1} \frac{d_\gamma}{2\pi} \sum _\iota \Big [ \sum_{j=0}^{\tilde N-1} D^{2j}_{R,s} I^\gamma_\iota(\mu, R, s,x,y)_{|(R,s)=(1,0)} \, \mu^{-j} + \mathcal{R}^\gamma_{\iota}(\mu,x,y) \Big ] \end{align} up to terms
of order $O(|\mu|^{-\infty}\norm{\gamma}_\infty)$ which are uniform in $x,y$, where $D^{2j}_{R,s}$ are known differential operators of order $2j$ in $R,s$, and \begin{align*}
|\mathcal{R}^\gamma_{\iota}(\mu,x,y)| \leq& C\mu^{-\tilde N} \sum_{|\beta| \leq 2\tilde N +3} \sup_{R,s} \big |\gd_{R,s}^\beta I^\gamma_\iota(\mu,R,s,x,y) \big | \end{align*} for some constant $C>0$. On the other hand, $K_{\widetilde \chi_\mu \circ \Pi_\gamma}(x,y)$ is rapidly decaying as $\mu \to -\infty$ and uniformly bounded in $x,y$ by $\norm{\gamma}_\infty$.
\section{Equivariant asymptotics of oscillatory integrals}
Let the notation be as in the previous section. As we have seen there, the question of describing the spectral function in the equivariant setting reduces to the study of oscillatory integrals of the form \begin{equation} \label{eq:03.05.2015} I^\gamma_{x,y}(\mu):=\int_{G}\int_{\Sigma^{R,s}_x} e^{i\mu \Phi_{x,y}(\omega,g)} \overline{\gamma(g)} a (x,y, \omega,g) \,d \Sigma^{R,s}_x (\omega) \,d g, \qquad \mu \to + \infty, \end{equation} with $\Sigma^{R,s}_x$ as in \eqref{eq:20.04.2015} and phase function \begin{equation*} \Phi_{x,y}(\omega,g):= \eklm{\kappa(x) - \kappa( g\cdot y), \omega}, \end{equation*} where we have skipped the index $\iota$ for simplicity of notation, and $a \in {\rm C^{\infty}_c}$ is an amplitude that might depend on $\mu$ and other parameters such that $(x,y,\omega, g) \in \supp a$ implies $x, g \cdot y \in Y$. In what follows, we shall write $^yG:=\mklm{g \in G \mid g\cdot y \in Y}$, as well as
\begin{equation}
\label{eq:11.9.2017}
I^\gamma_x(\mu) :=I^\gamma_{x,x}(\mu), \qquad \Phi_x:=\Phi_{x,x}.
\end{equation}
Let us assume in the following that $G$ is a continuous group, and write $\kappa(x)=(\tilde x_1, \dots, \tilde x_n)$ so that the canonical local trivialization of $T^\ast Y$ reads \begin{equation*} Y \times {\mathbb R}^n \, \ni (x,\eta) \quad \equiv \quad \sum_{k=1}^n \eta_k (d\tilde x_k)_{x} \in \, T^\ast_xY. \end{equation*} With respect to this trivialization, we shall identify $\Sigma^{R,s}_{x'}$ with a subset in $T^\ast_{x} Y$ for eventually different $x$ and $x'$, if convenient. Let $\Omega:={\mathbb J }^{-1}(\mklm{0})$ be the zero level set of the momentum map ${\mathbb J }: T^\ast M \to {\bf \mathfrak g}^\ast$ of the underlying Hamiltonian $G$-action on $T^\ast M$.
Let ${\mathcal O}_x:=G\cdot x$ denote the $G$-orbit and $G_x:=\mklm{g \in G\mid g\cdot x=x}$ the stabilizer or isotropy group of a point $x\in M$. Throughout the paper, it is assumed that \begin{equation*}
\dim {\mathcal O}_x \leq n-1 \qquad \text{for all } x \in M. \end{equation*} Let further $N_y{\mathcal O}_x$ be the normal space to the orbit ${\mathcal O}_x$ at a point $y \in {\mathcal O}_x$, which can be identified with $\mathrm{Ann}(T_y{\mathcal O}_x)$ via the underlying Riemannian metric. For $x\in Y$ and ${\mathcal O}_y \cap Y \not=\emptyset$ let \begin{equation*} \mathrm{Crit} \, \Phi_{x,y}:=\Big \{(\omega,g) \in \Sigma^{R,s}_x \times \, ^y G\mid \, d(\Phi_{x,y})_{(\omega,g)}=0\Big \} \end{equation*} be the critical set of $\Phi_{x,y}$. With $M_\text{prin}$, $M_\text{except}$, and $M_\text{sing}$ denoting the principal, exceptional, and singular stratum, respectively, it was shown in \cite[Lemma 3.1]{ramacher16} that
\begin{itemize} \item if $y\in \mathcal{O}_x$, the set $\mathrm{Crit} \, \Phi_{x,y}$ is clean and given by the smooth submanifold \begin{equation*}
{\mathcal J }=\big \{(\omega,g)\mid (g \cdot y, \omega) \in \Omega, \, x=g\cdot y\big \}= V_{\mathcal J } \times G_{\mathcal J } \end{equation*} of codimension $2\dim {\mathcal O}_x$, with $V_{\mathcal J }=\Sigma^{R,s}_x \cap N_x{\mathcal O}_x$ and $G_{\mathcal J }=\mklm{g \in G \mid x=g\cdot y}\subset \, ^y G$.
\item if $y \not\in \mathcal{O}_x$, $$\mathrm{Crit} \, \Phi_{x,y}=\Big \{(\omega,g)\mid (g \cdot y,\omega) \in \Omega, \, \kappa(x)-\kappa(g \cdot y) \in N_\omega \Sigma^{R,s}_x\Big \};$$ furthermore, assume that $G$ acts on $M$ with orbits of the same dimension $\kappa$, that is, $M=M_\mathrm{prin}\, \cup\, M_\mathrm{except}$, and that the co-spheres $S_x^\ast M$ are strictly convex. Then, either $\mathrm{Crit} \, \Phi_{x,y}$ is empty, or, choosing $Y$ sufficiently small, $\mathrm{Crit} \, \Phi_{x,y}$ is clean and of codimension $n-1+\kappa$, its finitely many connected components being of the form \begin{equation*} {\mathcal J }=V_{\mathcal J } \times G_{\mathcal J } \end{equation*} with $V_{\mathcal J }= \mklm{\omega_{\mathcal J }}$ and $G_{\mathcal J } = g_{\mathcal J } \cdot G_y\subset \, ^y G$ for some $\omega_{\mathcal J }\in \Sigma^{R,s}_x$ and $g_{\mathcal J } \in G$. \end{itemize}
From this an asymptotic expansion for the integrals $I^\gamma_{x,y}(\mu)$ was deduced in \cite[Theorem 3.3]{ramacher16}, yielding a corresponding asymptotic formula for $K_{\widetilde \chi_\mu \circ \Pi_\gamma}(x,y)$. In this paper, we improve the estimate for the remainder in the isotypic aspect in case that $G=T$ is a torus, which we assume from now on.
For this, recall that the exponential function $\exp$ is a covering homomorphism of ${\bf \mathfrak t}$ onto $T$, and its kernel $L$ a lattice in ${\bf \mathfrak t}$. Let $\widehat T$ denote the \emph{set of characters of $T$}, that is, of all continuous homomorphisms of $T$ into the circle, which we identify with the unitary dual of $T$. The differential of a character $\gamma: T \to S^1$, denoted by the same letter, is a linear form $\gamma:{\bf \mathfrak t}\to i {\mathbb R}$ which is \emph{integral} in the sense that $\gamma(L) \subset 2\pi i \, {\mathbb Z}$. On the other hand, if $\gamma$ is an integral linear form, one defines \begin{equation*} t^\gamma= e^{\gamma(X)}, \qquad t= \exp X \in T, \end{equation*} setting up an identification of $\widehat T$ with the integral linear forms on ${\bf \mathfrak t}$ via $\gamma(t)\equiv t^\gamma$. Further, all irreducible representations of $T$ are $1$-dimensional. We now make the following
\begin{definition} Denote by $\widehat T'\subset \widehat T$ the subset of representations occurring in the decomposition \eqref{eq:PW} of ${\rm L}^2(M)$, and let $\mklm{\mathcal{V}_\mu}_{\mu \in (0,\infty)}$ be a family of finite subsets $\mathcal{V}_\mu\subset \widehat T'$ such that \begin{equation*}
\max_{\gamma \in \mathcal{V}_\mu} |\gamma| \leq C \frac\mu{\log \mu} \end{equation*} for a constant $C>0$ independent of $\mu$. \end{definition}
Our main result is the following improvement of the remainder and coefficient estimates in \cite[Theorem 3.3]{ramacher16}.
\begin{thm} \label{thm:12.05.2015} Assume that $T$ is a torus acting on $M$ with orbits of dimension less than or equal to $n-1$, and let $ \mathcal{V}_\mu$ be as in the previous definition.
\begin{enumerate}
\item[(a)] Let $y \in \mathcal{O}_x$. Then, for every $\gamma \in \widehat T$ and $\tilde N=0,1,2,\dots $ one has the asymptotic formula \begin{equation*}
I^\gamma_{x,y}(\mu)=(2\pi/\mu)^{\dim \mathcal{O}_x} \left [\sum_{k=0}^{\tilde N-1} \mathcal{Q}_{k}(x,y) \mu^{-k} +\mathcal{R}_{\tilde N}(x,y,\mu)\right ], \qquad \mu \to +\infty, \end{equation*} where the coefficients and the remainder depend smoothly on $R$ and $s$. The coefficients satisfy the bounds \begin{align*}
|\mathcal{Q}_k(x,y)|&\leq C_{k,\Phi_{x,y}} \text{vol}\, (\supp a(x,y,\cdot,\cdot)\cap \mathcal{C}_{x,y}) \sup _{l\leq k} \norm{(D_\omega^{2l} D_t^l \gamma a)(x,y, \cdot,\cdot)}_{\infty} \end{align*} while the remainder satisfies \begin{align*}
|\mathcal{R}_{\tilde N}(x,y, \mu) | &\leq \widetilde C_{\tilde N,\Phi_{x,y}} \text{vol}\, (\supp a (x,y,\cdot,\cdot )) \\ & \cdot \sup_{l\leq 2\tilde N+ \dim \mathcal{O}_x +1} \norm{(D_\omega^l D_t^l a)(x,y,\cdot,\cdot )}_{\infty} \, \sup_{l\leq \tilde N} \norm{ D_t^l \gamma}_{\infty} \mu^{-\tilde N}, \qquad \gamma \in \mathcal{V}_\mu. \end{align*} The bounds are uniform in $R,s$ for suitable constants $C_{k,\Phi_{x,y}}>0$ and $\widetilde C_{\tilde N,\Phi_{x,y}}>0$, where $D_\omega^l$ and $D_t^l$ denote differential operators of order $l$ on $\Sigma^{R,s}_x$ and $T$, respectively. As functions in $x$ and $y$, $\mathcal{Q}_k(x,y)$ and $\mathcal{R}_{\tilde N}(x,y,\mu)$ are smooth on $Y \cap M_\mathrm{prin}$, and the constants $C_{k,\Phi_{x,y}}$ and $\widetilde C_{\tilde N,\Phi_{x,y}}$ are uniformly bounded in $x$ and $y$ if $M= M_\mathrm{prin} \cup M_\mathrm{except}$.
\item[(b)] Let $y \not \in \mathcal{O}_x$. Assume that $M=M_\mathrm{prin}\, \cup\, M_\mathrm{except}$ and that the co-spheres $S_x^\ast M$ are strictly convex. Then, for sufficiently small $Y$ and every $\tilde N=0,1,2,\dots $ one has the asymptotic formula \begin{equation*}
I^\gamma_{x,y}(\mu)=\sum_{{\mathcal J } \in \pi_0(\mathrm{Crit} \, \Phi_{x,y})}(2\pi/\mu)^{\frac{n-1+\kappa}{2}} e^{i\mu \,^0\Phi_{x,y}^{\mathcal J }} \left [ \sum_{k=0}^{\tilde N-1} \mathcal{Q}_{{\mathcal J },k}(x,y) \mu^{-k} +\mathcal{R}_{{\mathcal J }, \tilde N}(x,y,\mu)\right ] \end{equation*} as $\mu \to +\infty$, where $\kappa:=\dim M/T$ and $^0\Phi_{x,y}^{\mathcal J }$ stands for the constant values of $\Phi_{x,y}$ on the connected components ${\mathcal J }$ of its critical set. The coefficients $\mathcal{Q}_{{\mathcal J },k}(x,y)$ and the remainder term $\mathcal{R}_{{\mathcal J },\tilde N}(x,y,\mu) $ depend smoothly on $R,s$, and $x,y \in Y \cap M_\mathrm{prin}$. Furthermore, they satisfy bounds analogous to the ones in (a), where now derivatives in $t$ up to order $2k$ and $2\tilde N$ can occur, and the constants $C_{k,\Phi_{x,y}}$ and $\widetilde C_{\tilde N,\Phi_{x,y}}$ are no longer uniformly bounded, but satisfy \begin{align*} C_{k,\Phi_{x,y}}& \ll \text{dist}\,(y, {\mathcal O}_x)^{-(n-1-\kappa)/2 -k}, \qquad \widetilde C_{\tilde N,\Phi_{x,y}} \ll \text{dist}\,(y, {\mathcal O}_x)^{-(n-1-\kappa)/2-\tilde N}. \end{align*}
\end{enumerate} \end{thm}
\begin{proof} The asymptotic expansion for the integral $I^\gamma_{x,y}(\mu)$, the smoothness of the coefficients $\mathcal{Q}_{k}(x,y)$, $\mathcal{Q}_{{\mathcal J },k}(x,y)$, and the remainder terms in the parameters $R,s$, and $x,y \in Y\cap M_\text{prin}$, as well as corresponding bounds for the coefficients and the remainder term were shown in \cite[Theorem 3.3]{ramacher16}. To improve on the remainder estimate concerning its dependence on $\gamma$ as $\mu \to +\infty$, we rewrite $I^\gamma_{x,y}(\mu)$ up to a volume factor as \begin{equation*} I^\gamma_{x,y}(\mu)\equiv \int_{{\bf \mathfrak t}}\int_{\Sigma^{R,s}_x} e^{i\mu \Phi_{x,y}(\omega,\exp (-X))} e^{-\gamma(X)} a (x,y, \omega,X) \,d \Sigma^{R,s}_x (\omega) \,d X, \qquad \gamma \in \widehat T, \end{equation*} where we can assume that $a$ is compactly supported with respect to $X\in {\bf \mathfrak t}$ in a small open connected subset $^y{\bf \mathfrak t}\subset {\bf \mathfrak t}$ by choosing $Y$ small. If we were to apply the stationary and non-stationary phase principles to $I^\gamma_{x,y}(\mu)$ with $\Phi_{x,y}$ as phase function, which was the way we followed in \cite{ramacher16}, this would involve derivatives of the amplitude $\overline \gamma a$ and generate non-optimal powers in $\gamma$ in the remainder estimate. Instead, note that the character $\gamma(t)=e^{\gamma(X)}\in S^1$ constitutes itself a phase, which can oscillate rather quickly as $\gamma$ increases. To deal with these oscillations, we shall absorb them into the phase function, and define for arbitrary $\xi \in {\bf \mathfrak t}^\ast$ \begin{equation*}
\Phi^\xi_{x,y}(\omega,X):= \Phi_{x,y}(\omega,\e{-X})-\xi(X), \qquad t=\exp X \in T. \end{equation*} The idea is then to apply the stationary and non-stationary phase principles to the integrals $I^\gamma_{x,y}(\mu)$ with phase function $\Phi^\xi_{x,y}(\omega,X)$ and $\xi=\gamma/i\mu$ as parameter, compare \cite[Theorem 7.7.6]{hoermanderI}, to obtain remainder estimates that are optimal in $\gamma\in \mathcal{V}_\mu$. If $\mklm{X_1,\dots,X_d}$ denotes a basis of ${\bf \mathfrak t}$, the $X$-derivatives of $ \Phi^\xi_{x,y}(\omega,X)$ read \begin{equation*} \sum_{k=1}^n \omega_k (d \tilde x_k)_{\e{-X} \cdot y} (\widetilde X_j) -\xi(X_j)=[{\mathbb J }(\e{-X} \cdot y, \omega)-\xi](X_j), \end{equation*}
so that
\begin{equation*} \mathrm{Crit} \, \Phi^\xi_{x,y}=\mklm{(\omega,X) \mid \kappa(x) - \kappa (\e{-X} \cdot y) \in N_\omega(\Sigma^{R,s}_x), \quad (\e{-X} \cdot y, \omega) \in {\mathbb J }^{-1}(\mklm{\xi})}.
\end{equation*} A repetition of the arguments given in \cite[Proof of Lemma 3.1]{ramacher16} then shows that for sufficiently small $|\xi|$
\begin{itemize} \item if $y\in \mathcal{O}_x$, the set $\mathrm{Crit} \, \Phi^\xi_{x,y}$ is clean and given by the smooth submanifold \begin{equation*}
{\mathcal J }=\big \{(\omega,X)\mid (\e{-X} \cdot y, \omega) \in {\mathbb J }^{-1}(\mklm{\xi}), \, x=\e{-X}\cdot y\big \} \end{equation*} of codimension $2\dim {\mathcal O}_x$;
\item if $y \not\in \mathcal{O}_x$ and $T$ acts on $M$ with orbits of the same dimension $\kappa$ and the co-spheres $S_x^\ast M$ are strictly convex, then either $\mathrm{Crit} \, \Phi^\xi_{x,y}$ is empty, or, choosing $Y$ sufficiently small, $\mathrm{Crit} \, \Phi^\xi_{x,y}$ is clean and of codimension $n-1+\kappa$,
\end{itemize} which would also just follow from \cite[Proof of Lemma 3.1]{ramacher16} and the implicit function theorem. In addition, note that for $(\omega,X)\in \mathrm{Crit} \, \Phi^\xi_{x,y}$ \begin{equation*} {\mathcal M}_{x,y}(\omega,X):=\text{Trans Hess } \Phi^\xi_{x,y}(\omega,X) \, \text{is independent of $\xi$}. \end{equation*}
Next, notice that under the assumptions in (a) and (b), respectively, there is an open tubular neighborhood $U_0$ of $\mathrm{Crit} \, \Phi_{x,y}$ and a constant $\mu_0>0$ such that for all $\mu \geq \mu_0$ and $\gamma \in \mathcal{V}_\mu$ \begin{itemize} \item $\mathrm{Crit} \, \Phi_{x,y}^{\gamma / i\mu} \subset U_0$, \item $\mathrm{Crit} \, \Phi_{x,y}^{\gamma/ i\mu}$ is clean, that is, $\Phi_{x,y}^{\gamma/i\mu} $ is a Morse-Bott function. \end{itemize}
Let $U_1$ and $U_2$ be two further open tubular neighborhoods of $\mathrm{Crit} \, \Phi_{x,y}$ and $\mu_0 >\mu_1 >\mu_2>0$ be such that $U_0 \subset U_1 \subset U_2$ are proper inclusions and the pairs $(U_1,\mu_1)$, $(U_2,\mu_2)$ have the same properties as $(U_0,\mu_0)$. Let $u \in {\rm C^{\infty}}(U_2,{\mathbb R}^+)$ be a test function with $u_{|U_1}\equiv 1$ and define \begin{align*} ^1I^\gamma_{x,y}(\mu)&:= \int_{{\bf \mathfrak t}}\int_{\Sigma^{R,s}_x} e^{i\mu \Phi^{\gamma/i\mu}_{x,y}(\omega,X)}u(\omega,X) a(x,y, \omega,X) \,d \Sigma^{R,s}_x (\omega) \,d X, \\ ^2I^\gamma_{x,y}(\mu)&:=I^\gamma_{x,y}(\mu)-\, ^1I^\gamma_{x,y}(\mu). \end{align*} By construction, for $\gamma \in\mathcal{V}_\mu$ and $\mu \geq \mu_0$ all critical sets $\mathrm{Crit} \, \Phi_{x,y}^{\gamma / i\mu}$ have a minimal, non-vanishing\footnote{ At least on the intersection of the support of $a(x,y,\cdot,\cdot)$ and $ U_1$.} distance to $\gd U_1$, so that \begin{equation*}
|\grad \Phi^{\gamma/i\mu}_{x,y}| \geq C >0 \quad \text{on $\supp(1-u) a(x,y,\cdot,\cdot )$ for all $\gamma \in\mathcal{V}_\mu$ with $\mu \geq \mu_0$.} \end{equation*} An application of the non-stationary phase principle \cite[Theorem 7.7.1]{hoermanderI} with respect to the phase function $ \Phi^{\gamma/i\mu}_{x,y}$ then yields for every $k \in {\mathbb N}$ the uniform bound \begin{equation*} ^2I^\gamma_{x,y}(\mu) =O_{k,a}(\mu^{-k}) \qquad \text{for all $\gamma \in\mathcal{V}_\mu$ with $\mu \geq \mu_0$.} \end{equation*} It remains to estimate the integral $^1I^\gamma_{x,y}(\mu)$ by means of the stationary phase principle with $\xi=\gamma/i\mu$ as parameter, for which we shall follow \cite[Theorem 7.7.5]{hoermanderI} and its proof. Assume as we may that $U_2$ is sufficiently small, and introduce normal tubular coordinates on $U_2$ in form of an atlas $\mklm{(\zeta_\iota,\mathcal{Y}_\iota)}_{\iota \in I}$ such that
\begin{enumerate}
\item $\supp ua(x,y,\cdot,\cdot ) \subset \bigcup_\iota \mathcal Y_\iota$,
\item $\zeta_\iota^{-1}(m',m'') \in \mathrm{Crit} \, \Phi^\xi_{x,y}$ iff ${\mathbb R}^{\,d''}\ni m''=m''_\xi$, where \begin{equation*} d''=\begin{cases} 2 \dim {\mathcal O}_x & \text{in case (a)} \\ n-1+\kappa & \text{in case (b)}. \end{cases} \end{equation*}
\item the ${\bf \mathfrak t}$-coordinates are given by standard Euclidean coordinates, so that in each chart
$$X=\sum_\alpha m'_{{\bf \mathfrak t},\alpha} X_\alpha ' + \sum_\beta m''_{{\bf \mathfrak t},\beta} X_\beta ''$$
for a suitable basis $\{X_\alpha',X_\beta''\}$ of ${\bf \mathfrak t}$.
\end{enumerate}
Let $\mklm{p_\iota}$ be a partition of unity subordinated to the covering $\mklm{\mathcal{Y}_\iota}$, and write $ a_\iota(x,y,\omega,X):= p_\iota(\omega,X) a(x,y,\omega,X) $ as well as $a_\iota (x,y,m):=a_\iota (x,y, \zeta_\iota^{-1}(m))\beta_\iota(m) $, $\beta_\iota$ being a Jacobian. Denote the product of $u \circ \zeta^{-1}_\iota$ with the Taylor expansion of $a_\iota(x,y,\cdot )$ in the variable $m''$ at the point $m''_\xi$ of order $2k$ by $T^\xi_\iota(x,y,m)$, which is smooth and bounded in $\xi$. Let ${\mathcal M}_{x,y}(\omega,X)$ be as above and set ${\mathcal M}^\iota_{x,y}(m',m''_\xi):=({\mathcal M}_{x,y} \circ \zeta_\iota^{-1})(m',m''_\xi)$. Since for sufficiently small $|m''-m''_\xi|$
\begin{equation*}
\frac{|m''-m''_\xi|}{|\grad_{m''} \Phi^\xi_{x,y} (m',m''_
\xi)|} \ll \norm{ {\mathcal M}^\iota_{x,y}(m',m''_\xi)^{-1}} \ll 1
\end{equation*} for all $\xi$, \cite[Theorem 7.7.1]{hoermanderI} yields with respect to $\Phi^\xi_{x,y}(m):=(\Phi^\xi_{x,y}\circ \zeta_\iota^{-1})(m)$ for any $k \in {\mathbb N}$
\begin{equation*}
^1I^\gamma_{x,y}(\mu)= \sum_\iota \int_{{\mathbb R}^{d'}} \int_{{\mathbb R}^{d''}} e^{i\mu \Phi^{\gamma/i\mu}_{x,y}(m)} T^{\gamma/i\mu}_\iota(x,y,m) \,d m'' \,d m' +O_{k,a}(\mu^{-k})
\end{equation*}
uniformly in $\gamma$. Next, note that for fixed $m'$
\begin{equation}
\label{eq:quadrform} m'' \longmapsto \eklm{{\mathcal M}^\iota_{x,y}(m',m''_\xi) (m''-m''_\xi),(m''-m''_\xi)}
\end{equation}
defines a non-degenerate quadratic form, and introduce the auxiliary function
\begin{equation*}
H^\xi(m):=\Phi^\xi_{x,y}(m)-\Phi^\xi_{x,y}(m',m''_\xi)-\eklm{{\mathcal M}^\iota_{x,y}(m',m''_\xi) (m''-m''_\xi),(m''-m''_\xi)}/2,
\end{equation*}
which vanishes of third order at $m''=m''_\xi$. The function
\begin{equation*}
^s\Phi^\xi_{x,y}(m):=\eklm{{\mathcal M}^\iota_{x,y}(m',m''_\xi) (m''-m''_\xi),(m''-m''_\xi)}/2+s H^\xi(m)
\end{equation*}
interpolates between $\Phi^\xi_{x,y}(m)-\Phi^\xi_{x,y}(m',m''_\xi)=\, ^1\Phi^\xi_{x,y}(m)$ and the quadratic form \eqref{eq:quadrform}, and we define
\begin{equation*}
{\mathcal I}(s):=\int_{{\mathbb R}^{d''}} e^{i\mu \, ^s\Phi^{\xi}_{x,y}(m)} T^\xi_\iota(x,y,m) \,d m''.
\end{equation*}
Taylor expansion then yields
\begin{equation*}
\Big | {\mathcal I}(1)- \sum_{l=0}^{2k-1} {\mathcal I}^{(l)} (0)/l! \Big | \ll \sup_{0 \leq s \leq 1} |{\mathcal I}^{(2k)} (s)|/(2k)!.
\end{equation*}
Now, differentiation with respect to $s$ gives
\begin{equation*}
{\mathcal I}^{(l)}(s)= \int_{{\mathbb R}^{d''}} e^{i\mu \, ^s\Phi^{\xi}_{x,y}(m)} (i\mu H^\xi(m))^l\, T^\xi_\iota(x,y,m) \,d m''.
\end{equation*}
In view of the uniform bounds
\begin{equation*}
\frac{|m''-m''_\xi|}{|\grad_{m''} \, ^s\Phi^\xi_{x,y} (m',m''_\xi)|} \ll \norm{ {\mathcal M}^\iota_{x,y}(m',m''_\xi)^{-1}} \ll 1 \qquad \text{for all $\xi$ and $s$}
\end{equation*} and\footnote{Note that $D^\alpha_{m''} H^\xi(m)=D^\alpha_{m''} \Phi_{x,y}(m)$ for $|\alpha|\geq 3$, while for $|\alpha| \leq 2$ Taylor expansion at $m''_\xi$ implies \begin{align*}
|D^\alpha_{m''} H^\xi(m)| \ll |m''-m''_\xi|^{3-|\alpha|} \sum_{|\beta|=3} \sup |D^\beta_{m''} \Phi_{x,y}(m)| \ll |m''-m''_\xi|^{3-|\alpha|} \end{align*}
uniformly in $\xi$ since $H^\xi(m)$ depends on $\xi$ only via the term $\xi\big (\sum_\alpha m'_{{\bf \mathfrak t},\alpha} X_\alpha ' + \sum_\beta m''_{{\bf \mathfrak t},\beta} X_\beta ''\big )$, which vanishes when differentiated more than one time.} \begin{equation*}
\big |D^\alpha_{m''} [H^\xi(m)^{2k}\, T^\xi_\iota(x,y,m)]\big | \ll |m''-m''_\xi|^{6k-|\alpha|} \quad \text{for all $\xi$} \end{equation*} we obtain from \cite[Theorem 7.7.1]{hoermanderI} with $k$ replaced by $3k$ there the important uniform bound
\begin{equation*}
{\mathcal I}^{(2k)}(s) =O(\mu^{-k}) \qquad \text{for all $\gamma \in \mathcal{V}_\mu$ with $\mu \geq \mu_0$ and all $s$.}
\end{equation*}
Next, denote by ${\mathcal H}^\xi(m)$ the Taylor expansion of $H^\xi(m)$ of order $3k$, and notice that one has
\begin{equation*}
(H^\xi)^l-({\mathcal H}^\xi)^l=O(|m''-m''_\xi|^{2k+2l})
\end{equation*}
uniformly in $\xi$. Applying again \cite[Theorem 7.7.1]{hoermanderI} gives
\begin{equation*}
{\mathcal I}^{(l)}(0)= \int_{{\mathbb R}^{d''}} e^{i\mu \, ^0\Phi^{\xi}_{x,y}(m)} (i\mu {\mathcal H}^\xi(m))^l\, T^\xi_\iota(x,y,m) \,d m'' + O_{k,a}(\mu^{-k})
\end{equation*}
uniformly in $\xi$. The assertion now follows by taking into account \cite[Lemma 7.7.3]{hoermanderI} and the final arguments in the proof of \cite[Theorem 7.7.5]{hoermanderI}. Note that the Taylor expansion ${\mathcal H}^\xi$ starts with terms of degree $3$ and depends on $\xi$ in that the coefficients are evaluated at $m''=m''_{\xi}$. Consequently, when applied to ${\mathcal I}^{(l)}(0)$ the remainder estimate in \cite[Lemma 7.7.3]{hoermanderI} can be uniformly estimated in $\xi$. The final remainder estimate results from the above uniform estimates, and local contributions of higher order where additional derivatives of $\gamma$ arise. The local terms are unique, and coincide with the ones with phase function $\Phi_{x,y}$ and amplitude $\overline \gamma a$ considered in \cite[Theorem 3.3]{ramacher16}, from which the corresponding bounds are deduced. The fact that in case (a) only $t$-derivatives of order $k$ and $\tilde N$ occur, follows from the particular form of the transversal Hessian, \cite[Proof of Theorem 3.3]{ramacher16}.
\end{proof}
Similarly, one derives
\begin{thm} \label{thm:14.05.2017} Consider the integrals $I^\gamma_{x,y}(\mu)$ defined in \eqref{eq:03.05.2015}. Assume that the torus $T$ acts on $M$ with orbits of the same dimension $\kappa \leq n-1$, and that the co-spheres $S_x^\ast M$ are strictly convex. Then, for sufficiently small $Y$ and arbitrary $\tilde N_1, \tilde N_2 \in {\mathbb N}$ one has the asymptotic formula \begin{gather*}
I^\gamma_{x,y}(\mu)\\
= \sum_{{\mathcal J } \in \pi_0(\mathrm{Crit} \, \Phi_{x,y})} \frac{e^{i \mu \,^0\Phi_{ x,y}^{\mathcal J }}}{\mu^\kappa (\mu \norm{ \kappa(x)-\kappa(g_{\mathcal J } \cdot y)}+1 )^{\frac{n-1-\kappa}2}} \left [ \sum_{k_1,k_2 =0}^{\tilde N_1-1,\tilde N_2-1} \frac { \mathcal{Q}_{{\mathcal J }, k_1,k_2} (x,y)}{\mu^{k_1} (\mu \norm{ \kappa(x)-\kappa(g_{\mathcal J } \cdot y)}+1)^{k_2}}\right. \\ \left. + \mathcal{R}_{{\mathcal J },\tilde N_1, \tilde N_2}(x,y,\mu) \right ] \end{gather*} as $\mu \to +\infty$. The coefficients and the remainder term depend smoothly on $R,s$,
while $^0\Phi_{x,y}^{\mathcal J }:= R \, c_{x,g_{\mathcal J }\cdot y} (t)$ denotes the constant value of $\Phi_{x,y}$ on ${\mathcal J }$. Furthermore, the coefficients are uniformly bounded in $R,s, x$, and $y$ by derivatives of $\gamma$ up to order $2k_1$,
and the remainder term
\begin{equation*}
\mathcal{R}_{{\mathcal J },\tilde N_1, \tilde N_2}(x,y,\mu)= O_{{\mathcal J },\tilde N_1, \tilde N_2} \Big ( \mu^{-\tilde N_1} (\mu \norm{ \kappa(x)-\kappa(g_{\mathcal J } \cdot y)}+1)^{-\tilde N_2} \Big )
\end{equation*}
by derivatives of $\gamma$ up to order $2 \tilde N_1$, provided that $\gamma \in \mathcal{V}_\mu$. \end{thm} \begin{proof} The proof is essentially the same as the one of \cite[Theorem 3.4]{ramacher16}, using the arguments given in the proof of the previous theorem.
\end{proof}
\section{The equivariant local Weyl law}
We shall now prove an improved version of the equivariant local Weyl law derived in \cite{ramacher16}. For this, we first prove the following refinement of \cite[Proposition 4.1]{ramacher16}.
\begin{proposition} [\bf Point-wise asymptotics for the kernel of the equivariant approximate projection] \label{thm:kernelasymp} For any fixed $x \in M$, $\gamma \in \widehat T$, and $\tilde N\in {\mathbb N}$ one has as $\mu \to +\infty$ \begin{align} \label{eq:13.05.2015} \begin{split}
K_{\widetilde \chi_\mu \circ \Pi_\gamma}(x,x)&=\sum_{j\geq 0, \, e_j \in {\rm L}^2_\gamma(M)} \varrho(\mu-\mu_j) |{e_j(x)}|^2 \\ & = \Big (\frac{\mu}{2\pi}\Big )^{n-\dim \mathcal{O}_x-1} \frac{d_\gamma}{2\pi} \left [\sum_{k=0}^{\tilde N-1} {\mathcal L}_k(x,\gamma) \mu^{-k}+ \mathcal{R}_{\tilde N}(x,\gamma) \right ] \end{split} \end{align} with coefficients and remainder depending smoothly on $x \in M_\mathrm{prin}$. They satisfy the bounds \begin{equation*}
|\mathcal{L}_k(x,\gamma)| \leq C_{k,x} \sup_{l \leq k} \norm{D^l \gamma}_\infty, \end{equation*} as well as \begin{equation*}
|\mathcal{R}_{\tilde N}(x,\gamma)| \leq \tilde C_{\tilde N,x} \sup_{l \leq \tilde N} \norm{D^l \gamma}_\infty \mu^{-\tilde N}, \qquad \gamma \in \mathcal{V}_\mu, \end{equation*} where $D^l$ denotes a differential operator on $T$ of order $l$, and the constants $C_{k,x}$, $\tilde C_{\tilde N,x}$ are uniformly bounded in $x$ if $M= M_\mathrm{prin} \cup M_\mathrm{except}$. In particular, the leading coefficient is given by \begin{align*}
{\mathcal L}_0(x,\gamma) = \hat \varrho(0) [{\pi_\gamma}_{|T_x}:{\bf 1}] \, \mbox{vol} \, [( \Omega \cap S_x^\ast M)/T], \end{align*} where $S^\ast M:=\mklm{(x,\xi) \in T^\ast M\mid p(x,\xi)=1}$. If $\mu \to -\infty$, the function $K_{\widetilde \chi_\mu \circ \Pi_\gamma}(x,x)$ is rapidly decreasing in $\mu$. \end{proposition} \begin{proof} We only have to prove the bounds for the coefficients and the remainder, since all other assertions have been shown in \cite{ramacher16}. Let the notation be as in Section \ref{sec:RSF}, and $R,s \in {\mathbb R}$, $x \in Y_\iota$ be fixed. As a direct consequence of Theorem \ref{thm:12.05.2015} (a) we have for any $\tilde N\in {\mathbb N}$ \begin{equation*}
\gd_{R,s}^\beta I^\gamma_\iota(\mu, R, s, x,x)= (2\pi/\mu)^{\dim \mathcal{O}_x} \left [ \sum_{k=0}^{\tilde N-1} {\mathcal L}^k_{\iota,\beta}(R,s,x,\gamma) \mu^{-k}+ \mathcal{R}^{\tilde N}_{\iota,\beta}(R,s,x,\gamma,\mu) \right ], \end{equation*} where the coefficients and the remainder term are explicitly given and depend smoothly on $R,s$, and $x\in Y \cap M_\mathrm{prin}$. Furthermore, both the coefficients ${\mathcal L}^k_{\iota,\beta}(R,s,x,\gamma)$ and the remainder are bounded by expressions involving derivatives of $\gamma$ up to order $k$ and $\tilde N$, respectively, which are uniformly bounded in $x$ if $M=M_\mathrm{prin} \cup M_\mathrm{except}$. Equation \eqref{eq:13.06.2016} then implies the asymptotic expansion \eqref{eq:13.05.2015} with the specified estimate for the remainder.
\end{proof}
We can now sharpen \cite[Theorem 4.3]{ramacher16} in the isotypic aspect as follows.
\begin{thm}[\bf Equivariant local Weyl law] \label{thm:main} Let $M$ be a closed connected Riemannian manifold of dimension $n$ carrying an isometric and effective action of a torus $T$, and $P_0$ a $T$-invariant elliptic classical pseudodifferential operator on $M$ of degree $m$. Let $p(x,\xi)$ be its principal symbol, and assume that $P_0$ is positive and symmetric. Denote its unique self-adjoint extension by $P$, and for a given $\gamma \in \widehat T$ let $e_\gamma(x,y,\lambda)$ be its reduced spectral function. Further, let ${\mathbb J }:T^\ast M \to {\bf \mathfrak t}^\ast$ be the momentum map of the $T$-action on $M$, and put $\Omega:={\mathbb J }^{-1}(\mklm{0})$. Then, for fixed $x \in M$ one has
\begin{equation}
\label{eq:29.10.2015}
\left |e_\gamma(x,x,\lambda)-\frac{ [\pi_{\gamma|T_x}:{\bf 1}]}{(2\pi)^{n-{\kappa_x}}} \lambda^{\frac{n-\kappa_x}{m}} \int_{\mklm{\xi\mid \, (x,\xi) \in \Omega, \, p(x,\xi)< 1}} \frac{ \,d \xi}{\text{vol}\, {\mathcal O}_{(x,\xi)}} \right | \leq C_{x,\gamma} \, \lambda^{\frac{n-{\kappa_x}-1}{m}}
\end{equation} as $\lambda \to +\infty$, where $\kappa_x:=\dim {\mathcal O}_x$ and $ [\pi_{\gamma|T_x}:{\bf 1}]\in \mklm{0,1}$ denotes the multiplicity of the trivial representation in the restriction of $\pi_\gamma$ to the isotropy group $T_x$ of $x$. Furthermore, for arbitrary $\gamma \in {\mathcal W}_\lambda:=\mklm{\gamma \in \widehat T' \mid |\gamma | \leq \frac{\lambda^{1/m}}{\log \lambda}}$ \begin{equation} \label{eq:4.6.2017}
C_{x,\gamma}=O_x\Big (\sup_{l \leq 1 }\norm{D^l \gamma}_\infty\Big )=O_x (|\gamma| ) \end{equation}
is a constant that depends smoothly on $x\in M_\mathrm{prin}$ and is uniformly bounded in $x$ if $M= M_\mathrm{prin} \cup M_\mathrm{except}$. \end{thm} \begin{proof} This follows directly by taking $\tilde N=1$ in \eqref{eq:13.05.2015} and integrating with respect to $\mu$ from $-\infty$ to $\sqrt[m]{\lambda}$ with the arguments given in \cite[Proof of Eq. (2.25)]{duistermaat-guillemin75}. \end{proof}
\begin{rem} \label{rem:23.04.2017} \hspace{0cm} \begin{enumerate} \item With the same constant $C_{x,\gamma}$ as in \eqref{eq:29.10.2015} one also has the bound \begin{equation*}
\left | e_\gamma(x,y,\lambda+1)-e_\gamma(x,y,\lambda) \right | \leq \sqrt{C_{x,\gamma} \lambda^{\frac{n-\kappa_x-1}m}} \sqrt{C_{y,\gamma} \lambda^{\frac{n-\kappa_y-1}m}}, \qquad x, y \in M, \, \gamma \in {\mathcal W}_\lambda, \end{equation*} compare \cite[Remark 4.4]{ramacher16}. \item As a consequence of Theorem \ref{thm:main}, the constant $C_{x,\gamma}$ in \cite[Corollary 4.6]{ramacher16} can be improved accordingly, as well as all examples given in \cite[Section 4]{ramacher16}. \end{enumerate} \end{rem}
\section{Equivariant ${\rm L}^p$-bounds of eigenfunctions for non-singular group actions} \label{sec:equivLp}
Let the notation be as in the previous sections. As a consequence of the improved point-wise asymptotics for the kernel of the equivariant approximate projection, one obtains in the non-singular case the following sharpened equivariant ${\rm L}^\infty$-bounds for eigenfunctions.
\begin{proposition}[\bf ${\rm L}^\infty$-bounds for isotypic spectral clusters] \label{thm:bounds} Assume that $T$ acts on $M$ with orbits of the same dimension $\kappa$, and denote by $\chi_\lambda$ the spectral projection onto the sum of eigenspaces of $P$ with eigenvalues in the interval $(\lambda, \lambda+1]$. Then, for any $\gamma \in {\mathcal W}_\lambda$, \begin{equation} \label{eq:5} \norm{(\chi_\lambda\circ \Pi_\gamma) u}_{{\rm L}^\infty(M)} \leq C (1+ \lambda)^{\frac{n-\kappa-1}{2m}} \norm{u}_{{\rm L}^2(M)}, \qquad u \in {\rm L}^2(M), \end{equation} for a positive constant $C$ independent of $\gamma$. In particular, we obtain \begin{equation*} \norm{u}_{{\rm L}^\infty(M)} \ll \lambda^{\frac{n-\kappa-1}{2m}} \end{equation*} for any eigenfunction $u \in {\rm L}^2_\gamma(M)$ of $P$ with eigenvalue $\lambda$ satisfying $\norm{u}_{{\rm L}^2}=1$ and $\gamma \in {\mathcal W}_\lambda$. \end{proposition}
\begin{proof}
By Proposition \ref{thm:kernelasymp} we have for $\gamma \in {\mathcal W}_\lambda$ the uniform bound \begin{equation*}
|K_{ \widetilde \chi_\lambda\circ \Pi_\gamma}(y,y)| \ll (1+\lambda)^{\frac{n-\kappa-1}m}, \qquad y \in M=M_\mathrm{prin} \cup M_\mathrm{except}. \end{equation*} The assertion now follows by a repetition of the arguments in the proof of \cite[Proposition 5.1 and Equation (5.4)]{ramacher16}. \end{proof}
Similarly, we are able to sharpen the ${\rm L}^p$-bounds for isotypic spectral clusters derived in \cite[Theorem 5.4]{ramacher16} in the isotypic aspect.
\begin{thm}[\bf ${\rm L}^p$-bounds for isotypic spectral clusters] \label{thm:20.02.2016} Let $M$ be a closed connected Riemannian manifold of dimension $n$ on which a torus $T$ acts effectively and isometrically with orbits of the same dimension $\kappa$. Further, let $P$ be the unique self-adjoint extension of a $T$-invariant elliptic positive symmetric classical pseudodifferential operator on $M$ of degree $m$, and assume that its principal symbol $p(x,\xi)$ is such that the co-spheres $S_x^\ast M:=\mklm{(x,\xi) \in T^\ast M\mid \, p(x,\xi)=1}$ are strictly convex. Denote by $\chi_\lambda$ the spectral projection onto the sum of eigenspaces of $P$ with eigenvalues in the interval $(\lambda, \lambda+1]$, and by $\Pi_\gamma$ the projection onto the isotypic component ${\rm L}^2_\gamma(M)$, where $\gamma \in \widehat T$. Then, for $u \in {\rm L}^2(M)$ and arbitrary $\gamma\in {\mathcal W}_\lambda$ \begin{equation} \label{eq:31.12.2015} \norm{(\chi_\lambda \circ \Pi_\gamma) u}_{{\rm L}^q(M)} \leq \begin{cases} C \, \lambda^{\frac{\delta_{n-\kappa}(q)}{m}} \norm{u}_{{\rm L}^2(M)}, & \frac{2(n-\kappa+1)}{n-\kappa-1} \leq q \leq \infty,
\\ C \, \lambda^{\frac{(n-\kappa-1)(2-q')}{4m q'}} \norm{u}_{{\rm L}^2(M)}, & 2 \leq q \leq \frac{2(n-\kappa+1)}{n-\kappa-1}, \end{cases} \end{equation}
for a positive constant $C$ independent of $\gamma$, where $\frac 1q+\frac 1{q'}=1$ and
\begin{equation*}
\delta_{n-\kappa}(q):=\max \left ( (n-\kappa) \left | \frac 12-\frac 1q \right| -\frac 12,0 \right ).
\end{equation*} In particular, \begin{equation*} \norm{u}_{{\rm L}^q(M)} \ll \begin{cases} \lambda^{\frac{\delta_{n-\kappa}(q)}{m}}, & \frac{2(n-\kappa+1)}{n-\kappa-1} \leq q \leq \infty,
\\ \lambda^{\frac{(n-\kappa-1)(2-q')}{4m q'}}, & 2 \leq q \leq \frac{2(n-\kappa+1)}{n-\kappa-1}, \end{cases} \end{equation*} for any eigenfunction $u \in {\rm L}^2_\gamma(M)$ of $P$ with eigenvalue $\lambda$ satisfying $\norm{u}_{{\rm L}^2}=1$ and $\gamma \in {\mathcal W}_\lambda$. \end{thm}
\begin{proof}
The proof is a verbatim repetition of the proof of \cite[Theorem 5.4]{ramacher16} where instead of \cite[Theorem 3.4]{ramacher16} the improved estimates from Theorem \ref{thm:14.05.2017} are used.
\end{proof}
As a consequence of the previous theorem, all examples given in \cite[Section 5]{ramacher16} can be sharpened in the isotypic aspect.
\section{The singular equivariant local Weyl law. Caustics and concentration of \\ eigenfunctions} \label{sec:5}
Using the improved remainder estimates from Theorem \ref{thm:12.05.2015} all results in \cite[Section 7]{ramacher16} can be sharpened. In particular, the singular equivariant local Weyl law proved in \cite[Theorem 7.7]{ramacher16} can be improved in the isotypic aspect.
As before, let $M$ be a closed connected Riemannian manifold and $T$ a torus acting on $M$ by isometries, and consider the decomposition of $M$ into orbit types
\begin{equation}
\label{eq:2.19} M=M(H_1) \, \dot \cup \, \cdots \, \dot \cup \, M(H_L), \end{equation} where we suppose that the isotropy types are numbered in such a way that $(H_i) \geq (H_j)$ implies $i \leq j$, $(H_L)$ being the principal isotropy type. We then have the following
\begin{thm}[\bf Singular equivariant local Weyl law] \label{thm:15.11.2015} Let $M$ be a closed connected Riemannian manifold of dimension $n$ with an isometric and effective action of a torus $T$ and $P_0$ a $T$-invariant elliptic classical pseudodifferential operator on $M$ of degree $m$. Let $p(x,\xi)$ be its principal symbol, and assume that $P_0$ is positive and symmetric. Denote its unique self-adjoint extension by $P$, and for a given $\gamma \in \widehat T$ let $e_\gamma(x,y,\lambda)$ be its reduced spectral counting function. Write $\kappa$ for the dimension of a $T$-orbit in $M$ of principal type. Then, for $x \in M_\mathrm{prin}\cup M_\mathrm{except}$ one has the asymptotic formula
\begin{gather*}
\left |e_\gamma(x,x,\lambda)- \frac{ \lambda^{\frac{n-\kappa}{m}}}{(2\pi)^{n-\kappa}} \sum_{N=1}^{\Lambda-1} \, \sum_{i_1<\dots< i_{N}} \, \prod_{l=1}^{N} |\tau_{i_l}|^{\dim G- \dim H_{i_l}-\kappa} \mathcal{L}_{i_1\dots i_{N} }^{0,0}(x,\gamma) \right | \\
\leq \widetilde C_\gamma \, \lambda^{\frac{n-\kappa-1}m} \sum_{N=1}^{\Lambda-1}\, \sum_{i_1<\dots< i_{N}} \prod_{l=1}^N |\tau_{i_l}|^{\dim G- \dim H_{i_l}-\kappa-1} \end{gather*}
as $\lambda \to +\infty$, where the multiple sum runs over all possible totally ordered subsets $\mklm{(H_{i_1}),\dots, (H_{i_N})}$ of singular isotropy types, and the coefficients satisfy the bounds $
\mathcal{L}_{i_1\dots i_{N}}^{0,0}(x,\gamma) \ll \norm{\gamma}_\infty
$
uniformly in $x$, while
\begin{equation*}
\widetilde C_\gamma \ll \sup_{l\leq 1} \norm{D^l \gamma}_\infty
\end{equation*}
is a constant independent of $x$ and $\lambda$, the $D^l$ are differential operators on $T$ of order $l$, and the $\tau_{i_j}=\tau_{i_j}(x)$ are parameters satisfying $|\tau_{i_j}|\approx \text{dist}\, (x, M(H_{i_j}))$. \end{thm} \begin{proof} The proof consists in a verbatim repetition of the proof of \cite[Theorem 7.7]{ramacher16} using the improved remainder estimate in Theorem \ref{thm:12.05.2015} (a). \end{proof}
As an immediate consequence this yields
\begin{cor}[\bf Singular point-wise bounds for isotypic spectral clusters] \label{cor:2.12.2015} In the setting of Theorem \ref{thm:15.11.2015} we have \begin{equation*}
\sum_{\stackrel{\lambda_j \in (\lambda,\lambda+1],}{ e_j \in {\rm L}^2_\gamma(M)}} |e_j(x)|^2 \leq \begin{cases} C \, \lambda^{\frac{n-1}m}, & x\in M_\mathrm{sing}, \\ & \\
C_\gamma \, \lambda^{\frac{n-\kappa-1}m} \sum\limits_{N=1}^{\Lambda-1}\, \sum\limits_{i_1<\dots< i_{N}}\prod\limits_{l=1}^N |\tau_{i_l}|^{\dim G- \dim H_{i_l}-\kappa-1}, & x\in M-M_\mathrm{sing}, \end{cases} \end{equation*} with $C>0$ independent of $\gamma$. In particular, the bound holds for each individual $e_j \in {\rm L}^2_\gamma(M)$ with $\lambda_j \in (\lambda, \lambda+1]$. \end{cor} \qed
Integrating the asymptotic formulae in Theorems \ref{thm:main} and \ref{thm:15.11.2015} over $x\in M$ yields a sharpened remainder estimate for the equivariant Weyl law derived in \cite{ramacher10}.
In addition, as a consequence of the previous theorem, the example given in \cite[Section 7]{ramacher16} can be sharpened in the isotypic aspect.
\section{Sharpness} \label{sec:sharpness}
By the arguments given in \cite[Section 8]{ramacher16} the remainder estimates in Theorems \ref{thm:main} and \ref{thm:15.11.2015} are sharp in the spectral parameter $\lambda$, and already attained on the $2$-dimensional sphere $S^2$.
To see that they are almost sharp in the isotypic aspect, endow $M=S^2$ with the induced metric, and let $\Delta$ be the corresponding Laplace-Beltrami operator. The eigenvalues of $-\Delta$ are given by the numbers $ \lambda_k=k(k+1)$ with $k=0,1,2,3,\dots$, and the corresponding $(2k+1)$-dimensional eigenspaces ${\mathcal H}_k$ are spanned by the classical spherical functions $Y_{km}$, $m \in {\mathbb Z}$, $|m| \leq k$.
The $Y_{km}$ are orthonormal to each other, and by the spectral theorem we have the decomposition $ {\rm L}^2(M)= \bigoplus _{k=0}^\infty {\mathcal H}_k$. Furthermore, by restricting the left regular representation of $\mathrm{SO}(3)$ in ${\rm L}^2(S^2)$ to the eigenspaces ${\mathcal H}_k$ one obtains realizations for all elements in the unitary dual $\widehat{\mathrm{SO}(3)}\simeq \mklm{k=0,1,2,3,\dots}$. Now, let $T= \mathrm{SO}(2)$ be isomorphic to the isotropy group of a point in $S^2\simeq \mathrm{SO}(3)/\mathrm{SO}(2)$. The irreducible representations of $\mathrm{SO}(2)$ are $1$-dimensional, and the corresponding characters are given by the exponentials $\theta \mapsto e^{im\theta}$, where $\theta \in [0,2\pi)\simeq \mathrm{SO}(2)$, $m \in {\mathbb Z}\simeq \widehat{\mathrm{SO}(2)}$. Each ${\mathcal H}_k$ decomposes into $\mathrm{SO}(2)$ representations with multiplicity $1$ according to $
{\mathcal H}_k=\bigoplus_{|m|\leq k} {\mathcal H}_k^m, $ where ${\mathcal H}_k^m$ is spanned by $Y_{km}$.
Consequently, if $N_{m}(\lambda):=\int_{S^2} e_m(x,x,\lambda) dS^2(x)$ denotes the equivariant counting function of $\Delta$ we obtain the estimate \begin{align} \label{eq:3.6.2017}
N_{m}(\lambda) =\sum_{k(k+1) \leq \lambda, \, |m| \leq k} 1\approx \sum_{|m| \leq k \leq \sqrt{\lambda}} 1\approx \sqrt{\lambda}-|m|, \end{align} as $\lambda \to +\infty $,
showing that the remainder estimates in Theorems \ref{thm:main} and \ref{thm:15.11.2015} are almost sharp both in the eigenvalue and in the isotypic aspect.
To see that the equivariant ${\rm L}^p$-bounds in Section \ref{sec:equivLp} are almost sharp in the eigenvalue and isotypic aspect, let us consider the standard $2$-torus $M=T^2\subset {\mathbb R}^3$ on which $G=\mathrm{SO}(2)$ acts by rotations around the symmetry axis. Then all orbits are $1$-dimensional and of principal type.
Proposition \ref{thm:bounds} then implies the bound \begin{equation*} \norm{u}_{{\rm L}^\infty(T^2)} =O(1 ) , \qquad u \in {\rm L}^2(T^2), \, \norm{u}_{{\rm L}^2}=1, \end{equation*} for any eigenfunction of the Laplace-Beltrami operator $\Delta$ on $T^2$. Now, via the identification \begin{equation*} {\mathbb R}^2/{\mathbb Z}^2 \stackrel{\simeq} \longrightarrow T^2 \simeq S^1 \times S^1, (x_1,x_2) \, \longmapsto \, (e^{2\pi i x_1}, e^{2\pi i x_2}), \end{equation*} the standard orthonormal basis of eigenfunctions of $\Delta$ is given by $\mklm{e^{2\pi i k_1 x_1}e^{2\pi i k_2 x_2}\mid (k_1,k_2) \in {\mathbb Z}^2}$, showing that the bounds in Proposition \ref{thm:bounds} and Theorem \ref{thm:20.02.2016} are almost sharp both in the eigenvalue and isotypic aspect.
\appendix \renewcommand*{\thesection}{\Alph{section}}
\providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace} \providecommand{\MR}{\relax\ifhmode\unskip\space\fi MR }
\providecommand{\MRhref}[2]{
\href{http://www.ams.org/mathscinet-getitem?mr=#1}{#2} } \providecommand{\href}[2]{#2}
\end{document}
\begin{document}
\title[One and two level densities]{Type-I contributions to the one and two level densities of quadratic Dirichlet $L$--functions over function fields}
\author{Hung M. Bui, Alexandra Florea and J. P. Keating} \address{Department of Mathematics, University of Manchester, Manchester M13 9PL, UK} \email{hung.bui@manchester.ac.uk} \address{Department of Mathematics, Columbia University, New York NY 10027, USA} \email{aflorea@math.columbia.edu} \address{Mathematical Institute, University of Oxford, Oxford OX2 6GG, UK} \email{keating@maths.ox.ac.uk}
\begin{abstract} Using the Ratios Conjecture, we write down precise formulas with lower order terms for the one and the two level densities of zeros of quadratic Dirichlet $L$--functions over function fields. We denote the various terms arising as Type-$0$, Type-I and Type-II contributions. When the support of the Fourier transform of the test function is sufficiently restricted, we rigorously compute the Type-$0$ and Type-I terms and confirm that they match the conjectured answer. When the restrictions on the support are relaxed, our results suggest that Type-II contributions become important in the two level density. \end{abstract}
\allowdisplaybreaks
\maketitle
\section{Introduction}
In this paper we compute the one and the two level densities of zeros of $L$--functions associated to quadratic characters over function fields. We compute certain Type-I contributions (as in the work of Conrey and Keating \cite{ck1, ck2, ck3, ck4, ck5}) and write down explicit conjectural Type-II terms predicted by the Ratios Conjecture \cite{cfz}.
Understanding zeros in families of $L$--functions is a problem of considerable interest which has been much studied. Katz and Sarnak \cite{katzsarnak, katzsarnak2} conjectured that the behavior of zeros close to the central point in a family of $L$--functions coincides with the distribution of eigenvalues near $1$ of matrices in a certain symmetry group associated to the family. There is an abundance of papers in the literature in which the above-mentioned agreement is observed (for example \cite{ILS, hughesrudnick, miller, miller2}).
When computing the $n$--level density of zeros for a particular family of $L$--functions, the Katz and Sarnak conjectures predict the main term in the asymptotic formula. Conrey, Farmer and Zirnbauer \cite{cfz} conjectured formulas for averages of ratios of $L$--functions, and using the Ratios Conjecture, one can write down an explicit formula for the $n$--level density which recovers the Katz-Sarnak main term and further includes lower order terms \cite{CS}. In the case of the Riemann zeta-function, the resulting expressions coincide with formulas obtained earlier by Bogomolny and Keating using the Hardy-Littlewood twin-prime conjecture \cite{BK1} (see also \cite{BeKe, BK4, BK5}).
A related problem is that of computing moments in families of $L$--functions. Using analogies with random matrix theory, Keating and Snaith \cite{ks2, ksnaith} conjectured asymptotic formulas with the leading order term for moments in various families. A more refined conjecture, due to Conrey, Farmer, Keating, Rubinstein and Snaith \cite{cfkrs}, and similar in nature to the Ratios Conjecture \cite{cfz}, predicts lower order terms undetected by the random matrix models. More recent work of Conrey and Keating \cite{ck1, ck2,ck3,ck4,ck5} revisits the question of evaluating shifted moments of the Riemann zeta-function from a different perspective, and recovers the lower order terms predicted in \cite{cfkrs}. Conrey and Keating used long Dirichlet polynomials rather than the approximate functional equation, and divide the terms that arise into certain Type-$0$, Type-I and Type-II contributions (depending on the number of swaps in the shifts). This builds on previous work in the case of the $n$-point correlation of the zeros by Bogomolny and Keating \cite{BK2, BK3}, where a similar division was first introduced (see also \cite{ck6, ck7}). Here we use the same ideas to examine asymptotic formulas including lower order terms for the $n$ level density of zeros. Throughout our paper, we use the Conrey and Keating nomenclature for Type-$0$, Type-I and Type-II terms.
For the family of quadratic Dirichlet $L$--functions, \"{O}zl\"{u}k and Snyder \cite{ozluk} computed the one level density of zeros when the support of the Fourier transform of the test function is in $(-2,2)$. The higher densities in this family of $L$--functions were studied by Rubinstein \cite{rubinstein}. For a Schwartz test function $f \in \mathcal{S}(\mathbb{R}^n)$, even in all the variables, Rubinstein computed the $n$--level density when the Fourier transform of $f$ is supported in $\sum_{j=1}^n |u_j|<1$, conditional on the Generalized Riemann Hypothesis. Gao \cite{gao} attempted to double the range in Rubinstein's result. More specifically, he showed that if $f$ is of the form $f(x_1,\ldots,x_n)= \prod_{i=1}^n f_i(x_i)$ and each $\hat{f_i}$ is supported in $|u_i|<s_i$ and $\sum_{i=1}^n s_i<2$, then the $n$--level density of zeros is equal to a complicated combinatorial factor $A(f)$. For $n=2,3$, he showed that $A(f)$ agrees with the Katz and Sarnak conjecture. Recent work of Entin, Roditty-Gershon and Rudnick \cite{entin} showed that indeed the combinatorial factor $A(f)$ obtained by Gao matches the random matrix theory prediction for all $n$. Their novel approach does not involve doing the combinatorics directly, but passing to a function field analog of the problem, taking the limit $q \to \infty$ and using equidistribution results of Katz and Sarnak. An alternative approach was developed in \cite{CS2, MS}.
In the function field setting, Rudnick \cite{R} computed the one level density of zeros for the family of quadratic Dirichlet $L$--functions and showed that there is a transition when the support of the Fourier transform goes beyond $1$. Bui and Florea \cite{bf} obtained infinitely many lower order terms when the support of the Fourier transform is in certain ranges, and further computed the pair correlation of zeros in the family.
In the present paper, we consider the two level density of zeros in the family of quadratic Dirichlet $L$--functions. Let $\mathcal{H}_{2g+1}$ denote the space of monic, square-free polynomials of degree $2g+1$ over $\mathbb{F}_q[x]$. For simplicity, in the definition of the two level density, we take the test function to be equal to $1$. The two level density of zeros is defined to be \begin{equation}
I_2(N;\alpha,\beta)=\frac{1}{|\mathcal{H}_{2g+1}|}\sum_{D\in\mathcal{H}_{2g+1}}\sum_{\substack{f_1,f_2\in\mathcal{M}\\d(f_1f_2)\leq N}}\frac{\Lambda(f_1)\Lambda(f_2)\chi_D(f_1f_2)}{|f_1|^{1/2+\alpha}|f_2|^{1/2+\beta}}, \label{i2} \end{equation} where $\Lambda(f)$ denotes the von Mangoldt function over function fields, and $\chi_D(f)$ is the quadratic character.
Using the Ratios Conjecture over function fields \cite{AK}, we write down precise formulas for the two level density in terms of Type-$0$, Type-I and Type-II contributions. The Type-I terms kick in when $N \geq 2g$ and Type-II terms appear when $N \geq 4g$. We compute the Type-$0$ and Type-I terms rigorously by estimating sums over primes (i.e.~over monic irreducible polynomials). Our approach in computing the two level density is more direct than the one used by Entin, Roditty-Gershon and Rudnick \cite{entin}, and we do not take $q \to \infty$ (hence we do not use any equidistribution results). The Type-$0$ terms, or the so-called ``diagonal'', come from prime powers $f_1$ and $f_2$ in \eqref{i2} with the product $f_1 f_2$ being a square. The diagonal terms are relatively straightforward to compute. Evaluating the Type-I terms is more subtle and requires more involved computations. We use the Poisson summation formula for the sum over $D$ (after removing the squarefree condition) and then we compute the contribution from the parameter on the dual side of the Poisson summation formula being a square. We sum up these contributions and then we check that they match the answer conjectured from the Ratios Conjecture.
Type-I terms essentially come from squares on the dual side of the Poisson summation formula over function fields. Our methods do not allow us to identify the Type-II terms which only arise when $N \geq 4g$, but we explicitly write down the conjectured Type-II contribution. This is one of our main goals: to draw attention to the fact that when the methods that have been employed successfully for many years in calculations of the one level density are applied to the two level density they fail to capture all of the terms, underlining the importance of developing methods to compute the Type-II terms in this case.
For the sake of completeness, we also include the computation of the one level density (with a shift) and match the terms we obtain with the Type-$0$ and Type-I contributions.
\subsection{Outline of the paper} In Section \ref{background} we gather a few useful lemmas we will need. In Section \ref{1rc} we use the Ratios Conjecture to write down formulas for the one level density of zeros with Type-$0$ and Type-I terms (there are no Type-II terms for the one level density). We rigorously compute these terms when $N<4g$ and match them to the conjecture in Section \ref{1compute}. In Section \ref{2rc} we again use the Ratios Conjecture to predict the Type-$0$, Type-I and Type-II contributions for the two level density. The diagonal terms are computed in Section \ref{2diag} and Type-I terms in Section \ref{type1}. In subsection \ref{combine} we combine the various contributions from Sections \ref{type11} and \ref{type12} and show that they agree with the conjecture.
{\bf Acknowledgements.} A. Florea gratefully acknowledges the support of an NSF Postdoctoral Fellowship during part of the research which led to this paper. J.P. Keating was supported by a Royal Society Wolfson Research Merit Award, EPSRC Programme Grant EP/K034383/1 LMF: $L$-Functions and Modular Forms, and by ERC Advanced Grant 740900 (LogCorRM).
The authors would also like to thank Julio Andrade, Brian Conrey, Chantal David, Steve Gonek and Matilde Lal\'{i}n for many stimulating discussions and useful comments during SQuaRE meetings at AIM.
\section{Lemmas} \label{background} Let $q \equiv 1 \pmod 4$ be a prime. We denote the set of monic polynomials over $\mathbb{F}_q[x]$ by $\mathcal{M}$. Let $\mathcal{M}_n$ denote the set of monic polynomials of degree $n$, $\mathcal{H}_n$ the set of monic, squarefree polynomials of degree $n$, and $\mathcal{P}_n$ the monic, irreducible polynomials of degree $n$. The set of monic polynomials of degree less than or equal to $n$ is denoted by $\mathcal{M}_{\leq n}$. For simplicity, we denote the degree of a polynomial $f$ by $d(f)$.
The norm of a polynomial $f$ is defined by $|f|= q^{d(f)}$.
The zeta-function over $\mathbb{F}_q[x]$ is defined by
$$\zeta_q(s) = \sum_{f\in\mathcal{M}} \frac{1}{|f|^s}$$ for $\Re(s)>1$. Since there are $q^n$ monic polynomials of degree $n$, one can easily show that $$\zeta_q(s) = \frac{1}{1-q^{1-s}},$$ and this provides a meromorphic continuation of $\zeta_q$ with a simple pole at $s=1$. Making the change of variables $u=q^{-s}$, the zeta-function becomes $$ \mathcal{Z}(u) = \zeta_q(s) = \sum_{f \in\mathcal{M}} u^{d(f)} = \frac{1}{1-qu},$$ which has a simple pole at $u=1/q$. Note that $\mathcal{Z}(u)$ is given by the Euler product
$$ \mathcal{Z}(u) = \prod_P \Big(1-u^{d(P)}\Big)^{-1},$$ for $|u|<1/q$, where the product is over monic, irreducible polynomials in $\mathbb{F}_q[t]$.
The quadratic character over $\mathbb{F}_q[t]$ is defined as follows. For $P$ a monic, irreducible polynomial let $$ \Big( \frac{f}{P} \Big)= \begin{cases} 1 & \mbox{ if } P \nmid f \text{ and } f \text{ is a square modulo } P, \\ -1 & \mbox{ if } P \nmid f \text{ and } f \text{ is not a square modulo } P, \\
0 & \mbox{ if } P|f. \end{cases} $$ We extend the definition of the quadratic residue symbol above to any monic $D \in \mathbb{F}_q[t]$ by multiplicativity, and define the quadratic character $\chi_D$ by $$\chi_D(f) = \Big( \frac{D}{f} \Big).$$ Since we assumed that $q \equiv 1 \pmod 4$, note that the quadratic reciprocity law takes the following form: if $A$ and $B$ are two monic coprime polynomials, then $$ \Big( \frac{A}{B} \Big) = \Big( \frac{B}{A} \Big).$$ We define the von Mangoldt function to be $$\Lambda(f) = \begin{cases} d(P) & \mbox{ if } f=cP^k, c \in \mathbb{F}_q^{\times}, \\ 0 & \mbox{ otherwise.} \end{cases} $$ The following lemma expresses sums over squarefree polynomials in terms of sums over monics.
\begin{lemma}\label{L1} For $f\in\mathcal{M}$ we have \[
\sum_{D\in\mathcal{H}_{2g+1}}\chi_D(f)=\sum_{C|f^\infty}\sum_{h\in\mathcal{M}_{2g+1-2d(C)}}\chi_f(h)-q\sum_{C|f^\infty}\sum_{h\in\mathcal{M}_{2g-1-2d(C)}}\chi_f(h), \] where the summations over $C$ are over monic polynomials $C$ whose prime factors are among the prime factors of $f$. \end{lemma} \begin{proof} See Lemma $2.2$ in \cite{aflorea}. \end{proof}
We define the generalized Gauss sum as follows. For $f \in \mathcal{M}$, let \[ G(V,f):= \sum_{u \pmod f} \chi_f(u)e\Big(\frac{uV}{f}\Big), \] where the exponential over function fields was defined in \cite{hayes}. Specifically, for $a \in \mathbb{F}_q((1/t))$, $$e(a) = e^{2 \pi i a_1/q},$$ where $a= \ldots +a_1/t+ \ldots$.
The following two lemmas are Proposition 3.1 and Lemma 3.2 in \cite{aflorea}.
\begin{lemma}\label{L2} Let $f\in\mathcal{M}_n$. If $n$ is even then \[
\sum_{h\in\mathcal{M}_m}\chi_f(h)=\frac{q^m}{|f|}\bigg(G(0,f)+q\sum_{V\in\mathcal{M}_{\leq n-m-2}}G(V,f)-\sum_{V\in\mathcal{M}_{\leq n-m-1}}G(V,f)\bigg), \] otherwise \[
\sum_{h\in\mathcal{M}_m}\chi_f(h)= \frac{q^{m+1/2}} {|f|}\sum_{V\in\mathcal{M}_{n-m-1}}G(V,f). \] \end{lemma}
\begin{lemma}\label{L3} \begin{enumerate} \item If $(f,h)=1$, then $G(V, fh)= G(V, f) G(V,h)$. \item Write $V= V_1 P^{\alpha}$ where $P \nmid V_1$. Then
$$G(V , P^j)= \begin{cases} 0 & \mbox{if } j \leq \alpha \text{ and } j \text{ odd,} \\ \varphi(P^j) & \mbox{if } j \leq \alpha \text{ and } j \text{ even,} \\
-|P|^{j-1} & \mbox{if } j= \alpha+1 \text{ and } j \text{ even,} \\
\chi_P(V_1) |P|^{j-1/2} & \mbox{if } j = \alpha+1 \text{ and } j \text{ odd, } \\ 0 & \mbox{if } j \geq 2+ \alpha . \end{cases}$$ \end{enumerate} \end{lemma}
The following lemmas are the equivalents of the P\'olya--Vinogradov inequality and the Weil bound in function fields. \begin{lemma} \label{pv} We have
$$ \sum_{D \in \mathcal{H}_{2g+1}} \chi_D(P) \ll |P|^{1/2},$$ and for $Q$ a prime polynomial,
$$ \sum_{\substack{D \in \mathcal{H}_{2g+1} \\ (D,Q)=1}} \chi_D(P) \ll \frac{g}{d(Q)}|P|^{1/2} .$$ \end{lemma} \begin{proof} See, for example, Lemma $3.5$ and p. $8033$ in \cite{bf}. \end{proof}
\begin{lemma}[The Weil bound]\label{sumprimes} For $V\in\mathcal{M}$ not a perfect square we have $$\sum_{P \in \mathcal{P}_n} \chi_V(P) \ll \frac{d(V)}{n} q^{n/2}.$$ \end{lemma} \begin{proof} See equation $2.5$ in \cite{R}. \end{proof}
\begin{lemma}\label{L4} For $f\in\mathcal{M}$ we have \[
\frac{1}{| \mathcal{H}_{2g+1}|}\sum_{D \in \mathcal{H}_{2g+1}} \chi_D(f^{2})=\prod_{P|f}\bigg(1-\frac{1}{|P|+1}\bigg)+O(q^{-2g}). \] \end{lemma} \begin{proof} See, for example, Lemma $3.7$ in \cite{bf}. \end{proof}
\section{The one level density - using the Ratios Conjecture} \label{1rc} Consider \begin{equation}\label{maineq}
I_1(N;\alpha)=\frac{1}{|\mathcal{H}_{2g+1}|}\sum_{D\in\mathcal{H}_{2g+1}}\sum_{f\in \mathcal{M}_{\leq N}}\frac{\Lambda(f)\chi_D(f)}{|f|^{1/2+\alpha}}, \end{equation}
where the shift is assumed to satisfy $|\alpha| \ll 1/g$.
Using an analogue of the Perron formula in the form \begin{equation}\label{Perron}
\sum_{n\leq N}a(n)=\frac{1}{2\pi i}\oint_{|u|=r}\bigg(\sum_{n=0}^{\infty}a(n)u^{n}\bigg)\frac{du}{u^{N+1}(1-u)} \end{equation} we get \begin{align*}
I_1(N;\alpha)&=\frac{1}{|\mathcal{H}_{2g+1}|}\sum_{D\in\mathcal{H}_{2g+1}}\frac{1}{2\pi i}\oint_{|u|=r}\sum_{f\in \mathcal{M}}\frac{\Lambda(f)\chi_D(f)u^{d(f)}}{|f|^{1/2+\alpha}}\frac{du}{u^{N+1}(1-u)}\\
&=\frac{1}{|\mathcal{H}_{2g+1}|}\sum_{D\in\mathcal{H}_{2g+1}} \frac{1}{2 \pi i} \oint_{|u|=r} \frac{u}{q^{1/2+\alpha}} \frac{ \mathcal{L}'}{\mathcal{L}} \Big(\frac{u}{q^{1/2+\alpha}},\chi_D \Big) \frac{du}{u^{N+1}(1-u)} \end{align*}
for any $r<q^{-1/2-\varepsilon}$. We enlarge the contour to $|u|=r=q^{-\varepsilon}$. The Ratios Conjecture implies that (see, for example, Theorem $8.1$ in \cite{bf}) \begin{align*}
\frac{1}{|\mathcal{H}_{2g+1}|} &\sum_{D\in\mathcal{H}_{2g+1}}u \frac{\mathcal{L}'}{\mathcal{L}} (u,\chi_D) = u^2 \frac{\mathcal{Z}'}{\mathcal{Z}}(u^2) - \mathcal{B}(u) + (qu^{2})^{g} \mathcal{A}_1(u)\mathcal{Z}\Big(\frac{1}{q^{2}u^{2}}\Big)+O_\varepsilon(q^{-g+\varepsilon g}), \end{align*}
where \begin{equation}\label{mathcalBr}\mathcal{B}(u) = \sum_P \frac{ d(P) u^{2d(P)}}{(1-u^{2 d(P)})(|P|+1)}\end{equation} and \begin{align*}
\mathcal{A}_1(u) &= \prod_P \Big(1- \frac{1}{|P|} \Big)^{-1} \Big( 1-\frac{1}{|P|^2u^{2 d(P)} (|P|+1)}-\frac{1}{|P|+1}\Big) \\
&= \prod_P \Big(1-\frac{1}{|P|^2} \Big)^{-1} \Big(1- \frac{1}{|P|^3 u^{2d(P)}} \Big) = \frac{\mathcal{Z}(1/q^{2})}{\mathcal{Z}(1/q^{3}u^{2})}=1+\frac{1-(qu^2)^{-1}}{q-1}. \end{align*} Hence, up to an error of size $O_\varepsilon(q^{-g+\varepsilon g})$, \begin{align}\label{1}
I_1(N;\alpha)&=\frac{1}{2\pi i}\oint_{|u|=r}\frac{du}{u^{N-1}(1-u)(q^{2\alpha}-u^2)}-\frac{1}{2\pi i}\oint_{|u|=r}\frac{\mathcal{B}(u,\alpha)du}{u^{N+1}(1-u)}\\
&\qquad\quad+\frac{q^{-2g\alpha}}{2\pi i}\oint_{|u|=r}\frac{du}{u^{N-2g-1}(1-u)(u^2-q^{2\alpha})}+\frac{q^{-2g\alpha}}{2\pi i(q-1)}\oint_{|u|=r}\frac{du}{u^{N-2g+1}(1-u)},\nonumber \end{align} where \begin{equation*} \mathcal{B}(u,\alpha)=\mathcal{B}\Big(\frac{u}{q^{1/2+\alpha}}\Big). \end{equation*}
Enlarging the contours we cross the poles at $u=1$ and $u=\pm q^{\alpha}$ in the first integral, and the only pole at $u=1$ in the second integral. Note that $\mathcal{B}(u,\alpha)$ is absolutely convergent for $|u|<q^{1/2-\varepsilon}$, so in the second integral we shift the contour to $|u|=q^{1/2-\varepsilon}$, obtaining an error term of size $O_\varepsilon(q^{-N/2+\varepsilon N})$. Hence the contribution of the first two terms in \eqref{1} is equal to \begin{align}\label{type01} &\frac{q^{-2[N/2]\alpha}-1}{1-q^{2\alpha}}-B(\alpha) +O_\varepsilon(q^{-N/2+\varepsilon N}), \end{align} where \begin{equation}\label{Br}
B(\alpha):=\mathcal{B}(1,\alpha)= \sum_P \frac{ d(P) }{(|P|^{1+2\alpha}-1)(|P|+1)}. \end{equation} This should correspond to the diagonal terms.
For the remaining two terms in \eqref{1}, we note that they vanish if $N< 2g$, and if $N\geq 2g$ they contribute \begin{align}\label{type1} \frac{q^{-2g\alpha}-q^{-2[N/2]\alpha}}{1-q^{2\alpha}}+\frac{q^{-2g\alpha}}{q-1}. \end{align} This should correspond to the Type-I terms. Combining \eqref{type01} and \eqref{type1} we arrive at the following conjecture.
\begin{conjecture}\label{conjecture1level} We have \begin{align*} I_1(N;\alpha)&=\frac{q^{-2[N/2]\alpha}-1}{1-q^{2\alpha}}-B(\alpha)+\mathds{1}_{N\geq 2g}\bigg(\frac{q^{-2g\alpha}-q^{-2[N/2]\alpha}}{1-q^{2\alpha}}+\frac{q^{-2g\alpha}}{q-1}\bigg)\\ &\qquad\qquad+O_\varepsilon(q^{-g+\varepsilon g}) +O_\varepsilon(q^{-N/2+\varepsilon N}). \end{align*} \end{conjecture}
\section{The one level density} \label{1compute} We assume in this section that $N<4g$.
\subsection{The diagonal}
The diagonal, denoted by $I_1^0(N;\alpha)$, corresponds to the terms $f=P^{2k}$ in \eqref{maineq}, and so in view of Lemma \ref{L4} we have \[
I_1^0(N;\alpha)=\sum_{1\leq kn\leq [N/2]}\sum_{P\in\mathcal{P}_{n}}\frac{d(P)}{|P|^{k(1+2\alpha)}}-\sum_{1\leq kn\leq [N/2]}\sum_{P\in\mathcal{P}_{n}}\frac{d(P)}{|P|^{k(1+2\alpha)}(|P|+1)}+O_\varepsilon(q^{-2g+\varepsilon g}). \] The first term, by the Prime Polynomial Theorem, is equal to \begin{align*}
\sum_{d(f)\leq [N/2]}\frac{\Lambda(f)}{|f|^{1+2\alpha}}&=\sum_{1\leq n\leq [N/2]}q^{-2n\alpha }=\frac{q^{-2[N/2]\alpha}-1}{1-q^{2\alpha}}. \end{align*} For the second term, note that \begin{align*}
\sum_{1\leq kn\leq [N/2]}\sum_{P\in\mathcal{P}_{n}}\frac{d(P)}{|P|^{k(1+2\alpha)}(|P|+1)}&=B(\alpha)-\sum_{kn> [N/2]}\sum_{P\in\mathcal{P}_{n}}\frac{d(P)}{|P|^{k(1+2\alpha)}(|P|+1)}\\ &=B(\alpha)+O_\varepsilon\big(q^{-N/2+\varepsilon g}\big). \end{align*} Hence, \[ I_1^0(N;\alpha)=\frac{q^{-2[N/2]\alpha}-1}{1-q^{2\alpha}}-B(\alpha)+O_\varepsilon\big(q^{-N/2+\varepsilon g}\big). \] Notice that the leading term matches up with \eqref{type01}.
\subsection{Type-I terms}
We now evaluate the off-diagonal terms corresponding to $f=P^{2k+1}$ in \eqref{maineq}, \[
I_1^1(N;\alpha)=\frac{1}{|\mathcal{H}_{2g+1}|}\sum_{d(P^{2k+1})\leq N}\frac{d(P)}{|P|^{(2k+1)(1/2+\alpha)}}\sum_{D\in\mathcal{H}_{2g+1}}\chi_D(P). \] Combining the P\'olya--Vinogradov inequality in Lemma \ref{pv} with the Prime Polynomial Theorem, the contribution of the terms with $k\geq 1$ is \begin{equation*}
\ll q^{-2g}\sum_{n\leq N}\sum_{k\geq1}q^{-(k-1)n}\ll Nq^{-2g},
\end{equation*} and the contribution of the terms with $d(P)=n$ is \[ \ll q^{n-2g}. \] So \[
I_1^1(N;\alpha)=\frac{1}{|\mathcal{H}_{2g+1}|}\sum_{g+1\leq d(P)\leq N}\frac{d(P)}{|P|^{1/2+\alpha}}\sum_{D\in\mathcal{H}_{2g+1}}\chi_D(P)+O(q^{- g}). \]
From Lemma \ref{L1} we have \begin{align*}
&\sum_{D\in\mathcal{H}_{2g+1}}\chi_D(P)=\sum_{C|P^\infty}\sum_{h\in\mathcal{M}_{2g+1-2d(C)}}\chi_P(h)-q\sum_{C|P^\infty}\sum_{h\in\mathcal{M}_{2g-1-2d(C)}}\chi_P(h). \end{align*}
The sums over $h$ are non-zero only if $0\leq 2g\pm1-2d(C)<d(P) $. Since $C|P^\infty$ and $d(P)\geq g+1$, we must have $C=1$ and, consequently, $d(P)\geq 2g$. Thus, \[
I_1^1(N;\alpha)=\frac{1}{|\mathcal{H}_{2g+1}|}\sum_{2g\leq d(P)\leq N}\frac{d(P)}{|P|^{1/2+\alpha}}\bigg(\sum_{h\in\mathcal{M}_{2g+1}}\chi_P(h)-q\sum_{h\in\mathcal{M}_{2g-1}}\chi_P(h)\bigg)+O(q^{- g}). \]
Consider the terms with $d(P)$ odd. Applying Lemma \ref{L2} and Lemma \ref{L3}, the expression inside the bracket is \[
\frac{q^{2g+3/2}}{|P|^{1/2}}\sum_{d(V)= d(P)-2g-2}\chi_P(V)-\frac{q^{2g+1/2}}{|P|^{1/2}}\sum_{d(V)= d(P)-2g}\chi_P(V). \] Notice that $V$ cannot be a square in the sums, and hence by Lemma \ref{sumprimes}, the contribution of these terms to $I_1^1(N;\alpha)$ is $O(Nq^{N/2-2g})$.
If $d(P)$ is even, then from Lemma \ref{L2} and Lemma \ref{L3} we have \begin{align*}
\sum_{h\in\mathcal{M}_{2g+1}}\chi_P(h)-q\sum_{h\in\mathcal{M}_{2g-1}}\chi_P(h)=&\frac{q^{2g+1}}{|P|^{1/2}}\bigg(q\sum_{d(V)\leq d(P)-2g-3}\chi_P(V)-\sum_{d(V)\leq d(P)-2g-2}\chi_P(V)\bigg)\\
&\ -\frac{q^{2g}}{|P|^{1/2}}\bigg(q\sum_{d(V)\leq d(P)-2g-1}\chi_P(V)-\sum_{d(V)\leq d(P)-2g}\chi_P(V)\bigg). \end{align*} As above, the contribution of the terms $V$ non-square is negligible. For $V=\square$, as $d(V)<d(P)$ we have $\chi_P(V)=1$. Thus, the contribution from $V=\square$ is \begin{align*}
&\frac{q^{2g+1}}{|P|^{1/2}}\bigg(q\sum_{d(V)\leq d(P)/2-g-2}1-\sum_{d(V)\leq d(P)/2-g-1}1\bigg)-\frac{q^{2g}}{|P|^{1/2}}\bigg(q\sum_{d(V)\leq d(P)/2-g-1}1-\sum_{d(V)\leq d(P)/2-g}1\bigg)\\ &\qquad=\begin{cases}
-\frac{q^{2g}(q-1)}{|P|^{1/2}}& \textrm{if }d(P)\geq 2g+2,\\ q^g & \textrm{if }d(P)=2g. \end{cases} \end{align*} We hence obtain that \[
I_1^1(N;\alpha)=\mathds{1}_{N\geq 2g}\bigg(-\sum_{g+1\leq n\leq[N/2]}\sum_{P\in\mathcal{P}_{2n}}\frac{d(P)}{|P|^{1+\alpha}}+\frac{q^{-2g\alpha}}{q-1}\bigg)+O(Nq^{N/2-2g})+O(q^{- g}). \]
Now, in view of the Prime Polynomial Theorem, \begin{align*}
&\sum_{g+1\leq n\leq[N/2]}\sum_{P\in\mathcal{P}_{2n}}\frac{d(P)}{|P|^{1+\alpha}}=\sum_{g+1\leq n\leq[N/2]}q^{-2n(1+\alpha)}\big(q^{2n}+O(q^n)\big)\\ &\qquad\qquad=\sum_{g+1\leq n\leq[N/2]}q^{-2n\alpha}+O(q^{- g})=-\frac{q^{-2g\alpha}-q^{-2[N/2]\alpha}}{1-q^{2\alpha}}+O(q^{- g}). \end{align*} So \[ I_1^1(N;\alpha)=\mathds{1}_{N\geq 2g}\bigg(\frac{q^{-2g\alpha}-q^{-2[N/2]\alpha}}{1-q^{2\alpha}}+\frac{q^{-2g\alpha}}{q-1}\bigg)+O(Nq^{N/2-2g})+O(q^{- g}). \] Notice that the leading term matches up with \eqref{type1}.
\section{The two level density - Using the Ratios Conjecture} \label{2rc} \subsection{The Ratios Conjecture}
We would like to study \[
\frac{1}{|\mathcal{H}_{2g+1}|}\sum_{D\in\mathcal{H}_{2g+1}}\frac{L(1/2+\alpha,\chi_D)L(1/2+\beta,\chi_D)}{L(1/2+\gamma,\chi_D)L(1/2+\delta,\chi_D)} \]
using the recipe in \cite{CS}, where the shifts are assumed to satisfy $|\alpha|, |\beta|, |\gamma|, |\delta| \ll 1/g$.
We use the approximate functional equation for each of the two $L$--functions in the numerator. The contribution coming from the first parts of the approximate functional equations is equal to \[
\frac{1}{|\mathcal{H}_{2g+1}|}\sum_{f_1,f_2,h_1,h_2}\frac{\mu(h_1)\mu(h_2)}{|f_1|^{1/2+\alpha}|f_2|^{1/2+\beta}|h_1|^{1/2+\gamma}|h_2|^{1/2+\delta}}\sum_{D\in\mathcal{H}_{2g+1}}\chi_D(f_1f_2h_1h_2). \] We only keep the terms with $f_1f_2h_1h_2=\square$. The above expression then becomes \begin{align*}
&\sum_{f_1f_2h_1h_2=\square}\frac{\mu(h_1)\mu(h_2)a(f_1f_2h_1h_2)}{|f_1|^{1/2+\alpha}|f_2|^{1/2+\beta}|h_1|^{1/2+\gamma}|h_2|^{1/2+\delta}}, \end{align*} where \[
a(f)=\prod_{P|f}\bigg(1+\frac{1}{|P|}\bigg)^{-1}. \] Using multiplicativity, this is equal to \begin{align*}
&\prod_{P}\sum_{\substack{f_1,f_2,h_1,h_2\\f_1+f_2+h_1+h_2\ \textrm{even}}}\frac{\mu(P^{h_1})\mu(P^{h_2})a(P^{f_1+f_2+h_1+h_2})}{|P|^{(1/2+\alpha)f_1+(1/2+\beta)f_2+(1/2+\gamma)h_1+(1/2+\delta)h_2}}\\ &\qquad\qquad=A(\alpha,\beta,\gamma,\delta)\frac{\zeta_q(1+2\alpha)\zeta_q(1+2\beta)\zeta_q(1+\alpha+\beta)\zeta_q(1+\gamma+\delta)}{\zeta_q(1+\alpha+\gamma)\zeta_q(1+\alpha+\delta)\zeta_q(1+\beta+\gamma)\zeta_q(1+\beta+\delta)}, \end{align*} where \begin{align*}
&A(\alpha,\beta,\gamma,\delta)=\prod_P\bigg(1+\frac{1}{|P|}\bigg)^{-1}\bigg(1-\frac{1}{|P|^{1+\alpha+\beta}}\bigg)\bigg(1-\frac{1}{|P|^{1+\gamma+\delta}}\bigg)\\
&\qquad \bigg(1-\frac{1}{|P|^{1+\alpha+\gamma}}\bigg)^{-1}\bigg(1-\frac{1}{|P|^{1+\alpha+\delta}}\bigg)^{-1}\bigg(1-\frac{1}{|P|^{1+\beta+\gamma}}\bigg)^{-1}\bigg(1-\frac{1}{|P|^{1+\beta+\delta}}\bigg)^{-1}\\
&\qquad\qquad\bigg(1+\frac{1}{|P|}+\frac{1}{|P|^{1+\alpha+\beta}}+\frac{1}{|P|^{1+\gamma+\delta}}-\frac{1}{|P|^{1+\alpha+\gamma}}-\frac{1}{|P|^{1+\alpha+\delta}}-\frac{1}{|P|^{1+\beta+\gamma}}-\frac{1}{|P|^{1+\beta+\delta}}\\
&\qquad\qquad\qquad-\frac{1}{|P|^{2+2\alpha}}-\frac{1}{|P|^{2+2\beta}}+\frac{1}{|P|^{2+\alpha+\beta+\gamma+\delta}}+\frac{1}{|P|^{3+2\alpha+2\beta}}\bigg). \end{align*}
The contributions from the other parts of the approximate functional equations can be determined by using the functional equation \[ L(\tfrac{1}{2}+\alpha,\chi_D)=q^{-2g\alpha }L(\tfrac{1}{2}-\alpha,\chi_D). \]Hence we have the following.
\begin{conjecture} We have \begin{align*}
&\frac{1}{|\mathcal{H}_{2g+1}|}\sum_{D\in\mathcal{H}_{2g+1}}\frac{L(1/2+\alpha,\chi_D)L(1/2+\beta,\chi_D)}{L(1/2+\gamma,\chi_D)L(1/2+\delta,\chi_D)}\\ &\qquad=A(\alpha,\beta,\gamma,\delta)\frac{\zeta_q(1+2\alpha)\zeta_q(1+2\beta)\zeta_q(1+\alpha+\beta)\zeta_q(1+\gamma+\delta)}{\zeta_q(1+\alpha+\gamma)\zeta_q(1+\alpha+\delta)\zeta_q(1+\beta+\gamma)\zeta_q(1+\beta+\delta)}\\ &\qquad\qquad+q^{-2g\alpha }A(-\alpha,\beta,\gamma,\delta)\frac{\zeta_q(1-2\alpha)\zeta_q(1+2\beta)\zeta_q(1-\alpha+\beta)\zeta_q(1+\gamma+\delta)}{\zeta_q(1-\alpha+\gamma)\zeta_q(1-\alpha+\delta)\zeta_q(1+\beta+\gamma)\zeta_q(1+\beta+\delta)}\\ &\qquad\qquad+q^{-2g\beta }A(\alpha,-\beta,\gamma,\delta)\frac{\zeta_q(1+2\alpha)\zeta_q(1-2\beta)\zeta_q(1+\alpha-\beta)\zeta_q(1+\gamma+\delta)}{\zeta_q(1+\alpha+\gamma)\zeta_q(1+\alpha+\delta)\zeta_q(1-\beta+\gamma)\zeta_q(1-\beta+\delta)}\\ &\qquad\qquad+q^{-2g(\alpha+\beta) }A(-\alpha,-\beta,\gamma,\delta)\frac{\zeta_q(1-2\alpha)\zeta_q(1-2\beta)\zeta_q(1-\alpha-\beta)\zeta_q(1+\gamma+\delta)}{\zeta_q(1-\alpha+\gamma)\zeta_q(1-\alpha+\delta)\zeta_q(1-\beta+\gamma)\zeta_q(1-\beta+\delta)}\\ &\qquad\qquad+O_\varepsilon\big(q^{-g+\varepsilon g}\big). \end{align*} \end{conjecture}
Notice that for a function $f(u,v)$ analytic at $(u,v)=(r,r)$ and a function $F(s)$ having a simple pole at $s=1$ with residue $r_{F}$, we have \begin{equation*}\label{trick101}
\frac{\partial}{\partial\alpha}\frac{f(\alpha,\gamma)}{F(1-\alpha+\gamma)}\bigg|_{\alpha=\gamma=r}=-\frac{f(r,r)}{r_{F}}. \end{equation*} As $r_{\zeta_q}=1/\log q$, taking derivatives with respect to $\alpha$ and $\beta$, and setting $\gamma=\alpha$, $\delta=\beta$ we obtain
\begin{conjecture} We have \begin{align*}
&\frac{1}{|\mathcal{H}_{2g+1}|}\sum_{D\in\mathcal{H}_{2g+1}}\frac{L'}{L}(\tfrac12+\alpha,\chi_D)\frac{L'}{L}(\tfrac12+\beta,\chi_D)\\ &\qquad=\frac{\zeta_q'}{\zeta_q}(1+2\alpha)\frac{\zeta_q'}{\zeta_q}(1+2\beta)+\bigg(\frac{\zeta_q'}{\zeta_q}\bigg)'(1+\alpha+\beta)\nonumber\\ &\qquad\qquad+(\log q)B(\alpha)\frac{\zeta_q'}{\zeta_q}(1+2\beta)+(\log q)B(\beta)\frac{\zeta_q'}{\zeta_q}(1+2\alpha)+(\log q)^2C(\alpha,\beta)\\ &\qquad\qquad+q^{-2g\alpha }(\log q)^2A_2(\alpha)T_2(\alpha,\beta)+q^{-2g\beta }(\log q)^2A_2(\beta)T_2(\beta,\alpha)\\ &\qquad\qquad+q^{-2g(\alpha+\beta) }(\log q)^2A(\alpha,\beta)\frac{\zeta_q(1-2\alpha)\zeta_q(1-2\beta)\zeta_q(1-\alpha-\beta)\zeta_q(1+\alpha+\beta)}{\zeta_q(1-\alpha+\beta)\zeta_q(1+\alpha-\beta)}\\ &\qquad\qquad+O_\varepsilon\big(q^{-g+\varepsilon g}\big), \end{align*} where \begin{align*} A_2(\alpha)&:=A(-\alpha,\beta,\alpha,\beta)\zeta_q(1-2\alpha)=\frac{\zeta_q(2)\zeta_q(1-2\alpha)}{\zeta_q(2-2\alpha)}\\ &=\frac{1}{1-q^{2\alpha}}+\frac{1}{q-1}=\frac{q^{2\alpha}}{1-q^{2\alpha}}+\frac{q}{q-1}, \end{align*} \begin{align*} A(\alpha,\beta)&:=A(-\alpha,-\beta,\alpha,\beta)\\
&=\prod_{P}\bigg(1+\frac{1}{|P|}\bigg)^{-1}\bigg(1-\frac{1}{|P|}\bigg)^{-2}\bigg(1-\frac{1}{|P|^{1-\alpha-\beta}}\bigg)\bigg(1-\frac{1}{|P|^{1+\alpha+\beta}}\bigg)\\
&\qquad \bigg(1-\frac{1}{|P|^{1-\alpha+\beta}}\bigg)^{-1}\bigg(1-\frac{1}{|P|^{1+\alpha-\beta}}\bigg)^{-1}\bigg(1-\frac{1}{|P|}+\frac{1}{|P|^{1-\alpha-\beta}}+\frac{1}{|P|^{1+\alpha+\beta}}\\
&\qquad\qquad-\frac{1}{|P|^{1-\alpha+\beta}}-\frac{1}{|P|^{1+\alpha-\beta}}-\frac{1}{|P|^{2-2\alpha}}-\frac{1}{|P|^{2-2\beta}}+\frac{1}{|P|^{2}}+\frac{1}{|P|^{3-2\alpha-2\beta}}\bigg), \end{align*} $B(\alpha)$ is defined in \eqref{Br}, \begin{align*}
C(\alpha,\beta)&=B(\alpha)B(\beta)+\sum_{P}\frac{d(P)^2\big(|P|^{2+\alpha+\beta}(|P|+1)(|P|^\alpha-|P|^\beta)^2-(|P|^{1+\alpha+\beta}-1)^3\big)}{(|P|^{1+2\alpha}-1)(|P|^{1+2\beta}-1)(|P|^{1+\alpha+\beta}-1)^2(|P|+1)}\\
&\qquad -\sum_{P}\frac{d(P)^2}{(|P|^{1+2\alpha}-1)(|P|^{1+2\beta}-1)(|P|+1)^2}\\
&=B(\alpha)B(\beta)+\sum_{P}\frac{d(P)^2|P|^{2+\alpha+\beta}(|P|^\alpha-|P|^\beta)^2}{(|P|^{1+2\alpha}-1)(|P|^{1+2\beta}-1)(|P|^{1+\alpha+\beta}-1)^2}\\
&\qquad-\sum_{P}\frac{d(P)^2|P|^{1+\alpha+\beta}}{(|P|^{1+2\alpha}-1)(|P|^{1+2\beta}-1)(|P|+1)}+\sum_{P}\frac{d(P)^2|P|}{(|P|^{1+2\alpha}-1)(|P|^{1+2\beta}-1)(|P|+1)^2} \end{align*} and \begin{align}\label{factno6}
T_2(\alpha,\beta)&=\frac{1}{\log q}\bigg(\frac{\zeta_q'}{\zeta_q}(1+\alpha+\beta)-\frac{\zeta_q'}{\zeta_q}(1-\alpha+\beta)-\frac{\zeta_q'}{\zeta_q}(1+2\beta)-\frac{\partial A(-\alpha,b,\alpha,\beta)/\partial b\big|_{b=\beta}}{A(-\alpha,\beta,\alpha,\beta)}\bigg)\nonumber\\
&=\sum_{P}\frac{d(P)(|P|^{2(1-\alpha)}-|P|^{1-2\alpha}-|P|^{2-3\alpha+\beta}+|P|^{2-\alpha+\beta})}{(|P|^{2(1-\alpha)}-1)(|P|^{1+2\beta}-1)}. \end{align}
\end{conjecture}
Equivalently we have
\begin{conjecture}\label{RCF2} We have \begin{align*}
&\frac{1}{|\mathcal{H}_{2g+1}|}\sum_{D\in\mathcal{H}_{2g+1}}uv\frac{\mathcal{L}'}{\mathcal{L}}(u,\chi_D)\frac{\mathcal{L}'}{\mathcal{L}}(v,\chi_D)\\ &\qquad=u^2v^2\frac{\mathcal{Z}'}{\mathcal{Z}}(u^2)\frac{\mathcal{Z}'}{\mathcal{Z}}(v^2)+u^2v^2\bigg(\frac{\mathcal{Z}'}{\mathcal{Z}}\bigg)'(uv)+\mathcal{B}(u)v^2\frac{\mathcal{Z}'}{\mathcal{Z}}(v^2)+\mathcal{B}(v)u^2\frac{\mathcal{Z}'}{\mathcal{Z}}(u^2)+\mathcal{C}(u,v)\\ &\qquad\qquad+(qu^2)^g\mathcal{A}_2(u)\mathcal{T}_2(u,v)+(qv^2)^g\mathcal{A}_2(v)\mathcal{T}_2(v,u)\\ &\qquad\qquad+(quv)^{2g}\mathcal{A}(u,v)\frac{\mathcal{Z}\big(\frac{1}{q^2u^2}\big)\mathcal{Z}\big(\frac{1}{q^2v^2}\big)\mathcal{Z}\big(\frac{1}{q^2uv}\big)\mathcal{Z}(uv)}{\mathcal{Z}\big(\frac{v}{qu}\big)\mathcal{Z}\big(\frac{u}{qv}\big)}+O_\varepsilon\big(q^{-g+\varepsilon g}\big), \end{align*} where \begin{align*} \mathcal{A}_2(u)&=\frac{qu^2}{qu^2-1}+\frac{1}{q-1}=\frac{1}{qu^2-1}+\frac{q}{q-1}, \end{align*} \begin{align*}
\mathcal{A}(u,v)&=\prod_{P}\bigg(1+\frac{1}{|P|}\bigg)^{-1}\bigg(1-\frac{1}{|P|}\bigg)^{-2}\bigg(1-\frac{1}{|P|^{2}(uv)^{d(P)}}\bigg)\Big(1-(uv)^{d(P)}\Big)\\
&\qquad \bigg(1-\frac{v^{d(P)}}{|P|u^{d(P)}}\bigg)^{-1}\bigg(1-\frac{u^{d(P)}}{|P|v^{d(P)}}\bigg)^{-1}\bigg(1-\frac{1}{|P|}+\frac{1}{|P|^{2}(uv)^{d(P)}}+(uv)^{d(P)}\\
&\qquad\qquad-\frac{v^{d(P)}}{|P|u^{d(P)}}-\frac{u^{d(P)}}{|P|v^{d(P)}}-\frac{1}{|P|^{3}u^{2d(P)}}-\frac{1}{|P|^{3}v^{2d(P)}}+\frac{1}{|P|^{2}}+\frac{1}{|P|^{5}(uv)^{2d(P)}}\bigg), \end{align*} $\mathcal{B}(u)$ is defined in \eqref{mathcalBr}, \begin{align*} \mathcal{C}(u,v)&=\mathcal{B}(u)\mathcal{B}(v)+\sum_{P}\frac{d(P)^2(uv)^{d(P)}(u^{d(P)}-v^{d(P)})^2}{(1-u^{2d(P)})(1-v^{2d(P)})(1-(uv)^{d(P)})^2}\\
&\qquad-\sum_{P}\frac{d(P)^2(uv)^{d(P)}}{(1-u^{2d(P)})(1-v^{2d(P)})(|P|+1)}+\sum_{P}\frac{d(P)^2|P|(uv)^{2d(P)}}{(1-u^{2d(P)})(1-v^{2d(P)})(|P|+1)^2} \end{align*} and \begin{align*}
\mathcal{T}_2(u,v)&=\sum_{P}\frac{d(P)(|P|^{3}(uv)^{2d(P)}-|P|^{2}(uv)^{2d(P)}-|P|^{3}u^{3d(P)}v^{d(P)}+|P|^{2}(uv)^{d(P)})}{(|P|^{3}u^{2d(P)}-1)(1-v^{2d(P)})}. \end{align*}
\end{conjecture}
\subsection{The two level density}
Consider \begin{equation}\label{2level}
I_2(N;\alpha,\beta)=\frac{1}{|\mathcal{H}_{2g+1}|}\sum_{D\in\mathcal{H}_{2g+1}}\sum_{\substack{f_1,f_2\in\mathcal{M}\\d(f_1f_2)\leq N}}\frac{\Lambda(f_1)\Lambda(f_2)\chi_D(f_1f_2)}{|f_1|^{1/2+\alpha}|f_2|^{1/2+\beta}}. \end{equation}
Using the Perron formula \eqref{Perron} this is equal to \begin{align*}
&\frac{1}{|\mathcal{H}_{2g+1}|}\sum_{D\in\mathcal{H}_{2g+1}}\frac{1}{2\pi i}\oint_{|u|=r}\sum_{f_1,f_2\in\mathcal{M}}\frac{\Lambda(f_1)\Lambda(f_2)\chi_D(f_1f_2)u^{d(f_1)+d(f_2)}}{|f_1|^{1/2+\alpha}|f_2|^{1/2+\beta}}\frac{du}{u^{N+1}(1-u)}\\
&\qquad=\frac{1}{|\mathcal{H}_{2g+1}|}\sum_{D\in\mathcal{H}_{2g+1}}\frac{1}{2\pi i}\oint_{|u|=r} \frac{u^2}{q^{1+\alpha+\beta}} \frac{ \mathcal{L}'}{\mathcal{L}} \Big(\frac{u}{q^{1/2+\alpha}},\chi_D \Big) \frac{ \mathcal{L}'}{\mathcal{L}} \Big(\frac{u}{q^{1/2+\beta}},\chi_D \Big)\frac{du}{u^{N+1}(1-u)} \end{align*}
for any $r<q^{-1/2-\varepsilon}$. We enlarge the contour to $|u|=r=q^{-\varepsilon}$. In view of Conjecture \ref{RCF2} we write \begin{align}
I_2(N;\alpha,\beta)=\frac{1}{2\pi i}\oint_{|u|=r}\sum_{j=1}^{4}R_j(u,\alpha,\beta)\,\frac{du}{u^{N+1}(1-u)}+O_\varepsilon\big(q^{-g+\varepsilon g}\big). \label{int_density} \end{align} The terms coming from the first parts of the approximate functional equations, $R_1(u,\alpha,\beta)$, correspond to the diagonal terms, while the terms coming from only $1$ swap in the approximate functional equations, $R_2(u,\alpha,\beta)$ and $R_3(u,\alpha,\beta)$, correspond to the Type-I terms. Type-II terms are the terms with $2$ swaps, $R_4(u,\alpha,\beta)$.
For the $0$ swap terms we have \begin{align}\label{type02level} R_1(u,\alpha,\beta)&=\frac{u^4}{q^{2(1+\alpha+\beta)}}\frac{\mathcal{Z}'}{\mathcal{Z}} \Big(\frac{u^2}{q^{1+2\alpha}}\Big)\frac{\mathcal{Z}'}{\mathcal{Z}}\Big(\frac{u^2}{q^{1+2\beta}}\Big)+\frac{u^4}{q^{2(1+\alpha+\beta)}}\bigg(\frac{\mathcal{Z}'}{\mathcal{Z}}\bigg)'\Big(\frac{u^2}{q^{1+\alpha+\beta}}\Big)\\ &\qquad\qquad+\mathcal{B}(u,\alpha) \frac{u^2}{q^{1+2\beta}}\frac{\mathcal{Z}'}{\mathcal{Z}}\Big(\frac{u^2}{q^{1+2\beta}}\Big)+\mathcal{B}(u,\beta)\frac{u^2}{q^{1+2\alpha}}\frac{\mathcal{Z}'}{\mathcal{Z}}\Big(\frac{u^2}{q^{1+2\alpha}}\Big)+\mathcal{C}(u,\alpha,\beta),\nonumber \end{align} where \begin{align}\label{type02level1} \mathcal{C}(u,\alpha,\beta)&:=\mathcal{C}\Big(\frac{u}{q^{1/2+\alpha}},\frac{u}{q^{1/2+\beta}}\Big)\nonumber\\
&=\mathcal{B}(u,\alpha)\mathcal{B}(u,\beta)+\sum_{P}\frac{d(P)^2|P|^{2+\alpha+\beta}u^{4d(P)}(|P|^\alpha-|P|^\beta)^2}{(|P|^{1+2\alpha}-u^{2d(P)})(|P|^{1+2\beta}-u^{2d(P)})(|P|^{1+\alpha+\beta}-u^{2d(P)})^2}\nonumber\\
&\qquad\qquad-\sum_{P}\frac{d(P)^2|P|^{1+\alpha+\beta}u^{2d(P)}}{(|P|^{1+2\alpha}-u^{2d(P)})(|P|^{1+2\beta}-u^{2d(P)})(|P|+1)}\\
&\qquad\qquad+\sum_{P}\frac{d(P)^2|P|u^{4d(P)}}{(|P|^{1+2\alpha}-u^{2d(P)})(|P|^{1+2\beta}-u^{2d(P)})(|P|+1)^2}.\nonumber \end{align}
Concerning the $1$ swap terms we have \begin{align} \label{r2} R_2(u,\alpha,\beta)+R_3(u,\alpha,\beta)=q^{-2g\alpha}u^{2g}\mathcal{A}_2(u,\alpha)\mathcal{T}_2(u,\alpha,\beta)+q^{-2g\beta}u^{2g}\mathcal{A}_2(u,\beta)\mathcal{T}_2(u,\beta,\alpha), \end{align} where \begin{align}\label{A22} \mathcal{A}_2(u,\alpha)&=\mathcal{A}_2\Big(\frac{u}{q^{1/2+\alpha}}\Big)\nonumber\\ &=\frac{u^2}{u^2-q^{2\alpha}}+\frac{1}{q-1}=\frac{q^{2\alpha}}{u^2-q^{2\alpha}}+\frac{q}{q-1} \end{align} and \begin{align*} \mathcal{T}_2(u,\alpha,\beta)&=\mathcal{T}_2\Big(\frac{u}{q^{1/2+\alpha}},\frac{u}{q^{1/2+\beta}}\Big)\nonumber\\
&=\sum_{P} \frac{u^{2d(P)}(|P|^{2(1-\alpha)}u^{2d(P)}-|P|^{1-2\alpha}u^{2d(P)}-|P|^{2-3\alpha+\beta}u^{2d(P)}+|P|^{2-\alpha+\beta})}{(|P|^{2(1-\alpha)}u^{2d(P)}-1)(|P|^{1+2\beta}-u^{2d(P)})}. \end{align*}
Note that $1$ swap terms kick in once $N \geq 2g$. In the computation of Type-I terms in section \ref{type1} we also assume that $N <4g$. We write $\mathcal{T}_2(u,\alpha,\beta)$ as a sum of four terms. For the first three terms, we claim that we can truncate the sum over $P$ to those primes $P$ with $d(P)<g$; otherwise the corresponding integrals in equation \eqref{int_density} will be equal to zero. Indeed, in order for the integrals to be non-vanishing, we need $2g+2d(P)=N$. Since $N<4g$ it follows that $d(P)<g$. We write the fourth term in the expression of $\mathcal{T}_2(u,\alpha,\beta)$ as \begin{align}
\sum_P & \frac{u^{2d(P)} |P|^{2-\alpha+\beta}}{(|P|^{2(1-\alpha)}u^{2d(P)}-1)(|P|^{1+2\beta}-u^{2d(P)})} = \sum_{d(P) <g} \frac{u^{2d(P)} |P|^{2-\alpha+\beta}}{(|P|^{2(1-\alpha)}u^{2d(P)}-1)(|P|^{1+2\beta}-u^{2d(P)})} \label{nonconv} \\
&+ \sum_{d(P) \geq g} \frac{u^{2d(P)} |P|^{2-\alpha+\beta}}{(|P|^{2(1-\alpha)}u^{2d(P)}-1)(|P|^{1+2\beta}-u^{2d(P)})} \nonumber \\
&= \sum_{d(P) <g} \frac{u^{2d(P)} |P|^{2-\alpha+\beta}}{(|P|^{2(1-\alpha)}u^{2d(P)}-1)(|P|^{1+2\beta}-u^{2d(P)})} + \sum_{d(P) \geq g} \frac{1}{|P|^{1+\beta-\alpha}} \nonumber \\
& + \sum_{d(P) \geq g} \frac{|P|^{2(1-\alpha)} u^{4d(P)}+|P|^{1+2\beta}+u^{2d(P)}}{|P|^{1+\beta-\alpha} (|P|^{2(1-\alpha)}u^{2d(P)}-1)(|P|^{1+2\beta}-u^{2d(P)})} \nonumber \\
&= \sum_{d(P) <g} \frac{u^{2d(P)} |P|^{2-\alpha+\beta}}{(|P|^{2(1-\alpha)}u^{2d(P)}-1)(|P|^{1+2\beta}-u^{2d(P)})} + \sum_{d(P) \geq g} \frac{1}{|P|^{1+\beta-\alpha}}+O(q^{-g}). \nonumber
\end{align} We use the Prime Polynomial Theorem for the sum over $d(P)\geq g$ above and, without worrying about convergence issues since the recipe is a heuristic argument, we replace it by what we get by summing the geometric series. Then when $N<4g$ we rewrite \begin{align}
\mathcal{T}_2(u,\alpha,\beta) &= \sum_{d(P)<g} \frac{u^{2d(P)}(|P|^{2(1-\alpha)}u^{2d(P)}-|P|^{1-2\alpha}u^{2d(P)}-|P|^{2-3\alpha+\beta}u^{2d(P)}+|P|^{2-\alpha+\beta})}{(|P|^{2(1-\alpha)}u^{2d(P)}-1)(|P|^{1+2\beta}-u^{2d(P)})} \nonumber \\
&+ q^{g(-\beta+\alpha)} \frac{1}{q^{\alpha-\beta}-1}.
\label{t2}
\end{align} We remark that although the term in the second line above gives a term involving $q^{-g(\alpha+\beta)}$ in the expression of $R_2(u,\alpha,\beta)$, when we put all the terms together, the contributions of this type will cancel out.
For the $2$ swaps terms we have \begin{align*} R_4(u,\alpha,\beta) = q^{-2g(\alpha+\beta)} u^{4g} \mathcal{A} \Big( \frac{u}{q^{1/2+\alpha}}, \frac{u}{q^{1/2+\beta}}\Big) \frac{ \mathcal{Z}( \frac{1}{q^{1-2 \alpha}u^2}) \mathcal{Z}( \frac{1}{q^{1-2 \beta}u^2} ) \mathcal{Z}( \frac{1}{q^{1- \alpha-\beta}u^2} ) \mathcal{Z}( \frac{u^2}{q^{1+\alpha+\beta}} )}{\mathcal{Z} (\frac{1}{q^{1-\alpha+\beta}}) \mathcal{Z}(\frac{1}{q^{1+\alpha-\beta}})}. \end{align*} \kommentar{\Hung{I got something slightly different from what you got.} \acom{I suggest we include the expression for Type-II terms even if we don't actually compute them. Please check that I got this right. Type-II terms correspond to the integral of $R_4(u,\alpha,\beta)$, where \begin{align*} R_4(u,\alpha,\beta) = q^{-2g(\alpha+\beta)} u^{4g} \mathcal{A} \Big( \frac{u}{q^{1/2+\alpha}}, \frac{u}{q^{1/2+\beta}}\Big) \frac{ \mathcal{Z} \Big( \frac{q^{-1+2 \alpha}}{u^2} \Big) \mathcal{Z} \Big( \frac{q^{-1+2 \beta}}{u^2} \Big) \mathcal{Z} \Big( \frac{q^{-1+ \alpha+\beta}}{u^2} \Big) \mathcal{Z} \Big( \frac{u^2}{q^{1+\alpha+\beta}} \Big)}{\mathcal{Z} (q^{-1+\alpha+\beta}) \mathcal{Z}(q^{-1-\alpha-\beta})}. \end{align*}} }
\section{The two level density - The diagonal} \label{2diag} In this and the following section, we assume that $N<4g$.
The diagonal, denoted by $I_2^0(N;\alpha,\beta)$, comes from the terms with $f_1f_2=\square$ in \eqref{2level}. From Lemma \ref{L4} and the Perron formula \eqref{Perron} we have \begin{align}\label{600}
I_2^0(N;\alpha,\beta)&=\sum_{\substack{f_1,f_2\in\mathcal{M}\\d(f_1f_2)\leq N\\ f_1f_2=\square}}\frac{\Lambda(f_1)\Lambda(f_2)}{|f_1|^{1/2+\alpha}|f_2|^{1/2+\beta}}\prod_{P|f_1f_2}\bigg(1-\frac{1}{|P|+1}\bigg)+O_\varepsilon(q^{-2g+\varepsilon g})\nonumber\\
&=\frac{1}{2\pi i}\oint_{|u|=r}J_2^0(u,\alpha,\beta)\frac{du}{u^{N+1}(1-u)}+O_\varepsilon(q^{-2g+\varepsilon g}) \end{align} for any $r<q^{-1/2-\varepsilon}$, where \[
J_2^0(u,\alpha,\beta)=\sum_{\substack{f_1,f_2\in\mathcal{M}\\f_1f_2=\square}}\frac{\Lambda(f_1)\Lambda(f_2)u^{d(f_1f_2)}}{|f_1|^{1/2+\alpha}|f_2|^{1/2+\beta}}\prod_{P|f_1f_2}\bigg(1-\frac{1}{|P|+1}\bigg). \] We write \[ J_2^0(u,\alpha,\beta)=J_{2}^{0,\textrm{ee}}(u,\alpha,\beta)+J_{2}^{0,\textrm{oo}}(u,\alpha,\beta), \] where $J_{2}^{0,\textrm{ee}}(u,\alpha,\beta)$ consists of the terms $f_1=P^{2k}$, $f_2=Q^{2l}$ with $k,l\geq1$, and $J_{2}^{0,\textrm{oo}}(u,\alpha,\beta)$ consists of the terms $f_1=P^{2k+1}$, $f_2=P^{2l+1}$ with $k,l\geq0$.
We have \begin{align}\label{Jee}
J_{2}^{0,\textrm{ee}}(u,\alpha,\beta)&=\sum_{k,l\geq1}\sum_{P, Q}\frac{d(P)d(Q)u^{2kd(P)+2ld(Q)}}{|P|^{k(1+2\alpha)}|Q|^{l(1+2\beta)}}\bigg(1-\frac{1}{|P|+1}\bigg)\bigg(1-\frac{1}{|Q|+1}\bigg)\nonumber\\
&\qquad\qquad+\sum_{k,l\geq1}\sum_{P}\frac{d(P)^2|P|u^{2(k+l)d(P)}}{|P|^{k(1+2\alpha)+l(1+2\beta)}(|P|+1)^2}\nonumber\\ &=\bigg(\frac{u^2}{q^{1+2\alpha}}\frac{\mathcal{Z}'}{\mathcal{Z}} \Big(\frac{u^2}{q^{1+2\alpha}}\Big)+\mathcal{B}(u,\alpha)\bigg)\bigg(\frac{u^2}{q^{1+2\beta}}\frac{\mathcal{Z}'}{\mathcal{Z}} \Big(\frac{u^2}{q^{1+2\beta}}\Big)+\mathcal{B}(u,\beta)\bigg)\\
&\qquad\qquad+\sum_{P}\frac{d(P)^2|P|u^{4d(P)}}{(|P|^{1+2\alpha}-u^{2d(P)})(|P|^{1+2\beta}-u^{2d(P)})(|P|+1)^2}.\nonumber \end{align} On the other hand, \begin{align*}
J_{2}^{0,\textrm{oo}}(u,\alpha,\beta)&=\sum_{k,l\geq0}\sum_{P}\frac{d(P)^2u^{2(k+l+1)d(P)}}{|P|^{1+\alpha+\beta+k(1+2\alpha)+l(1+2\beta)}}\bigg(1-\frac{1}{|P|+1}\bigg)\\
&=\sum_{P}\frac{d(P)^2|P|^{1+\alpha+\beta}u^{2d(P)}}{(|P|^{1+2\alpha}-u^{2d(P)})(|P|^{1+2\beta}-u^{2d(P)})}\\
&\qquad\qquad-\sum_{P}\frac{d(P)^2|P|^{1+\alpha+\beta}u^{2d(P)}}{(|P|^{1+2\alpha}-u^{2d(P)})(|P|^{1+2\beta}-u^{2d(P)})(|P|+1)}. \end{align*} Note that \[ u^2\bigg(\frac{\mathcal{Z}'}{\mathcal{Z}}\bigg)'(u)=\sum_{P}\frac{d(P)^2u^{d(P)}}{(1-u^{d(P)})^2}. \] So \begin{align}\label{Joo} &J_{2}^{0,\textrm{oo}}(u,\alpha,\beta)-\frac{u^4}{q^{2(1+\alpha+\beta)}}\bigg(\frac{\mathcal{Z}'}{\mathcal{Z}}\bigg)'\Big(\frac{u^2}{q^{1+\alpha+\beta}}\Big)\nonumber\\
&\qquad\qquad=\sum_{P}\frac{d(P)^2|P|^{2+\alpha+\beta}u^{4d(P)}(|P|^\alpha-|P|^\beta)^2}{(|P|^{1+2\alpha}-u^{2d(P)})(|P|^{1+2\beta}-u^{2d(P)})(|P|^{1+\alpha+\beta}-u^{2d(P)})^2}\\
&\qquad\qquad\qquad\qquad-\sum_{P}\frac{d(P)^2|P|^{1+\alpha+\beta}u^{2d(P)}}{(|P|^{1+2\alpha}-u^{2d(P)})(|P|^{1+2\beta}-u^{2d(P)})(|P|+1)}.\nonumber \end{align}
We enlarge the contour in \eqref{600} to $|u|=r=q^{-\varepsilon}$. Combining \eqref{Jee} and \eqref{Joo}, and comparing with \eqref{type02level} and \eqref{type02level1} we see that \[ J_2^0(u,\alpha,\beta)=R_1(u,\alpha,\beta). \]
\section{The two level density - Type-I terms} \label{type1} \subsection{The terms $f_1=P^{2k+1}$, $f_2=Q^{2l}$ with $k\geq0$, $l\geq1$} \label{type11} We denote this contribution by $I_{2}^{\textrm{oe}}(N;\alpha,\beta)$. In this section, we assume $N \geq 2g$. We have \begin{align*}
I_{2}^{\textrm{oe}}(N;\alpha,\beta)=&\frac{1}{|\mathcal{H}_{2g+1}|}\sum_{\substack{P\ne Q\\d(P^{2k+1}Q^{2l})\leq N}}\frac{d(P)d(Q)}{|P|^{(2k+1)(1/2+\alpha)}|Q|^{l(1+2\beta)}}\sum_{D\in\mathcal{H}_{2g+1}}\chi_D(PQ^2)\\
&\qquad\qquad+\frac{1}{|\mathcal{H}_{2g+1}|}\sum_{d(P^{2k+2l+1})\leq N}\frac{d(P)^2}{|P|^{(2k+1)(1/2+\alpha)+l(1+2\beta)}}\sum_{D\in\mathcal{H}_{2g+1}}\chi_D(P). \end{align*} By the P\'olya--Vinogradov inequality in Lemma \ref{pv}, the second term is $O(N^2q^{-2g})$. We now consider the first term with $P\ne Q$. The same argument also shows that the terms with $k\geq1$ are bounded by the same error term, and that the contribution of the terms with $d(P)=n$ is $O(Nq^{n-2g})$.
So \begin{align*}
I_{2}^{\textrm{oe}}(N;\alpha,\beta)=&\frac{1}{|\mathcal{H}_{2g+1}|}\sum_{\substack{d(PQ^{2l})\leq N\\d(P)\geq g+1}}\frac{d(P)d(Q)}{|P|^{1/2+\alpha}|Q|^{l(1+2\beta)}}\sum_{D\in\mathcal{H}_{2g+1}}\chi_D(PQ^2)+O(Nq^{-g}). \end{align*}
Applying Lemma \ref{L1} and since $d(P) \geq g+1$, we have \begin{align}\label{P&Q}
\sum_{D \in \mathcal{H}_{2g+1}} \chi_D(PQ^2) &= \sum_{j\geq 0} \bigg(\sum_{h \in \mathcal{M}_{2g+1-2jd(Q)}} \chi_{PQ^2}(h) - q \sum_{h \in \mathcal{M}_{2g-1-2jd(Q)}} \chi_{PQ^2}(h)\bigg).
\end{align} If $d(P)$ is odd, then using Lemmas \ref{L2} and \ref{L3} it follows that the term in parenthesis is equal to \begin{align*}
&\frac{q^{2g+3/2}}{|P|^{1/2}|Q|^{2j+2}}\sum_{d(V)= d(P)+(2j+2)d(Q)-2g-2}\chi_P(V)G(V,Q^2)\\
&\qquad\qquad-\frac{q^{2g+1/2}}{|P|^{1/2}|Q|^{2j+2}}\sum_{d(V)= d(P)+(2j+2)d(Q)-2g}\chi_P(V)G(V,Q^2). \end{align*} As $V$ cannot be a square in the sums, by Lemma \ref{sumprimes}, the contribution of these terms to $I_{2}^{\textrm{oe}}(N;\alpha,\beta)$ is $O(N^2q^{N/2-2g})$.
Now consider the case $d(P)$ is even. Applying Lemmas \ref{L2} and \ref{L3}, the first sum over $h$ in \eqref{P&Q} is \begin{align*}
&\frac{q^{2g+1}}{|P||Q|^{2j+2}}\bigg(q\sum_{V\in\mathcal{M}_{\leq d(P)+(2j+2)d(Q)-2g-3}} G(V,P)G(V,Q^2)\\ &\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad- \sum_{V\in\mathcal{M}_{\leq d(P)+(2j+2)d(Q)-2g-2}}G(V,P)G(V,Q^2) \bigg)\\
&\qquad =-\frac{q^{2g+1}\chi_P(Q)}{|P|^{1/2}|Q|^{2j+1}}\bigg(q\sum_{\substack{V\in\mathcal{M}_{\leq d(P)+(2j+1)d(Q)-2g-3}\\(V,Q)=1}} \chi_P(V)- \sum_{\substack{V\in\mathcal{M}_{\leq d(P)+(2j+1)d(Q)-2g-2}\\(V,Q)=1}}\chi_P(V)\bigg)\\
&\qquad\qquad\qquad+\frac{q^{2g+1}\varphi(Q^2)}{|P|^{1/2}|Q|^{2j+2}}\bigg(q\sum_{V\in\mathcal{M}_{\leq d(P)+2jd(Q)-2g-3}} \chi_P(V)- \sum_{V\in\mathcal{M}_{\leq d(P)+2jd(Q)-2g-2}}\chi_P(V)\bigg). \end{align*} As above, the contribution of the first term and that of $V\ne\square$ in the second term to $I_{2}^{\textrm{oe}}(N;\alpha,\beta)$ is bounded by $O(N^2q^{N/2-2g})$. We are thus left with $V=\square$ in the second term above, which is equal to \begin{align*}
&\frac{q^{2g+1}\varphi(Q^2)}{|P|^{1/2}|Q|^{2j+2}}\bigg(q\sum_{\substack{V\in\mathcal{M}_{\leq d(P)/2+jd(Q)-g-2}}} 1- \sum_{\substack{V\in\mathcal{M}_{\leq d(P)/2+jd(Q)-g-1}}}1\bigg)\\ &\qquad=\begin{cases}
-\frac{q^{2g+1}\varphi(Q^2)}{|P|^{1/2}|Q|^{2j+2}} & \quad\textrm{if } d(P)+2jd(Q)>2g,\\ 0 & \quad\textrm{otherwise}. \end{cases} \end{align*}
The same argument applies to the second sum over $h$ in \eqref{P&Q}, and hence we obtain \begin{align*}
&I_{2}^{\textrm{oe}}(N;\alpha,\beta)=-\sum_{\substack{d(PQ^{2l})\leq N\\d(P)\, \textrm{even}\,\geq g+1}}\,\sum_{\substack{d(P)+2jd(Q)> 2g}}\frac{d(P)d(Q)\varphi(Q^2)}{|P|^{1+\alpha}|Q|^{l(1+2\beta)+2j+2}}\\
&\qquad+\frac{1}{q-1}\sum_{\substack{d(PQ^{2l})\leq N\\d(P)\geq g+1}}\,\,\sum_{d(P)+2jd(Q)=2g}\frac{d(P)d(Q)\varphi(Q^2)}{|P|^{1+\alpha}|Q|^{l(1+2\beta)+2j+2}}+O(N^2q^{N/2-2g})+O(Nq^{-g}). \end{align*} By the Prime Polynomial Theorem, the condition $d(P)\geq g+1$ can be removed at the cost of an error of size $O(gq^{-g/2})$. The same argument also implies that we can restrict the sum to $jd(Q)<g$. So \begin{align}
I_{2}^{\textrm{oe}}(N;\alpha,\beta) =\frac{1}{2\pi i}\oint_{|u|=r}J_{2}^{\textrm{oe}}(u,\alpha,\beta)\,\frac{du}{u^{N+1}(1-u)}+O(N^2q^{N/2-2g})+O(gq^{-g/2}) \label{i2} \end{align} for any $r<q^{-\varepsilon}$, where \begin{align*}
J_{2}^{\textrm{oe}}(u,\alpha,\beta)&=-\sum_{l\geq1}\sum_{d(P)\, \textrm{even}}\,\sum_{\substack{d(P)+2jd(Q)>2g\\jd(Q)<g}}\frac{d(P)d(Q)\varphi(Q^2)u^{d(P)+2ld(Q)}}{|P|^{1+\alpha}|Q|^{l(1+2\beta)+2j+2}}\\
&\qquad\qquad+\frac{1}{q-1}\sum_{l\geq1}\,\sum_{\substack{d(P)+2jd(Q)=2g}}\frac{d(P)d(Q)\varphi(Q^2)u^{d(P)+2ld(Q)}}{|P|^{1+\alpha}|Q|^{l(1+2\beta)+2j+2}}\\
&=-\sum_{d(P)\, \textrm{even}}\,\sum_{\substack{d(P)+2jd(Q)>2g\\jd(Q)<g}}\frac{d(P)d(Q)\varphi(Q^2)u^{d(P)+2d(Q)}}{|P|^{1+\alpha}|Q|^{2j+2}(|Q|^{1+2\beta}-u^{2d(Q)})}\\
&\qquad\qquad+\frac{1}{q-1}\,\sum_{\substack{d(P)+2jd(Q)=2g}}\frac{d(P)d(Q)\varphi(Q^2)u^{d(P)+2d(Q)}}{|P|^{1+\alpha}|Q|^{2j+2}(|Q|^{1+2\beta}-u^{2d(Q)})}. \end{align*}
From the Prime Polynomial Theorem we have \begin{align*}
\sum_{d(P)\, \textrm{even}\,>2g-2jd(Q)}\frac{d(P)u^{d(P)}}{|P|^{1+\alpha}}&=\sum_{n>g-jd(Q)}\frac{u^{2n}}{q^{2n\alpha}}\big(1+O(q^{-n})\big)\\
&=-q^{-2g\alpha}u^{2g-2jd(Q)}|Q|^{2j\alpha}\frac{u^{2}}{u^2-q^{2\alpha}}+O(q^{-g}|Q|^j). \end{align*} \kommentar{\acom{For the error term to have that size I think we need to assume $\alpha>0$.}\Hung{We shall assume $\alpha\ll 1/g$, and I think the above error term is fine in that case.}} Hence, using \eqref{A22}, we get \begin{align}
J_{2}^{\textrm{oe}}(u,\alpha,\beta)&=q^{-2g\alpha}u^{2g}\mathcal{A}_2(u,\alpha)\sum_{j\geq0}\sum_{Q\in\mathcal{P}}\frac{d(Q)\varphi(Q^2)u^{-2jd(Q)+2d(Q)}}{|Q|^{2j(1-\alpha)+2}(|Q|^{1+2\beta}-u^{2d(Q)})}+O(gq^{-g})\nonumber\\
&=q^{-2g\alpha}u^{2g}\mathcal{A}_2(u,\alpha)\sum_{Q\in\mathcal{P}}\frac{d(Q)(|Q|^{2(1-\alpha)}-|Q|^{1-2\alpha})u^{4d(Q)}}{(|Q|^{2(1-\alpha)}u^{2d(Q)}-1)(|Q|^{1+2\beta}-u^{2d(Q)})}+O(gq^{-g}), \end{align} where in the first line we have removed the condition $jd(Q)<g$ with an admissible error. Note that we can truncate the sum over $Q$ above to $d(Q)<g$ using a similar argument as in section \ref{2rc}. Indeed, when $d(Q) \geq g$ the corresponding term in integral \eqref{i2} will be equal to zero since there will be no poles inside the contour of integration. Then we rewrite \begin{equation} \label{factno1}
J_{2}^{\textrm{oe}}(u,\alpha,\beta)= q^{-2g\alpha}u^{2g}\mathcal{A}_2(u,\alpha)\sum_{d(Q)<g}\frac{d(Q)(|Q|^{2(1-\alpha)}-|Q|^{1-2\alpha})u^{4d(Q)}}{(|Q|^{2(1-\alpha)}u^{2d(Q)}-1)(|Q|^{1+2\beta}-u^{2d(Q)})}+O(gq^{-g}). \end{equation}
\subsection{The terms $f_1=P^{2k+1}$, $f_2=Q^{2l+1}$ with $P\ne Q$ and $k,l\geq0$} \label{type12} We denote \begin{align*}
&\frac{1}{|\mathcal{H}_{2g+1}|}\sum_{\substack{P\ne Q\\d(P^{2k+1}Q^{2l+1})\leq N}}\frac{d(P)d(Q)}{|P|^{(2k+1)(1/2+\alpha)}|Q|^{(2l+1)(1/2+\beta)}}\sum_{D\in\mathcal{H}_{2g+1}}\chi_D(PQ)\\ &\qquad\qquad=I_{2,>}^{\textrm{oo}}(N;\alpha,\beta)+I_{2,<}^{\textrm{oo}}(N;\alpha,\beta)+I_{2,=}^{\textrm{oo}}(N;\alpha,\beta), \end{align*}
corresponding to the terms with $d(P)> d(Q)$, $d(P)<d(Q)$ and $d(P)=d(Q)$, respectively.
Applying Lemma \ref{L1} we have \begin{align}\label{condition}
\sum_{D \in \mathcal{H}_{2g+1}} \chi_D(PQ) &= \sum_{i,j\geq0} \bigg(\sum_{h \in \mathcal{M}_{2g+1-2id(P)-2jd(Q)}} \chi_{PQ}(h) - q \sum_{h \in \mathcal{M}_{2g-1-2id(P)-2jd(Q)}} \chi_{PQ}(h)\bigg).
\end{align}
As in the previous subsection, the terms with $d(PQ)$ odd shall lead to $V\ne\square$ after applying Lemma \ref{L2}, and their contribution, as before, is bounded by $O(N^2q^{N/2-2g})$. We are left with the terms with $d(PQ)$ even. From Lemmas \ref{L2} and \ref{L3}, the expression inside the bracket is equal to \begin{align*}
&\frac{q^{2g+1}}{|P|^{2i+1/2}|Q|^{2j+1/2}}\bigg(q\sum_{V \in \mathcal{M}_{\leq d(PQ)+2id(P)+2jd(Q)-2g-3}} \chi_{PQ}(V)- \sum_{V \in \mathcal{M}_{\leq d(PQ)+2id(P)+2jd(Q)-2g-2}} \chi_{PQ}(V) \bigg)\\
&\ -\frac{q^{2g}}{|P|^{2i+1/2}|Q|^{2j+1/2}}\bigg(q\sum_{V \in \mathcal{M}_{\leq d(PQ)+2id(P)+2jd(Q)-2g-1}} \chi_{PQ}(V)- \sum_{V \in \mathcal{M}_{\leq d(PQ)+2id(P)+2jd(Q)-2g}} \chi_{PQ}(V) \bigg). \end{align*} Again the contribution from the terms $V\ne\square$ is negligible and we focus on the term with $V=\square$, which is \begin{align}\label{500}
&\frac{q^{2g+1}}{|P|^{2i+1/2}|Q|^{2j+1/2}}\bigg(q\sum_{\substack{V \in \mathcal{M}_{\leq d(PQ)/2+id(P)+jd(Q)-g-2}\\(V,PQ)=1}}1- \sum_{\substack{V \in \mathcal{M}_{\leq d(PQ)/2+id(P)+jd(Q)-g-1}\\(V,PQ)=1}} 1\bigg)\\
&\qquad-\frac{q^{2g}}{|P|^{2i+1/2}|Q|^{2j+1/2}}\bigg(q\sum_{\substack{V \in \mathcal{M}_{\leq d(PQ)/2+id(P)+jd(Q)-g-1}\\(V,PQ)=1}}1- \sum_{\substack{V \in \mathcal{M}_{\leq d(PQ)/2+id(P)+jd(Q)-g}\\(V,PQ)=1}} 1 \bigg).\nonumber \end{align}
First consider $I_{2,>}^{\textrm{oo}}(N;\alpha,\beta)$. The treatment for $I_{2,<}^{\textrm{oo}}(N;\alpha,\beta)$ is similar. From \eqref{condition} we have $id(P)+jd(Q)\leq g$, so \[ d(V)\leq d(PQ)/2+id(P)+jd(Q)-g\leq d(PQ)/2<d(P), \] and hence $(V,P)=1$ automatically. Note that \begin{align*} &q\sum_{\substack{V \in \mathcal{M}_{\leq d(PQ)/2+id(P)+jd(Q)-g-1}\\(V,Q)=1}}1- \sum_{\substack{V \in \mathcal{M}_{\leq d(PQ)/2+id(P)+jd(Q)-g}\\(V,Q)=1}} 1\\ &\qquad=\bigg(q\sum_{V \in \mathcal{M}_{\leq d(PQ)/2+id(P)+jd(Q)-g-1}}1- \sum_{V \in \mathcal{M}_{\leq d(PQ)/2+id(P)+jd(Q)-g}} 1\bigg)\\ &\qquad\qquad\qquad-\bigg(q\sum_{V \in \mathcal{M}_{\leq (d(P)-d(Q))/2+id(P)+jd(Q)-g-1}}1- \sum_{V \in \mathcal{M}_{\leq (d(P)-d(Q))/2+id(P)+jd(Q)-g}} 1\bigg)\\ &\qquad=\begin{cases} -1 & \quad\textrm{if } (2i+1)d(P)+(2j-1)d(Q)<2g\leq (2i+1)d(P)+(2j+1)d(Q),\\ 0 & \quad\textrm{otherwise}. \end{cases} \end{align*} So \begin{align*}
\eqref{500}=\begin{cases}
-\frac{q^{2g}(q-1)}{|P|^{2i+1/2}|Q|^{2j+1/2}} & \textrm{if } (2i+1)d(P)+(2j-1)d(Q)<2g< (2i+1)d(P)+(2j+1)d(Q),\\
-\frac{q^{2g+1}}{|P|^{2i+1/2}|Q|^{2j+1/2}} & \textrm{if } (2i+1)d(P)+(2j-1)d(Q)=2g,\\
\frac{q^{2g}}{|P|^{2i+1/2}|Q|^{2j+1/2}} & \textrm{if } (2i+1)d(P)+(2j+1)d(Q)=2g,\\ 0 & \textrm{otherwise.} \end{cases} \end{align*} Hence $I_{2,>}^{\textrm{oo}}(N;\alpha,\beta)$ is equal to, up to an error of size $O(N^2q^{N/2-2g})$, \begin{align*}
&-\sum_{\substack{d(P)>d(Q)\\d(P^{2k+1}Q^{2l+1})\, \textrm{even}\,\leq N}}\,\sum_{\substack{(2i+1)d(P)+(2j-1)d(Q)< 2g\\2g< (2i+1)d(P)+(2j+1)d(Q)}}\frac{d(P)d(Q)}{|P|^{(2k+1)(1/2+\alpha)+2i+1/2}|Q|^{(2l+1)(1/2+\beta)+2j+1/2}}\\
&\quad-\frac{q}{q-1}\sum_{\substack{d(P)>d(Q)\\d(P^{2k+1}Q^{2l+1})\, \textrm{even}\,\leq N}}\,\sum_{(2i+1)d(P)+(2j-1)d(Q)=2g}\frac{d(P)d(Q)}{|P|^{(2k+1)(1/2+\alpha)+2i+1/2}|Q|^{(2l+1)(1/2+\beta)+2j+1/2}}\\
&\quad+\frac{1}{q-1}\sum_{\substack{d(P)>d(Q)\\d(P^{2k+1}Q^{2l+1})\, \textrm{even}\,\leq N}}\,\sum_{(2i+1)d(P)+(2j+1)d(Q)=2g}\frac{d(P)d(Q)}{|P|^{(2k+1)(1/2+\alpha)+2i+1/2}|Q|^{(2l+1)(1/2+\beta)+2j+1/2}}. \end{align*}
By the Prime Polynomial Theorem, the contribution of the terms with $d(P)=m$ and $d(Q)=n$ to $I_{2,>}^{\textrm{oo}}(N;\alpha,\beta)$ for each $i,j,k,l$ is bounded by \begin{equation}\label{bound100} \ll q^{-(k+2i)m-(l+2j)n}. \end{equation} Note that $(2i+1)m+(2j+1)n\geq2g$, so this is, in particular, bounded by $O( q^{-2g+(1-k)m+(1-l)n})$. It follows that the contribution of the terms with $m+n\leq g$ is $O(q^{-g})$. For those with $m+n>g$, the condition $m>n$ leads to $m>g/2$, and it follows from \eqref{bound100} that the contribution of such terms with $i+k\geq 1$ is $O(Nq^{-g/2})$. Hence we can restrict to the case $i=k=0$ and get \begin{align*}
I_{2,>}^{\textrm{oo}}(N;\alpha,\beta)&=-\sum_{\substack{d(P)>d(Q)\\d(PQ^{2l+1})\, \textrm{even}\,\leq N}}\,\sum_{\substack{d(P)+(2j-1)d(Q)< 2g\\2g< d(P)+(2j+1)d(Q)}}\frac{d(P)d(Q)}{|P|^{1+\alpha}|Q|^{(2l+1)(1/2+\beta)+2j+1/2}}\\
&\qquad\qquad-\frac{q}{q-1}\sum_{\substack{d(P)>d(Q)\\d(PQ^{2l+1})\leq N}}\,\sum_{d(P)+(2j-1)d(Q)=2g}\frac{d(P)d(Q)}{|P|^{1+\alpha}|Q|^{(2l+1)(1/2+\beta)+2j+1/2}}\\
&\qquad\qquad+\frac{1}{q-1}\sum_{\substack{d(P)>d(Q)\\d(PQ^{2l+1})\leq N}}\,\sum_{d(P)+(2j+1)d(Q)=2g}\frac{d(P)d(Q)}{|P|^{1+\alpha}|Q|^{(2l+1)(1/2+\beta)+2j+1/2}}\\ &\qquad\qquad+O(N^2q^{N/2-2g})+O(Nq^{-g/2}). \end{align*} \kommentar{\acom{In the last two terms we need $d(PQ^{2l+1})$ to be even.}\Hung{$d(P)+(2j\pm1)d(Q)=2g$, and hence $d(PQ^{2l+1})$ even.}}
We shall write \[ I_{2,>}^{\textrm{oo}}(N;\alpha,\beta)=I_{2,>}^{\textrm{oo}\flat}(N;\alpha,\beta)+I_{2,>}^{\textrm{oo}\dagger}(N;\alpha,\beta)+O(N^2q^{N/2-2g})+O(Nq^{-g/2}) \] to separate the cases $j+l\geq 1$ and $j=l=0$, respectively. For $I_{2,>}^{\textrm{oo}\flat}(\alpha,\beta)$, by the Perron formula we have \[
I_{2,>}^{\textrm{oo}\flat}(N;\alpha,\beta)=\frac{1}{2\pi i}\oint_{|u|=r}J_{2,>}^{\textrm{oo}\flat}(u,\alpha,\beta)\,\frac{du}{u^{N+1}(1-u)} \] for any $r<q^{-\varepsilon}$, where \begin{align*}
J_{2,>}^{\textrm{oo}\flat}(u,\alpha,\beta)=&-\sum_{l+j\geq1}\,\sum_{\substack{d(P)>d(Q)\\d(PQ)\, \textrm{even}\\d(P)+(2j-1)d(Q)< 2g\\2g< d(P)+(2j+1)d(Q)}}\frac{d(P)d(Q)u^{d(P)+(2l+1)d(Q)}}{|P|^{1+\alpha}|Q|^{(2l+1)(1/2+\beta)+2j+1/2}}\\
&\qquad\qquad-\frac{q}{q-1}\,\sum_{l+j\geq1}\,\sum_{\substack{d(P)>d(Q)\\d(P)+(2j-1)d(Q)=2g}}\frac{d(P)d(Q)u^{d(P)+(2l+1)d(Q)}}{|P|^{1+\alpha}|Q|^{(2l+1)(1/2+\beta)+2j+1/2}}\\
&\qquad\qquad+\frac{1}{q-1}\,\sum_{l+j\geq1}\,\sum_{\substack{d(P)>d(Q)\\d(P)+(2j+1)d(Q)=2g}}\frac{d(P)d(Q)u^{d(P)+(2l+1)d(Q)}}{|P|^{1+\alpha}|Q|^{(2l+1)(1/2+\beta)+2j+1/2}}. \end{align*} Given $Q$, from the Prime Polynomial Theorem we have \begin{align*}
&\sum_{\substack{d(P)>d(Q)\\d(PQ)\, \textrm{even}\\d(P)+(2j-1)d(Q)< 2g\\2g< d(P)+(2j+1)d(Q)}}\frac{d(P)u^{d(PQ)}}{|P|^{1+\alpha}}=\sum_{\max\{d(Q),g-jd(Q)\}<n<g-(j-1)d(Q)}\frac{|Q|^\alpha u^{2n}}{q^{2n\alpha}}\big(1+O(q^{-n}|Q|^{1/2})\big)\\
&=\begin{cases}-q^{-2g\alpha}u^{2g-2jd(Q)}|Q|^{(2j+1)\alpha}\frac{u^{2}}{u^2-q^{2\alpha}}+q^{-2g\alpha}u^{2g-2(j-1)d(Q)}|Q|^{(2j-1)\alpha}\frac{q^{2\alpha}}{u^2-q^{2\alpha}}+O(q^{-g}|Q|^{j+1/2})\\ \qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\text{if }(j+1)d(Q)<g,\\
-u^{2d(Q)}|Q|^{-\alpha}\frac{u^{2}}{u^2-q^{2\alpha}}+q^{-2g\alpha}u^{2g-2(j-1)d(Q)}|Q|^{(2j-1)\alpha}\frac{q^{2\alpha}}{u^2-q^{2\alpha}}+O(|Q|^{-1/2})\\ \qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\text{if }jd(Q)< g\leq (j+1)d(Q). \end{cases} \end{align*}
\kommentar{\acom{For the error term I get that we have an extra $|Q|^{2rj}$, and since $r = \alpha-\log_q u$ if $\alpha$ is for example close to $1/4$ then the error term is much bigger.}\Hung{Again, we shall assume $\alpha \ll 1/g$, and I think the above is fine}} Hence \begin{align*}
&J_{2,>}^{\textrm{oo}\flat}(u,\alpha,\beta)=\,q^{-2g\alpha}u^{2g}\bigg(\frac{u^{2}}{u^2-q^{2\alpha}}+\frac{1}{q-1}\bigg)\sum_{l+j\geq1}\,\sum_{(j+1)d(Q)<g}\frac{d(Q)u^{2(l-j)d(Q)}}{|Q|^{1-\alpha+\beta+l(1+2\beta)+2j(1-\alpha)}}\\
&\qquad+\frac{u^{2}}{u^2-q^{2\alpha}}\sum_{l+j\geq1}\,\sum_{jd(Q)< g\leq (j+1)d(Q)}\frac{d(Q)u^{2(l+1)d(Q)}}{|Q|^{1+\alpha+\beta+l(1+2\beta)+2j}}\\
&\qquad-q^{-2g\alpha}u^{2g}\bigg(\frac{q^{2\alpha}}{u^2-q^{2\alpha}}+\frac{q}{q-1}\bigg)\sum_{l+j\geq1}\,\sum_{jd(Q)<g}\frac{d(Q)u^{2(l-j+1)d(Q)}}{|Q|^{1+\alpha+\beta+l(1+2\beta)+2j(1-\alpha)}}+O(q^{-g/2}). \end{align*}
By the Prime Polynomial Theorem again, it is easy to see that the second expression is bounded by $O(q^{-g})$. Also, we can extend the sum over $Q$ in the third expression to all of $Q\in\mathcal{P}$ at the cost of an error of size $O_\varepsilon(q^{-2g+\varepsilon g})$. For the first expression, we write \begin{align*}
&\sum_{l+j\geq1}\,\sum_{(j+1)d(Q)<g}\frac{d(Q)}{|Q|^{1-\alpha+\beta}}\,x^ly^j = \sum_{d(Q)<g}\frac{d(Q)}{|Q|^{1-\alpha+\beta}}\sum_{\substack{l+j\geq1\\j< g/d(Q)-1}}x^ly^j\\
&\qquad\qquad=\sum_{d(Q)<g}\frac{d(Q)}{|Q|^{1-\alpha+\beta}}\sum_{l+j\geq1}x^ly^j+O_\varepsilon(q^{-g+\varepsilon g})\\
&\qquad\qquad=\sum_{d(Q)<g}\frac{d(Q)}{|Q|^{1-\alpha+\beta}}\,\bigg(\frac{x}{1-x}+\frac{y}{(1-x)(1-y)}\bigg)+O_\varepsilon(q^{-g+\varepsilon g}). \end{align*} \kommentar{\acom{I'm not sure that this is correct. For example if $j=1$ and $l=0$ I get $q^{-2g/3} q^{(\alpha-\beta)g/3}$ and again if $\alpha \sim 1/4$ the above can be bigger. Maybe you're assuming everywhere that $\Re(\alpha), \Re(\beta) \ll 1/g$?}\Hung{Same comment as above}} The identities in \eqref{A22} and an argument similar to that used in the evaluation of $J_{2}^{\textrm{oe}}(u,\alpha,\beta)$ in equation \eqref{factno1} then imply that \begin{align}\label{factno2}
&J_{2,>}^{\textrm{oo}\flat}(u,\alpha,\beta)=\,q^{-2g\alpha}u^{2g}\mathcal{A}_2(u,\alpha)\bigg(\sum_{d(Q)<g}\frac{d(Q)u^{2d(Q)}}{|Q|^{1-\alpha+\beta}(|Q|^{1+2\beta}-u^{2d(Q)})}\nonumber\\
&\qquad+\sum_{d(Q)<g}\frac{d(Q)|Q|^{\alpha+\beta}}{(|Q|^{2(1-\alpha)}u^{2d(Q)}-1)(|Q|^{1+2\beta}-u^{2d(Q)})}\\
&\qquad-\sum_{d(Q)<g}\frac{d(Q)|Q|^{2-3\alpha+\beta}u^{4d(Q)}}{(|Q|^{2(1-\alpha)}u^{2d(Q)}-1)(|Q|^{1+2\beta}-u^{2d(Q)})}+\sum_{d(Q)<g}\frac{d(Q)u^{2d(Q)}}{|Q|^{1+\alpha+\beta}} \bigg)+O(q^{-g/2}).\nonumber \end{align}
For $I_{2,>}^{\textrm{oo}\dagger}(N;\alpha,\beta)$, by the Perron formula we have \[
I_{2,>}^{\textrm{oo}\dagger}(N;\alpha,\beta)=\frac{1}{2\pi i}\oint_{|u|=r}J_{2,>}^{\textrm{oo}\dagger}(u,\alpha,\beta)\,\frac{du}{u^{N+1}(1-u)} \label{idag} \] for any $r<q^{-\varepsilon}$, where \begin{align*}
J_{2,>}^{\textrm{oo}\dagger}(u,\alpha,\beta)&=-\sum_{\substack{d(P)>d(Q)\\d(PQ)\, \textrm{even}}}\,\sum_{\substack{d(P)-d(Q)< 2g\\2g< d(P)+d(Q)}}\frac{d(P)d(Q)u^{d(PQ)}}{|P|^{1+\alpha}|Q|^{1+\beta}}-\frac{q}{q-1}\sum_{d(P)-d(Q)=2g}\frac{d(P)d(Q)u^{d(PQ)}}{|P|^{1+\alpha}|Q|^{1+\beta}}\\
&\qquad\qquad+\frac{1}{q-1}\sum_{d(P)>d(Q)}\,\sum_{d(P)+d(Q)=2g}\frac{d(P)d(Q)u^{d(PQ)}}{|P|^{1+\alpha}|Q|^{1+\beta}}. \end{align*} The last two terms can be evaluated using the Prime Polynomial Theorem. Concerning the first term, note that given $Q$, \begin{align*}
&\sum_{\substack{d(P)>d(Q)\\d(PQ)\, \textrm{even}}}\,\sum_{\substack{d(P)-d(Q)< 2g\\2g< d(P)+d(Q)}}\frac{d(P)u^{d(PQ)}}{|P|^{1+\alpha}}=\sum_{\max\{d(Q),g\}<n<g+d(Q)}\frac{|Q|^\alpha u^{2n}}{q^{2n\alpha}}\big(1+O(q^{-n}|Q|^{1/2})\big)\\ &\qquad\qquad=\begin{cases}
-q^{-2g\alpha}u^{2g}|Q|^{\alpha}\frac{u^{2}}{u^2-q^{2\alpha}}+q^{-2g\alpha}u^{2g+2d(Q)}|Q|^{-\alpha}\frac{q^{2\alpha}}{u^2-q^{2\alpha}}+O(q^{-g/2}) & \textrm{if }d(Q)< g,\\
-|Q|^{-\alpha}u^{2d(Q)}\frac{u^{2}}{u^2-q^{2\alpha}}+q^{-2g\alpha}u^{2g+2d(Q)}|Q|^{-\alpha}\frac{q^{2\alpha}}{u^2-q^{2\alpha}}+O(q^{-g/2}) & \textrm{if }d(Q)\geq g, \end{cases} \end{align*} by writing $d(PQ)=2n$. \kommentar{\acom{I agree with the answer, but I find the way it is written confusing. I'd write \begin{align*}
\sum_{\substack{d(P)>d(Q)\\d(PQ)\, \textrm{even}}}\,\sum_{\substack{d(P)-d(Q)< 2g\\2g< d(P)+d(Q)}}\frac{d(P)}{|P|^{1+r}} = \sum_{\substack{\max\{d(Q),2g-d(Q)\}<n<2g+d(Q) \\ n \equiv d(Q) \pmod 2}} \Big(q^{-nr} + O(q^{-n/2-nr}) \Big). \end{align*}
}\Hung{I try to keep it consistent with the way I wrote before, but either way is fine}} So \begin{align*}
&-\sum_{\substack{d(P)>d(Q)\\d(PQ)\, \textrm{even}}}\,\sum_{\substack{d(P)-d(Q)< 2g\\2g< d(P)+d(Q)}}\frac{d(P)d(Q)u^{d(PQ)}}{|P|^{1+\alpha}|Q|^{1+\beta}}=q^{-2g\alpha}u^{2g}\frac{u^{2}}{u^2-q^{2\alpha}}\sum_{d(Q)< g}\frac{d(Q)}{|Q|^{1-\alpha+\beta}}\\
&\qquad\qquad +\frac{u^{2}}{u^2-q^{2\alpha}}\sum_{d(Q)\geq g}\frac{d(Q)u^{2d(Q)}}{|Q|^{1+\alpha+\beta}}-q^{-2g\alpha}u^{2g}\frac{q^{2\alpha}}{u^2-q^{2\alpha}}\sum_{Q\in\mathcal{P}}\frac{d(Q)u^{2d(Q)}}{|Q|^{1+\alpha+\beta}}+O(q^{-g/2}). \end{align*} Hence, using \eqref{A22}, we have \begin{align*}
J_{2,>}^{\textrm{oo}\dagger}(u,\alpha,\beta)&=q^{-2g\alpha}u^{2g}\bigg(\frac{u^{2}}{u^2-q^{2\alpha}}+\frac{1}{q-1}\bigg)\sum_{d(Q)< g}\frac{d(Q)}{|Q|^{1-\alpha+\beta}}+\frac{u^{2}}{u^2-q^{2\alpha}}\sum_{d(Q)\geq g}\frac{d(Q)u^{2d(Q)}}{|Q|^{1+\alpha+\beta}}\nonumber\\
&\qquad\qquad-q^{-2g\alpha}u^{2g}\bigg(\frac{q^{2\alpha}}{u^2-q^{2\alpha}}+\frac{q}{q-1}\bigg)\sum_{Q\in\mathcal{P}}\frac{d(Q)u^{2d(Q)}}{|Q|^{1+\alpha+\beta}}+O(q^{-g/2})\nonumber\\
&=q^{-2g\alpha}u^{2g}\mathcal{A}_2(u,\alpha)\bigg(\sum_{d(Q)< g}\frac{d(Q)}{|Q|^{1-\alpha+\beta}}-\sum_{d(Q)<g}\frac{d(Q)u^{2d(Q)}}{|Q|^{1+\alpha+\beta}}\bigg)\\
&\qquad\qquad+\frac{u^{2}}{u^2-q^{2\alpha}}\sum_{d(Q)\geq g}\frac{d(Q)u^{2d(Q)}}{|Q|^{1+\alpha+\beta}}+O(q^{-g/2}),\nonumber \end{align*} where in the second identity we truncated the second sum over $Q$ using a similar argument as before. For the third term, from the Prime Polynomial Theorem we have \[
\sum_{d(Q)\geq g}\frac{d(Q)u^{2d(Q)}}{|Q|^{1+\alpha+\beta}}=\sum_{n\geq g}\frac{u^{2n}}{q^{n(\alpha+\beta)}}\big(1+O(q^{-n/2})\big)=-q^{-g(\alpha+\beta)}u^{2g}\frac{q^{\alpha+\beta}}{u^2-q^{\alpha+\beta}}+O(q^{-g/2}). \] \kommentar{\acom{Again, we need $\Re(\alpha) \geq 0$ for the error term.}\Hung{I rewrite things a bit differently}} Thus, \begin{align}
J_{2,>}^{\textrm{oo}\dagger}(u,\alpha,\beta)&=q^{-2g\alpha}u^{2g}\mathcal{A}_2(u,\alpha)\bigg(\sum_{d(Q)< g}\frac{d(Q)}{|Q|^{1-\alpha+\beta}}-\sum_{d(Q)<g}\frac{d(Q)u^{2d(Q)}}{|Q|^{1+\alpha+\beta}}\bigg)\nonumber \\ &\qquad\qquad-q^{-g(\alpha+\beta)}u^{2g}\frac{q^{\alpha+\beta}u^2}{(u^2-q^{2\alpha})(u^2-q^{\alpha+\beta})}+O(q^{-g/2}). \nonumber \\
\label{factno3} \end{align}
Combining \eqref{factno2} and \eqref{factno3} we obtain \begin{align*}
I_{2,>}^{\textrm{oo}}(N;\alpha,\beta)=\frac{1}{2\pi i}\oint_{|u|=r}J_{2,>}^{\textrm{oo}}(u,\alpha,\beta)\,\frac{du}{u^{N+1}(1-u)}+O(N^2q^{N/2-2g})+O(Nq^{-g/2}), \end{align*}
where \begin{align}\label{factno4}
J_{2,>}^{\textrm{oo}}(u,\alpha,\beta)&= q^{-2g\alpha}u^{2g}\mathcal{A}_2(u,\alpha)\bigg(\sum_{d(Q)<g}\frac{d(Q)u^{2d(Q)}}{|Q|^{1-\alpha+\beta}(|Q|^{1+2\beta}-u^{2d(Q)})}\nonumber\\
&\qquad\qquad+\sum_{d(Q)<g}\frac{d(Q)(|Q|^{\alpha+\beta}-|Q|^{2-3\alpha+\beta}u^{4d(Q)})}{(|Q|^{2(1-\alpha)}u^{2d(Q)}-1)(|Q|^{1+2\beta}-1)}+ \sum_{d(Q)<g} \frac{1}{|Q|^{1-\alpha+\beta}} \bigg) \\
&\qquad\qquad -q^{-g(\alpha+\beta)}u^{2g}\frac{q^{\alpha+\beta}u^2}{(u^2-q^{2\alpha})(u^2-q^{\alpha+\beta})} +O(q^{-g/2}).\nonumber \end{align} \kommentar{\begin{align}\label{factno4}
J_{2,>}^{\textrm{oo}}(u,\alpha,\beta)&=-A_2(-r,s,r,s)q^{-2gr}\zeta_q(1-2r)\sum_{Q\in\mathcal{P}}\frac{d(Q)(|Q|^{2-3r+s}-|Q|^{2-r+s})}{(|Q|^{2(1-r)}-1)(|Q|^{1+2s}-1)}\\ &\qquad\qquad-\frac{q^{-g(r+s)}}{1-q^{r-s}}\bigg(\frac{1}{1-q^{r+s}}+\frac{1}{q-1}\bigg).\nonumber \end{align} }
Now consider $I_{2,=}^{\textrm{oo}}(N;\alpha,\beta)$. As before we will have $(V,PQ)=1$ automatically in \eqref{500}. So \begin{align*}
\eqref{500}&=\frac{q^{2g+1}}{|P|^{2i+2j+1}}\bigg(q\sum_{V \in \mathcal{M}_{\leq (i+j+1)d(P)-g-2}}1- \sum_{V \in \mathcal{M}_{\leq (i+j+1)d(P)-g-1}} 1\bigg)\\
&\qquad\qquad-\frac{q^{2g}}{|P|^{2i+2j+1}}\bigg(q\sum_{V \in \mathcal{M}_{\leq (i+j+1)d(P)-g-1}}1- \sum_{V \in \mathcal{M}_{\leq (i+j+1)d(P)-g}} 1 \bigg)\\ &=\begin{cases}
-\frac{q^{2g}(q-1)}{|P|^{2i+2j+1}} & \textrm{if } (i+j+1)d(P)>g,\\
\frac{q^{2g}}{|P|^{2i+2j+1}} & \textrm{if } (i+j+1)d(P)=g,\\ 0 & \textrm{otherwise.} \end{cases} \end{align*} Hence \begin{align*}
&I_{2,=}^{\textrm{oo}}(N;\alpha,\beta)=-\sum_{\substack{P\ne Q\\d(P^{2k+2l+2})\leq N\\d(P)=d(Q)>g/(i+j+1)}}\frac{d(P)^2}{|P|^{(2k+1)(1/2+\alpha)+(2l+1)(1/2+\beta)+2i+2j+1}}\\
&\qquad\qquad+\frac{1}{q-1}\sum_{\substack{P\ne Q\\d(P^{2k+2l+2})\leq N\\d(P)=d(Q)=g/(i+j+1)}}\frac{d(P)^2}{|P|^{(2k+1)(1/2+\alpha)+(2l+1)(1/2+\beta)+2i+2j+1}}+O(N^2 q^{N/2-2g}). \end{align*}
The same argument as before shows that the contribution of the term with $i+j+k+l\geq 1$ is $O(Nq^{-g})$. For $i=j=k=l=0$, we can ignore the condition $P\ne Q$ at the cost of $O(gq^{-g})$. So using the Perron formula we obtain that \[
I_{2,=}^{\textrm{oo}}(N;\alpha,\beta)=\frac{1}{2\pi i}\oint_{|u|=r}J_{2,=}^{\textrm{oo}}(u,\alpha,\beta)\,\frac{du}{u^{N+1}(1-u)}+O(N^2 q^{N/2-2g})+O(gq^{-g}) \]
for any $r<q^{-\varepsilon}$, where \begin{align*}
J_{2,=}^{\textrm{oo}}(u,\alpha,\beta)&=-\sum_{d(P)=d(Q)>g}\frac{d(P)^2u^{2d(P)}}{|P|^{2+\alpha+\beta}}+\frac{1}{q-1}\sum_{d(P)=d(Q)=g}\frac{d(P)^2u^{2d(P)}}{|P|^{2+\alpha+\beta}}. \end{align*}
From the Prime Polynomial Theorem we get \begin{align}\label{fact3} J_{2,=}^{\textrm{oo}}(u,\alpha,\beta)&=-\sum_{n>g} \frac{u^{2n}}{q^{n(\alpha+\beta)}}+\frac{q^{-g(\alpha+\beta)}u^{2g}}{q-1}+O(q^{-g/2}) \nonumber\\ &=q^{-g(\alpha+\beta)}u^{2g}\bigg(\frac{u^2}{u^2-q^{\alpha+\beta}}+\frac{1}{q-1}\bigg)+O(q^{-g/2}). \end{align}
\subsection{Combining Type-I terms} \label{combine} In view of \eqref{r2}, \eqref{t2}, \eqref{factno1} and \eqref{factno4} we obtain \begin{align*}
I_{2}^{\textrm{oe}}(N;\alpha,\beta)+ I_{2,>}^{\textrm{oo}}(N;\alpha,\beta)&=\frac{1}{2\pi i}\oint_{|u|=r}J_2(u,\alpha,\beta)\,\frac{du}{u^{N+1}(1-u)}+O(N^2q^{N/2-2g})+O(Nq^{-g/2}), \end{align*} where \begin{align*} J_2(u,\alpha,\beta)&=R_2(u,\alpha,\beta)- \frac{q^{-g(\alpha+\beta)}u^{2g}\mathcal{A}_2(u,\alpha)}{1-q^{\alpha-\beta}}-q^{-g(\alpha+\beta)}u^{2g}\frac{q^{\alpha+\beta}u^2}{(u^2-q^{2\alpha})(u^2-q^{\alpha+\beta})}+ O(q^{-g/2})\\ &=R_2(u,\alpha,\beta)- \frac{q^{-g(\alpha+\beta)}u^{2g}}{(1-q^{\alpha-\beta})(q-1)}-\frac{q^{-g(\alpha+\beta)}u^{2g}}{1-q^{\alpha-\beta}}\frac{u^2}{u^2-q^{\alpha+\beta}}+ O(q^{-g/2}). \end{align*} \kommentar{\acom{We get that \begin{align*}
J_2(u,\alpha,\beta) &= q^{-2\alpha g} u^{2g} \mathcal{A}_2(u,\alpha) \Big(\sum_{d(Q)<g} \frac{d(Q)}{|Q|^{1-\alpha+\beta}} \\
&- \sum_{Q} \frac{d(Q)(|Q|^{3a+b}- |Q|^{-1+3a-b}u^{2d(Q)}-|Q|u^{4d(Q)}+|Q|^2u^{4d}+|Q|^{1+a-b}u^{4d}-|Q|^{2-a+b}u^{4d}}{(u^{2d}-|Q|^{1+2b})(-|Q|^{2a}+|Q|^2 u^{2d})} \Big) \\ & -q^{-g(\alpha+\beta)}u^{2g}\frac{q^{\alpha+\beta}u^2}{(u^2-q^{2\alpha})(u^2-q^{\alpha+\beta})} \\
&= -q^{-2\alpha g} u^{2g} \mathcal{A}_2(u,\alpha) \sum_{Q} \frac{d(Q)(|Q|^{3a+b}- |Q|^{-1+3a-b}u^{2d(Q)}-|Q|u^{4d(Q)}+|Q|^2u^{4d}+|Q|^{1+a-b}u^{4d}-|Q|^{2-a+b}u^{4d}}{(u^{2d}-|Q|^{1+2b})(-|Q|^{2a}+|Q|^2 u^{2d})} \\ &- \frac{q^{-g(\alpha+\beta)}u^{2g} \mathcal{A}_2(u,\alpha)}{1-q^{\alpha-\beta}} - \frac{q^{-2\alpha g} u^{2g} \mathcal{A}_2(u,\alpha)}{1-q^{\beta-\alpha})}-q^{-g(\alpha+\beta)}u^{2g}\frac{q^{\alpha+\beta}u^2}{(u^2-q^{2\alpha})(u^2-q^{\alpha+\beta})} \end{align*}}} Similarly, \begin{align*}
I_{2}^{\textrm{eo}}(N;\alpha,\beta)+ I_{2,<}^{\textrm{oo}}(N;\alpha,\beta)&=\frac{1}{2\pi i}\oint_{|u|=r}J_3(u,\alpha,\beta)\,\frac{du}{u^{N+1}(1-u)}+O(N^2q^{N/2-2g})+O(Nq^{-g/2}), \end{align*} where \[ J_3(u,\alpha,\beta)=R_3(u,\alpha,\beta)- \frac{q^{-g(\alpha+\beta)}u^{2g}}{(1-q^{-\alpha+\beta})(q-1)}-\frac{q^{-g(\alpha+\beta)}u^{2g}}{1-q^{-\alpha+\beta}}\frac{u^2}{u^2-q^{\alpha+\beta}}+ O(q^{-g/2}). \]
Now note that \[ \frac{1}{1-q^{\alpha-\beta}}+\frac{1}{1-q^{-\alpha+\beta}}=1, \] and hence, by using \eqref{fact3}, \begin{align*} &I_{2}^{\textrm{oe}}(N;\alpha,\beta)+I_{2}^{\textrm{eo}}(N;\alpha,\beta)+ I_{2}^{\textrm{oo}}(N;\alpha,\beta)\\
&\qquad\qquad=\frac{1}{2\pi i}\oint_{|u|=r}\big(R_2(u,\alpha,\beta)+R_3(u,\alpha,\beta)\big)\,\frac{du}{u^{N+1}(1-u)}+O(N^2q^{N/2-2g})+O(Nq^{-g/2}). \end{align*}
\end{document} |
\begin{document}
\begin{abstract} A point $P$ in projective space is said to be Galois with respect to a hypersurface if the function field extension induced by the projection from $P$ is Galois. We present a hyperplane section theorem for Galois points.
Precisely, if $P$ is a Galois point for a hypersurface, then $P$ is Galois for a general hyperplane section passing through $P$. As an application, we determine hypersurfaces of dimension $n$ with $n$-dimensional sets of Galois points. \end{abstract} \title{A hyperplane section theorem for Galois points and its application}
\section{Introduction} Let the base field $K$ be an algebraically closed field of characteristic $p \ge 0$ and let $X \subset \mathbb P^{n+1}$ be an irreducible and reduced hypersurface of dimension $n$ and of degree $d$. H. Yoshihara introduced the notion of the {\it Galois point} (see \cite{fukasawa1, miura-yoshihara, yoshihara1, yoshihara2, yoshihara3}). If the function field extension $K(X)/K(\mathbb P^n)$ induced by the projection $\pi_P:X \dashrightarrow \mathbb P^n$ from a point $P \in \mathbb P^{n+1}$ is Galois, then the point $P$ is said to be Galois.
Galois point theory has given a new viewpoint of classification of algebraic varieties, by the distribution of Galois points (see \cite{fukasawa1, fukasawa2, fukasawa-hasegawa, fukasawa-takahashi, miura-yoshihara, takahashi1, yoshihara1, yoshihara2, yoshihara3}). If the dimension of the singular locus $S_X$ is at most $n-2$, then Fukasawa and Takahashi \cite{fukasawa-takahashi} presented upper bounds for the number of Galois points, as a generalization of a result of Yoshihara for smooth hypersurfaces \cite{yoshihara3}. To do this, they showed a ``hyperplane section theorem'' (see \cite[Theorem 1.3]{fukasawa-takahashi}). In this article, we prove this theorem for {\it arbitrary} Galois points (which may be singular) with respect to {\it arbitrary} hypersurfaces. The intersection of (almost) all tangent spaces of $X$ is denoted by $T_X$.
\begin{theorem} \label{HyperplaneMain} Let $X \subset \mathbb P^{n+1}$ be a hypersurface of dimension $n \ge 2$ and degree $d \ge 3$ in characteristic $p \ge 0$, and let $P$ be a Galois point for $X$ with multiplicity $m$, where $0 \le m \le d-2$. Then: \begin{itemize} \item[(i)] A general hyperplane $H$ passing through $P$ satisfies the following condition: \begin{itemize} \item[$(\star)$] the hyperplane section $X_H:=X \cap H$ is an irreducible hypersurface in $H \cong \mathbb P^n$ of degree $d$ such that $S_{X_H}=S_X \cap H$, $P \not\in T_{X_H}$, and the multiplicity of $X_H$ at $P$ is equal to $m$. \end{itemize} \item[(ii)] Let $H$ be a hyperplane passing through $P$ and satisfying the condition $(\star)$. Then, the point $P$ is Galois for $X_H$. \item[(iii)] In this case, the Galois groups are isomorphic: $G_P(X) \cong G_P(X_H)$. \end{itemize} \end{theorem}
As an application, we generalize results of Fukasawa and Hasegawa \cite{fukasawa2, fukasawa-hasegawa} for plane curves with infinitely many Galois points. Let $\Delta(X)$ (resp. $\Delta'(X)$) be the set of all Galois points contained in $X \setminus S_X$ (resp. $\mathbb P^{n+1} \setminus X$).
\begin{theorem} \label{InnerMain} Let $X \subset \mathbb P^{n+1}$ be a hypersurface of dimension $n \ge 1$ and degree $d \ge 4$ in characteristic $p \ge 0$. Then, the following conditions are equivalent. \begin{itemize} \item[(i)] There exists a non-empty Zariski open subset $U$ of $X$ such that $U \subset \Delta(X)$. \item[(ii)] $p>0$, $d=p^e$ for some $e>0$, and $X$ is projectively equivalent to the hypersurface defined by $X_0^{p^e-1}X_1-X_2^{p^e}=0$. \end{itemize} In this case, $\Delta(X)=X \setminus \{X_0=X_2=0\}$, and the induced Galois group $G_P$ is a cyclic group of order $p^e-1$ for any point $P \in \Delta(X)$. \end{theorem}
\begin{theorem} \label{OuterMain} Let $X \subset \mathbb P^{n+1}$ be a hypersurface of dimension $n \ge 1$ and degree $d \ge 3$ in characteristic $p \ge 0$. Then, the following conditions are equivalent. \begin{itemize} \item[(i)] There exist an irreducible Zariski closed subset $Y \subset \mathbb P^{n+1}$ of dimension $n$ and a non-empty open subset $U_Y$ of $Y$ such that $U_Y \subset \Delta'(X)$. \item[(ii)] $p>0$, $d=p^e$ for some $e>0$, and $X$ is projectively equivalent to an irreducible hypersurface whose equation is of the form $$\sum_{j=0}^{e}\sum_{i=1}^{n+1}\alpha_{ij}X_0^{p^e-p^j}X_i^{p^{j}}=0, $$ where $\alpha_{ij} \in K$. \end{itemize} In this case, $\Delta'(X)$ is a Zariski open set of a hyperplane (see Proposition \ref{distribution}), and the induced Galois group $G_P$ is isomorphic to $(\mathbb Z/p\mathbb Z)^{\oplus e}$ for any point $P \in \Delta'(X)$. \end{theorem}
\section{Preliminaries and Lemmas} Let $X \subset \mathbb P^{n+1}$ be an irreducible hypersurface, let $S_X$ be the singular locus of $X$, and let $X_{\rm sm}=X \setminus S_X$ be the smooth locus. The projective tangent space at a smooth point $P \in X\setminus S_X$ is denoted by $T_PX \subset \mathbb P^{n+1}$. Let $\check{\mathbb P}^{n+1}$ be the dual projective space which parameterizes hyperplanes of $\mathbb P^{n+1}$. The Gauss map $\gamma$ of $X$ is a rational map from $X$ to $\check{\mathbb P}^{n+1}$ which sends a smooth point $P$ to the tangent space $T_PX$. If $F$ is the defining polynomial of $X$, then $\gamma$ is given by $(\partial F/\partial X_0: \cdots: \partial F/\partial X_{n+1})$. Let $T_X:=\bigcap_{P \in X_{\rm sm}}T_PX$. If $T_X \ne \emptyset$, then $X$ is said to be strange and $T_X$ is called a strange center. A strange center is a linear space. It is well known that (the function field extension induced by) the projection from $T_X$ is separable if and only if $X$ is a cone with center $T_X$. Therefore, any strange variety is a cone with center $T_X$ if $p=0$.
For distinct points $P, Q \in \mathbb P^{n+1}$, the line passing through $P$ and $Q$ is denoted by $\overline{PQ}$. For a point $P \in \mathbb P^{n+1}$, the projective space $\mathbb P^{n}$ parameterizes lines passing through $P$. Then, we can identify $\pi_P(Q)=\overline{PQ}$ for any point $Q \in X \setminus \{P\}$. If $P=(0:\cdots:0:1)$, then $\pi_P(X_0:\cdots:X_{n+1})=(X_0:\cdots:X_n)$, up to the projective equivalence of $\mathbb P^n$.
We note the following Bertini theorem (see \cite[Theorem 1.1]{fulton-lazarsfeld}, or \cite[II. 6.1, Theorem 1]{shafarevich} and \cite[Lemma 5]{zariski}).
\begin{lemma} \label{hyperplane section} Let $P \not\in T_X$ (i.e., the projection $\pi_P: X \dashrightarrow \mathbb P^n$ is generically finite and separable). Then, for a general hyperplane $H \subset \mathbb P^{n+1}$ with $H \ni P$, the hyperplane section $X_H:=X \cap H$ is an integral scheme. \end{lemma}
Let $P$ be a Galois point for $X$. The Galois group induced by the Galois extension is denoted by $G_P(X)$, or simply $G_P$. Then, any element of $G_P(X)$ corresponds to a birational map from $X$ to itself. Let ${\rm Bir}(X)$ be the group consisting of all birational maps from $X$ to itself. We can consider $G_P(X)$ as a subgroup of ${\rm Bir}(X)$. For $\sigma \in G_P(X)$, the maximal open subset of $X$ on which $\sigma$ is defined is denoted by $U_{\sigma}$. We set $U[P]=\bigcap_{\sigma \in G_P}U_{\sigma}$.
\begin{lemma} \label{rigion} Let $P \in \mathbb P^{n+1}$ be a Galois point and let $\sigma \in G_P$ be an induced birational map from $X$ to itself. Suppose that $H$ is a hyperplane such that $P \in H$ and $X_H$ is an integral scheme. Then, $X_H \cap U_{\sigma} \ne \emptyset$.
Furthermore, if the multiplicity of $X_H$ at $P$ is less than $d$, then the restriction map $\sigma|_{X_H}$ is a birational map from $X_H$ to itself. \end{lemma}
\begin{proof} For the first assertion, see the proof of \cite[V. Lemma 5.1]{hartshorne}. Now, $X_H$ corresponds to a regular point of codimension one in the scheme $X$. We can prove the first assertion by using a valuative criterion of properness for $X$. We prove the second assertion. Let $\tau$ be the inverse of $\sigma$. Then, $\sigma$ is an isomorphism from $U[P] \cap \sigma^{-1}U[P]$ to $U[P] \cap \tau^{-1}U[P]$. By definitions of $\pi_P$ and $\sigma$, $\sigma(X_H) \subset X_H$. If $U[P] \cap \sigma^{-1}U[P] \cap H \ne \emptyset$, we have the conclusion. Assume that $U[P] \cap \sigma^{-1}U[P] \cap H =\emptyset$. Then, $\sigma(U[P] \cap H) \subset (X \cap H) \setminus (U[P] \cap H)$. Since $U[P] \cap H$ is of dimension $n-1$ and $(X \cap H) \setminus (U[P] \cap H)$ is of dimension $\le n-2$, $\sigma^{-1}(\sigma(Q))$ is one-dimensional for a general point $Q$ of $X \cap H$. By definitions of $\pi_P$ and $\sigma$, $\overline{PQ} \subset X \cap H$. Then, $X_H$ is a cone with center $P$. Therefore, the multiplicity of $X_H$ at $P$ is equal to $d$. This is a contradiction. \end{proof}
\begin{lemma} \label{transitive} Let $P$ be a Galois point for $X$ with multiplicity $m$ and let $Q, R \in X$ be points such that $\pi_P(Q)=\pi_P(R)$ and the intersection multiplicity of $X$ and $\overline{QR}$ at $P$ is equal to $m$. Assume that $Q \in U[P]$. Then, there exists $\sigma \in G_P$ such that $\sigma(Q)=R$. \end{lemma}
\begin{proof} Let $(X_0:\cdots:X_{n+1})$ be a system of homogeneous coordinates, let $x_i=X_i/X_0$ for $i=1, \ldots, n+1$, and let $F$ be the defining homogeneous polynomial of $X$. We can assume that $P=(0:\cdots:0:1)$ and $R=(1:0:\cdots:0)$ for a suitable system of coordinates. Then, the line passing through $P, Q, R$ is given by $X_1=\cdots=X_{n}=0$. Then, we can take $\pi_P(1:x_1:\cdots:x_{n+1})=(1:x_1:\cdots:x_n)$ and $\pi_P(Q)=\pi_P(R)=(1:0:\cdots:0)$. We have $$F(X_0, \ldots, X_{n+1})=A_{d-m}X_{n+1}^{d-m}+\cdots+A_1X_{n+1}+A_0, $$ where $A_i \in K[X_0, \ldots, X_n]$. We define $f(x_1, \ldots, x_{n+1}):=F(1, x_1, \ldots, x_{n+1})$ and $a_i(x_1, \ldots, x_n):=A_i(1, x_1, \ldots, x_n)$. Since $f(R)=0$, $a_0(0, \ldots, 0)=0$. Since the intersection multiplicity of $X$ and $\overline{QR}$ at $P$ is $m$, $a_{d-m} (0, \ldots, 0)=A_{d-m}(1, \ldots, 0) \ne 0$.
We consider a function $x_{n+1}$. Let $g=\prod_{\sigma} \sigma^{*}x_{n+1}$. Then, we have $g(Q)=((-1)^{d-m}a_0/a_{d-m})(Q)=0$. Assume that $\sigma(Q) \ne R$ for any $\sigma \in G_P$. Then, $x_{n+1}(\sigma(Q)) \ne 0$ for any $\sigma \in G_P$. Therefore, $g(Q) \ne 0$. This is a contradiction. \end{proof}
\section{Proof of Theorem \ref{HyperplaneMain}} \begin{proof}[Proof of Theorem \ref{HyperplaneMain}] Suppose that $P$ is Galois for $X$. Then, $P \not\in T_X$. By Lemma \ref{hyperplane section}, for a general hyperplane $H \ni P$, $X_H$ is an integral scheme. Let $W_P \subset \check{\mathbb{P}}^{n+1}$ be the set of such hyperplanes. Since $P \not\in T_X$, $W_P \setminus \gamma(X_{\rm sm}) \ne \emptyset$. We have $S_{X_H}=S_X \cap H$ for any $H \in W_P \setminus \gamma(X_{\rm sm})$, because, for a point $Q \in X_{\rm sm} \cap H$, $T_QX=H$ if and only if $X \cap H$ is singular at $Q$. Let $U_P \subset X_{\rm sm}$ be the set of all smooth points $Q$ such that the differential map of the projection $\pi_P$ at $Q$ is surjective, and let $\Sigma_P \subset \check{\mathbb P}^{n+1}$ be the set of all hyperplanes $H$ such that $H \subset (X \setminus U_P)$. Since $\pi_P$ is separable, $W_P \setminus \Sigma_P \ne \emptyset$. We have $P \not\in T_{X_H}$ for any $H \in W_P\setminus (\gamma(X_{\rm sm}) \cup \Sigma_P)$. Let $\Gamma_P$ be the (finite) set of all hyperplanes $H$ such that the multiplicity of $X \cap H$ at $P$ is less than $m$. We have $W_P \setminus (\gamma(X_{\rm sm}) \cup \Sigma_P \cup \Gamma_P) \ne \emptyset$, and any hyperplane $H$ in this set satisfies $(\star)$. We have assertion (i).
Let $H$ be a hyperplane passing through $P$ and satisfying $(\star)$. We consider a homomorphism of groups
$$ \phi: G_P \rightarrow G; \sigma \mapsto \sigma|_{X_H}, $$
where $G=\{ \sigma \in {\rm Bir}(X_H)|\sigma(X_H \cap l \setminus \{P\}) \subset X_H \cap l \mbox{ for a general line } l \mbox{ such that } P \in l \subset H \}$. Since the multiplicity of $X_H$ at $P$ is $m <d$, it follows from Lemma \ref{rigion} and the definition of a Galois point that $\phi$ is well-defined. In addition, by the condition $P \not\in T_{X_H}$, $X_H \cap l \setminus \{P\}$ consists of $d-m$ points for a general line $l \subset H$ containing $P$. It follows from Lemma \ref{transitive} that $\phi$ is injective. Since the order of $G$ is at most $d-m$, $\phi$ is an isomorphism. Then, $P$ is Galois for $X_H$. \end{proof}
\begin{remark} A general hyperplane section for a Galois point which is {\it singular} was studied by T. Takahashi in his Ph.D. thesis \cite{takahashi2}. He proved that a Galois point $P \in S_X$ is also Galois for a general hyperplane section $X_H \ni P$, under the assumption that $p=0$ and $X \subset \mathbb{P}^3$ is a normal surface. \end{remark}
\section{Case of inner Galois points} As an application of Theorem \ref{HyperplaneMain}, we have the following.
\begin{corollary} \label{reduction1} Assume that the condition (i) in Theorem \ref{InnerMain} holds. Then, for a general hyperplane $H$, there exists a non-empty Zariski open set $U_{X_H} \subset X_H$ such that $U_{X_H} \subset \Delta(X_H)$. \end{corollary}
\begin{proof} Let $U$ be an open set as in Theorem \ref{InnerMain}(i) and let $H$ be a general hyperplane. Since $H$ is general, we can assume that $U \cap H \ne \emptyset$. By Lemma \ref{hyperplane section}, $X_H$ is an integral scheme and $H$ satisfies the condition $(\star)$ for a general point $P \in X_H$. By Theorem \ref{HyperplaneMain}(ii), we can take $U_{X_H}=(U \cap X_H) \setminus (S_{X_H} \cup T_{X_H})$. \end{proof}
\begin{proof}[Proof of Theorem \ref{InnerMain}] We prove the implication (i) $\Rightarrow$ (ii). We use induction on dimension $n$. If $n=1$, then the assertion is nothing but a result of Fukasawa and Hasegawa \cite{fukasawa-hasegawa}. We consider the case where $n \ge 2$. Let $H \subset \mathbb P^{n+1}$ be a general hyperplane. It follows from Corollary \ref{reduction1} that there exists a Zariski open set $U_{X_H} \subset X_H$ such that $U_{X_H} \subset \Delta(X_H)$. By the assumption of induction, $p>0$, $d$ is a power of $p$, and $X_H \subset H \cong \mathbb P^{n}$ is projectively equivalent to $X_0^{d-1}X_1-X_2^d=0$. The Gauss map $\gamma_{X_H}$ for $X_H$ is given by $(-X_0^{d-2}X_1:X_0^{d-1}:0:\cdots:0)$, if $X_H \subset \mathbb P^n$ is given by $X_0^{d-1}X_1-X_2^d=0$. Therefore, we find that $T_{X_H}$ is a linear space of dimension $n-2$ with $T_{X_H} \not\subset X_H$, and a general fiber of $\gamma_{X_H}$ is a linear space of dimension $n-2$.
Since $H$ is general and $T_QX_H=T_QX \cap H$ for any smooth point $Q \in X_H$, $T_X$ is a linear space of dimension $n-1$ with $T_X \not\subset X$, $\gamma_{X_H}$ coincides with the restriction map $\gamma|_{X_H}$, and a general fiber of $\gamma$ is a linear space of dimension $n-1$.
Let $P \in X$ be a general point. Since the linear spaces $\gamma^{-1}(\gamma (P))$ of dimension $n-1$ and $T_X$ of dimension $n-1$ are contained in the projective space $T_PX$ of dimension $n$, these intersect along a linear space $L_P$ of dimension $n-2$. If $L_{P'} \ne L_{P}$ for a general point $P' \in X$, then $T_X$ is contained in $X$, since $L_{P'} \subset T_X$. This is a contradiction. Therefore, $L_{P'}=L_P$. Then, $X$ is a cone with a $(n-2)$-dimensional center $L$. For a suitable system of coordinates, we can assume that $L$ is defined by $X_0=X_1=X_2=0$ and $X$ is defined by $F(X_0, X_1, X_2)=0$. We can assume that $H$ is defined by $X_{n+1}-(a_0X_0+\cdots+a_{n}X_n)=0$. Then, $X_H$ is given by the same equation $F=0$ and there exists a linear transformation $\phi:H \rightarrow H$ such that $\phi(X_H)$ is defined by $F_1:=X_0^{d-1}X_1-X_2^d=0$. Then, $\phi(L \cap H)=L \cap H$. Therefore, $\phi$ gives an automorphism of the sublinear system $\langle X_0, X_1, X_2 \rangle$ of $H^0(\mathbb P^n, \mathcal{O}(1))$ which is given by $L \cap H$. This implies that $X$ is projectively equivalent to the hypersurface defined by $X_0^{d-1}X_1-X_2^d=0$.
We consider the implication (ii) $\Rightarrow$ (i). Let $F=X_0^{p^e-1}X_1-X_2^{p^e}$ be the defining polynomial. It is not difficult to check that the singular locus $S_X$ of $X$ is given by $X_0=X_2=0$. Let $P \in X \setminus S_X$. Then, $P=(1:b_1: \cdots: b_{n+1})$ for some $b_1, \ldots, b_{n+1} \in K$ with $b_1=b_2^{p^e}$. The projection $\pi_P$ is given by $(X_1-b_1X_0:\cdots:X_{n+1}-b_{n+1}X_0)$. Let $\hat{X_i}=X_i-b_iX_0$. Then, $F(X_0, \hat{X_1}+b_1X_0, \ldots, \hat{X}_{n+1}+b_{n+1}X_0)=(\hat{X}_1+b_1X_0)X_0^{p^e-1}-(\hat{X_2}+b_2X_0)^{p^e}=\hat{X}_1X_0^{p^e-1}-\hat{X}_2^{p^e}=F(X_0, \hat{X}_1, \ldots, \hat{X}_n)$. Then, $\pi_P=(X_0:\hat{X}_1:\cdots:\hat{X}_{n+1})$. Therefore, we have a field extension $K(x_0, x_2, \ldots, x_{n+1})/K(x_2, \ldots, x_{n+1})$ with a relation $F(x_0, 1, x_2, \ldots, x_{n+1})=x_0^{p^e-1}-x_2^{p^e}=0$. It is not difficult to check that this is a Galois extension, which is cyclic of degree $p^e-1$. Therefore, we have $\Delta(X)=X \setminus \{X_0=X_2=0\}$. \end{proof}
\section{Case of outer Galois points} As an application of Theorem \ref{HyperplaneMain}, we have the following.
\begin{corollary} \label{reduction2} Assume the condition (i) in Theorem \ref{OuterMain} holds. Then, for a general hyperplane $H$, there exists a non-empty Zariski open set $U_{Y_H} \subset Y_H$ such that $U_{Y_H} \subset \Delta'(X_H)$. \end{corollary}
\begin{proof} Let $Y, U_Y$ be as in Theorem \ref{OuterMain}(i) and let $H$ be a general hyperplane. Since $H$ is general, we can assume that $U_Y \cap H \ne \emptyset$. By Lemma \ref{hyperplane section}, $X_H$ and $Y_H$ are integral and $H$ satisfies the condition $(\star)$ for a general point $P \in Y_H$. By Theorem \ref{HyperplaneMain}(ii), we can take $U_{Y_H}=(U_Y \cap H) \setminus T_{X_H}$. \end{proof}
\begin{lemma} \label{n=1} Let $n=1$. Assume that there exists an irreducible plane curve $Y \subset \mathbb P^2$ and a non-empty open set $U_Y \subset Y$ such that $U_Y \subset \Delta'(X)$. Then, \begin{itemize} \item[(1)] $Y$ is a line, and \item[(2)] if we take a linear transformation $\phi$ such that $\phi(Y)$ is defined by $X_0=0$, then the defining polynomial of $\phi(X)$ is of the form $\sum_{j=0}^{e}\sum_{i=1}^{2}\alpha_{ij}x_i^{p^{j}}+c=0$, where $\alpha_{ij}, c \in K$. \end{itemize} \end{lemma}
\begin{proof} Let $(X_0:X_1:X_{2})$ be a system of homogeneous coordinates and let $x_i=X_{i}/X_0$ for $i=1, 2$. By a result of Fukasawa \cite{fukasawa2}, $Y$ is a line and there exists a linear transformation $\psi$ such that $\psi(X)$ is defined by $f:=\sum_{j=0}^{e}\sum_{i=1}^{2}\alpha_{ij}x_i^{p^{j}}=0$. Then $\psi(Y)$ is defined by $X_0=0$. Let $\phi$ be a linear transformation as in assumption (2). Then, $\phi(X)$ is given by $(\psi \circ \phi^{-1})^*f=0$. Let $\tilde{x}_i:=(\psi\circ\phi^{-1})^*x_i=\beta_{i0}+\beta_{i1}x_1+\beta_{i2}x_2$, where $\beta_{ij} \in K$, for $i=1, 2$. Then, $\phi(X)$ is given by $f(\tilde{x}_1, \tilde{x}_2)=0$, where $f(\tilde{x}_1, \tilde{x}_2)=\sum_{j=0}^{e}\sum_{i=1}^{2}\gamma_{ij}x_i^{p^{j}}+c$ for some $\gamma_{ij}, c \in K$. \end{proof}
\begin{proof}[Proof of Theorem \ref{OuterMain}] We consider the following condition ($P_n$): If $X \subset \mathbb P^{n+1}$ is an irreducible hypersurface, and there exists an irreducible hypersurface $Y \subset \mathbb P^{n+1}$ and a non-empty open set $U_Y$ of $Y$ such that $U_Y \subset \Delta'(X)$, then \begin{itemize} \item[(1)] $Y$ is a hyperplane, and \item[(2)] if we take a linear transformation $\phi$ such that $\phi(Y)$ is defined by $X_0=0$, then the defining polynomial of $\phi(X)$ is of the form $\sum_{j=0}^{e}\sum_{i=1}^{n+1}\alpha_{ij}x_i^{p^{j}}+c=0$, where $\alpha_{ij}, c \in K$. \end{itemize} We prove ($P_n$). We use induction on dimension $n$. If $n=1$, then the assertion is nothing but Lemma \ref{n=1}. We consider the case where $n \ge 2$. Let $H \subset \mathbb P^{n+1}$ be a general hyperplane. It follows from Corollary \ref{reduction2} that there exists a non-empty Zariski open set $U_{Y_H} \subset \Delta'(X_H)$. By the assumption of induction, $p>0$, $d$ is a power of $p$, and $X_H \subset H \cong \mathbb P^{n-1}$ is projectively equivalent to $\sum_{j=0}^{e}\sum_{i=1}^n\alpha_{ij}x_i^{p^j}+c=0$. Since $H$ is general, $Y$ is a hyperplane. We have result (1) of $(P_n)$.
We take a linear transformation $\phi$ such that $\phi(Y)$ is defined by $X_{0}=0$. Let $P=(1:a_1:\ldots:a_{n+1}) \in X$ be a general point and let $H$ be a general hyperplane passing through $P$. Then, there exists an open set $U_{Y_H} \subset Y_H$ such that $U_{Y_H} \subset \Delta'(X_H)$. If we take a linear transformation $\psi=(X_0:X_1-a_1X_0:\cdots:X_{n+1}-a_{n+1}X_{0})$, then $\psi(P)=(1:0:\cdots:0)$, $\psi(\phi(Y))$ is defined by $X_0=0$ and $\psi(H)$ is defined by $X_{n+1}-(b_1X_1+\cdots+b_nX_n)=0$ for some $b_i \in K$. Let $F(X_0, \ldots, X_{n+1})$ be the defining polynomial of $\psi(X)$, let $f=F(1, x_1, \ldots, x_{n+1})$ and let $\tilde{x}=b_1x_1+\cdots+b_nx_n$. Then, $f(0, \ldots, 0)=0$ since $(1:0:\cdots:0) \in \psi(X)$. Since $\psi(X) \cap \psi(H)$ satisfies the condition $(P_{n-1})$ by induction, $g(x_1, \ldots, x_n):=f(x_1, \ldots, x_{n}, \tilde{x})=\sum_{i,j}\beta_{ij}x_i^{p^j}$ for some $\beta_{ij} \in K$, similar to the proof of Lemma \ref{n=1}. If $f$ has a term of degree not equal to some power of $p$, $g$ has such a term for a general hyperplane $H$ with $P \in H$. Therefore, $f$ has only terms of degree equal to some power of $p$. Let $f_{p^i}$ be the component of $f$ of degree $p^i$ for $i=0, \ldots, e$. Then, $f_{p^i}(x_1, \ldots, x_n, \tilde{x})$ is the component of $g$ of degree $p^i$. By condition (2) of ($P_{n-1}$), $f_{p^i}(x_1, \ldots, x_n, \tilde{x})$ must be of the form $h^{p^i}$ for some linear polynomial $h(x_1, \ldots, x_n)$. Since $H$ is general, $f_{p^i}(x_1, \ldots, x_{n+1})$ must be of the form $h^{p^i}$ for some $h(x_1, \ldots, x_{n+1})$. Therefore, $\psi(X)$ is given by the polynomial as in condition (2) of ($P_n$) and hence, $\phi(X)$ also.
The implication (ii) $\Rightarrow$ (i) is derived from Proposition \ref{distribution} below. \end{proof}
\begin{proposition}[cf. \cite{fukasawa2}, Propositions 2 and 3] \label{distribution} Let $X$ be an irreducible hypersurface defined by the equation in Theorem \ref{OuterMain}(ii) and let $H_0$ be the hyperplane defined by $X_0=0$. Then, we have the following. \begin{itemize} \item[(i)] $S_X$ and $T_X$ are linear spaces of dimension $n-1$ which are contained in $H_0$. \item[(ii)] $\Delta'(X)=H_0\setminus (S_X \cup T_X)$, and all points in $S_X \setminus T_X$ are Galois. (Here, we consider a point $P$ with $\pi_P^*K(\mathbb{P}^n)=K(X)$ as a Galois point.) \item[(iii)] For any Galois point $P \in H_0 \setminus T_X$, any birational map induced by $G_P$ is a restriction of a linear transformation of $\mathbb P^{n+1}$. \item[(iv)] For any Galois point $P \in H_0 \setminus T_X$, the Galois group $G_P$ is isomorphic to $(\mathbb Z/p\mathbb Z)^{\oplus m}$ for some $m \le e$. \item[(v)] $\Delta(X)=X_{\rm sm}$ if $X$ is projectively equivalent to the hypersurface defined by $X_0^{p^e-1}X_1-X_2^{p^e}=0$, and $\Delta(X)=\emptyset$ otherwise. \end{itemize} \end{proposition}
\begin{proof} Let $F=\sum_{j=0}^{e}\sum_{i=1}^{n+1}\alpha_{ij}X_0^{p^e-p^j}X_i^{p^j}$ be the defining polynomial. It is not difficult to check that $S_X$ is given by $X_0=\sum_{i=1}^{n+1}\alpha_{ie}X_i^{p^e}=0$ and $T_X$ is given by $X_0=\sum_{i=1}^{n+1}\alpha_{i0}X_i=0$. Therefore, we have $(i)$.
We prove that all points in $H_0 \setminus T_X$ are Galois. There exists $i$ such that $\alpha_{i0} \ne 0$. We can assume that $i=1$. Let $\hat{X}_1=\sum_{i=1}^{n+1}\alpha_{i0}X_i$. Then, $\phi(X_0, \ldots, X_{n+1})=(X_0, \hat{X_1}, X_2 \ldots, X_{n+1})$ is a linear transformation and $\phi(T_X)$ is given by $X_0=X_1=0$. By considering $\phi(X)$ as $X$, we can assume that $T_X$ is given by $X_0=X_1=0$. Let $P \in H_0 \setminus T_X$. Then, $P=(0:1:b_2:\cdots: b_{n+1})$ for some $b_2, \ldots, b_{n+1}$. The projection $\pi_P$ is given by $(1:x_2-b_2x_1:\cdots:x_{n+1}-b_{n+1}x_1)$. Let $\hat{x}_i=x_i-b_ix_1$. Then, we have a field extension $K(x_1, \hat{x}_2, \ldots, \hat{x}_{n+1})/K(\hat{x}_2, \ldots, \hat{x}_{n+1})$ with a relation $g(x_1, \hat{x}_2, \ldots, \hat{x}_{n+1})=F(1, x_1, \hat{x}_2+b_2x_1, \ldots, \hat{x}_{n+1}+b_{n+1}x_1)=0$. By the form of $F$, $g$ is of the form $\sum_{i,j}\beta_{ij} x_i^{p^j}$ for some $\beta_{ij} \in K$. Since $P \not\in T_X$, this extension is Galois of degree $p^m$ for some $m \le e$ and the Galois group is isomorphic to $(\mathbb Z/p\mathbb Z)^{\oplus m}$ (see \cite[pp. 117--118]{stichtenoth}). Therefore, we have $H_0 \setminus (S_X \cup T_X) \subset \Delta'(X)$, and any point in $S_X \setminus T_X$ is Galois. By considering the form $g$, we find that assertions (iii) and (iv) hold for all Galois points in $H_0\setminus T_X$.
We prove that $\Delta'(X) \subset H_0 \setminus (S_X \cup T_X)$. If this is proved, then we have (ii), (iii) and (iv). If $n=1$, then this is a result of Fukasawa (see \cite[Proposition 2]{fukasawa2}). Assume that $n \ge 2$ and there exists an outer Galois point $P \not\in H_0$ for $X$. By Theorem \ref{HyperplaneMain}, there exists a hyperplane $H \ni P$ such that $\{P\} \cup (\Delta'(X) \cap H_0 \cap H) \setminus T_{X_H} \subset \Delta'(X_H)$. When $n=2$, $X_H$ is a curve and this is a contradiction. By using induction, we have a contradiction for any $n \ge 2$.
We prove (v). Assume that $P \in \Delta(X)$. Since $X \cap H_0=S_X$, $P \in X \setminus H_0$. Let $P' \in X \setminus H_0$ such that the line $\overline{PP'}$ intersects the set $\Delta'(X)$. Let $R \in \overline{PP'} \cap \Delta'(X)$. Since any element of $G_R$ is a linear transformation, it follows from Lemma \ref{transitive} that there exists $\sigma \in G_R$ such that $\sigma(P)=P'$. Since $P$ is Galois, $P'$ is also Galois. Therefore, if one inner Galois point exists, then almost all points of $X$ are inner Galois points. It follows from Theorem \ref{InnerMain} that $X$ is projectively equivalent to the hypersurface defined by $X_0^{p^e-1}X_1-X_2^{p^e}=0$ and $\Delta(X)=X\setminus \{X_0=X_2=0\}=X_{\rm sm}$. \end{proof}
\end{document}
\begin{document}
\title{
\Large\bfseries Witten-Reshetikhin-Turaev invariants for 3-manifolds from Lagrangian intersections in configuration spaces } \author{ \small Cristina Anghel \quad $/\!\!/$\quad 9\textsuperscript{th} June 2021
} \date{} \maketitle { \makeatletter \renewcommand*{\BHFN@OldMakefntext}{} \makeatother
\footnotetext{\textit{Key words and phrases}: Quantum invariants, Topological models, Witten-Reshetikhin-Turaev invariants.} }
\begin{abstract} In this paper we construct a topological model for the Witten-Reshetikhin-Turaev invariants for $3$-manifolds coming from the quantum group $U_q(sl(2))$, as graded intersection pairings of homology classes in configuration spaces. More precisely, for a fixed level $\mathcal{N} \in \mathbb N$ we show that the level $\mathcal{N}$ WRT invariant for a $3-$manifold is a state sum of Lagrangian intersections in a covering of a {\bf fixed} configuration space in the punctured disk. This model brings a new perspective on the structure of the level $\mathcal{N}$ Witten-Reshetikhin-Turaev invariant, showing that it is completely encoded by the intersection points between certain Lagrangian submanifolds in a fixed configuration space, with additional gradings which come from a particular choice of a local system. This formula provides a new framework for investigating the open question about categorifications of the WRT invariants. \end{abstract}
{\tableofcontents
}
\section{Introduction}\label{introduction}
After the discovery of the Jones polynomial for knots, the world of quantum invariants encountered a powerful development, provided by constructions due to Witten, Reshetikhin and Turaev. More precisely Witten \cite{Witt} predicted the existence of an extension of the Jones polynomial to $3$-manifolds and Reshetikhin-Turaev \cite{RT} provided an algebraic construction of such invariants. They showed that the representation theory of the quantum group $U_q(sl(2))$ leads to invariants for links coloured with finite dimensional representations of this quantum group, called coloured Jones polynomials. Further on, for any level $\mathcal{N} \in \mathbb N$, one can use linear combinations of coloured Jones polynomials with colours less than $\mathcal{N}$ in order to get a $3$-manifold invariant $\tau_{\mathcal{N}}$. However, there are open questions about the geometry and topology which is contained in the Witten-Reshetikhin-Turaev invariants. An active research area concerns categorifications for invariants of links and $3-$manifolds. For instance, Khovanov homology, which is a categorification for the Jones polynomial for knots, was proved to be a powerful tool which contains much information (\cite{K},\cite{Ras},\cite{KM},\cite{OZ},\cite{SM},\cite{M1}). The story is different for the analogous invariants for $3$-manifolds. There is an important open question about the existence of categorifications for Witten-Reshetikhin-Turaev invariants.
Our aim is to describe these invariants as intersection pairings between homology classes in coverings of configuration spaces. We refer to such a description as ``topological model''. The main result of the paper shows that the level $\mathcal{N}$ WRT invariant is a state sum of graded intersections between Lagrangian submanifolds in a fixed configuration space. This provides a new framework for the study of these invariants and a starting point in investigating categorification questions.
In the first part of this article, Theorem \ref{THEOREMM}, we generalise the author's previous work (\cite{Cr3},\cite{Cr5}) constructing a topological model for coloured Jones polynomials coloured with different colours. Then, the translation of the algebraic definition of the WRT invariant using Theorem \ref{THEOREMM} would show that the WRT invariant $\tau_{\mathcal{N}}$ is a linear combination of Lagrangian intersections in various configuration spaces. Further on, the main part of the paper is geometric. We encode the coefficients of the coloured Jones polynomials coming from the Kirby colour by adding certain circles to the supports of the Lagrangian submanifolds as well as adding extra punctures to the punctured disk. Then, we show that we can move the whole intersection formula -- which a priori would be in different configuration spaces -- in a fixed configuration space, as presented in Theorem \ref{THEOREMW}. \subsection{Homological tools} For $n,m \in \mathbb N$, we define $C_{n,m}=Conf_m(\mathscr D_n)$ to be the unordered configuration space of $m$ points in the $n$-punctured disc $\mathscr D_n$. We use two extra parameters $k,\bar{l}\in \mathbb N$ and define a local system: $$\Phi: \pi_1(C_{n+3\bar{l},m}) \rightarrow \mathbb Z^n \oplus \mathbb Z^{\bar{l}} \oplus \mathbb Z.$$
The definition of this local system depends on the parameter $k$. Roughly speaking, the monodromy around each puncture gives us one variable and the last $\mathbb Z$-component counts the winding of particles in the configuration space. The parameter $k$ is used for orientation purposes: the monodromies of $\Phi$ around the first $n-k$ punctures and the last $k$ punctures are counted with opposite orientations. In our model $\bar{l}$ will be the number of link components. The extra $3\bar{l}$ punctures will play an important role in the model for the WRT-invariants. We define $\tilde{C}^{-k}_{n+3\bar{l},m}$ to be the covering of $C_{n+3\bar{l},m}$ corresponding to $\Phi$.
We use the homology of a quotient of this covering space (quotienting the first $n$ components of the local system towards $l$ variables, for $l \leqslant n$), as follows:
\begin{enumerate} \item[•] Lawrence representations $H^{-k}_{n,m,\bar{l}}$ which are $\mathbb Z[x_1^{\pm 1},...,x_l^{\pm 1},y_1^{\pm 1}..., y_{\bar{l}}^{\pm 1}, d^{\pm 1}]$-modules.\\ They come from the Borel-Moore homology of $\tilde{C}^{-k}_{n+3\bar{l},m}$ and have an action of coloured braids on $n+3\bar{l}$ strands (Definition \ref{D:4}, Proposition \ref{colbr}) \item[•] Dual Lawrence representations $H^{-k,\partial}_{n,m,\bar{l}}$ (Definition \ref{D:4})\\ (using the homology relative to the boundary of the same covering space) \item[•] Graded intersection pairing (Proposition \ref{P:3'}): \begin{equation*} \left\langle , \right\rangle:H^{-k}_{n,m,\bar{l}} \otimes H^{-k,\partial}_{n,m,\bar{l}}\rightarrow \mathbb Z[x_1^{\pm 1},...,x_l^{\pm 1},y_1^{\pm 1}..., y_{\bar{l}}^{\pm 1}, d^{\pm 1}]. \end{equation*} \end{enumerate}
{\bf Homology classes} We will construct certain classes in these homology groups, given by lifts of Lagrangian submanifolds in the base configuration space. These submanifolds are encoded by ``geometric supports'' which are sets of arcs in the punctured disc. The product of these arcs quotiented to the unordered configuration space gives the Lagrangian submanifolds. Then, the lifts in the covering will be encoded by sets of ``paths to the base points'' which are collections of arcs in the punctured disk, from the base point towards the geometric support. \begin{rmk}\label{pairing} \ \ The pairing is encoded in the base configuration space, and it is parametrised by the intersection points between the geometric supports of the homology classes, graded by monomials which are prescribed by the local system $\Phi$. \end{rmk} \subsection{Topological model for coloured Jones polynomials}
In the author's earlier work, \cite{Cr3} and \cite{Cr5}, a topological model for the coloured Jones polynomial for links coloured with the same colour was constructed (i.e. each component is coloured with the same colour). In the first part of this paper we generalize this result and construct a topological model for coloured Jones polynomials for links coloured with different colours. Let $L$ be an oriented framed link with framings $f_1,...,f_l\in \mathbb Z$. We consider $\beta_n \in B_n$ a braid such that $L= \widehat{\beta_n}$ by braid closure. Now, let us fix a set of colours $N_1,...,N_l\in \mathbb N$ for the strands of the link. This colouring induces a colouring of the strands of the braid: $(C_1,...,C_n).$
We use the configuration space of $1+\sum_{i=1}^{n} (C_i-1)$ particles in the $(2n+1)$-punctured disk, and a $\mathbb Z^{2n+1}\oplus \mathbb Z$ local system constructed as above, with $k=n$ and $\bar{l}=0$. Then, we have the homologies: $$H^{-n}_{2n+1, {1+\sum_{i=1}^{n}}(C_i-1),0} \text { and } H^{-n,\partial}_{2n+1, {1+\sum_{i=1}^{n}}(C_i-1),0} \text{ which are } \mathbb Z[x_1^{\pm 1},...,x_l^{\pm 1}, d^{\pm 1}] \text{-modules.}$$ \begin{defn}(Coloured Homology classes) With the procedure described above, for any indices $i_1,...,i_{n}\in \mathbb N$ such that $0 \leqslant i_k \leqslant C_k-1$ for all $k\in \{1,...,n\}$ we define two Lagrangian submanifolds and consider the classes given by their lifts in the covering, as presented in figure \ref{Picture0}: $${\color{red} \mathscr F_{\bar{i}}^{(C_1,...,C_n)} \in H^{-n}_{2n+1, {1+\sum_{i=1}^{n}}(C_i-1),0}} \ \ \ \ \ \ \ \ \text{ and }\ \ \ \ \ \ \ \ \ {\color{dgreen} \mathscr L^{(C_1,...,C_n)}_{\bar{i}}\in H^{-n,\partial}_{2n+1,1+\sum_{i=1}^{n} (C_i-1),0}}.$$ The set of such sequences of indices is denoted by $C(\bar{N})$. \end{defn} \begin{thm}\label{THEOREMM}(Topological state sum model for coloured Jones polynomials for coloured links)\\ Let us fix a set of colours $N_1,..,N_l \in \mathbb N$. Then, the coloured Jones polynomial of $L$ coloured with colours $N_1,...,N_l$ has the following model: \begin{equation} \begin{aligned} J_{N_1,...,N_l}(L,q)& =~ q^{ \sum_{i=1}^{l}\left( f_i- \sum_{j \neq i} lk_{i,j}\right)(N_i-1)} \cdot \\
& \cdot \left(\sum_{\bar{i}\in C(\bar{N})} \left( \prod_{i=1}^{n}x^{-1}_{C(i)} \right)\cdot \left\langle(\beta_{n} \cup {\mathbb I}_{n+1} ) \ { \color{red} \mathscr F_{\bar{i}}^{(C_1,...,C_n)}}, {\color{dgreen} \mathscr L_{\bar{i}}^{(C_1,...,C_n)}}\right\rangle \right)\Bigm| _{\psi^{C}_{q,N_1,...,N_l}}. \end{aligned} \end{equation} In this expression $\psi^{C}_{q,N_1,...,N_l}$ is the specialisation of variables to one variable from formula \eqref{eq:8''''} and $C:\{1,...,2n+1\}\rightarrow \{1,...,l\}$ is the colouring presented in equation \eqref{eq:col} and remark \ref{not'}.
\end{thm} Note that this formula is a state sum of intersections in a configuration space where the number of particles depends on the choice of individual colours $N_1,..,N_l$ for colouring the link. \subsection{Topological model for WRT invariants} The second part of the paper is devoted to the construction of a topological model for the Witten-Reshetikhin-Turaev $3$-manifold invariants. Let us fix a level $\mathcal{N}\in \mathbb N$ and let us consider the $2\mathcal{N}^{th}$ root of unity $\xi=e^{\frac{2 \pi i}{2\mathcal{N}}}$. We will use the description of closed oriented $3$-manifolds as surgeries along framed oriented links. In turn, we will look at links as closures of braids. Suppose that the corresponding link has $l$ components and the braid has $n$ strands.
We start with the construction of the homology classes in this context. This time we use a covering of the configuration space of $n(\mathcal{N}-2)+l+1$ particles in the $(2n+3l+1)-$punctured disk and a $\mathbb Z^{2n+1}\oplus \mathbb Z^{3l}\oplus \mathbb Z$ local system constructed as above, with $k=n$ and $\bar{l}=l$. We consider the homology groups: $$H^{-n}_{2n+1,n(\mathcal{N}-2)+l+1,l} \text { and } H^{-n,\partial}_{2n+1,n(\mathcal{N}-2)+l+1,l} \text{ which are } \mathbb Z[x_1^{\pm 1},...,x_l^{\pm 1},y_1^{\pm 1}..., y_l^{\pm 1}, d^{\pm 1}] \text{-modules.}$$
\begin{defn} (Homology classes) Let us fix a set of indices $i_1,...,i_{n} \in \{0,...,\mathcal{N}-2\}$ and denote by $\bar{i}:=(i_1,...,i_n)$. We consider the classes given by the geometric supports from the picture below: $$ {\color{red} \mathscr F_{\bar{i}}^{\mathcal{N}} \in H^{-n}_{2n+1,n(\mathcal{N}-2)+l+1,l}} \ (\text{definition } \ref{D:C1}) \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ {\color{dgreen} \mathscr L_{\bar{i}}^{\mathcal{N}} \in H^{-n,\partial}_{2n+1,n(\mathcal{N}-2)+l+1,l}} \ ( \text{definition } \ref{D:C2})$$
\begin{figure}
\caption{WRT Homology Classes}
\label{WRTClasses}
\end{figure} \end{defn} Denote by $p_1,...,p_l$ a sequence of strands of the braid that correspond to different components of the link and denote by $f_{p_i}$ the framing of the component associated to $p_i$. \begin{defn}(Lagrangian intersection in the configuration space)\\ For a multi-index $i_1,...,i_{n} \in \{0,...,\mathcal{N}-2\}$, we consider the following Lagrangian intersection: \begin{equation} \begin{cases} & \Lambda_{\bar{i}}(\beta_n) \in \mathbb Z[x_1^{\pm 1},...,x_l^{\pm 1},y_1^{\pm 1}..., y_l^{\pm 1}, d^{\pm 1}]\\ & \Lambda_{\bar{i}}(\beta_n):=\prod_{i=1}^l x_{C(p_i)}^{ \left(f_{p_i}-\sum_{j \neq {p_i}} lk_{p_i,j} \right)} \cdot \prod_{i=1}^n x^{-1}_{C(i)}
\ \left\langle(\beta_{n} \cup {\mathbb I}_{n+3l+1} ) \ { \color{red} \mathscr F_{\bar{i}}^{\mathcal{N}}}, {\color{dgreen} \mathscr L_{\bar{i}}^{\mathcal{N}}}\right\rangle \end{cases} \end{equation} \end{defn}
The main result shows that the $\mathcal{N}^{th}$ WRT invariant $\tau_{\mathcal{N}}(M)$ comes from a state sum of specialisations of these intersections, which take place in the configuration space $Conf_{n(\mathcal{N}-2)+l+1}(\mathscr D_{2n+3l+1})$. \begin{thm}\label{THEOREMW}(Topological state sum model for the Witten-Reshetikhin-Turaev invariants)\\ Let $\mathcal{N} \in \mathbb N$ be a fixed level and $M$ a closed oriented $3$-manifold. We consider $L$ a framed oriented link with $l$ components such that $M$ is obtained by surgery along $L$. Also, let $\beta_n \in B_n$ such that $L=\widehat{\beta_n}$ as above. Then the $\mathcal{N}^{th}$ Witten-Reshetikhin-Turaev invariant has the following model: \begin{equation} \begin{aligned}
\tau _{\mathcal{N}}(M)=\frac{\{1\}^{-l}_{\xi}}{\mathcal{D}^b \cdot \Delta_+^{b_+}\cdot \Delta_-^{b_-}}\cdot {\Huge{\sum_{i_1,..,i_n=0}^{\mathcal{N}-2}}} \left(\sum_{\substack{ \tiny 1 \leqslant N_1,...,N_l \leqslant \mathcal{N}-1 \\ i_k\leqslant C_k-1}} \Lambda_{\bar{i}}(\beta_n) \Bigm| _{\psi^{C}_{\xi,N_1,...,N_l}}\right). \end{aligned} \end{equation} \end{thm} In this expression $\psi^{C}_{\xi,N_1,...,N_l}$ is a specialisation of variables to complex numbers (see relation \eqref{not}). The coefficients in the above formula are presented in notation \ref{coefffrac}.
\begin{rmk} (Intersections in various configuration spaces) For a fixed colour $\mathcal{N} $, the algebraic definition of the WRT invariant $\tau_\mathcal{N}(M) $ is given by a certain linear combination of $J_{N_1,...,N_l}(L,\xi)$ for all $N_1,...,N_l\in \{1,...,\mathcal{N}-1\}$. Then, Theorem \ref{THEOREMM} would interpret this invariant as follows: \begin{equation*} \begin{aligned} \tau_\mathcal{N}(M) & \text{ is a linear combination over all } N_1,...,N_l\in \{1,...,\mathcal{N}-1\}\\ & \text{ and all } \bar{i}=(i_1,...,i_{n}) \text{ with } 0\leqslant i_k \leqslant C_k-1, k \in \{1,...,n\} \ \text{of}\\
& \left\langle(\beta_{n} \cup {\mathbb I}_{n+1} ) \ { \color{red} \mathscr F_{\bar{i}}^{(C_1,...,C_n)}}, {\color{dgreen} \mathscr L_{\bar{i}}^{(C_1,...,C_n)}}\right\rangle \Bigm| _{\psi^{C}_{\xi,N_1,...,N_l}}. \end{aligned} \end{equation*} Each term above is an intersection in the configuration space of $1+\sum_{i=1}^{n} (C_i-1)$ particles in the $(2n+1)$-punctured disk, which depends on the choice of colours $N_1,...,N_l$.
This means that the translation of the algebraic definition of the WRT invariant following Theorem \ref{THEOREMM} shows that $\tau_\mathcal{N}(M)$ is a linear combination of Lagrangian intersections in different configuration spaces $C_{2n+1,k}$, where the number of particles $k$ varies between $1$ and $(n-1)(\mathcal{N}-1)+1$. \end{rmk} \begin{rmk} (Intersection in a fixed configuration space) A feature of the model presented in Theorem \ref{THEOREMW} is that it globalises all these intersections from above, showing that the $\mathcal{N}^{th}$ WRT invariant is given by states of certain Lagrangian intersections in a fixed ambient space. \begin{equation*} \begin{aligned} \tau_\mathcal{N}(M) & \text{ is a scalar times the state sum over all multi-indices } i_1,...,i_n\in \{0,...,\mathcal{N}-2\} \text{ of }\\ & \text{specialisations of the intersection } \Lambda_{\bar{i}}(\beta_n) \ \text{corresponding to } N_1,...,N_l\in \{1,...,\mathcal{N}-1\} \\
& \text{ such that }i_k \leqslant C_k-1, k \in \{1,...,n\}, \text{ namely: } \Lambda_{\bar{i}}(\beta_n) \Bigm| _{\psi^{C}_{\xi,N_1,...,N_l}}. \end{aligned} \end{equation*} All the intersections $\Lambda_{\bar{i}}(\beta_n)$ above are constructed from the classes $(\beta_{n} \cup {\mathbb I}_{n+3l+1} ) \ \mathscr F_{\bar{i}}^{\mathcal{N}}$ and $\mathscr L_{\bar{i}}^{\mathcal{N}}$ and take place in the fixed configuration space of $n(\mathcal{N}-2)+l+1$ points in the $(2n+3l+1)$-punctured disk. \end{rmk} \begin{rmk} (Encoding the Kirby colour) Now, we discuss the coefficients which appear in the algebraic definition of the Witten-Reshetikhin-Turaev invariant for a $3$-manifold. This formula is given as linear combinations of coloured Jones polynomials of the underlying link $L$ and the coefficients come from the so-called Kirby colour and they are quantum integers.
Theorem \ref{THEOREMW} provides a globalised formula for $\tau_\mathcal{N}(M) $ and does not require individual coloured Jones polynomials. For each multi-index $\bar{i}$ bounded by the level, we consider the intersection form $\Lambda_{\bar{i}}(\beta_n)$ between globalised classes in a covering of the configuration space, which does not depend on any colouring. Then, we have to add up its specialisations, corresponding to colours which are ``bigger'' than the index $\bar{i}$.
The coefficients coming from the Kirby colour are encoded in the homology classes. Geometrically, they are given precisely by the special $l$ purple circles and $l$ blue circles from the supports of the homology classes and they correspond to the orange intersection points from figure \ref{IntersectionForm}. \end{rmk}
\subsection{Structure of the WRT invariants} Following this remark together with the fact that the intersection pairing is encoded by graded geometric intersections in the base configuration space (remark \ref{pairing}), we conclude that we have a topological formula for the WRT invariant which is obtained from the intersection points between the following geometric supports: $$(\beta_{n} \cup {\mathbb I}_{n+3l+1} ) \ { \color{red} \mathscr F_{\bar{i}}^{\mathcal{N}}} \cap {\color{dgreen} \mathscr L_{\bar{i}}^{\mathcal{N}}}$$ for all choices of indices $i_1,...,i_n\in \{0,...,\mathcal{N}-2\}$, graded using the local system $\Phi$. \begin{rmk}
In this way, we see that the WRT invariant at level $\mathcal{N}$ is completely encoded by the set of intersection points between certain Lagrangian submanifolds in the configuration space of $n(\mathcal{N}-2)+l+1$ points in the $(2n+3l+1)$-punctured disk. The number of particles is fixed and it is determined by the level of the invariant $\mathcal{N}$, the number of components of the link $l$ and the number of strands of the braid $n$. \end{rmk} \begin{figure}
\caption{Lagrangian Intersection encoding the Kirby colour}
\label{IntersectionForm}
\end{figure}
\subsection{Questions-underlying topological information} Our main motivation for this work is the understanding of the underlying topology which is carried by the Witten-Reshetikhin-Turaev invariants. The structural description presented above, provided by intersections between Lagrangians in a fixed configuration space, brings a new approach to investigating further questions concerning categorifications for these quantum invariants.
\subsection*{Structure of the paper} This article has three main parts. In Section \ref{S:3} we introduce the homological setting that we use as well as the particular choice of a local system and the corresponding covering space and homology groups. In the second part, we construct certain homology classes and, using those, we prove the topological intersection formula for the coloured Jones polynomials for links. The following section \ref{S:5} has two main parts. First, we construct a sequence of homology classes in a fixed covering space and use them to define a state sum formula. Then, we prove that it leads to a topological model for the $\mathcal{N}^{th}$ Witten-Reshetikhin-Turaev invariant. In the last part, Section \ref{S:6}, we present the formula for these invariants in the particular case where we have $3$-manifolds which are given by surgeries along knots.
\section{Notations} In the next sections we will change the variables from the ring of Laurent polynomials using certain specialisations of coefficients. For this, we use the following definition. \begin{notation}(Specialisation)\label{N:spec}\\ Let $N$ be a module over a ring $R$. Let $R'$ be another ring and suppose that we have a specialisation of the coefficients, meaning a morphism: $$\psi: R \rightarrow R'.$$ We denote by
$$N|_{\psi}:=N \otimes_{R} R'$$ the specialisation of the module $N$ by the function $\psi$. \end{notation}
\begin{defn}(Quantum numbers) $$ \{ x \}_q :=q^x-q^{-x} \ \ \ \ [x]_{q}:= \frac{q^x-q^{-x}}{q-q^{-1}}.$$ \end{defn} \begin{defn}(Specialisations of coefficients)\\ For a set of $l$ colours $N_1,..,N_l\in\mathbb N$ and a colouring $C:\{1,...,n\}\rightarrow \{1,...,l\}$ we consider the specialisation of coefficients as below: \begin{center} \begin{tikzpicture} [x=1.2mm,y=1.4mm]
\node (b1) at (-27,0) {$\mathbb Z[x_1^{\pm 1},...,x_n^{\pm 1},y_1^{\pm 1},..., y_{\bar{l}}^{\pm 1}, d^{\pm 1}]$}; \node (b2) at (27,0) {$\mathbb Z[x_1^{\pm 1},...,x_l^{\pm 1},y_1^{\pm 1},..., y_{\bar{l}}^{\pm 1}, d^{\pm 1}]$}; \node (b3) at (0,-20) {$\mathbb Z[q^{\pm1}]$}; \draw[->] (b1) to node[xshift=1mm,yshift=5mm,font=\large]{$f_C$ \eqref{eq:8}} (b2); \draw[->] (b2) to node[right,xshift=2mm,font=\large]{$\psi^{C}_{q,N_1,...,N_l}$\ \eqref{eq:8'}} (b3); \draw[->,thick,dotted] (b1) to node[left,font=\large]{} (b3); \end{tikzpicture} \end{center} \end{defn} \begin{defn}(Our setting: specialisation corresponding to a braid closure)\\ We will use this change of coefficients in the situation where $n$ is replaced by $2n+1$ and the $2n$ points except the middle one inherit a colouring with $l$ colours coming from a braid closure of a braid with $n$ strands: $$C:\{1,...,2n\}\rightarrow \{1,...,l\}.$$ Further on, we consider an extra point in the middle which we denote by $(n+1)$ and we colour it with the label: $$C(n+1)=1.$$ This together with the colouring of the $2n$ points from above gives us a colouring of $2n+1$ points: \begin{equation}\label{eq:col} C:\{1,...,2n+1\}\rightarrow \{1,...,l\}. \end{equation} For our model, we will use the function $f_C$ corresponding to the colouring from \eqref{eq:col}. Further on, we define the specialisation of coefficients: $$ \psi^{C}_{q,N_1,...,N_l}: \mathbb Z[x_1^{\pm 1},...,x_{l}^{\pm 1},y_1^{\pm 1},...,y_{l}^{\pm 1}, d^{\pm 1}] \rightarrow \mathbb Z[q^{\pm 1}]$$ \begin{equation}\label{not} \begin{cases} &\psi^{C}_{q,N_1,...,N_l}(x_i)=q^{N_i-1}, \ i\in \{1,...,l\}\\ &\psi^{C}_{q,N_1,...,N_l}(y_i)=q^{N_i}\\ &\psi^{C}_{q,N_1,...,N_l}(d)=q^{-2}. \end{cases} \end{equation} \end{defn} \begin{rmk}\label{not'} In the formulas from the paper, we denote by $C_i:=N_{C(i)}$. \end{rmk} \section{Definition of the local system and homology groups}\label{S:3}
In order to construct the classes that will lead to the $3$-manifold invariants, we will use the homology of certain coverings of the configuration space in the punctured disk. The construction of the covering space will be more subtle than the one used in \cite{Cr5}. More specifically, we will consider two types of punctures and use a subtle local system which counts the monodromies around these punctures in different manners.
For the following part, let us fix $\bar{l},k \in \mathbb N$. Also, we consider a ``weight'' $m \in \mathbb N$.
We start with the unordered configuration space of $m$ points in the punctured disk with
$n+3\bar{l}$ punctures $\mathscr D_{n+3\bar{l}}$, denoted by:
$$C_{n+3\bar{l},m}.$$ Also, we fix $d_1,..d_m \in \partial \hspace{0.5mm}\mathscr D_{n+3\bar{l}}$ and let ${\bf d}=(d_1,...,d_m)$ to be our base point in the configuration space. Now, we define a certain local system on this configuration space. For this, we use the homology of this configuration space, which has the following description. \begin{prop} Let us suppose that $m \geqslant 2$. Let $[ \ ]: \pi_1(C_{n+3\bar{l},m}) \rightarrow H_1\left( C_{n+3\bar{l},m}\right)$ be the abelianisation map. Then the homology has the following form: \begin{equation*} \begin{aligned} H_1\left( C_{n+3\bar{l},m}\right) \simeq \ \ \ \ \ & \mathbb Z^{n} \ \ \ \ \oplus \ \ \ \ \mathbb Z^{2\bar{l}} \ \ \ \ \ \oplus \ \ \ \ \mathbb Z^{\bar{l}} \ \ \ \ \oplus \ \ \ \ \mathbb Z\\ &\langle [\sigma_i] \rangle \ \ \ \ \ \langle [\gamma_j], [\bar{\gamma}_j] \rangle \ \ \ \ \ \ \ \langle [\eta_j] \rangle \ \ \ \ \ \ \ \langle [\delta]\rangle, \ \ \ {i\in \{1,...,n\}}, j\in \{1,...,\bar{l}\}. \end{aligned} \end{equation*} The five types of generators are presented in the picture below. \begin{figure}
\caption{Local system $\Phi$}
\label{Localsyst}
\end{figure} \end{prop}
We continue with the augmentation map $$\epsilon: H_1\left( C_{n+3l,m}\right)\rightarrow \mathbb Z^n \oplus \mathbb Z^l \oplus \mathbb Z$$ $$ \hspace{30mm} \langle x_i \rangle \ \ \langle y_j\rangle \ \ \langle d' \rangle$$ given by: \begin{equation} \begin{cases} &\epsilon(\sigma_i)=2x_i, i\in \{1,...,n-k\}\\ &\epsilon(\sigma_i)=-2x_i, i\in \{n-k+1,...,n\}\\ &\epsilon(\gamma_j)=2y_j, j\in \{1,...,\bar{l}\}\\ &\epsilon(\bar{\gamma}_j)=-2y_j, j\in \{1,...,\bar{l}\}\\ &\epsilon(\eta_j)=-y_j, j\in \{1,...,\bar{l}\}\\ &\epsilon(\delta)=d'. \end{cases} \end{equation} \begin{defn}(Local system)\label{localsystem} We use the local system given by the composition of the above morphisms: \begin{equation} \begin{aligned} &\Phi: \pi_1(C_{n+3\bar{l},m}) \rightarrow \mathbb Z^n \oplus \mathbb Z^{\bar{l}} \oplus \mathbb Z\\ &\hspace{28mm} \langle x_j \rangle \ \ \langle y_j\rangle \ \ \langle d' \rangle, \ i \in \{1,...,n\}, \ j \in \{1,...,\bar{l}\}\\ &\Phi= \epsilon \circ [ \ ]. \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \end{aligned} \end{equation} \end{defn}
\begin{defn}(Covering of the configuration space)\label{D:12}\\ Let $\tilde{C}^{-k}_{n+3{\bar{l}},m}$ be the covering of $C_{n+3{\bar{l}},m}$ corresponding to the local system $\Phi$.
Also, let us fix a base point ${\bf\tilde{d}}\in \tilde{C}^{-k}_{n+3{\bar{l}},m}$ in the fiber over the base point $\bf{d}$. \end{defn} \subsection{Input of the construction} We will use the homologies of this covering space. They are modules over the group ring of deck transformations, $\mathbb Z[x_1^{\pm 1},...,x_n^{\pm 1},y_1^{\pm 1}..., y_{\bar{l}}^{\pm 1}, d'^{\pm 1}]$.
For computational purposes, we will use the variable $d:=-d'$ and we consider: \begin{equation} \begin{aligned} &\gamma: \mathbb Z[x_1^{\pm 1},...,x_n^{\pm 1},y_1^{\pm 1}..., y_{\bar{l}}^{\pm 1}, d'^{\pm 1}] \rightarrow \mathbb Z[x_1^{\pm 1},...,x_n^{\pm 1},y_1^{\pm 1}..., y_{\bar{l}}^{\pm 1}, d^{\pm 1}]\\ &\begin{cases} \gamma(x_i)=x_i\\ \gamma(y_j)=y_j\\ \gamma(d')=-d. \end{cases} \end{aligned} \end{equation} Using this notation, the homology groups of the covering become modules over\\
$\mathbb Z[x_1^{\pm 1},...,x_n^{\pm 1},y_1^{\pm 1}..., y_{\bar{l}}^{\pm 1}, d^{\pm 1}]$. Further on, we use the induced map corresponding to the local system $\Phi$ with values in the group ring of $\mathbb Z^n \oplus \mathbb Z^{\bar{l}} \oplus \mathbb Z$ : \begin{equation} \Phi: \pi_1(C_{n+3{\bar{l}},m}) \rightarrow \mathbb Z[x_1^{\pm 1},...,x_n^{\pm 1},y_1^{\pm 1}..., y_{\bar{l}}^{\pm 1}, d^{\pm 1}]. \end{equation} Then, taking into account the change of variables $\gamma$, we define: \begin{equation} \begin{aligned} &\bar{\Phi}: \pi_1(C_{n+3{\bar{l}},m}) \rightarrow \mathbb Z[x_1^{\pm 1},...,x_n^{\pm 1},y_1^{\pm 1}..., y_{\bar{l}}^{\pm 1}, d^{\pm 1}]\\ &\bar{\Phi}=\gamma \circ \Phi. \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \end{aligned} \end{equation} \begin{defn} We consider two submodules in the homologies of this covering space (which are modules over $\mathbb Z[x_1^{\pm 1},...,x_n^{\pm 1},y_1^{\pm 1}..., y_{\bar{l}}^{\pm 1}, d^{\pm 1}]$): \begin{enumerate}
\item[$\bullet$] $\mathscr H^{-k}_{n,m,{\bar{l}}}\subseteq H^{\text{lf},\infty,-}_m(\tilde{C}^{-k}_{n+3{\bar{l}},m}, P^{-1};\mathbb Z)$ and
\item[$\bullet$] $\mathscr H^{-k,\partial}_{n,m,{\bar{l}}} \subseteq H^{\text{lf},\Delta}_m(\tilde{C}^{-k}_{n+3{\bar{l}},m},\partial;\mathbb Z)$ \end{enumerate} given by the images of the homology with twisted coefficients into the homology of the covering space, defined in an analogue manner as the homology groups from \cite{Cr5}-Section 3 (using the splitting of the boundary of the configuration space and its description from \cite{CrM}).
\end{defn} \begin{prop}(\cite{CrM})\label{P:3'''} There exists a topological intersection pairing: $$<< ~,~ >>: \mathscr H^{-k}_{n,m,{\bar{l}}} \otimes \mathscr H^{-k,\partial}_{n,m,{\bar{l}}}\rightarrow\mathbb Z[x_1^{\pm 1},...,x_n^{\pm 1},y_1^{\pm 1}..., y_{\bar{l}}^{\pm 1}, d^{\pm 1}].$$ \end{prop} In the next section, we will use the exact form of this intersection pairing, so we will briefly explain its formula. Let us consider two classes $H_1 \in \mathscr H^{-k}_{n,m,{\bar{l}}}$ and $H_2 \in \mathscr H^{-k,\partial}_{n,m,{\bar{l}}}$. We suppose that these classes are given by the lifts $\tilde{X}_1, \tilde{X}_2$ of two immersed submanifolds $X_1,X_2 \subseteq C_{n+3{\bar{l}},m}$. Also, we assume that $X_1$ and $X_2$ have a transverse intersection, in a finite number of points. \begin{prop}(Intersection pairing from intersections in the base space and the local system)\label{P:3} For each intersection point $x \in X_1 \cap X_2$ we define a certain loop and denote it by $l_x \subseteq C_{n+3{\bar{l}},m}$. a) {\bf Construction of $l_x$}\\
We suppose that we have the paths $\gamma_{X_1}, \gamma_{X_2}$ which start at $\bf d$ and end on $X_1$, $X_2$ respectively, and that $\tilde{\gamma}_{X_1}(1) \in \tilde{X}_1$ and $ \tilde{\gamma}_{X_2}(1) \in \tilde{X}_2$. Further on, we choose two paths $\delta_{X_1}, \delta_{X_2}:[0,1]\rightarrow C_{n+3{\bar{l}},m}$ with the property: \begin{equation} \begin{cases} Im(\delta_{X_1})\subseteq X_1; \delta_{X_1}(0)=\gamma_{X_1}(1); \delta_{X_1}(1)=x\\ Im(\delta_{X_2})\subseteq X_2; \delta_{X_2}(0)=\gamma_{X_2}(1); \delta_{X_2}(1)=x. \end{cases} \end{equation} The composition of these paths gives us the loop: $$l_x=\gamma_{X_1}\circ\delta_{X_1}\circ \delta_{X_2}^{-1}\circ \gamma_{X_2}^{-1}.$$ Also, let $\alpha_x$ be the sign of the geometric intersection between $X_1$ and $X_2$ in the base configuration space, at the point $x$.\\ b) {\bf Intersection form}\\
Then, the intersection pairing can be computed from the set of loops $l_x$ and the local system: \begin{equation}\label{eq:1} <<H_1,H_2>>=\sum_{x \in X_1 \cap X_2} \alpha_x \cdot \Phi(l_x) \in \mathbb Z[x_1^{\pm 1},...,x_n^{\pm 1},y_1^{\pm 1}..., y_{\bar{l}}^{\pm 1}, d^{\pm 1}]. \end{equation} \end{prop} \begin{rmk}\label{orientd} For actual computations, in the case where the homology classes come from product of one dimensional submanifolds quotiented in the configuration space, one can replace the variable $d'$ by $d$ and the local system $\Phi$ by $\bar{\Phi}$ in the previous formula, and then count just the product of local orientations in the disk around each component of the intersection point $x$ (instead of keeping track of the sign of orientations in the configuration space $\alpha_x$). \end{rmk}
\subsection{Specialisations given by colourings} \begin{defn}(Change of coefficients) For the next part, we suppose that we have a colouring $C$ of the $n$ punctures of the disk into $l$ colours: \begin{equation} C:\{1,...,n\}\rightarrow \{1,...,l\}. \end{equation} We will work in the situation where ${\bar{l}}=0$ or ${\bar{l}}=l$. Then, we fix ${\bar{l}}$ components $\bar{p}_1,...,\bar{p}_{\bar{l}}\in \{1,...,n\}$.
Then, we define the corresponding change of variables, where we change the first $n+{\bar{l}}$ components $x_1,...,x_n,y_1,...,y_{\bar{l}}$ from the ring $\mathbb Z[x_1^{\pm 1},...,x_n^{\pm 1},y_1^{\pm 1}..., y_{\bar{l}}^{\pm 1}, d^{\pm 1}]$ to $l+{\bar{l}}$ variables, denoted by $x_1,..,x_l,y_1,...,y_{\bar{l}}$, as below: $$ f_C: \mathbb Z[x_1^{\pm 1},...,x_n^{\pm 1},y_1^{\pm 1}..., y_{\bar{l}}^{\pm 1}, d^{\pm 1}] \rightarrow \mathbb Z[x_1^{\pm 1},...,x_l^{\pm 1},y_1^{\pm 1}..., y_{\bar{l}}^{\pm 1}, d^{\pm 1}]$$ \begin{equation}\label{eq:8} \begin{cases} &f_C(x_i)=x_{C(i)}, \ i\in \{1,...,n\}\\ &f_C(y_j)=y_{C(\bar{p}_j)}, \ j\in \{1,...,{\bar{l}}\}. \end{cases} \end{equation} \end{defn} Now, we will change the coefficients of the homology groups using the function $f_C$. \begin{defn}(Homology groups)\label{D:4} Let us define the homologies which correspond to these coefficients, given by: \begin{enumerate}
\item[$\bullet$] $H^{-k}_{n,m,{\bar{l}}}:=\mathscr H^{-k}_{n,m,{\bar{l}}}|_{f_C}$
\item[$\bullet$] $H^{-k,\partial}_{n,m,{\bar{l}}}:=\mathscr H^{-k,\partial}_{n,m,{\bar{l}}}|_{f_C}.$ \end{enumerate} They are modules over $\mathbb Z[x_1^{\pm 1},...,x_l^{\pm 1},y_1^{\pm 1}..., y_{\bar{l}}^{\pm 1}, d^{\pm 1}]$. \end{defn} Now, we look at braids with $n+3{\bar{l}}$ strands which preserve the colouring $C$ and the induced colouring on the components $\bar{p}_1,...,\bar{p}_l$ and denote the set of such braids by $B^{C}_{n+3{\bar{l}}}$. \begin{prop}(\cite{CrM}) \label{colbr} There is a braid group action (which comes from the mapping class group action) which is compatible with the action of deck transformations at the homological level: $$B^{C}_{n+3{\bar{l}}} \curvearrowright H^{-k}_{n,m,{\bar{l}}} \ \left( \text{ as a module over } \mathbb Z[x_1^{\pm 1},...,x_l^{\pm 1},y_1^{\pm 1}..., y_{\bar{l}}^{\pm 1}, d^{\pm 1}]\right).$$ \end{prop} \begin{prop}(\cite{CrM})\label{P:3'} There is also a topological intersection pairing: $$\left\langle ~,~ \right\rangle: H^{-k}_{n,m,{\bar{l}}} \otimes H^{-k,\partial}_{n,m,{\bar{l}}}\rightarrow\mathbb Z[x_1^{\pm 1},...,x_l^{\pm 1},y_1^{\pm 1}..., y_{\bar{l}}^{\pm 1}, d^{\pm 1}].$$ whose method of computation is the same as the one presented in Proposition \ref{P:3}, specialised using the change of coefficients $f_C$:
$$ \left\langle ~,~ \right\rangle= \ << ~,~>>|_{f_C}.$$ \end{prop} \begin{defn}(Specialisation of coefficients) Let $N_1,...,N_l\in \mathbb N$ be a sequence of natural numbers. We define the specialisation of coefficients given by: $$ \psi^{C}_{q,N_1,...,N_l}: \mathbb Z[x_1^{\pm 1},...,x_l^{\pm 1},y_1^{\pm 1}..., y_l^{\pm 1}, d^{\pm 1}] \rightarrow \mathbb Z[q^{\pm 1}]$$ \begin{equation}\label{eq:8'} \begin{cases} &\psi^C_{q,N_1,...,N_l}(x_i)=q^{N_i-1}, \ i\in \{1,...,l\}\\ &\psi^C_{q,N_1,...,N_l}(y_i)=q^{N_i}, \ i\in \{1,...,{\bar{l}}\}\\ &\psi^C_{q,N_1,...,N_l}(d)=q^{-2}. \end{cases} \end{equation} \end{defn}
\section{Coloured Jones polynomials for framed links}\label{S:4} In this section, we show a topological intersection formula for coloured Jones polynomials for links whose components are coloured with different colours.
Let us start with $L=K_1 \cup ...\cup K_l$ a framed oriented link with framings $f_1,...,f_l\in \mathbb Z$.
Let us choose $\beta_n \in B_n$ a braid such that $L= \widehat{\beta_n}$. We also fix a set of colours $N_1,...,N_l\in \mathbb N$.
\begin{notation} For a natural number $M\in \mathbb N$, we denote by $V_{M}$ the $M$-dimensional representation of the quantum group $U_q(sl(2))$.
We colour the components of the link $L$ with the representations $V_{N_1},...,V_{N_l}$ and denote the coloured Jones polynomial of this framed link by $J_{N_1,...,N_l}(L,q)$ (as in \cite{Turaev}). Also, for the further notations, we consider: $$\bar{N}:=(N_1,...,N_l).$$ \end{notation} \begin{defn}(Induced colorings)\\ a) (Colourings of the braid) The colouring of the link given by $\bar{N}$ induces a colouring of the strands of the braid, and we denote the corresponding colours by: $$(C_1,...,C_n).$$ Now, we look at the link as the closure of the braid $\beta_n$ together with $n$ straight strands, and so, we have an associated colouring of $2n$ points $C:\{1,...,2n\}\rightarrow \{1,...,l\}$. This means that we have the following colours on the $2n$ points: $$(C_1,...,C_n,C_n,...,C_1).$$ We will work with the $(2n+1)$-punctured disk and for this purpose we define a colouring of $2n+1$ points as below: $$\bar{C}^{\bar{N}}:=(C_1,...,C_n,N_1,C_n,...,C_1).$$
b) (Set of states) We consider the following indexing set:
$$C(\bar{N}):= \big\{ \bar{i}=(i_1,...,i_n)\in \mathbb N^{n} \mid 0\leqslant i_k \leqslant C_k-1, \ \forall k\in \{1,...,n\} \big\}.$$ \end{defn} \subsection{Homology classes} Now that we have the induced colouring of the braid and the corresponding indexing set $C(\bar{N})$, we can present the homology groups that we will use. More specifically, we will use the configuration space of $1+\sum_{i=1}^{n} (C_i-1)$ points on the $(2n+1)$-punctured disk. Then, we consider the covering coming from the local system $\Phi$ associated to the parameters: $$ n \rightarrow 2n+1; \ \ \ m\rightarrow 1+\sum_{i=1}^{n} (C_i-1); \ \ \ {\bar{l}}\rightarrow 0; \ \ \ k\rightarrow -n.$$ We use the corresponding homology groups: $$H^{-n}_{2n+1, 1+\sum_{i=1}^{n} (C_i-1),0} \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \text{ and }\ \ \ \ \ \ \ H^{-n,\partial}_{2n+1,1+\sum_{i=1}^{n} (C_i-1),0}.$$ For the following part, since the third component is zero, we will just erase it from the indices of the homology groups. Now we are ready to define the homology classes that will be used in the intersection model. The classes will be prescribed by a couple given by: \begin{itemize} \item[•] A {\em geometric support}, meaning a {\em set of arcs in the punctured disk}. The image of the product of these arcs in the configuration space, gives us a submanifold which has half of the dimension of the configuration space. \item[•] A set of {\em paths to the base point}, which start in the base points from the punctured disk and end on these curves. The set of these paths gives a path in the configuration space, from $\bf d$ to the submanifold mentioned above. \end{itemize} Then, we lift the path to a path in the covering space, starting from $\tilde{\bf{d}}$ and then we lift the submanifold through the end point of this path. The detailed construction of such homology classes is presented in \cite{Cr5}, Section 5. \begin{defn} (Homology classes)\\
For any set of indices $\bar{i}=(i_1,...,i_{n}) \in C(\bar{N})$ we define two homology classes, given by the geometric supports from figure \ref{Picture0}:
$${\color{red} \mathscr F_{\bar{i}}^{(C_1,...,C_n)} \in H^{-n}_{2n+1, {\scriptscriptstyle 1+\sum_{i=1}^{n}}(C_i-1)}} \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \text{ and }\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ {\color{dgreen} \mathscr L^{(C_1,...,C_n)}_{\bar{i}}\in H^{-n,\partial}_{2n+1,1+\sum_{i=1}^{n} (C_i-1)}}.$$ $$\hspace{5mm}\downarrow \text{ lifts }$$
\begin{figure}
\caption{Embedded Lagrangians}
\label{Picture0}
\end{figure} \end{defn} In the next part, we use the specialisation of coefficients: $$ \psi^{C}_{q,N_1,...,N_l}: \mathbb Z[x_1^{\pm 1},...,x_{l}^{\pm 1}, d^{\pm 1}] \rightarrow \mathbb Z[q^{\pm 1}]$$ \begin{equation}\label{eq:8''''} \begin{cases} &\psi^{C}_{q,N_1,...,N_l}(x_i)=q^{N_i-1}, \ i\in \{1,...,l\}\\ &\psi^{C}_{q,N_1,...,N_l}(d)=q^{-2}. \end{cases} \end{equation} \subsection{Intersection model} Now we show that the coloured Jones polynomial of a link coloured with the colours $N_1,...,N_l$ can be obtained from an intersection pairing which uses the classes $\mathscr F_{\bar{i}}^{(C_1,...,C_n)}$ and $\mathscr L_{\bar{i}}^{(C_1,...,C_n)}$ for all $\bar{i} \in C(\bar{N})$. \begin{thm}\label{THEOREM}(Topological state sum model for coloured Jones polynomials for coloured links)
\begin{equation} \begin{aligned} J_{N_1,...,N_l}(L,q)& =~ q^{ \sum_{i=1}^{l}\left( f_i- \sum_{j \neq i} lk_{i,j}\right)(N_i-1)} \cdot \\
& \cdot \left(\sum_{\bar{i}\in C(\bar{N})} \left( \prod_{i=1}^{n}x^{-1}_{C(i)} \right)\cdot \left\langle(\beta_{n} \cup {\mathbb I}_{n+1} ) \ { \color{red} \mathscr F_{\bar{i}}^{(C_1,...,C_n)}}, {\color{dgreen} \mathscr L_{\bar{i}}^{(C_1,...,C_n)}}\right\rangle \right)\Bigm| _{\psi^{C}_{q,N_1,...,N_l}}. \end{aligned} \end{equation} In this formula we denote by $(lk_{i,j})_{i,j \in \{1,..,l\}}$ the linking matrix of the link $L$. \end{thm} \begin{proof} The proof of this intersection formula is a generalisation of the strategy used in the model for coloured Jones polynomials coloured with the same colour, presented in \cite{Cr5}, based on arguments from \cite{Cr3}. We outline the main steps as follows.\\ {\bf Step 1} We consider the homology classes $$ \bar{\mathscr F}_{\bar{i}}^{(C_1,...,C_n)} \in H^{-n}_{2n, {\scriptscriptstyle \sum_{i=1}^{n}}(C_i-1)} \ \ \ \ \ \ \text{ and }\ \ \ \ \ \ \ \ \ { \bar{\mathscr L}^{(C_1,...,C_n)}_{\bar{i}}\in H^{-n,\partial}_{2n,\sum_{i=1}^{n} (C_i-1)}}$$ which have the same geometric support as the classes $\mathscr F_{\bar{i}}^{(C_1,...,C_n)}$ and $\mathscr L_{\bar{i}}^{(C_1,...,C_n)}$ except that we remove the 1-dimensional part which is supported around the puncture labeled by $0$, namely the purple segment and the blue circle (see a similar argument in Step 2, Section 6 from \cite{Cr5}). Then we have that: $$ \left\langle(\beta_{n} \cup {\mathbb I}_{n+1} ) \ { \mathscr F_{\bar{i}}^{(C_1,...,C_n)}}, { \mathscr L_{\bar{i}}^{(C_1,...,C_n)}}\right\rangle= d^{- \sum_{k=1}^{n}i_k} \left\langle(\beta_{n} \cup {\mathbb I}_{n} ) \ { \bar{ \mathscr F}_{\bar{i}}^{(C_1,...,C_n)}}, {\bar{\mathscr L}_{\bar{i}}^{(C_1,...,C_n)}}\right\rangle $$ This means that we want to prove the following: \begin{equation} \begin{aligned} J_{N_1,...,N_l}(L,q)& =~ q^{ \sum_{i=1}^{l}\left( f_i- \sum_{j \neq i} lk_{i,j}\right)(N_i-1)} \cdot \\
& \cdot \left(\sum_{\bar{i}\in C(\bar{N})} \prod_{i=1}^{n}x^{-1}_{C(i)} \cdot d^{- \sum_{k=1}^{n}i_k} \left\langle(\beta_{n} \cup {\mathbb I}_{n} ) \ { \color{red} \bar{\mathscr F}_{\bar{i}}^{(C_1,...,C_n)}}, {\color{dgreen} \bar{\mathscr L}_{\bar{i}}^{(C_1,...,C_n)}}\right\rangle \right)\Bigm| _{\psi^{C}_{q,N_1,...,N_l}}. \end{aligned} \end{equation} {\bf Step 2} For the next part, we follow step by step the correspondence to the Reshetikhin-Turaev definition of the coloured Jones polynomials. More specifically, the cups of the diagram correspond to the sum of the classes $\bar{\mathscr F}_{\bar{i}}^{(C_1,...,C_n)}$ over all $\bar{i}\in C(\bar{N})$. Further on, the braid action on the quantum side and on the homological side correspond, using the identification due to Martel \cite{Martel}.
{\bf Step 3} In the end, the caps of the diagram require that after the braid group action, we evaluate just the components which are symmetric with respect to the middle of the disc. More precisely this means that the indices corresponding to the points $k$ and $2n+1-k$ should sum up to the colour $C_k-1$. This is encoded geometrically by the intersection with the dual class $\bar{\mathscr L}_{\bar{i}}^{(C_1,...,C_n)}$.
On the algebraic side, one should also encode an extra coefficient which corresponds to the caps of the diagram. We refer to the details of the argument for a single colour as they are presented in \cite{Cr3} (Section 5 and Section 7), except that here we have a different local system. The fact that the change of the local system does not affect the flow of the proof follows by a computation similar to the one from Step 3, Section 6 from \cite{Cr5}. The main points are as follows.
This coefficient is given by the pivotal structure, more specifically by the action of the element $K^{-1}$ from the quantum group. Now, for a set of indices $i_1,...,i_n$ the $K^{-1}$ action on the corresponding tensor monomial is given by: \begin{equation}
q^{- \sum_{k=1}^{n}\left( (C_k-1)-2i_k\right)}= \left( \prod_{k=1}^{n}q^{- (C_k-1)} \right) \cdot q^{ \ \sum_{k=1}^{n}2i_k}. \end{equation} This coefficient is precisely the specialisation: \begin{equation} \psi^{C}_{q,N_1,...,N_l} \left(\left(\prod_{i=1}^{n}x^{-1}_{C(i)} \right) \cdot d^{- \sum_{k=1}^{n}i_k}\right). \end{equation} The remaining coefficient which appears in the formula comes from the framing contribution of the components of the link $L$. \end{proof}
\section{WRT from intersections in configuration spaces}\label{S:5} In this part we pass towards invariants for $3$-manifolds and aim to construct the intersection model for the Witten-Reshetikhin-Turaev invariants, as presented in Theorem \ref{THEOREM}. Let us fix a level $\mathcal{N} \in \mathbb N$. As in the previous section, we start with a framed oriented link with $l$ components, which is the closure of a braid with $n$ strands. \begin{defn}(Choice of $l$ points)\\ Let us choose $l$ strands of the braid $\beta_n$ which all belong to different components of the link and denote their indices by: $p_1,...,p_l$. Also, we look in the $2n+1$ punctured disk and denote the symmetric of these points with respect to the middle axis by $\bar{p}_1,...,\bar{p}_l$. \end{defn}
This time we will use the homology of the covering of the configuration space of $n(\mathcal{N}-2)+l+1$ particles in the punctured disk with $2n+3l+1$ punctures, associated to the parameters: $$ n \rightarrow 2n+1; \ \ \ m\rightarrow n(\mathcal{N}-2)+l+1; \ \ \ l={\bar{l}}; \ \ \ k\rightarrow -n.$$ More precisely, we will work with the homology groups: $$H^{-n}_{2n+1, n(\mathcal{N}-2)+l+1,l} \ \ \ \ \ \ \text{ and }\ \ \ \ \ \ \ H^{-n,\partial}_{2n+1,n(\mathcal{N}-2)+l+1,l}.$$ On the picture, we consider $3l$ blue punctures in the punctured disk such that they are split into triples which lie below the privileged punctures $\bar{p}_1,...,\bar{p}_l$, as in figure \ref{Picture}. Now we are ready to define the main tools in our construction, which are the homology classes in the homology presented above.
\subsection{Homology classes} \begin{defn} (First Homology class)\label{D:C1}\\
For a set of indices $i_1,...,i_{n} \in \{0,...,\mathcal{N}-2\}$ we denote $\bar{i}:=(i_1,...,i_n)$ and we consider the class given by the geometric support from picture \ref{Picture}:
$${\color{red} \mathscr F_{\bar{i}}^{\mathcal{N}} \in H^{-n}_{2n+1,n(\mathcal{N}-2)+l+1,l}}$$
\begin{figure}\label{Picture}
\end{figure} \end{defn}
\begin{rmk} When we take one of the circles from the above picture, its lift has a non-trivial monodromy, so this corresponds to an arc which starts and ends in the fiber over $w$. This shows that the lift of the geometric support from figure \ref{Picture} will lead to a well defined homology class in the homology relative to the fiber $P^{-1}$. \end{rmk} \begin{defn} (Second Homology class)\label{D:C2}\\
Also for each choice of indices $i_1,...,i_{n} \in \{0,...,\mathcal{N}-2\}$ we consider the geometric support given by the product of configuration spaces on the circles from figure \ref{Picture2} and define the associated homology class as below:
$${\color{dgreen} \mathscr L_{\bar{i}}^{\mathcal{N}} \in H^{-n,\partial}_{2n+1,n(\mathcal{N}-2)+l+1,l}}$$
\begin{figure}\label{Picture2}
\end{figure} \end{defn} \begin{rmk} All circles from the above picture have trivial monodromy, since the local system evaluates symmetric points with opposite monodromies and also it evaluates in opposite directions the loops around the blue punctures which are displayed on the vertical directions (and lie in the disks bounded by those circles). So, the geometric support from figure \ref{Picture2} leads to a well defined homology class in $H^{-n,\partial}_{2n+1,n(\mathcal{N}-2)+l+1,l}$. \end{rmk} We remind the definition of the specialisation of coefficients which is associated to this context: $$ \psi^{C}_{q,N_1,...,N_l}: \mathbb Z[x_1^{\pm 1},...,x_{l}^{\pm 1},y_1^{\pm 1},...,y_{l}^{\pm 1}, d^{\pm 1}] \rightarrow \mathbb Z[q^{\pm 1}]$$ \begin{equation}\label{not''} \begin{cases} &\psi^{C}_{q,N_1,...,N_l}(x_i)=q^{N_i-1}, \ i\in \{1,...,l\}\\ &\psi^{C}_{q,N_1,...,N_l}(y_i)=q^{N_i}\\ &\psi^{C}_{q,N_1,...,N_l}(d)=q^{-2}. \end{cases} \end{equation}
\subsection{WRT from intersections in configuration spaces}
\
\begin{defn}(Kirby colour)\\ For $\mathcal{N}\in \mathbb N$, the Kirby colour corresponding to the quantum group $U_{\xi}(sl(2))$ (\cite{Turaev}) is given by: \begin{equation} \Omega:=\sum_{N=1}^{\mathcal{N}-1}qdim(V_N) \cdot V_N= \sum_{N=1}^{\mathcal{N}-1}[N]_{\xi} \cdot V_N. \end{equation} \end{defn} \begin{notation} We denote by $b_{+},b_{-}$ and $b$ the number of positive, negative and zero eigenvalues of the linking matrix of $L$. Also, we consider: \begin{equation}\label{coefffrac} \begin{aligned} &\Delta_+=J_{\Omega}(\mathcal{U}_{+},\xi)\\ &\Delta_-=J_{\Omega}(\mathcal{U}_{-},\xi)\\ &\mathcal{D}= \ \mid \Delta_+ \mid \end{aligned} \end{equation} where $\mathcal{U}_{+}$ and $\mathcal{U}_{-}$ are the unknot with framing $+1$ and $-1$ respectively (\cite{Turaev}). \end{notation} We will use the homological classes constructed above together with the specialisation of coefficients in order to prove the main result, which we remind below. \begin{thm}(Topological state sum model for the Witten-Reshetikhin-Turaev invariants)\\ Let $M$ be a closed oriented $3$-manifold and $L$ a framed oriented link with $l$ components such that $M$ is obtained by surgery along $L$. Let us choose a braid $\beta_n$ such that $L=\widehat{\beta_n}$. Now, for $i_1,...,i_{n} \in \{0,...,\mathcal{N}-2\}$, we consider the following Lagrangian intersection: \begin{equation} \begin{cases} & \Lambda_{\bar{i}}(\beta_n) \in \mathbb Z[x_1^{\pm 1},...,x_l^{\pm 1},y_1^{\pm 1}..., y_l^{\pm 1}, d^{\pm 1}]\\ & \Lambda_{\bar{i}}(\beta_n):=\prod_{i=1}^l x_{C(p_i)}^{ \left(f_{p_i}-\sum_{j \neq {p_i}} lk_{p_i,j} \right)} \cdot \prod_{i=1}^n x_{C(i)}^{-1}
\ \left\langle(\beta_{n} \cup {\mathbb I}_{n+3l+1} ) \ { \color{red} \mathscr F_{\bar{i}}^{\mathcal{N}}}, {\color{dgreen} \mathscr L_{\bar{i}}^{\mathcal{N}}}\right\rangle. \end{cases} \end{equation} Then the $\mathcal{N}^{th}$ Witten-Reshetikhin-Turaev invariant has the following model: \begin{equation} \begin{aligned}
\tau _{\mathcal{N}}(M)=\frac{\{1\}^{-l}_{\xi}}{\mathcal{D}^b \cdot \Delta_+^{b_+}\cdot \Delta_-^{b_-}}\cdot {\Huge{\sum_{i_1,..,i_n=0}^{\mathcal{N}-2}}} \left(\sum_{\substack{ \tiny 1 \leqslant N_1,...,N_l \leqslant \mathcal{N}-1 \\ \bar{i}\in C(N_1,..,N_l)}} \Lambda_{\bar{i}}(\beta_n) \Bigm| _{\psi^{C}_{\xi,N_1,...,N_l}}\right). \end{aligned} \end{equation} \end{thm}
\begin{proof} The Witten-Reshetikhin-Turaev invariant at level $\mathcal{N}$ is defined using the coloured Jones polynomials of the link $L$ whose components are coloured with the Kirby colour $\Omega$ (\cite{Turaev},\cite{O}): \begin{equation} \tau_{\mathcal{N}}(M)=\frac{1}{\mathcal{D}^b \cdot \Delta_+^{b_+}\cdot \Delta_-^{b_-}}\cdot J_{\Omega,...,\Omega}(L,\xi). \end{equation} This means that the invariant is given by the following linear combination of coloured Jones polynomials, with colours less than $\mathcal{N}-1$: \begin{equation} \begin{aligned} \tau & _{\mathcal{N}}(M)=\frac{1}{\mathcal{D}^b \cdot \Delta_+^{b_+}\cdot \Delta_-^{b_-}} \cdot \sum_{1 \leqslant N_1,...,N_l \leqslant \mathcal{N}-1 }[N_1]_{\xi}\cdot ... \cdot [N_l]_{\xi} \cdot J_{N_1,...,N_l}(L,\xi). \end{aligned} \end{equation} \subsection*{\bf Step I (WRT invariant as a sum of intersections in various configuration spaces)}
\
Now, we remind the topological formula for the coloured Jones polynomials, which is presented in Theorem \ref{THEOREM}: \begin{equation} \begin{aligned} J_{N_1,...,N_l}(L,q)& =~ q^{ \sum_{i=1}^{l}\left( f_i- \sum_{j \neq i} lk_{i,j}\right)(N_i-1)} \cdot \\
& \cdot \left(\sum_{\bar{i}\in C(\bar{N})} \left( \prod_{i=1}^{n}x^{-1}_{C(i)} \right)\cdot \left\langle(\beta_{n} \cup {\mathbb I}_{n+1} ) \ { \color{red} \mathscr F_{\bar{i}}^{(C_1,...,C_n)}}, {\color{dgreen} \mathscr L_{\bar{i}}^{(C_1,...,C_n)}}\right\rangle \right)\Bigm| _{\psi^{C}_{q,N_1,...,N_l}}. \end{aligned} \end{equation} We notice that the variables $x_{C(p_1)},...,x_{C(p_l)}$ correspond to the special strands of the braid $p_1,...,p_l$ which are all associated to different components of the link. More precisely, we have that: $$(C_{p_1},...,C_{p_l})=(N_1,...,N_l)$$ as unordered families. We remind the notation $C_{p_i}=N_{C(p_i)}$. Further on, the variables are specialised in the following manner: \begin{equation}
{\psi^{C}_{q,N_1,...,N_l}}(x_{C(p_i)})=q^{N_{C(p_i)}-1}=q^{C_{p_i}-1}, \forall i\in \{1,...,l\}. \end{equation} This remark allows us to encode the framing correction and we obtain the following formula: \begin{equation} \begin{aligned} &J_{N_1,...,N_l}(L,q) =\\ & \hspace{-3mm}\left( \prod_{i=1}^l x_{C(p_i)}^{ \left(f_{p_i}-\sum_{j \neq {p_i}} lk_{p_i,j} \right)} \prod_{i=1}^n x_{C(i)}^{-1}
\cdot \sum_{\bar{i}\in C(\bar{N})} \left\langle(\beta_{n} \cup {\mathbb I}_{n+1} ) \ { \color{red} \mathscr F_{\bar{i}}^{(C_1,...,C_n)}}, {\color{dgreen} \mathscr L_{\bar{i}}^{(C_1,...,C_n)}}\right\rangle \right)\Bigm| _{\psi^{C}_{q,N_1,...,N_l}}. \end{aligned} \end{equation} (here, we used the notations from the statement of Theorem \ref{THEOREMW} concerning the framings).
This means that the $3-$manifold invariant is given by the expression presented below: \begin{equation}\label{eq:p1} \begin{aligned} &\tau_{\mathcal{N}}(M)= \frac{1}{\mathcal{D}^b \cdot \Delta_+^{b_+}\cdot \Delta_-^{b_-}} \cdot \mathlarger{\mathlarger{\sum}}_{1 \leqslant N_1,...,N_l \leqslant \mathcal{N} -1}[N_1]_{\xi}\cdot ... \cdot [N_l]_{\xi} \cdot\\ & \hspace{-5mm}\cdot \left( \prod_{i=1}^l x_{C(p_i)}^{ \left(f_{p_i}-\sum_{j \neq {p_i}} lk_{p_i,j} \right)} \prod_{i=1}^n x_{C(i)}^{-1}
\cdot \sum_{\bar{i}\in C(\bar{N})} \left\langle(\beta_{n} \cup {\mathbb I}_{n+1} ) \ { \color{red} \mathscr F_{\bar{i}}^{(C_1,...,C_n)}}, {\color{dgreen} \mathscr L_{\bar{i}}^{(C_1,...,C_n)}}\right\rangle \right)\hspace{-1mm}\Bigm| _{\psi^{C}_{\xi,N_1,...,N_l}}. \end{aligned} \end{equation} \subsection*{\bf Step II (Construction of homology classes in a fixed configuration space)}
\
In this part we concentrate on each intersection pairing which occurs in the above formula. They are given by: \begin{equation} \begin{aligned} &\left\langle(\beta_{n} \cup {\mathbb I}_{n+1} ) \ { \mathscr F_{\bar{i}}^{(C_1,...,C_n)}}, {\mathscr L_{\bar{i}}^{(C_1,...,C_n)}}\right\rangle.\\ \end{aligned} \end{equation} This pairing comes from an intersection in the configuration space of $1+\sum_{i=1}^{n}(C_i-1)$ points in the $(2n+1)-$ punctured disk, and the homology classes belong to the homology groups: $$\mathscr F_{\bar{i}}^{(C_1,...,C_n)} \in H^{-n}_{2n+1, {\scriptscriptstyle 1+\sum_{i=1}^{n}}(C_i-1)}; \ \ \ \ \ \ \mathscr L^{(C_1,...,C_n)}_{\bar{i}}\in H^{-n,\partial}_{2n+1,1+\sum_{i=1}^{n} (C_i-1)}.$$ We would like to arrive at an intersection in a configuration space where the number of particles does not depend on the individual components given by the set $(C_1,...,C_n)$.
In order to achieve this, we use the property that all components of this set are bounded by the level of the $3-$manifold invariant, more precisely we have that: $$0 \leqslant i_k\leqslant C_k-1 \leqslant \mathcal{N}-2, \ \ \forall k \in \{1,...,n\}.$$
Now let us investigate the geometric supports of the two classes. For any $k$, we remark that the geometric support of $\mathscr F_{\bar{i}}^{(C_1,...,C_n)}$ has: \begin{itemize} \item[•] $i_k$ curves ending in the $k^{th}$ puncture \item[•] $C_k-i_k-1$ curves ending in the $(2n-k+1)^{st}$ puncture. \end{itemize} Based on these remarks, we will ``complete'' each index which corresponds to a colour $C_k-1$ up to $\mathcal{N}-2$. We will do this using the property that the action of the braid $(\beta_n \cup {\mathbb I}_{n+1})$ is trivial on the right hand side of the $(2n+1)$-punctured disk.
For each $k \in \{1,...,n\}$ let us add $\mathcal{N}-C_k-1$ extra segments/ configuration points on the part of the geometric supports of the classes $\mathscr F_{\bar{i}}^{(C_1,...,C_n)}$ and $\mathscr L_{\bar{i}}^{(C_1,...,C_n)}$ which end/ go around the puncture $2n-1-k$. Thanks to this change, each of the new geometric supports has in total $\mathcal{N}-2$ curves/ configuration points which end/ go around symmetric punctures of the punctured disk.
\begin{defn} (Level $\mathcal{N}$ Homology classes)\\ Following this procedure, we consider the homology classes given by the geometric supports which are presented in figure \ref{Picture333}, and denote them by: $${\color{red} F_{\bar{i}}^{\mathcal{N}} \in H^{-n}_{2n+1,n(\mathcal{N}-2)+1}} \ \ \ \ \ \ \ \ \ \ \ \ \ \text{ and }\ \ \ \ \ \ \ \ \ \ \ \ \ {\color{dgreen} L_{\bar{i}}^{\mathcal{N}}\in H^{-n,\partial}_{2n+1,n(\mathcal{N}-2)+1}}$$
\begin{figure}
\caption{Classes corresponding to the level $\mathcal{N}$ and multi-index $\bar{i}$ }
\label{Picture333}
\end{figure} \end{defn} Further on, we show that the change of the classes does not affect the outcome of the intersection pairing.
\begin{prop}(Equality of intersection pairings in different configuration spaces)\label{eq:p2}\\ For any choice of indices $ \ 0 \leqslant i_k\leqslant C_k-1, k\in \{1,..,n\}$, we have the following relation between intersection pairings: \begin{equation} \begin{aligned} \left\langle(\beta_{n} \cup {\mathbb I}_{n+1} ) \ { \mathscr F_{\bar{i}}^{(C_1,...,C_n)}}, {\mathscr L_{\bar{i}}^{(C_1,...,C_n)}}\right\rangle =& \left\langle(\beta_{n} \cup {\mathbb I}_{n+1} ) \ { F_{\bar{i}}^{\mathcal{N}}}, { L_{\bar{i}}^{\mathcal{N}}}\right\rangle. \end{aligned} \end{equation} \end{prop} \begin{proof} This relation can be seen from the formula of the graded intersection form. The pairing is encoded by the intersection points between the geometric supports of the classes in the base configuration space, which are graded by certain coefficients coming from the local system.
We denote the geometric support of a class $\mathcal{C}$ by $s \hspace{0.5mm} \mathcal{C}$. Further on, we notice that the following intersections in the configuration space: \begin{equation} \begin{aligned} & 1) \left( (\beta_{n} \cup {\mathbb I}_{n+1} ) \ s{ \mathscr F_{\bar{i}}^{(C_1,...,C_n)}} \right) \cap {s \mathscr L_{\bar{i}}^{(C_1,...,C_n)}}\\ & 2) \left( (\beta_{n} \cup {\mathbb I}_{n+1} ) s{ F_{\bar{i}}^{\mathcal{N}}}\right)\cap s{L_{\bar{i}}^{\mathcal{N}}} \end{aligned} \end{equation} have the same intersection points in the left hand side of the disk, and they differ by the fact that the second pair has more intersection points in the right hand side of the disk. Looking at the intersection in the configuration space in the punctured disk, this remark establishes a bijection between the intersection points from 1) and the intersection points from 2).
Let us fix an intersection point $P$ from 1) and denote by $\tilde{P}$ its correspondent in 2) . Now, we look at the monomials which are associated to these points. The loop in the configuration space which corresponds to $\tilde{P}$ is obtained from the loop corresponding to $P$ union with another $$\sum_{k=1}^{n}(\mathcal{N}-C_k-1)$$ loops which pass through the extra intersection points in the right hand side of the disk. However, we see that the extra loops are evaluated trivially by the local system since they do not twist or go around any puncture, and so they contribute with coefficients which are all $1$. This concludes that the two intersection pairings lead to the same result. \end{proof} Proposition \ref{eq:p2} together with formula \eqref{eq:p1} show that we can obtain WRT invariant from intersections between the new homology classes, as below: \begin{equation}\label{eq:1'} \begin{aligned} \tau_{\mathcal{N}}(M)=& \frac{1}{\mathcal{D}^b \cdot \Delta_+^{b_+}\cdot \Delta_-^{b_-}} \cdot \mathlarger{\mathlarger{\sum}}_{1 \leqslant N_1,...,N_l \leqslant \mathcal{N}-1 }[N_1]_{\xi}\cdot ... \cdot [N_l]_{\xi} \cdot\\ & \hspace{-5mm}\cdot \left( \prod_{i=1}^l x_{C(p_i)}^{ \left(f_{p_i}-\sum_{j \neq {p_i}} lk_{p_i,j} \right)} \prod_{i=1}^n x_{C(i)}^{-1}
\cdot \sum_{\bar{i}\in C(\bar{N})} \left\langle(\beta_{n} \cup {\mathbb I}_{n+1} ) \ { \color{red} F_{\bar{i}}^{\mathcal{N}}}, {\color{dgreen} L_{\bar{i}}^{\mathcal{N}}}\right\rangle \right)\Bigm| _{\psi^{C}_{\xi,N_1,...,N_l}}. \end{aligned} \end{equation}
We arrived at a state sum model for $\tau_{\mathcal{N}}$ as intersections between homology classes which are given by geometric supports in a fixed ambient manifold, namely the configuration space of $n(\mathcal{N}-2)+1$ points on the $2n+1$ punctured disk. Then, the ``individual colours'' from the initial formula appear in the specialisations of coefficients and also in the quantum numbers coming from the Kirby colour.
{\bf Encoding the coefficients of the Kirby colour} Pursuing this line, in the following parts we aim to understand geometrically the coefficients which come from the Kirby colour and encode them by intersections between the homology classes.
For the moment, we have an intersection in the $(2n+1)$-punctured disk $\mathcal{D}_{2n+1}$ which takes values in the ring $\mathbb Z[x_1^{\pm 1},...,x_l^{\pm 1}, d^{\pm 1}]$ (definition \ref{D:4}). Let us look at the terms which appear in formula \eqref{eq:1'}. For a fixed set of colours $N_1,...,N_l$ we have a state sum which is given by:
\begin{equation}
\begin{aligned}
&[N_1]_{\xi}\cdot ... \cdot [N_l]_{\xi} \cdot \left( \sum_{\bar{i}\in C(\bar{N})} \left\langle(\beta_{n} \cup {\mathbb I}_{n+1} ) \ { \color{red} F_{\bar{i}}^{\mathcal{N}}}, {\color{dgreen} L_{\bar{i}}^{\mathcal{N}}}\right\rangle \right)\Bigm| _{\psi^{C}_{\xi,N_1,...,N_l}}=\\ & \sum_{\bar{i}\in C(\bar{N})}
[N_1]_{\xi}\cdot ... \cdot [N_l]_{\xi} \cdot \left( \left\langle(\beta_{n} \cup {\mathbb I}_{n+1} ) \ { \color{red} F_{\bar{i}}^{\mathcal{N}}}, {\color{dgreen} L_{\bar{i}}^{\mathcal{N}}}\right\rangle \right)\Bigm| _{\psi^{C}_{\xi,N_1,...,N_l}} \end{aligned} \end{equation}
Now, we want to understand topologically the term \begin{equation}\label{eq:2}
[N_1]_{\xi}\cdot ... \cdot [N_l]_{\xi} \cdot \left( \left\langle(\beta_{n} \cup {\mathbb I}_{n+1} ) \ { \color{red} F_{\bar{i}}^{\mathcal{N}}}, {\color{dgreen} L_{\bar{i}}^{\mathcal{N}}}\right\rangle \right)\Bigm| _{\psi^{C}_{\xi,N_1,...,N_l}} \end{equation} in a unified way which does not depend on the choice of individual colours $N_1,..,N_l$. More precisely, we would like to see this term as a $\psi^{C}_{\xi,N_1,...,N_l}$ specialisation of an intersection which does not depend on the individual colours.
\subsection*{\bf Step III (Add extra punctures to the punctured disk)}
We do this by adding $3l$ points to our punctured disk and work in $\mathcal{D}_{2n+3l+1}$. In this manner we have a richer local system which carries monodromies around these additional punctures. \begin{defn} (Homology classes using the $(2n+3l+1)$-punctured disk)\\ We consider the homology classes given by the geometric supports which are presented in figure \ref{Picture33}, in the configuration space in the $(2n+3l+1)$-punctured disk: $${\color{red} F_{\bar{i}}^{\mathcal{N},l} \in H^{-n}_{2n+1,n(\mathcal{N}-2)+1,l}} \ \ \ \ \ \ \ \ \ \ \ \ \ \text{ and }\ \ \ \ \ \ \ \ \ \ \ \ \ {\color{dgreen} L_{\bar{i}}^{\mathcal{N},l}\in H^{-n,\partial}_{2n+1,n(\mathcal{N}-2)+1,l}}$$
\begin{figure}
\caption{Homology Classes from the disk with extra punctures}
\label{Picture33}
\end{figure} \end{defn}
We remind that the homologies $H^{-n}_{2n+1,n(\mathcal{N}-2)+1,l}$ and $H^{-n,\partial}_{2n+1,n(\mathcal{N}-2)+1,l}$ are modules over \\ $\mathbb Z[x_1^{\pm 1},...,x_l^{\pm 1},y_1^{\pm 1}..., y_l^{\pm 1}, d^{\pm 1}]$ and in the next part we will use the new variables $y_1,...,y_l$. We remember that the monodromy of $\Phi$ around the extra blue punctures corresponding to $\bar{p}_1,...,\bar{p}_l$ is evaluated through $f_{C}$ with the variables (using definition \ref{Localsyst} and relation \eqref{eq:8}): \begin{equation} \left( \begin{array}{c} y^{2}_1 \\ y^{-2}_1 \\ y^{-1}_1 \end{array} \right) ,\ldots, \left(\begin{array}{c} y^{2}_l \\ y^{-2}_l \\ y^{-1}_l \end{array}\right). \end{equation} The order of these evaluations might vary, but the columns correspond exactly to the above triples. Then, these monodromies get evaluated through $\psi^{C}_{\xi,N_1,...,N_l}$ to the following values: \begin{equation} \left( \begin{array}{c} \xi^{2N_1} \\ \xi^{-2N_1} \\ \xi^{-N_1} \end{array} \right) ,\ldots, \left(\begin{array}{c} \xi^{2N_l} \\ \xi^{-2N_l} \\ \xi^{-N_l} \end{array}\right). \end{equation}
Based on this remark, we notice that we can encode the quantum numbers using the variables $y_1,...,y_l$ and we have the following relation: \begin{equation} \label{eq:3'}
[N_1]_{\xi}\cdot ... \cdot [N_l]_{\xi}=\{1\}^{-l}_{\xi} \left( \prod_{i=1}^{l} (y_i-y_i^{-1}) \right)|_{\psi^C_{\xi,N_1,...,N_l}}. \end{equation} \begin{lem} Using this property, we obtain a new intersection pairing and the following relation holds:
\begin{equation}\label{eq:3}
\begin{aligned}
& [N_1]_{\xi}\cdot ... \cdot [N_l]_{\xi} \cdot \left( \left\langle(\beta_{n} \cup {\mathbb I}_{n+1} ) \ { \color{red} F_{\bar{i}}^{\mathcal{N}}}, {\color{dgreen} L_{\bar{i}}^{\mathcal{N}}}\right\rangle \right)\Bigm| _{\psi^{C}_{q,N_1,...,N_l}}=\\
& =\{1\}^{-l}_{\xi} \left( \prod_{i=1}^{l} (y_i-y_i^{-1}) \left\langle(\beta_{n} \cup {\mathbb I}_{n+3l+1} ) \ { \color{red} F_{\bar{i}}^{\mathcal{N},l}}, {\color{dgreen} L_{\bar{i}}^{\mathcal{N},l}}\right\rangle \right)\Bigm| _{\psi^{C}_{q,N_1,...,N_l}}
\end{aligned} \end{equation} \end{lem} \begin{proof} Following equation \eqref{eq:3'} we have the equality of the coefficients which appear in both terms from above. Now, we notice that the addition of the extra punctures does not change the intersection forms, and so we have: \begin{equation} \left\langle(\beta_{n} \cup {\mathbb I}_{n+1} ) \ { \color{red} F_{\bar{i}}^{\mathcal{N}}}, {\color{dgreen} L_{\bar{i}}^{\mathcal{N}}}\right\rangle=\left\langle(\beta_{n} \cup {\mathbb I}_{n+3l+1} ) \ { \color{red} F_{\bar{i}}^{\mathcal{N},l}}, {\color{dgreen} L_{\bar{i}}^{\mathcal{N},l}}\right\rangle \end{equation} We can see this from the fact that the supports of the homology classes have the same intersection points, and so the only question we have concerns the gradings which they carry. The second intersection belongs to a covering where there could be potential monodromies around the $3l$-blue punctures. However, we remark that the loops which are associated to the intersection points in the right hand side of the disk do not wind around the blue punctures and so they have trivial monodromies. \end{proof}
Using this Lemma together with the formula from equation \eqref{eq:1}, we conclude the following formula: \begin{equation}\label{eq:6} \begin{aligned} &\tau_{\mathcal{N}}(M)= \frac{\{1\}^{-l}_{\xi}}{\mathcal{D}^b \cdot \Delta_+^{b_+}\cdot \Delta_-^{b_-}} \cdot \sum_{1 \leqslant N_1,...,N_l \leqslant \mathcal{N}-1 } \\ & \hspace{-7mm}\hspace{-0.5mm} \left( \prod_{i=1}^l x_{C(p_i)}^{ \left(f_{p_i}-\sum_{j \neq {p_i}} lk_{p_i,j} \right)} \prod_{i=1}^n x_{C(i)}^{-1}
\cdot \prod_{i=1}^{l} (y_i-y_i^{-1}) \sum_{\bar{i}\in C(\bar{N})} \left\langle(\beta_{n} \cup {\mathbb I}_{n+3l+1} ) \ { \color{red} F_{\bar{i}}^{\mathcal{N},l}}, {\color{dgreen} L_{\bar{i}}^{\mathcal{N},l}}\right\rangle \right)\hspace{-1.5mm}\Bigm| _{\psi^{C}_{\xi,N_1,...,N_l}}. \end{aligned} \end{equation} We remark that we arrived at an expression given by the graded intersections (the terms between the brackets), which do not depend anymore on the choice of colours $N_1,..,N_l$. After that we have to specialise them using the change of coefficients $\psi^{C}_{\xi,N_1,...,N_l}$. \subsection*{\bf Step IV (Coefficients of the Kirby colour encoded by circles in the supports of the homology classes)}
\
In the last part, we will show that we can encode the coefficients of the Kirby colour by adding $l$ points to our configuration space and considering the classes which are obtained from the supports of the classes $F_{\bar{i}}^{\mathcal{N},l}$ and $L_{\bar{i}}^{\mathcal{N},l}$ by adding $l$ extra circles.
More specifically, we prove that the pairing that arises from the homology classes $\mathscr F_{\bar{i}}^{\mathcal{N}}$ and $\mathscr L_{\bar{i}}^{\mathcal{N}}$ captures precisely the extra coefficients from equation \eqref{eq:3}. We remind that: $$\mathscr F_{\bar{i}}^{\mathcal{N}} \in H^{-n}_{2n+1,n(\mathcal{N}-2)+l+1,l} \ (\text{figure } \ref{Picture}) \ \ \ \ \ \ \ \mathscr L_{\bar{i}}^{\mathcal{N}} \in H^{-n,\partial}_{2n+1,n(\mathcal{N}-2)+l+1,l} \ (\text{figure } \ref{Picture2}).$$
\begin{prop} (Encoding the Kirby colour)\label{P:1} For any index $\bar{i}$ we have:
\begin{equation} \label{eq:5} \left( \prod_{k=1}^{l} (y_k-y_k^{-1}) \right) \left\langle(\beta_{n} \cup {\mathbb I}_{n+3l+1} ) \ { \color{red} F_{\bar{i}}^{\mathcal{N},l}}, {\color{dgreen} L_{\bar{i}}^{\mathcal{N},l}}\right\rangle=
\left\langle(\beta_{n} \cup {\mathbb I}_{n+3l+1} ) \ { \color{red} \mathscr F_{\bar{i}}^{\mathcal{N}}}, {\color{dgreen} \mathscr L_{\bar{i}}^{\mathcal{N}}}\right\rangle. \end{equation} \end{prop} \begin{proof} For the computation of these intersections we will use the formulas for the intersection pairing presented in equation \ref{eq:1} and remark \ref{orientd}, where for each intersection point we count the product of local orientations in the disk and multiply it with the evaluation of the local system $\bar{\Phi}$ on the associated loop in the configuration space.
We notice that the classes which lead to these intersection pairings have similar geometric supports except the fact that: \begin{itemize} \item $s\mathscr F_{\bar{i}}^{\mathcal{N}}$ is constructed from $s F_{\bar{i}}^{\mathcal{N},l}$ by adding $l$ extra circles \item $s\mathscr L_{\bar{i}}^{\mathcal{N}}$ comes from $s L_{\bar{i}}^{\mathcal{N},l}$ but it has $l$ extra points, one on each circle which goes around the punctures $p_k$ for $k\in\{1,...,l\}$.
\end{itemize} Now, we look at the intersection points between the geometric supports which are obtained after we act with the braid: \begin{equation} \begin{aligned} & 1) \left( (\beta_{n} \cup {\mathbb I}_{n+3l+1} ) s{ F_{\bar{i}}^{\mathcal{N},l}}\right)\cap s{L_{\bar{i}}^{\mathcal{N},l}}\\ & 2) \left( (\beta_{n} \cup {\mathbb I}_{n+3l+1} ) s{ \mathscr F_{\bar{i}}^{\mathcal{N}}}\right)\cap s{\mathscr L_{\bar{i}}^{\mathcal{N}}}. \end{aligned} \end{equation} These two intersections have the same components in the left hand side of the punctured disk. The difference occurs in the right hand side of it.
Let us denote by: $$(q_1,r_1),...,(q_l,r_l)$$ the intersection points between the purple circles from $s{ \mathscr F_{\bar{i}}^{\mathcal{N}}}$ and the blue disks from $s{\mathscr L_{\bar{i}}^{\mathcal{N}}}$ which intersect them (they are the orange points from figure \ref{Picture3}).
Also, let us look at the first pairing $1)$ and consider the set of intersection points between the supports of the homology classes: $$\left((\beta_{n} \cup {\mathbb I}_{n+3l+1} ) s{ F_{\bar{i}}^{\mathcal{N},l}}\right)\cap s{L_{\bar{i}}^{\mathcal{N},l}}=\{ \bar{m}_1,...,\bar{m}_s \}.$$ Here, each element is a multipoint in the configuration space, which has $n(\mathcal{N}-2)+1$ components. \begin{rmk} The set of intersection points between the new classes (from $2)$) is obtained from the above intersection points together with a choice of $l$ orange points which belong to different circles: \begin{equation}\label{eq:4} \left((\beta_{n} \cup {\mathbb I}_{n+3l+1} ) s{ \mathscr F_{\bar{i}}^{\mathcal{N}}}\right)\cap s{ \mathscr L_{\bar{i}}^{\mathcal{N}}}=\{ \bar{m}_1,...,\bar{m}_s \}\times \{q_1,r_1\}\times ... \times \{ q_l,r_l \}. \end{equation} \end{rmk}
\begin{figure}\label{Picture3}
\end{figure} \begin{proof} Let $P$ be a multipoint which belongs to the intersection $$\left((\beta_{n} \cup {\mathbb I}_{n+3l+1} ) s{ \mathscr F_{\bar{i}}^{\mathcal{N}}}\right)\cap s{ \mathscr L_{\bar{i}}^{\mathcal{N}}}.$$ This means that it has exactly one component on each red curve and purple curve from $(\beta_{n} \cup {\mathbb I}_{n+3l+1} ) s{ \mathscr F_{\bar{i}}^{\mathcal{N}}}$. In particular, it has exactly one point on each purple circle, which should be at the intersection with the dual support $s{ \mathscr L_{\bar{i}}^{\mathcal{N}}}$. We notice that for any fixed $k\in\{1,...,l\}$ the $k^{th}$ purple circle intersects only one component from the dual support, given by the configuration space of $\mathcal{N}-1$ points on the blue circle, in exactly two orange points: $\{q_k,r_k\}$. This means that $P$ has exactly one component from each of these sets with two elements. Then, for the rest of the points we should use the configuration space of $\mathcal{N}-2$ points on the blue circles. The rest of the red curves intersected with the configuration spaces of $\mathcal{N}-2$ particles on the circles give precisely an intersection point belonging to $$\left((\beta_{n} \cup {\mathbb I}_{n+3l+1} ) s{ F_{\bar{i}}^{\mathcal{N},l}}\right)\cap s{L_{\bar{i}}^{\mathcal{N},l}}.$$ This procedure establishes the desired bijection. \end{proof} So far, we saw the correspondence at the level of sets. Now we are interested in the coefficients coming from the local system. For this, we turn our attention towards the coefficients which are carried by the orange points. For each $k\in \{1,...,l\}$, we have to look at the chosen point on the purple circle ($q_k$ or $r_k$) and to evaluate the monodromy of the yellow path corresponding to this point around the punctures of the disk. These two paths are presented in picture \ref{Picture4}.
\begin{figure}
\caption{Paths corresponding to the intersection points}
\label{Picture4}
\end{figure} We remind that the counter-clockwise monodromies around the three blue points are evaluated by the variables: \begin{equation} \left(\begin{array}{c} y^{2}_k \\ y^{-2}_k \\ y^{-1}_k \end{array}\right). \end{equation} Using this for the two paths from the picture, we see that the points $\{q_k,r_k\}$ carry the following coefficients: \begin{equation} \begin{aligned} &(q_k) \ \ \ \ \ y^{2}_k\cdot y^{-1}_k=y_k\\ &(r_k) \ \ \ \ \ \ \ \ \ \ \ \ \ \ -y^{-1}_k. \end{aligned} \end{equation} The yellow loops associated to the orange intersection points will not add extra $d$ components when we evaluate a loop corresponding to an intersection point in the configuration space. This is because these loops do not contribute to the relative winding in the configuration space. Also, the opposite sign comes from the local intersections in these two orange points.
We conclude that for any $k\in \{1,...,l\}$ the two points $\{q_k, r_k\}$ contribute to the grading with the coefficients $\{y_k, -y^{-1}_k\}$.
This property together with the correspondence presented in relation \eqref{eq:4} shows that the extra coefficients that appear in the second intersection are precisely $$\prod_{k=1}^{l} (y_k-y_k^{-1})$$ which concludes the relation between the intersection pairings from \eqref{eq:5}. \end{proof} Now, using this property together with the expression from equation \eqref{eq:6}, we obtain: \begin{equation} \begin{aligned} &\tau_{\mathcal{N}}(M)= \frac{\{1\}^{-l}_{\xi}}{\mathcal{D}^b \cdot \Delta_+^{b_+}\cdot \Delta_-^{b_-}} \cdot \sum_{1 \leqslant N_1,...,N_l \leqslant \mathcal{N}-1 } \\ & \left( \prod_{i=1}^l x_{C(p_i)}^{ \left(f_{p_i}-\sum_{j \neq {p_i}} lk_{p_i,j} \right)} \prod_{i=1}^n x_{C(i)}^{-1}
\cdot \sum_{\bar{i}\in C(\bar{N})} \left\langle(\beta_{n} \cup {\mathbb I}_{n+3l+1} ) \ { \color{red}\mathscr F_{\bar{i}}^{\mathcal{N}}}, {\color{dgreen} \mathscr L_{\bar{i}}^{\mathcal{N}}}\right\rangle \right)\Bigm| _{\psi^{C}_{\xi,N_1,...,N_l}}=\\ & =\frac{\{1\}^{-l}_{\xi}}{\mathcal{D}^b \cdot \Delta_+^{b_+}\cdot \Delta_-^{b_-}} \cdot \sum_{1 \leqslant N_1,...,N_l \leqslant \mathcal{N}-1 } \\ & \sum_{\bar{i}\in C(\bar{N})} \left( \prod_{i=1}^l x_{C(p_i)}^{ \left(f_{p_i}-\sum_{j \neq {p_i}} lk_{p_i,j} \right)} \prod_{i=1}^n x_{C(i)}^{-1}
\cdot \left\langle(\beta_{n} \cup {\mathbb I}_{n+3l+1} ) \ { \color{red}\mathscr F_{\bar{i}}^{\mathcal{N}}}, {\color{dgreen} \mathscr L_{\bar{i}}^{\mathcal{N}}}\right\rangle \right)\Bigm| _{\psi^{C}_{\xi,N_1,...,N_l}}. \end{aligned} \end{equation}
Exchanging the two sums and taking care of the conditions which the colouring imposes on the multi-indices, we obtain the following formula: \begin{equation} \begin{aligned}
&\tau_{\mathcal{N}}(M) = \frac{\{1\}^{-l}_{\xi}}{\mathcal{D}^b \cdot \Delta_+^{b_+}\cdot \Delta_-^{b_-}}\cdot {\Huge{\sum_{i_1,...,i_n=0}^{\mathcal{N}-2}}} \\
& \cdot \sum_{\substack{\bar{N}=(N_1,...,N_l) \\ 1 \leqslant N_1,...,N_l \leqslant \mathcal{N}-1 \\ \bar{i}\in C(\bar{N})}} \left( \prod_{i=1}^l x_{C(p_i)}^{ \left(f_{p_i}-\sum_{j \neq {p_i}} lk_{p_i,j} \right)} \prod_{i=1}^n x^{-1}_{i}
\cdot \left\langle(\beta_{n} \cup {\mathbb I}_{n+3l+1} ) \ { \color{red} \mathscr F_{\bar{i}}^{\mathcal{N}}}, {\color{dgreen} \mathscr L_{\bar{i}}^{\mathcal{N}}}\right\rangle \right)\Bigm| _{\psi^{C}_{\xi,N_1,...,N_l}}=\\
& =\frac{\{1\}^{-l}_{\xi}}{\mathcal{D}^b \cdot \Delta_+^{b_+}\cdot \Delta_-^{b_-}}\cdot {\Huge{\sum_{i_1,..,i_n=0}^{\mathcal{N}-2}}} \left( \sum_{\substack{ \tiny 1 \leqslant N_1,...,N_l \leqslant \mathcal{N}-1 \\ \bar{i}\in C(N_1,..,N_l)}} \Lambda_{\bar{i}}(\beta_n) \Bigm| _{\psi^{C}_{\xi,N_1,...,N_l}}\right). \end{aligned} \end{equation} This relation concludes the proof of the main formula. \end{proof}
\begin{coro}(Detailed formula for the invariant) \begin{equation} \begin{aligned} \tau & _{\mathcal{N}}(M)=\frac{\{1\}^{-l}_{\xi}}{\mathcal{D}^b \cdot \Delta_+^{b_+}\cdot \Delta_-^{b_-}}\cdot {\Huge{\sum_{i_1,...,i_n=0}^{\mathcal{N}-2}}} \\
& \cdot \sum_{\substack{\bar{N}=(N_1,...,N_l) \\ 1 \leqslant N_1,...,N_l \leqslant \mathcal{N}-1 \\ \bar{i}\in C(\bar{N})}} \left( \prod_{i=1}^l x_{C(p_i)}^{ \left(f_{p_i}-\sum_{j \neq {p_i}} lk_{p_i,j} \right)} \prod_{i=1}^n x^{-1}_{C(i)}
\cdot \left\langle(\beta_{n} \cup {\mathbb I}_{n+3l+1} ) \ { \color{red} \mathscr F_{\bar{i}}^{\mathcal{N}}}, {\color{dgreen} \mathscr L_{\bar{i}}^{\mathcal{N}}}\right\rangle \right)\Bigm| _{\psi^{C}_{\xi,N_1,...,N_l}}. \end{aligned} \end{equation} \end{coro} \section{Topological model for the WRT invariants of $3$-manifolds obtained as surgeries along knots}\label{S:6} This section is devoted to the topological model presented above, for the particular case where the link is actually a knot (this means that $l=1$). Let us consider a knot $K$ which is the closure of a braid with $n$ strands $\beta_n \in B_n$.
In this case, we work in the covering of the configuration space of $n(\mathcal{N}-2)+2$ particles in the punctured disk with $2n+4$ punctures, and we will use the homology groups: $$H^{-n}_{2n+1, n(\mathcal{N}-2)+2,1} \ \ \ \text{ and }\ \ \ \ H^{-n,\partial}_{2n+1,n(\mathcal{N}-2)+2,1} \ \ \ \text{ as } \mathbb Z[x^{\pm 1},y^{\pm 1}, d^{\pm 1}]- \text{modules}.$$ This means that we have $3$ privileged blue points in the punctured disk. \subsection{Homology classes} \begin{defn} a) (First Homology class) For any set $i_1,...,i_{n} \in \{0,...,\mathcal{N}-2\}$ we consider the class given by the geometric support from the figure below: $${\color{red} \mathscr F_{\bar{i}}^{\mathcal{N}} \in H^{-n}_{2n+1,n(\mathcal{N}-2)+2,1}}$$
b) (Second Homology class) The second homology class is given by the following geometric support:
$${\color{dgreen} \mathscr L_{\bar{i}}^{\mathcal{N}} \in H^{-n,\partial}_{2n+1,n(\mathcal{N}-2)+2,1}}$$
\end{defn} Further on, we will use the specialisation of coefficients which corresponds to a coloring with one color $N \in \mathbb N$, which is given by: $$ \psi^{C}_{q,N}: \mathbb Z[x^{\pm 1},y^{\pm 1},d^{\pm 1}] \rightarrow \mathbb Z[q^{\pm 1}]$$ \begin{equation} \begin{cases} &\psi^{C}_{q,N}(x)=q^{N-1},\\ &\psi^{C}_{q,N}(y)=q^{N},\\ &\psi^{C}_{q,N}(d)=q^{-2}. \end{cases} \end{equation}
\begin{coro}(Topological model for the Witten-Reshetikhin-Turaev invariants of knot surgeries)\\ Let $M$ be a closed oriented $3$-manifold obtained by surgery along a knot $K$ with framing $f \in \mathbb Z$. We choose a braid $\beta_n$ such that $K=\widehat{\beta_n}$. Further on, for $i_1,...,i_{n} \in \{0,...,\mathcal{N}-2\}$, we consider the following Lagrangian intersection: \begin{equation} \begin{cases} & \Lambda_{\bar{i}}(\beta_n) \in \mathbb Z[x^{\pm 1},y^{\pm 1}, d^{\pm 1}]\\ & \Lambda_{\bar{i}}(\beta_n):=x^{ f-w(\beta_n)} \cdot x^{-n}
\ \left\langle(\beta_{n} \cup {\mathbb I}_{n+4} ) \ { \color{red} \mathscr F_{\bar{i}}^{\mathcal{N}}}, {\color{dgreen} \mathscr L_{\bar{i}}^{\mathcal{N}}}\right\rangle \end{cases}. \end{equation} Here, $w(\beta_n)$ is the writhe of the braid. Then the $\mathcal{N}^{th}$ Witten-Reshetikhin-Turaev invariant is obtained from these intersections as below: \begin{equation} \begin{aligned}
\tau _{\mathcal{N}}(M)=\frac{\{1\}^{-1}_{\xi}}{\mathcal{D}^b \cdot \Delta_+^{b_+}\cdot \Delta_-^{b_-}}\cdot {\Huge{\sum_{i_1,..,i_n=0}^{\mathcal{N}-2}}} \left(\sum_{N=\text{max}\{i_1+1,...,i_n+1\}}^{\mathcal{N}-1} \Lambda_{\bar{i}}(\beta_n) \Bigm| _{\psi^{C}_{\xi,N}}\right). \end{aligned} \end{equation} \end{coro}
\begin{rmk}This tells us that the level $\mathcal{N}$ WRT invariant of a surgery along a knot which is the closure of a braid with $n$ strands is obtained from states of graded intersections in the configuration space of $n(\mathcal{N}-2)+2$ points in the $(2n+4)-$punctured disk. \end{rmk}
\noindent {\itshape Mathematical Institute, University of Oxford, Woodstock Road, Oxford, OX2 6GG, United Kingdom}
\noindent {\tt palmeranghel@maths.ox.ac.uk}
\noindent \href{http://www.cristinaanghel.ro/}{www.cristinaanghel.ro}
\end{document} |
\begin{document}
\title{Accuracy of discrete approximation for integral functionals of Markov processes}
\author{Iu. V. Ganychenko} \address{Department of Probability Theory, Statistics and Actuarial Mathematics, Kyiv National Taras Shevchenko University, Kyiv 01601, Ukraine} \email{iurii\_ganychenko@ukr.net} \author{V. P. Knopova} \address{V.M.Glushkov Institute of Cybernetics NAS of Ukraine, Acad. 40, Glushkov Ave., Kyiv 03187, Ukraine} \email{vicknopova@googlemail.com} \author{A. M. Kulik} \address{Institute of Mathematics, Ukrainian National Academy of Sciences, 01601 Tereshchenkivska str. 3, Kyiv, Ukraine} \email{kulik@imath.kiev.ua}
\subjclass[2010]{60H07, 60H35}
\keywords{Markov process, integral functional, approximation rate.}
\maketitle
\begin{abstract}
\noindent The article is devoted to the estimation of the rate of convergence of integral functionals of a Markov process. Under the assumption that the given Markov process admits a transition probability density which is differentiable in $t$ and the derivative has an integrable upper bound of a certain type, we derive the accuracy rates for strong and weak approximations of the functionals by Riemannian sums. Some examples are provided.
\end{abstract}
\section{Introduction} Let $X_t$, $t\geq 0$, be a Markov process with values in $\mathbb{R}^d$. Consider an integral functional of the form \begin{equation}\label{IT} I_T(h)=\int_0^Th(X_t)\, dt, \end{equation} where $h:\mathbb{R}^d\to \mathbb{R}$ is a given measurable function. In this paper we investigate the accuracy of the approximation of $I_T(h)$ by the Riemannian sums $$ I_{T,n}(h)={T\over n}\sum_{k=0}^{n-1}h(X_{(kT)/n}),\quad n\geq 1. $$ The function $h$ is assumed to be bounded, only; i.e., we do not impose any regularity assumptions on $h$. In particular, under this assumption the class of integral functionals which we investigate contains the class of \emph{occupation time} type functionals (for which $h=1_A$ for a fixed $A\in \mathcal{B}(\mathbb{R}^d)$), which are of particular importance.
Integral functionals arise naturally in a wide class of stochastic representation formulae and applied stochastic models. It is very typical that exact calculation of the respective probabilities and/or expectations is hardly possible, which naturally suggests the usage of the approximation methods. As an example of such a situation, we mention the so-called \emph{occupation time option} \cite{Linetsky}, whose price is actually given by an expression similar to the Feynman-Kac formula. The exact calculation of the price is possible only in the particular case when the underlying process is a L\'evy process which is ``spectrally negative'' (i.e. does not have positive jumps, see \cite{Guerin}), and practically more realistic cases of general L\'evy processes, solutions to L\'evy driven SDE's, etc. can be treated only numerically. To estimate the rate of convergence of the respective Monte-Carlo approximative methods, one needs to estimate the accuracy of various approximation steps involved in the algorithm. In this paper we focus on solving of such a problem for the discrete approximation of the integral functional of type \eqref{IT}.
For diffusion processes, this problem was studied in \cite{Gobet} and recently in \cite{Kohatsu-Higa}, by means of the methods involving the particular structural features of the process, e.g. the Malliavin calculus tools. On the other hand, in two recent papers \cite{kul-gan}, \cite{kul-gan2}, an alternative method is developed, which exploits only the basic Markov structure of the process and the additive structure of the integral functional and its discrete approximations. One of the aims of this paper is to extend this method to a wider class of Markov processes. To explain our goal in more detail, let us formulate our principal assumption on the process $X$.
\begin{itemize}
\item[\textbf{X.}] The transition probability $P_t(x,dy)$ of $X$ admits a density $p_t(x,y)$ w.r.t. the Lebesgue measure on $\mathbb{R}^d$. This density is differentiable w.r.t. $t$, and its derivative possesses the bound \begin{equation}\label{der_bound}
\Big|\partial_tp_t(x,y)\Big|\leq B_{T,X}t^{-\beta} q_{t,x}(y), \quad t\leq T, \end{equation} for some $B_{T,X}\geq 1, \beta\geq 1$, and measurable function $q$, such that for each fixed $t,x$ the function $ q_{t,x}(\cdot)$ is a distribution density. \end{itemize}
In \cite{kul-gan}, \cite{kul-gan2}, the condition similar to \textbf{X} was formulated with $\beta=1$. Such a condition is verified for the particularly important classes of diffusion process and symmetric $\alpha$-stable processes. However, in some natural cases one can expect to get \eqref{der_bound} only with $\beta>1$. As the simplest and the most illustrative example one can take an $\alpha$-stable process with drift: $$X_t=ct+Z_t, $$ where $c\not=0$ and $Z$ is a (e.g. symmetric) $\alpha$-stable process. Then $$ p_t(x,y)=t^{-d/\alpha}g^{(\alpha)}\left(y-x-ct\over t^{1/\alpha}\right), $$ where $g^{(\alpha)}$ denotes the distribution density of $Z_1$. Straightforward calculation shows that \eqref{der_bound} holds true with $\beta=\max(1, 1/\alpha)$, which is strictly greater than 1 when $\alpha<1$. Since the L\'evy noises are now used extensively in various applied models, the simple calculation made above shows that it is highly desirable to extend the results of \cite{kul-gan} and \cite{kul-gan2}, which deal with the ``diffusive like'' class of processes satisfying \textbf{X} with $\beta=1$, to the more general case of \textbf{X} with arbitrary $\beta\geq 1$.
Another aim of this paper is to develop the tools which would allow us to get the bounds of the form \eqref{der_bound} for a wider class of solutions of L\'evy driven SDEs. One result of such a type is given in the recent preprint \cite{KK14}, with the process $X$ being a solution of the SDE \begin{equation}\label{SDE} dX_t=b(X_t)\, dt+\sigma(X_{t-})\, dZ_t \end{equation}
where $Z$ is a symmetric $\alpha$-stable process. The method used therein is a version of the parametrix method, and it is quite sensitive to the form of the L\'evy measure of the process $Z$ on the entire $\mathbb{R}^d$. Recently, apart from the stable noises, various types of ``locally stable'' noises are frequently used in applied models: tempered stable processes, damped stable processes, etc. Heuristically, for a ``locally stable'' process its ``small jumps behavior'' is the same as for the stable one, but the ``large jumps behavior'' of the former is drastically different from ``tail behavior'' of the L\'evy measure. Since \eqref{der_bound} is genuinely related to ``local behavior'' of the process, one can expect that the results of \cite{KK14} should have a natural extension to the case of ``locally stable'' $Z$. However, to make such a conjecture rigorous is a sophisticated problem; the main reason here is that the parametrix method treats the transition probability of a L\'evy process as the ``zero approximation'' for the unknown transition probability density $p_t(x,y)$, and hence any bound for $p_t(x,y)$, which one may expect to design within this method, is at least as complicated as respective bound for the process $Z$. On the other hand, there is an extensive literature on the estimates of transition probability densities for L\'evy processes (e.g. \cite{BGR14}, \cite{KK12a}, \cite{K13}, \cite{KR15}, \cite{M12}, \cite{PT69}, \cite{RS10}, \cite{St10a}, \cite{St10b}, \cite{W07}; this list is far from being complete), which shows that these densities inherit the structure of the densities of the corresponding L\'evy measures. In particular, in order to get the exact two-sided bounds for $p_t(x,y)$ one should impose quite non-trivial structural assumptions on the ``tails'' of the L\'evy measure even in a comparatively simple ``locally stable'' case. 
Motivated by this observation on one hand, and by the initial approximation problem which suggests the condition \eqref{der_bound} on the other hand, we pose the following general question: \emph{Is it possible to give a ``rough'' upper bound, which would be the same for a large class of transition probability densities of ``locally stable processes'', without assuming complicated conditions on the ``tails'' of their L\'evy measures?} The answer is positive, and it roughly says that one can get the bound \eqref{der_bound}, where at the left hand side we have the transition probability density of the SDE driven by a ``locally stable'' process, and at the right hand side we have a (properly shifted) transition probability density of an $\alpha$-stable process. This bound is not necessarily precise: the power-type ``tail'' of the $\alpha$-stable density might be essentially larger than the ``tail'' e.g. for \emph{exponentially tempered} $\alpha$-stable law. The gain is, however, that under a mild set of assumptions we obtain a uniform-in-class upper bound, which is clearly easy to use in applications. To keep the exposition reasonably compact, we treat this problem in a comparatively simple case of a one-dimensional SDE of the form \eqref{sde1}, see below. The extension of these results to a more general multidimensional case is much more technical, and we postpone it to a separate publication.
The structure of the paper is the following. In Section \ref{s2} we formulate and prove our two main results concerning the accuracy of the \emph{strong} and \emph{weak} approximations of an integral functional by Riemann sums, provided that condition \textbf{X} is satisfied. In Section \ref{s3} we outline a version of the parametrix method, which makes it possible to obtain \eqref{der_bound} for solutions to L\'evy driven SDEs without strong structural assumptions on the ``tails'' of the L\'evy measure of the noise. In Section \ref{s4} an application for the price of an occupation time option is given.
\section{Accuracy of discrete approximation for integral functionals}\label{s2}
In this section we will prove two results. The first one concerns the ``strong approximation rate'', i.e. the control on the $L_p$-distance between $I_{T}(h)$ and its approximation $I_{T,n}(h)$.
\begin{thm}\label{t1} Suppose that \textbf{X} holds. Then for any $p>0$ $$
\left(\mathds{E}_x\Big|I_{T}(h)-I_{T,n}(h)\Big|^p\right)^{1/ p}\leq C_{T,p} \|h\| (D_{T,\beta} (n))^{1/2}, $$
where $\|h\|=\sup_x|h(x)|,$ \begin{equation}\label{DC} D_{T,\beta} (n) = \begin{cases} n^{-1} \log n,& \beta=1,\\ \max\left(1,\frac{T^{1- \beta}}{\beta-1}\right) n^{-1/ \beta},& \beta>1, \end{cases}\quad C_{T,p}= \begin{cases} (14p(p-1)B_{T,X})^{1/2} T,& p\geq 2,\\ C_{T,2}=(28 B_{T,X})^{1/2} T,& p\in (0,2). \end{cases} \end{equation} \end{thm}
\begin{rem} This theorem extends \cite[Theorem 2.1]{kul-gan}, where it was assumed that $\beta=1$. \end{rem}
The second result concerns the ``weak approximation'', i.e. the control on the difference between the expectations of certain terms, which involve $I_{T}(h)$ together with its approximation $I_{T,n}(h)$.
\begin{thm}\label{t2} Suppose that \textbf{X} holds. Then for any $k \in \mathbb{N}$ and any bounded function $f$ we have \begin{equation}\label{t2-eq}
\Big|\mathds{E}_x (I_{T}(h))^k f(X_T)- \mathds{E}_x(I_{T,n}(h))^kf(X_T)\Big|
\leq 2^{\beta\vee 2} k^2 B_{T,X} T^{k+1} \|h\|^k \|f\| D_{T,\beta} (n). \end{equation} \end{thm} \begin{rem} This theorem extends \cite[Theorem~1.1]{kul-gan2}, where it was assumed that $\beta=1$. In the proof below, we concentrate on the case $\beta>1$. \end{rem}
Using the Taylor expansion, one can obtain directly the following corollary of Theorem \ref{t2}.
\begin{cor}\label{cor1}
Suppose that \textbf{X} holds, and let $\varphi$ be an analytic function defined in some neighbourhood of 0. In addition, suppose that the constants $D_{\varphi},R_{\varphi}>0$ are such that $$
\Big|\frac{\varphi^{(m)}(0)}{m!}\Big| \leq D_{\varphi} \left( \frac{1}{R_{\varphi}} \right)^m,\quad m\geq 0. $$
Then for any bounded function $f$ and a function $h$ such that $T\|h\|< R_\varphi$, we have the following bound: $$
\Big|\mathds{E}_x \varphi( I_{T}(h)) f(X_T)- \mathds{E}_x \varphi( I_{T,n}(h))f(X_T)\Big| \leq C_{T,X,h,\varphi} \|f\| D_{T,\beta} (n), $$ where $$
C_{T,X,h,\varphi}=2^{\beta\vee 2} D_{\varphi}B_{T,X}\frac{ T^2 \|h\|}{R_{\varphi}} \left(1+\frac{ T\|h\|}{R_{\varphi}}\right)\left(1-\frac{ T\|h\|}{R_{\varphi}}\right)^{-3}. $$ \end{cor}
Before proceeding to the proof of Theorem \ref{t1}, we give an auxiliary result this proof is based on. This result is, in fact, a weaker version of Theorem \ref{t2} with $k=1$ and $f\equiv 1$, but we give it separately to make the exposition more transparent.
\begin{prop}\label{p1} Suppose that \textbf{X} holds. Then $$
\Big|\mathds{E}_xI_{T}(h)-\mathds{E}_xI_{T,n}(h)\Big|\leq 5B_{T,X} T \|h\| D_{T,\beta} (n). $$ \end{prop} \begin{proof} Let us introduce the notation used throughout the whole section: for $t\in [kT/n, (k+1)T/n), k\geq 0 $, we put $\eta_n(t)={kT\over n}, \ \zeta_n(t)={(k+1)T\over n}$; that is, $\eta_n(t)$ is the point of the partition $\{Tk/n,\, k\geq 0\}$ of the time axis, closest to $t$ from the left, and $\zeta_n(t)$ is the point closest to $t$ from the right, which is strictly larger than $t$.
We have \begin{align*} \mathds{E}_xI_{T}(h)-\mathds{E}_xI_{T,n}(h) &= \int_0^T \mathds{E}_x[h(X_s) - h(X_{\eta_n(s)})]\,ds\\ & = \int_0^T \int_{\mathbb{R}^d}h(y) [p_s(x,y) - p_{\eta_n(s)}(x,y)]\,dyds \\ &= M_1 + M_2, \end{align*} where $$\begin{aligned} &M_1 = \int_0^{k_{n,\beta}T/n} \int_{\mathbb{R}^d}h(y) [p_s(x,y) - p_{\eta_n(s)}(x,y)]\,dyds,\\& M_2 = \int_{k_{n,\beta}T/n}^T \int_{\mathbb{R}^d}h(y) [p_s(x,y) - p_{\eta_n(s)}(x,y)]\,dyds, \end{aligned}$$ for some $1\leq k_{n,\beta} \leq n$, which will be chosen later. We estimate each term separately.
For $M_1$ we have $$
|M_1| \leq \|h\| \int_0^{k_{n,\beta}T/n} \int_{\mathbb{R}^d} [p_s(x,y) + p_{\eta_n(s)}(x,y)]\,dyds = 2\|h\| T \frac{k_{n,\beta}}{n}. $$ Further, using (\ref{der_bound}), we get \begin{equation}\label{M2}\begin{aligned}
|M_2| &\leq \|h\| \int_{k_{n,\beta}T/n}^T \int_{\mathbb{R}^d} |p_s(x,y) - p_{\eta_n(s)}(x,y)|\,dyds \\&
\leq \|h\| \int_{k_{n,\beta}T/n}^T \int_{\eta_n(s)}^s \int_{\mathbb{R}^d} |\partial_u p_u(x,y)|\,dy duds \\
&\leq B_{T,X}\|h\| \int_{k_{n,\beta}T/n}^T \int_{\eta_n(s)}^s \int_{\mathbb{R}^d} u^{-\beta} q_{u,x}(y)
\,dy duds = B_{T,X} \|h\| \int_{k_{n,\beta}T/n}^T \int_{\eta_n(s)}^s u^{-\beta}\,duds\\
&= B_{T,X} \|h\| \sum_{i = k_{n,\beta}}^{n-1} \int_{iT/n}^{(i+1)T/n} \int_{iT/n}^s u^{-\beta}\,duds
\\&= B_{T,X} \|h\| \sum_{i = k_{n,\beta}}^{n-1} \int_{iT/n}^{(i+1)T/n} \int_u^{(i+1)T/n} u^{-\beta}\,dsdu\\&
\leq \frac{T}{n} B_{T,X} \|h\| \sum_{i = k_{n,\beta}}^{n-1} \int_{iT/n}^{(i+1)T/n} u^{-\beta}\,du = \frac{T}{n} B_{T,X} \|h\| \int_{k_{n,\beta}T/n}^{T} u^{-\beta}\,du. \end{aligned}\end{equation}
Now we finalize the argument.
1) If $\beta = 1$, put $k_{n,\beta} = 1, \ n\geq 1$. Then we get $$\begin{aligned}
&|M_1| \leq 2\|h\| T n^{-1},\\&
|M_2| \leq \frac{T}{n} B_{T,X} \|h\| \int_{T/n}^{T} u^{-1}\,du = B_{T,X} T \|h\| n^{-1} \log n. \end{aligned}$$
2) If $\beta > 1$, put $k_{n,\beta} = [n^{1-1/\beta}]+1$. Then $$
|M_1| \leq 2\|h\| T \frac{[n^{1-1/\beta}]+1}{n} \leq 2\|h\| T \frac{n^{1-1/\beta}+1}{n} \leq 4\|h\| T n^{-1/\beta}. $$ To estimate $M_2$ observe that \begin{equation}\label{beta1} \begin{split} \int_{k_{n,\beta}T/n}^{T} u^{-\beta}\,du\leq \frac{T^{1-\beta}}{\beta-1}\left(\frac{k_{n,\beta}}{n}\right)^{1-\beta}\leq \frac{T^{1-\beta}}{\beta-1} \left(\frac{n^{1-1/\beta}}{n}\right)^{1-\beta}\leq \frac{T^{1-\beta}}{\beta-1}n^{1-1/\beta}. \end{split} \end{equation} Therefore, \begin{align*}
|M_2| &\leq \frac{T}{n} B_{T,X} \|h\| \int_{k_{n,\beta}T/n}^{T} u^{-\beta}\,du
\leq \frac{B_{T,X}}{\beta-1} T^{2- \beta} \|h\| n^{-1/\beta}. \end{align*}
\end{proof}
\begin{proof}[Proof of Theorem \ref{t1}] Since we can obtain the required bound for $p<2$ from the bound with $p=2$ by the H\"older inequality, we consider the case $p\geq 2$ only.
Define $$ J_{t,n}(h):=I_{t}(h)-I_{t,n}(h)=\int_0^t \Delta_n(s)ds, \quad \Delta_n(s):=h(X_s) - h(X_{\eta_n (s)}). $$ By definition, the function $t\mapsto J_{t,n}(h)$ is absolutely continuous. Then using the Newton-Leibniz formula twice we get $$
\Big|J_{T,n}(h)\Big|^p=p(p-1)\int_0^T\Big|J_{s,n}(h)\Big|^{p-2}\Delta_n(s)\left(\int_s^T\Delta_n(t)\, dt\right)ds. $$ Therefore, $$
\Big|J_{T,n}(h)\Big|^p\leq p(p-1)(H_{T,n,p}^1(h)+H_{T,n,p}^2(h)), $$ where $$
H_{T,n,p}^1(h)=\int_0^T\Big|J_{s,n}(h)\Big|^{p-2}|\Delta_n(s)|\left|\int_s^{\zeta_n(s)}\Delta_n(t)\, dt\right|ds, $$ $$
H_{T,n,p}^2(h)=\int_0^T\Big|J_{s,n}(h)\Big|^{p-2}\Delta_n(s)\left(\int_{\zeta_n(s)}^T\Delta_n(t)\, dt\right)ds. $$ Let us estimate separately the expectations of $ H_{T,n,p}^1(h)$ and $H_{T,n,p}^2(h)$. By the H\"older inequality, \begin{align*}
\mathds{E}_x H_{T,n,p}^1(h)&\leq \left(\mathds{E}_x\int_0^T\Big|J_{s,n}(h)\Big|^{p}\, ds\right)^{1-2/p}\left(\mathds{E}_x\int_0^T|\Delta_n(s)|^{p/2}\left|\int_s^{\zeta_n(s)}\Delta_n(t)\, dt\right|^{p/2}\, ds\right)^{2/p} \\
&\leq \left(\mathds{E}_x\int_0^T\Big|J_{s,n}(h)\Big|^{p}\, ds\right)^{1-2/p} \left((2\|h\|)^{p/2} T (2\|h\|)^{p/2} \left(\frac{T}{n}\right)^{p/2}\right)^{2/p}\\ &
= 4 T^{1+2/p} n^{-1} \|h\|^2 \left(\mathds{E}_x\int_0^T\Big|J_{s,n}(h)\Big|^{p}\, ds\right)^{1-2/p}. \end{align*}
Further, observe that for every $s$ the variables $$\Delta_n(s), \quad |J_{s,n}(h)|^{p-2}\Delta_n(s) $$ are $\mathcal{F}_{\zeta_n(s)}$-measurable; here and below $\{\mathcal{F}_t, t\geq 0\}$ denotes the natural filtration for the process $X$. Hence, $$\begin{aligned}
\mathds{E}_xH_{T,n,p}^2(h)&=\mathds{E}_x\left(\int_0^T\Big|J_{s,n}(h)\Big|^{p-2}\Delta_n(s)\mathds{E}_x\left(\int_{\zeta_n(s)}^T\Delta_n(t)\, dt\Big| \mathcal{F}_{\zeta_n(s)}\right)ds\right)\\&
\leq \mathds{E}_x\left(\int_0^T\Big|J_{s,n}(h)\Big|^{p-2}|\Delta_n(s)|\left|\mathds{E}_x\left(\int_{\zeta_n(s)}^T\Delta_n(t)\, dt\Big| \mathcal{F}_{\zeta_n(s)}\right)\right|ds\right). \end{aligned} $$ By Proposition \ref{p1} and the Markov property of $X$, we have $$
\left|\mathds{E}_x\left(\int_{\zeta_n(s)}^T\Delta_n(t)\, dt\Big|\mathcal{F}_{\zeta_n(s)}\right)\right| = \left|E_{X_{\zeta_n(s)}}\int_{0}^{T-\zeta_n(s)}\Delta_n(t)\, dt\right| \leq 5B_{T,X} T D_{T,\beta} (n) \|h\|. $$ Therefore, using the H\"older inequality, we get $$\begin{aligned}
\mathds{E}_xH_{T,n,p}^2(h)&\leq 5B_{T,X} T D_{T,\beta} (n) \|h\| \left(\mathds{E}_x\int_0^T\Big|J_{s,n}(h)\Big|^{p}\, ds\right)^{1-2/p}\left(\mathds{E}_x\int_0^T|\Delta_n(s)|^{p/2}\, ds\right)^{2/p}\\&
\leq 10B_{T,X} T^{1+2/p} D_{T,\beta} (n) \|h\|^2\left(\mathds{E}_x\int_0^T\Big|J_{s,n}(h)\Big|^{p}\, ds\right)^{1-2/p}. \end{aligned} $$ Note that $n^{-1}\leq D_{T,\beta} (n)$, hence the above bounds for $\mathds{E}_x H_{T,n,p}^1(h)$ and $\mathds{E}_xH_{T,n,p}^2(h)$ finally yield the estimate \begin{equation}\label{recur_bound}
\mathds{E}_x \Big|J_{T,n}(h)\Big|^{p}\leq 14p(p-1)B_{T,X} T^{1+2/p} D_{T,\beta} (n) \|h\|^2\left(\mathds{E}_x\int_0^T\Big|J_{s,n}(h)\Big|^{p}\, ds\right)^{1-2/p}. \end{equation}
It can be easily seen that the above inequality also holds true if $J_{T,n}(h)$ in the left hand side is replaced by $J_{t,n}(h)$. Taking the integral over $t\in [0,T]$, we get $$
\mathds{E}_x\int_0^T\Big|J_{t,n}(h)\Big|^{p}\, dt\leq 14p(p-1)B_{T,X} T^{2+2/p} D_{T,\beta} (n) \|h\|^2\left(\mathds{E}_x\int_0^T\Big|J_{s,n}(h)\Big|^{p}\, ds\right)^{1-2/p}. $$
Because $h$ is bounded, the left hand side expression in the above inequality is finite. Hence, resolving this inequality, we get $$
\mathds{E}_x\int_0^T\Big|J_{s,n}(h)\Big|^{p}\,ds\leq (14p(p-1)B_{T,X})^{p/2} T^{p+1} (D_{T,\beta} (n))^{p/2} \|h\|^p, $$ which together with (\ref{recur_bound}) gives the required statement.
\end{proof}
\begin{proof}[Proof of Theorem \ref{t2}] Denote $$S_{k,a,b}:= \{(s_1,s_2,...,s_k) \in \mathbb{R}^k : a\leq s_1 < s_2 < ...< s_k \leq b\}, \ k \in \mathbb{N}, \ a,b \in \mathbb{R}.$$
We have \begin{equation}\begin{aligned} \label{sumJi} &\mathds{E}_x \Big[(I_{T}(h))^k-(I_{T,n}(h))^k \Big] f(X_T)\\& = k!\,\mathds{E}_x \int_{S_{k,0,T}}[h(X_{s_1})h(X_{s_2})...h(X_{s_k}) - h(X_{\eta_n(s_1)})h(X_{\eta_n(s_2)})...h(X_{\eta_n(s_k)})] f(X_T) \prod_{i=1}^k ds_{i} \\& = k!\, \int_{S_{k,0,T}}\int_{(\mathbb{R}^d)^{k+1}}\left(\prod_{i=1}^kh(y_i)\right)f(z) \left(\prod_{i=1}^k p_{s_i-s_{i-1}}(y_{i-1},y_i)\right)p_{T-s_k}(y_k,z)dz \prod_{j=1}^k dy_j \prod_{i=1}^k ds_{i}\\& -k!\, \int_{S_{k,0,T}}\int_{(\mathbb{R}^d)^{k+1}}\left(\prod_{i=1}^kh(y_i)\right)f(z) \left(\prod_{i=1}^k p_{\eta_n(s_i)-\eta_n(s_{i-1})}(y_{i-1},y_i)\right)\\& \times p_{T-\eta_n(s_k)}(y_k,z) dz \prod_{j=1}^k dy_j \prod_{i=1}^k ds_{i}\\& = k!\sum_{r=1}^{k}\int_{S_{k,0,T}}\int_{(\mathbb{R}^d)^{k+1}} \left(\prod_{i=1}^kh(y_i)\right)f(z) J_{s_1, \dots, s_k,T}^{(r)}(x,y_1, \dots, y_k,z)dz \prod_{j=1}^k dy_j \prod_{i=1}^k ds_{i}, \end{aligned}\end{equation} where the convention $s_0 = 0, s_{k+1}=T, y_0 = x, y_{k+1}=z$ is used and the functions $J^{(r)}, r=1, \dots, k$ are defined by the relations \begin{align*} J_{s_1, \dots, s_k,T}^{(r)}&(x,y_1, \dots, y_k,z) = \left(\prod_{i=1}^{r-1} p_{\eta_n(s_i)-\eta_n(s_{i-1})}(y_{i-1},y_i)\right)\\ &\quad \times \Big(p_{s_r-s_{r-1}}(y_{r-1},y_r) - p_{\eta_n(s_r)-\eta_n(s_{r-1})}(y_{r-1},y_r)\Big) \left(\prod_{i=r}^k p_{s_{i+1}-s_{i}}(y_{i},y_{i+1})\right). \end{align*}
Let us estimate the $r$-th term in the last line in (\ref{sumJi}). We have \begin{align*} \int_{S_{k,0,T}}&\int_{(\mathbb{R}^d)^{k+1}} \left(\prod_{i=1}^kh(y_i)\right)f(z) J_{s_1, \dots, s_k,T}^{(r)}(x,y_1, \dots, y_k,z) dz \prod_{j=1}^k dy_j \prod_{i=1}^k ds_{i} \\&
\leq \|h\|^k \|f\| \int_{S_{k,0,T}}\int_{(\mathbb{R}^d)^{k+1}} \ \big| J_{s_1, \dots, s_k,T}^{(r)}(x,y_1, \dots, y_k,z) \big| dz \prod_{j=1}^k dy_j \prod_{i=1}^k ds_{i}. \end{align*} \begin{comment}By the Chapman-Kolmogorov equation and \eqref{der_bound}, we get \begin{align*}
&J_{s_1, \dots, s_k,T}^{(r)}:= \int_{(\mathbb{R}^d)^{k+1}}\big|J_{s_1, \dots, s_k,T}^{(r)}(x,y_1, \dots, y_k,z)\big| dz \prod_{j=1}^k dy_j \\ &\leq B_{T,X} \int_{(\mathbb{R}^d)^2}p_{\eta_n(s_{r-1})-\eta_n(s_0)}(x,y_{r-1})
\Big|p_{s_r-s_{r-1}}(y_{r-1},y_r) - p_{\eta_n(s_r)-\eta_n(s_{r-1})}(y_{r-1},y_r)\Big| dy_{r-1}dy_r. \end{align*} \end{comment} Since the case $\beta=1$ was already treated in \cite{kul-gan2}, for the rest of the proof we assume that $\beta>1$.
Consider two cases: a) $s_r-s_{r-1}> k_{n,\beta}T/n$ and b) $s_r-s_{r-1}\leq k_{n,\beta}T/n$.
In case a), using condition \textbf{X} and the Chapman-Kolmogorov equation, we derive $$\begin{aligned}
&\int_{(\mathbb{R}^d)^{k+1}} \big| J_{s_1, \dots, s_k,T}^{(r)}(x,y_1, \dots, y_k,z) \big| dz \prod_{j=1}^k dy_j \\&\leq B_{T,X} \int_{(\mathbb{R}^d)^2}p_{\eta_n(s_{r-1})-\eta_n(s_0)}(x,y_{r-1})
\left|\int_{\eta_n(s_r)-\eta_n(s_{r-1})}^{s_r-s_{r-1}} v^{-\beta} q_{v,y_{r-1}}(y_r)dv\right|dy_{r-1}dy_r.
\end{aligned}$$
Since $k_{n,\beta}\geq 2$, in case a) we have $s_r-s_{r-1}\geq 2T/n$, and hence $$ \eta_n(s_r)-\eta_n(s_{r-1})\geq s_r-\frac{T}{n}-s_{r-1}\geq \frac{s_r-s_{r-1}}{2}. $$ Therefore, using the fact that $q_{t,y}(\cdot)$ is a probability density for any $t>0$ and $y\in \mathbb{R}^d$, we finally get \begin{align*}
\int_{(\mathbb{R}^d)^{k+1}} \big| J_{s_1, \dots, s_k,T}^{(r)}(x,y_1, \dots, y_k,z) \big| dz \prod_{j=1}^k dy_j&\leq B_{T,X} \int_{s_r-s_{r-1}-T/n}^{s_r-s_{r-1}} v^{-\beta}dv\leq \frac{B_{T,X}T}{n} \Big(\frac{s_r-s_{r-1}}{2}\Big)^{-\beta}. \end{align*} In case b) we simply apply the Chapman-Kolmogorov equation:
$$\begin{aligned}
&\int_{(\mathbb{R}^d)^{k+1}} \big| J_{s_1, \dots, s_k,T}^{(r)}(x,y_1, \dots, y_k,z) \big| dz \prod_{j=1}^k dy_j\\ &\leq \int_{(\mathbb{R}^d)^2}p_{\eta_n(s_{r-1})-\eta_n(s_0)}(x,y_{r-1}) \Big(p_{s_r-s_{r-1}}(y_{r-1},y_r) + p_{\eta_n(s_r)-\eta_n(s_{r-1})}(y_{r-1},y_r)\Big) dy_{r-1}dy_r\\
&\leq 2.
\end{aligned}$$ Therefore, summarizing the estimates obtained in cases a) and b) we get, using \eqref{beta1}, the estimates \begin{align*}
\int_{S_{k,0,T}}&\int_{(\mathbb{R}^d)^{k+1}} \ \big| J_{s_1, \dots, s_k,T}^{(r)}(x,y_1, \dots, y_k,z) \big| dz \prod_{j=1}^k dy_j \prod_{i=1}^k ds_{i}\\ &\leq \frac{B_{T,X}T}{n} 2^{\beta} \int_0^T \int_0^{s_r-k_{n,\beta}T/n} \frac{s_{r-1}^{r-1}}{(r-1)!}(s_r-s_{r-1})^{-\beta}\frac{(T-s_r)^{k-r}}{(k-r)!}ds_{r-1}ds_r\\ &+ 2\int_0^T\int_{s_r-k_{n,\beta}T/n}^{ s_r}\frac{s_{r-1}^{r-1}}{(r-1)!}\frac{(T-s_r)^{k-r}}{(k-r)!} ds_{r-1}ds_r\\ &\leq \frac{B_{T,X}T}{n} 2^{\beta} \int_0^T \frac{s_{r}^{r-1}}{(r-1)!}\frac{(T-s_r)^{k-r}}{(k-r)!}ds_r\Big(\int_{k_{n,\beta}T/n}^{T}u^{-\beta}du\Big)\\ &+ \frac{2Tk_{n,\beta}}{n}\int_0^T\frac{s_{r}^{r-1}}{(r-1)!}\frac{(T-s_r)^{k-r}}{(k-r)!} ds_r\\ &\leq \frac{ 2^{\beta} B_{T,X}T^{k+2-\beta}}{(\beta-1)(k-1)!} n^{-1/\beta}+ \frac{4T^{k+1}}{(k-1)!} n^{-1/\beta}\\ &\leq \frac{2^{\beta\vee 2} T^{k+1}B_{T,X} D_{T,\beta}(n)}{(k-1)!}, \end{align*} where in the fourth and the fifth lines we used that $s_{r-1}^{r-1}\leq s_r^{r-1}$. Taking into account that in \eqref{sumJi} we have $k$ terms, and the common multiplier $k!$, we finally arrive at \eqref{t2-eq}. \end{proof}
\section{Condition \textbf{X} for solutions to L\'evy driven SDEs}\label{s3}
Consider the SDE \begin{equation}\label{sde1} dX_t = b(X_t) dt + dZ_t, \quad X_0=x, \end{equation} where $Z$ is a real-valued L\'evy process. In \cite{KK14} it was shown that if $Z_t$ is a symmetric $\alpha$-stable process and $b(\cdot)$ is bounded and Lipschitz continuous, then the solution to equation \eqref{sde1} satisfies condition \textbf{X} with $\beta=\max(1, 1/\alpha)$ (in fact, in \cite{KK14} more general multidimensional SDEs of the form \eqref{SDE} are considered). In this section we outline the argument which makes it possible to extend the class of ``L\'evy noises''. Namely, we will omit the requirement on $Z$ to be \emph{symmetric}, and relax the stability assumption, demanding $Z$ to be ``locally $\alpha$-stable'' in the sense we specify below.
Recall that for a real-valued L\'evy process the characteristic function is of the form $$ \mathbb{E} e^{i\xi Z_t}= e^{-t\psi(\xi)}, \quad t>0, \, \xi \in \mathds{R}, $$ where the \emph{characteristic exponent} $\psi$ admits the L\'evy-Khinchin representation \begin{equation}\label{psi1}
\psi(\xi) =-ia\xi+{1\over 2}\sigma^2\xi^2+\int_\mathds{R} \big(1-e^{i \xi u}+ i \xi u\mathds{1}_{\{|u|\leq 1\}} \big)\mu(du). \end{equation} In what follows, we assume that $\sigma^2=0$ and the \emph{L\'evy measure} $\mu$ is of the form \begin{equation}\label{tilm}
\mu(du)=C_{+} u^{-1-\alpha}\mathds{1}_{u\in (0,1)}du +C_{-} |u|^{-1-\alpha}\mathds{1}_{u\in (-1,0)}du+ m(u) du,
\end{equation}
with some $C_\pm\geq 0$ and $m(u)\geq 0$ such that $m(u)=0$ for $|u|\leq 1$, and
\begin{equation}\label{m1}
m(u)\leq c|u|^{-1-\alpha}, \quad |u|\geq 1. \end{equation} On the interval $[-1,1]$ the L\'evy measure $\mu$ given by (\ref{tilm}) coincides with the L\'evy measure of a (non-symmetric) $\alpha$-stable process. This is the reason for us to call $Z$ a ``locally $\alpha$-stable'' process: its ``local behavior'' near the origin is similar to that of the $\alpha$-stable process. In that context condition (\ref{m1}) means that the ``tails'' of the L\'evy measure $\mu$ are dominated by the ``tails'' of the $\alpha$-stable L\'evy measure.
Let us impose three minor conventions, which will simplify the technicalities below. First, since we are mostly interested in the case $\beta>1$, we assume that $\alpha<1$. Second, the latter assumption assures that the integral $$
\int_{\{|u|\leq 1\}} u\mu(du) $$ is well defined, and we assume that the constant $a$ in \eqref{psi1} is equal to this integral; that is, $\psi$ has the form $$ \psi(\xi) =\int_\mathds{R} \big(1-e^{i \xi u}\big)\mu(du). $$ Clearly, this does not restrict the generality because one can change the constant $a$ by changing respectively the drift coefficient $b(\cdot)$ in \eqref{sde1}. Finally, in order to avoid the usage of the Rademacher theorem (see \cite[Lemma~7.4]{KK14} for the case when $b$ is just Lipschitz continuous), let us assume that $b\in C^1(\mathds{R})$.
In what follows we show how the \emph{parametrix construction} developed in \cite{KK14} can be modified to provide the representation and the bounds for the transition probability density $p_t(x,y)$ of the solution to \eqref{sde1} driven by the ``locally stable'' noise $Z$.
Let us introduce some notation and give some preliminaries. We denote the space and the space-time convolutions respectively by $$ (f\ast g)(x,y):=\int_{\mathbb{R}^d}f(x,z)g(z,y)\, dz, $$ $$ (f\circledast g)_t(x,y):=\int_0^t(f_{t-s}\ast g_s)(x,y)\, ds=\int_0^t\int_{\mathbb{R}^d}f_{t-s}(x,z)g_{s}(z,y)\, dzds. $$
Generically, the parametrix construction provides the representation of the required transition probability density in the form \begin{equation}\label{p10} p_t(x,y)= p_t^0(x,y)+ \int_0^t \int_\mathds{R} p^0_{t-s}(x,z) \Psi_s (z,y) dzds, \quad t>0, \quad x,y\in \mathds{R}. \end{equation} Here $p^0_t(x,y)$ is a ``zero approximation term'' for the unknown $p_t(x,y)$, the function $\Psi_t(x,y)$ is given by the ``convolution series'' \begin{equation}\label{Psi} \Psi_t(x,y)= \sum_{k=1}^\infty \Phi_t^{\circledast k} (x,y), \quad t>0, \quad x,y\in \mathds{R}, \end{equation}
the function $\Phi_t(x,y)$ depends on the particular choice of $p^0_t(x,y)$, and equals \begin{equation}\label{phi10} \Phi_t(x,y):= \big(L_x - \partial_t\big)p_t^0(x,y), \end{equation} where \begin{align*} L f(x):&= b(x) f'(x)+ \int_\mathds{R} \big(f(x+u)-f(x)\big) \mu(du), \quad f\in C^2_b (\mathds{R}) \end{align*} is the formal generator of the process $X$. The subscript $x$ in above expressions means that the operator is applied with respect to the variable $x$. Note that to make the above construction feasible, one should properly choose the ``zero approximation term'' $p_t^0(x,y)$, so that the convolution series \eqref{Psi} converges and the space-time convolution in (\ref{p10}) is well defined. To introduce in our setting such $p_t^0(x,y)$, and then to construct the bounds for the associated $\Phi_t(x,y)$ and its convolution powers, we need some more notation.
Denote by $Z^{(\alpha, C_\pm)}$ the $\alpha$-stable process with the L\'evy measure $\mu^{(\alpha, C_\pm)}(du)=m^{(\alpha, C_\pm)}(u)\, du$, $$ m^{(\alpha, C_\pm)}(u):=C_{+} u^{-1-\alpha}\mathds{1}_{u>0} +C_{-} (-u)^{-1-\alpha}\mathds{1}_{u<0}, $$ and the characteristic exponent $$ \psi^{(\alpha, C_\pm)}(\xi)= \int_\mathds{R} \big(1-e^{i\xi u}\big)\mu^{(\alpha, C_\pm)}(du). $$ Note that since $$ \psi^{(\alpha, C_\pm)}(c\xi)=c^\alpha \psi^{(\alpha, C_\pm)}(\xi),\quad c>0, $$ the process $Z^{(\alpha, C_\pm)}$ possesses the scaling property $$ \mathrm{Law}\,\big(Z_{ct}^{(\alpha, C_\pm)}\big)=\mathrm{Law}\,\big(c^{1/\alpha}Z_t^{(\alpha, C_\pm)}\big), \quad c>0. $$ Denote by $g_t^{(\alpha,C_\pm)} $ the distribution density of $Z^{(\alpha, C_\pm)}_t$. By the scaling property we have $$ g_t^{(\alpha,C_\pm)}(x)=t^{-1/\alpha}g^{(\alpha,C_\pm)}\left(xt^{-1/\alpha}\right), \quad g^{(\alpha,C_\pm)}:=g^{(\alpha,C_\pm)}_1. $$ Denote also by $Z^{(\alpha)}$ the \emph{symmetric} $\alpha$-stable process; that is, the process of the form introduced above with $C_+=C_-=1$. Let $g_t^{(\alpha)} $ be the respective distribution density and $g^{(\alpha)}:= g_1^{(\alpha)}$.
Finally, denote by $\chi_t(x)$ and $\theta_t(y)$, respectively, the solutions to the ODEs \begin{equation}\label{ODE} d\chi_t = b(\chi_t)dt, \quad \chi_0=x,\quad d\theta_t =- b(\theta_t)dt, \quad \theta_0=y. \end{equation} Note that these solutions exist, because $b(\cdot)$ is Lipschitz continuous.
Now we are ready to formulate the main statement of this section.
\begin{thm}\label{lem-der} Let \begin{equation}\label{p0} p^0_t(x,y):=g^{(\alpha,C_\pm)}_t(\theta_t(y)-x). \end{equation} Then the convolution series \eqref{Psi} is well defined, and the formula (\ref{p10}) gives the representation of the transition probability density $p_t(x,y)$ of the process $X$. This density and its time derivative have the following upper bounds: \begin{equation}\label{ptx} p_t(x,y)\leq C \big(g_{t+1}^{(\alpha)}+ g_t^{(\alpha)}\big)(y-\chi_t(x)),\quad t\in (0,T], \, x,y\in \mathds{R}, \end{equation} \begin{equation}\label{ptx1} \partial_t p_t(x,y)\leq C t^{-1/\alpha}\big(g_{t+1}^{(\alpha)}+ g_t^{(\alpha)}\big)(y-\chi_t(x)), \quad t\in (0,T], \, x,y\in \mathds{R}, \end{equation} Consequently, the process $X$ satisfies condition \textbf{X} with $\beta= 1/\alpha$ and $$ q_{t,x}(y)=\big(g_{t+1}^{(\alpha)}+ g_t^{(\alpha)}\big)(y-\chi_t(x)). $$
\end{thm}
\begin{proof} First we evaluate $\Phi_t(x,y)$. If it is not stated otherwise, we assume in all estimates obtained below that $t\in (0,T]$ for some $T>0$, and $x,y\in \mathds{R}$. Observe that $g^{(\alpha,C_\pm)}\in C_b^2(\mathds{R})$. Indeed, this property easily follows from the Fourier inversion formula and the expression for the characteristic function. It is known that $g_t^{(\alpha,C_\pm)}(y-x)$ is the fundamental solution to $\partial_t -L^{(\alpha,C_\pm)}$, where $L^{(\alpha,C_\pm)}$ denotes the generator of the process $Z^{(\alpha,C_\pm)}$: \begin{equation}\label{la} L^{(\alpha,C_\pm)}f(x)=\int_\mathds{R} \big( f(x+u)-f(x)\big) \mu^{(\alpha,C_\pm)}(du), \quad f\in C^2_b(\mathds{R}). \end{equation} Since $$ (\partial_t-L_x^{(\alpha,C_\pm)})g_t^{(\alpha,C_\pm)}(y-x)=0, $$ we have $$\begin{aligned}
\partial_tp_t^0(x,y)&=\left[\partial_tg_t^{(\alpha,C_\pm)}(w)+\frac{\partial_t\theta_t(y)}{t^{2/\alpha} } (g^{(\alpha,C_\pm)})'\left(\frac{w}{t^{1/\alpha}} \right)\right]\Big|_{w=\theta_t(y)-x}
\\&=\left[\frac{1}{t^{1/\alpha} } (L^{(\alpha,C_\pm)} g^{(\alpha,C_\pm)})\left(\frac{w}{t^{1/\alpha}} \right)+\frac{\partial_t\theta_t(y)}{t^{2/\alpha} } (g^{(\alpha,C_\pm)})'\left(\frac{w}{t^{1/\alpha}} \right)\right]\Big|_{w=\theta_t(y)-x}, \end{aligned} $$ where in the last identity we used the scaling property of $g_t^{(\alpha,C_\pm)}$ and the fact that $L^{(\alpha,C_\pm)}$ is a homogeneous operator of order $1/\alpha$. Next, by the very definition of $L $ and $ L^{(\alpha,C_\pm)}$ we get \begin{equation*} \begin{split}
L_x&p_t^0(x,y)=\left[\frac{1}{t^{1/\alpha} } (L^{(\alpha,C_\pm)} g^{(\alpha,C_\pm)})\left(\frac{w}{t^{1/\alpha}} \right)-\frac{b(x)}{t^{2/\alpha} } (g^{(\alpha,C_\pm)})'\left(\frac{w}{t^{1/\alpha}} \right)\right]\Big|_{w=\theta_t(y)-x}\\
&+ \left[ \int_{|u|\geq 1} \left(\frac{1}{t^{1/\alpha} } g^{(\alpha,C_\pm)}\left(\frac{w-u}{t^{1/\alpha}} \right)-
\frac{1}{t^{1/\alpha} } g^{(\alpha,C_\pm)}\left(\frac{w}{t^{1/\alpha}} \right)\right)\big(m(u)du-m^{(\alpha,C_\pm)}(du)\big)\right]\Big|_{w=\theta_t(y)-x}. \end{split} \end{equation*} Therefore, using the relation $\partial_t\theta_t(y)=-b(\theta_t(y))$, we get \begin{equation}\label{phi-n10} \begin{split} \Phi_t(x,y)&=\big(L_x - \partial_t\big)p_t^0(x,y)=\frac{b(x)-b(\theta_t(y))}{t^{2/\alpha} } (g^{(\alpha,C_\pm)})'\left(\frac{\theta_t(y)-x}{t^{1/\alpha}} \right)\\
&+\frac{1}{t^{1/\alpha} }\int_{|u|\geq 1} \left( g^{(\alpha,C_\pm)}\left(\frac{\theta_t(y)-x-u}{t^{1/\alpha}} \right)-
g^{(\alpha,C_\pm)}\left(\frac{\theta_t(y)-x}{t^{1/\alpha}} \right)\right)\big(m(u)-m^{(\alpha,C_\pm)}(u)\big)\, du\\ &=: \Phi_t^1(x,y)+\Phi_t^2(x,y). \end{split} \end{equation} Further, we give the bounds for the absolute values of $\Phi^1_t(x,y)$, $\Phi^2_t(x,y)$, and $\Phi_t(x,y)$. In what follows, $C$ denotes a generic constant, whose value might be different from place to place. One has \begin{equation}\label{ga9} g^{(\alpha,C_\pm)}(x)\leq C g^{(\alpha)}(x), \quad \quad x\in \mathds{R}, \end{equation} \begin{equation}\label{ga91}
\big|(g^{(\alpha,C_\pm)})'(x)\big|\leq C (1+|x|)^{-1} g^{(\alpha)}(x),\quad x\in \mathds{R}, \end{equation} \begin{equation}\label{ga92}
\big|(g^{(\alpha,C_\pm)})''(x)\big|\leq C (1+|x|)^{-2} g^{(\alpha)}(x),\quad x\in \mathds{R}. \end{equation} Since the argument used in the proof of (\ref{ga9}) -- (\ref{ga92}) is quite standard (see e.g. \cite[Appendix A]{KK14}), we omit the details.
By \eqref{ga91} and the Lipschitz continuity of $b(\cdot)$ we have $$
|\Phi_t^1(x,y)|\leq\frac{C|x-\theta_t(y)|}{t^{2/\alpha} }\left| (g^{(\alpha,C_\pm)})'\left(\frac{\theta_t(y)-x}{t^{1/\alpha}} \right)\right|\leq \frac{C}{t^{1/\alpha} } g^{(\alpha)}\left(\frac{\theta_t(y)-x}{t^{1/\alpha}} \right)=Cg_t^{(\alpha)}(\theta_t(y)-x). $$
To get the estimate for $|\Phi_t^2(x,y)|$, we first observe that $$
\big|m(u)-m^{(\alpha,C_\pm)}(u)\big|\mathds{1}_{|u|\geq 1} \leq C g^{(\alpha)}(u), $$ which implies $$\begin{aligned}
|\Phi_t^2(x,y)|&\leq \frac{C}{t^{1/\alpha} }\int_{|u|\geq 1} g^{(\alpha,C_\pm)}\left(\frac{\theta_t(y)-x-u}{t^{1/\alpha}} \right)
g^{(\alpha)}(u)\, du+\frac{C}{t^{1/\alpha} }g^{(\alpha,C_\pm)}\left(\frac{\theta_t(y)-x}{t^{1/\alpha}} \right). \end{aligned} $$ Taking into account \eqref{ga9}, we deduce that $$
|\Phi_t^2(x,y)|\leq C \big( g_t^{(\alpha)}* g_1^{(\alpha)}+ g_t^{(\alpha)}\big)(\theta_t(y)-x)= C \big( g_{t+1}^{(\alpha)}+ g_t^{(\alpha)}\big)(\theta_t(y)-x). $$ Combining the estimates for $\Phi_t^1(x,y)$ and $\Phi_t^2(x,y)$, we get \begin{equation}\label{phi-n30}
|\Phi_t(x,y)| \leq C \big(g_{t+1}^{(\alpha)}+ g_t^{(\alpha)}\big)(\theta_t(y)-x). \end{equation}
Our next step is to estimate the convolution powers of $\Phi$. It is shown in \cite[Appendix B]{KK14} that the kernel $g_t^{(\alpha)}(\theta_t(y)-x)$ possesses the following \emph{sub-convolution property}: \begin{equation}\label{sub10} \int_\mathds{R} g_{t-s}^{(\alpha)}\big(\theta_{t-s}(z)-x\big)g_s^{(\alpha)}\big(\theta_s(y)-z\big)dz \leq C g_t^{(\alpha)}(\theta_t(y)-x). \end{equation} By this property we get \begin{equation}\label{sub20} \begin{split} \int_\mathds{R} g_{t+1-s}^{(\alpha)}\big(\theta_{t-s}(z)-x\big)g_s^{(\alpha)}\big(\theta_s(y)-z\big)dz &\leq C g_{t+1}^{(\alpha)}(\theta_t(y)-x), \\ \int_{\mathds{R}} g_{t-s+1}^{(\alpha)}(\theta_{t-s}(z)-x)g_{s+1}^{(\alpha)}(\theta_s(y)-z)dz &\leq C g_{t+2}^{(\alpha)}(\theta_t(y)-x)\leq C g_{t+1}^{(\alpha)}(\theta_t(y)-x), \end{split} \end{equation} where in the last line we used that $g_2^{(\alpha)}\leq C g_1^{(\alpha)}$, and therefore $g_{t+2}^{(\alpha)}=g_{t}^{(\alpha)}*g_{2}^{(\alpha)}\leq Cg_{t+1}^{(\alpha)}$. Having these estimates, we deduce in the same way as in \cite[Section 3]{KK14} that \begin{equation}\label{phik}
|\Phi_t^{\circledast k} (x,y)|\leq\frac{ C_0(C t)^{k-1}}{k!} \big(g_{t+1}^{(\alpha)}+ g_t^{(\alpha)}\big)(\theta_t(y)-x). \end{equation} Therefore, the series \eqref{Psi} converges absolutely for $(t,x,y)\in (0,\infty)\times \mathds{R}\times \mathds{R}$, and $$
|\Psi_t(x,y)|\leq C \big(g_{t+1}^{(\alpha)}+ g_t^{(\alpha)}\big)(\theta_t(y)-x). $$ Applying once again the sub-convolution property \eqref{sub10}, we see that the convolution $p^0\circledast \Psi$ is well defined, and $$
|\big(p^0\circledast \Psi\big)_t(x,y)|\leq C \big(g_{t+1}^{(\alpha)}+ g_t^{(\alpha)}\big)(\theta_t(y)-x). $$ Thus, the expression \eqref{p10} for $p_t(x,y)$ is well defined for any $(t,x,y)\in (0,\infty)\times \mathds{R}\times \mathds{R}$, and $$
|p_t(x,y)|\leq C \big(g_{t+1}^{(\alpha)}+ g_t^{(\alpha)}\big)(\theta_t(y)-x). $$ Finally, to get \eqref{ptx} we use the following inequalities, which were proved in \cite[Appendix B]{KK14}: \begin{equation}\label{flow}
c |\theta_t(y)-x|\leq |\chi_t(x)-y|\leq C |\theta_t(y)-x|. \end{equation} Since for any constant $c>0$ we have $g_t^{(\alpha)}(x)\asymp g_t^{(\alpha)}(c x)$ for any $t\in (0,T]$, $x\in \mathds{R}$, this completes the proof of \eqref{ptx}.
Our final step is to use representation \eqref{p10} in order to find the bounds for $\partial_t p_t(x,y)$. Since $p_t^0(x,y)$ and $\Phi_t(x,y)$ are given explicitly, it is straightforward to show that these functions are differentiable with respect to $t$, and to check using \eqref{ga9} -- \eqref{ga92} that \begin{equation}\label{p20}
\big| \partial_t p_t^0(x,y)|\leq C t^{-1/\alpha} g_t^{(\alpha)}(\theta_t(y)-x), \end{equation}
\begin{equation}\label{tphi}
|\partial_t \Phi_t(x,y)|\leq C t^{-1/\alpha}\big( g_{t+1}^{(\alpha)}+ g_t^{(\alpha)}\big)(\theta_t(y)-x).
\end{equation}
To show that the convolution powers $\Phi_t^{\circledast k}(x,y)$ are differentiable in $t$ and to get the upper bounds, we use the following trick. The expression for $\Phi_t^{\circledast (k+1)}(x,y)$ can be re-organized as follows: \begin{equation}\label{44} \begin{split} \Phi^{\circledast (k+1)}_t(x,y)&=\int_0^{t}\int_{\mathds{R}}\Phi_{t-s}^{\circledast k}(x,z)\Phi_s(z,y)\,dzds\\ &=\int_0^{t/2}\int_{\mathds{R}}\Phi_{t-s}^{\circledast k}(x,z)\Phi_s(z,y)\,dz ds+\int_0^{t/2}\int_{\mathds{R}}\Phi_{s}^{\circledast k}(x,z)\Phi_{t-s}(z,y)\,dz ds. \end{split} \end{equation} If $k=1$, the first line in \eqref{44} does not allow us to differentiate $\partial_t\Phi^{\circledast (2)}_t(x,y)$, because the upper bound for $\partial_t\Phi_{t-s}(x,z)$ has a non-integrable singularity $(t-s)^{-1/\alpha}$ at the vicinity of the point $s=t$ (recall that $\alpha<1$). However, the identity given by the second line in \eqref{44} does not contain such singularities, and we can show using induction that
for any $k\geq 1$ the function $\Phi^{\circledast k}_t(x,y)$ is continuously differentiable in $t$, satisfies \begin{align*} \partial_t\Phi^{\circledast (k+1)}_t(x,y)&=\int_0^{t/2}\int_{\mathds{R}}(\partial_t\Phi^{\circledast k})_{t-s}(x,z)\Phi_s(z,y)\,dz ds+ \int_0^{t/2}\int_{\mathds{R}}\Phi_{s}^{\circledast k}(x,z)(\partial_t\Phi)_{t-s}(z,y)\,dz ds\\ &\quad +\int_{\mathds{R}}\Phi_{t/2}^{\circledast k}(x,z)\Phi_{t/2}(z,y)\, dz, \end{align*} and possesses the bound \begin{equation}\label{phikd}
|\partial_t\Phi_t^{\circledast k} (x,y)|\leq \frac{ C_0(Ct)^{k-1} t^{-1/\alpha}}{k!} \big(g_{t+1}^{(\alpha)}+ g_t^{(\alpha)}\big) (\theta_t(y)-x), \quad\quad k\geq 1. \end{equation} Since the proof is completely analogous to the proof of \cite[Lemma~7.3]{KK14}, we omit the details.
From \eqref{phikd} we derive the following bound for the derivative of $\Psi_t(x,y)$: \begin{equation}\label{phikd2}
|\partial_t\Psi_t (x,y)|\leq C t^{-1/\alpha} \big(g_{t+1}^{(\alpha)}+ g_t^{(\alpha)}\big) (\theta_t(y)-x). \end{equation} Re-organizing representation \eqref{p10} in the same way as \eqref{44}, we get
$$ p_t(x,y) =p_t^0(x,y)+\int_0^{t/2}\int_{\mathds{R}}p_{t-s}^0(x,z)\Psi_s(z,y)\,dzds+\int_{0}^{t/2}\int_{\mathds{R}}p_{s}^0(x,z)\Psi_{t-s}(z,y)\,dz\, ds. $$ Using the above representation of $p_t(x,y)$ together with \eqref{p20} and \eqref{phikd2}, we derive the existence of the continuous derivative $\partial_t p_t(x,y)$, which satisfies the inequality $$
|\partial_t p_t(x,y)|\leq C t^{-1/\alpha}\big( g_{t+1}^{(\alpha)}+ g_t^{(\alpha)}\big)(\theta_t(y)-x). $$ Using estimates (\ref{flow}) in the same way as we did that in the proof of (\ref{ptx}), we can change the argument $\theta_t(y)-x$ in the right hand side of the above estimate to $y-\chi_t(x)$, which completes the proof of (\ref{ptx1}).
\end{proof}
\section{Application: the price of an occupation time option}\label{s4}
In this section, we consider an \emph{occupation time option} (see \cite{Linetsky}), with the price of the option depending on the time spent by an asset price process in a given set. Compared to the standard barrier options, which are activated or cancelled when the asset price process hits some definite level (barrier), the payoff of the occupation time option depends on the time during which the asset price process stays below or above such a barrier.
For instance, for the strike price $K$, the barrier level $L$ and the knock-out rate $\rho$, the payoff of a down-and-out call occupation time option equals $$ \exp \left( - \rho \int_0^T \mathbb{I}_{\{S_t \leq L\}} dt \right) (S_T - K)_+, $$ and the price $\mathbf{C}(T)$ of the option is defined as $$ \textbf{C}(T) = \exp (-rT) E \left[ \exp \left( - \rho \int_0^T \mathbb{I}_{\{S_t \leq L\}} dt \right) (S_T - K)_+ \right] $$ where $r$ is the risk-free interest rate (see \cite{Linetsky}).
Assume that the price of an asset $S=\{S_t, t\geq 0\}$ is of the form $$ S_t = S_0 \exp(X_t), $$
where $X$ is the Markov process studied in the previous sections. Then the time spent by the process $S$ in a set $J\subset \mathbb{R}$ equals the time spent by $X$ in the set $J'=\log J$.
Let us approximate the price $\textbf{C}(T)$ of our option by $$ \textbf{C}_n(T) = \exp (-rT) E \left[ \exp \left( - \rho T/n \sum_{k=0}^{n-1} \mathbb{I}_{\{S_{kT/n} \leq L\}} \right) (S_T - K)_+ \right]. $$ Then using the results from the previous sections we can control the accuracy of such an approximation.
First we apply Theorem \ref{t1} and derive the strong approximation rate.
\begin{prop}\label{p41} Suppose that the process $X$ satisfies condition \textbf{X}, and assume that there exists $\lambda>1$ such that $G(\lambda):= E \exp (\lambda X_T) = E (S_T)^\lambda < + \infty$.
Then $$
\Big|\textbf{C}_n(T)- \textbf{C}(T)\Big| \leq \exp(-rT) \rho G(\lambda)^{1/\lambda} C_{T,\lambda/(\lambda-1)} (D_{T,\beta} (n))^{1/2}, $$ where the constants $C_{T,\lambda/(\lambda-1)}$ and $D_{T,\beta} (n)$ are given by \eqref{DC}. \end{prop} \begin{proof} The proof is a simple corollary of Theorem \ref{t1}. Denote $h(x)=\rho \mathbb{I}_{\{x\leq \log L\}}$. Then keeping the notation of Section \ref{s2} we get $$ \textbf{C}(T)=e^{-rT}Ee^{-I_T(h)}(S_T- K)_+, \quad \textbf{C}_n(T)=e^{-rT}Ee^{-I_{T,n}(h)}(S_T- K)_+. $$ By the H\"older inequality with $p=\lambda$ and $q=\lambda/(\lambda-1)$, $$
\Big|\textbf{C}_n(T)- \textbf{C}(T)\Big| \leq e^{-rT} \Big(E(S_T- K)_+^\lambda\Big)^{1/\lambda}\left(E\left|e^{-I_T(h)}-e^{-I_{T,n}(h)}\right|^{\lambda/(\lambda-1)}\right)^{(\lambda-1)/\lambda}. $$
Since for positive $a$ and $b$ we have $|e^{-a}-e^{-b}|\leq |a-b|$, we obtain $$
\Big|\textbf{C}_n(T)- \textbf{C}(T)\Big| \leq e^{-rT} G(\lambda)^{1/\lambda} \left(E\left|I_T(h)-I_{T,n}(h)\right|^{\lambda/(\lambda-1)}\right)^{(\lambda-1)/\lambda}, $$ and thus the required statement follows directly from Theorem~\ref{t1} with $p=\lambda/(\lambda-1)$. \end{proof}
We can also control the accuracy of the approximation using the weak rate bound from Theorem \ref{t2}. Observe that the bound given below is sharper than the one obtained in the previous proposition precisely when $\lambda>2$.
\begin{prop} Under the assumptions of Proposition~\ref{p41}, we have $$
\Big|\textbf{C}_n(T)- \textbf{C}(T)\Big| \leq 2^{\beta\vee 2+1}\max\{B_{T,X} \rho T^2 (1+\rho T) \exp(\rho T), G(\lambda) \} \exp (-rT) \widetilde{D}_{T, \beta} (n), $$ where $$ \widetilde{D}_{T,\beta} (n) = \begin{cases} n^{-(1-1/\lambda)} \log n,& \beta=1,\\ \max\left(1,\frac{T^{1- \beta}}{\beta-1}\right) n^{-(1-1/\lambda)/\beta},& \beta>1. \end{cases} $$
\end{prop}
\begin{proof}
For some $N>0$ denote $$\begin{aligned} &\textbf{C}^N(T) = \exp (-rT) E \left[ \exp \left( - \rho \int_0^T \mathbb{I}_{\{S_t \leq L\}} dt \right) ((S_T - K)_+ \wedge N) \right],\\& \textbf{C}^N_n(T) = \exp (-rT) E \left[ \exp \left( - \rho T/n \sum_{k=0}^{n-1} \mathbb{I}_{\{S_{kT/n} \leq L\}} \right) ((S_T - K)_+ \wedge N) \right]. \end{aligned}$$
Then \begin{equation} \label{exmpl_bound}
\Big|\textbf{C}_n(T)- \textbf{C}(T)\Big| \leq \Big|\textbf{C}^N_n(T)- \textbf{C}^N(T)\Big| + \Big|\textbf{C}(T)- \textbf{C}^N(T)\Big| + \Big|\textbf{C}_n(T)- \textbf{C}^N_n(T)\Big|. \end{equation}
We estimate each term separately.
Using Theorem \ref{t2} and the Taylor expansion of the exponential function, we derive \begin{align*}
\Big|\textbf{C}^N_n(T)- \textbf{C}^N(T)\Big| &\leq 2^{\beta\vee 2}B_{T,X} NT\exp (-rT) \sum_{k=1}^{\infty} \frac{\rho^k}{k!} k^2 T^{k} D_{T,\beta} (n)\\ &= 2^{\beta\vee 2}B_{T,X} \rho N T^2 (1+\rho T) \exp(\rho T-rT) D_{T,\beta} (n). \end{align*} For the last two terms we get \begin{align*}
\Big|\textbf{C}(T)- \textbf{C}^N(T)\Big| + \Big|\textbf{C}_n(T)&- \textbf{C}^N_n(T)\Big| \leq 2 \exp (-rT) E\left[(S_T - K)_+ -(S_T - K)_+ \wedge N \right] \\ & \leq 2 \exp (-rT) E [S_T \mathbb{I}_{\{S_T>N\}}] \\ &= 2 \exp (-rT) E\left[ \frac{S_T N^{\lambda-1}\mathbb{I}_{\{S_T>N\}}}{N^{\lambda-1}} \right] \leq \frac{2G(\lambda)}{N^{\lambda-1}} \exp (-rT). \end{align*} To complete the proof, put $N = n^{1/(\beta \lambda)}$.
\end{proof}
\end{document} |
\begin{document}
\title[On the periodicity of a class of arithmetic functions] {On the periodicity of a class of arithmetic functions associated with multiplicative functions} \author{Guoyou Qian}
\address{Center for Combinatorics, Nankai University, Tianjin 300071, P.R. China} \email{qiangy1230@gmail.com, qiangy1230@163.com} \author{Qianrong Tan} \address{School of Mathematics and Computer Science, Panzhihua University, Panzhihua 617000, P.R. China} \email{tqrmei6@126.com} \author{Shaofang Hong*}
\address{Yangtze Center of Mathematics, Sichuan University, Chengdu 610064, P.R. China and
Mathematical College, Sichuan University, Chengdu 610064, P.R. China}
\email{sfhong@scu.edu.cn, s-f.hong@tom.com, hongsf02@yahoo.com} \thanks{*Hong is the corresponding author and was supported partially by the National Science Foundation of China Grant \# 10971145 and by the Ph.D. Programs Foundation of Ministry of Education of China Grant \#20100181110073}
\keywords{periodic arithmetic function, arithmetic progression, least common multiple, $p$-adic valuation, Euler phi function, smallest period} \subjclass[2000]{Primary 11B25, 11N13, 11A05}
\begin{abstract} Let $k\ge 1,a\ge 1,b\ge 0$ and $ c\ge 1$ be integers. Let $f$ be a multiplicative function with $f(n)\ne 0$ for all positive integers $n$. We define the arithmetic function $g_{k,f}$ for any positive integer $n$ by $g_{k,f}(n):=\frac{\prod_{i=0}^k f(b+a(n+ic))} {f({\rm lcm}_{0\le i\le k} \{b+a(n+ic)\})}$. We first show that $g_{k,f}$ is periodic and $c {\rm lcm}(1,...,k)$ is its period. Subsequently, we provide a detailed local analysis of the periodic function $g_{k,\varphi}$, and determine the smallest period of $g_{k,\varphi}$, where $\varphi$ is the Euler phi function. \end{abstract}
\maketitle
\section{\bf Introduction} Chebyshev \cite{[Ch]} initiated the study of the least common multiple of consecutive positive integers in the first significant attempt to prove the prime number theorem. An equivalent form of the prime number theorem says that $\log {\rm lcm}(1, ...,n)\sim n$ as $n$ goes to infinity. Hanson \cite{[Ha]} and Nair \cite{[N]} obtained an upper bound and a lower bound of ${\rm lcm}_{1\le i\le n}\{i\}$, respectively. Bateman, Kalb and Stenger \cite{[BKS]} obtained an asymptotic estimate for the least common multiple of arithmetic progressions. Hong, Qian and Tan \cite{[HQT1]} obtained an asymptotic estimate for the least common multiple of a sequence of products of linear polynomials.
On the other hand, the study of periodic arithmetic functions has been a common topic in number theory for a long time. For the related background information, we refer the readers to \cite{[A]} and \cite{[M]}. Recently, this topic is still active. When studying the arithmetic properties of the least common multiple of finitely many consecutive positive integers, Farhi \cite{[F]} defined the arithmetic function $g_k$ for any positive integer $n$ by $g_k(n):=\frac{\prod_{i=0}^{k}(n+i)}{{\rm lcm}_{0\le i\le k}\{n+i\}}$. In the same paper, Farhi showed that $g_k$ is periodic and $k!$ is a period of it, and posed an open problem of determining the smallest period of $g_k$. Let $P_k$ be the smallest period of $g_k$. Define $L_0:=1$ and for any integer $k\ge 1$, we define $L_k:={\rm lcm}(1,...,k)$. Subsequently, Hong and Yang \cite{[HY]} improved the period $k!$ to $L_k$ and produced a conjecture stating that $\frac{L_{k+1}}{k+1}$ divides $P_k$ for all nonnegative integers $k$. By proving the Hong-Yang conjecture, Farhi and Kane \cite{[FK]} determined the smallest period of $g_k$ and finally solved the open problem posed by Farhi \cite{[F]}. Let $k\ge 1,a\ge 1,b\ge 0$ and $ c\ge 1$ be integers. Let ${\mathbb Q}$ and ${\mathbb N}$ denote the field of rational numbers and the set of nonnegative integers. Define ${\mathbb N}^*:={\mathbb N}\setminus\{0\}$. In order to investigate the least common multiple of any $k+1$ consecutive terms in the arithmetic progression $\{b+am\}_{m\in \mathbb{N}^*}$, Hong and Qian \cite{[HQ]} introduced the arithmetic function $g_{k,a,b}$ defined for any positive integer $n$ by $g_{k,a,b}(n):=\frac{\prod_{i=0}^k (b+a(n+i))}{{\rm lcm}_{0\le i\le k}\{b+a(n+i)\}}.$ They \cite{[HQ]} showed that $g_{k,a,b}$ is periodic and obtained the formula of the smallest period of $g_{k,a,b}$, which extends the Farhi-Kane theorem to the general arithmetic progression case.
Let $f$ be a multiplicative function with $f(n)\ne 0$ for all $n\in\mathbb{N}^*$. To measure the difference between $\prod_{i=0}^kf(b+a(n+ic))$ and $f({\rm lcm}_{0\le i\le k}\{b+a(n+ic)\})$, we define the arithmetic function $g_{k,f}$ for any positive integer $n$ by $$ g_{k,f}(n):=\frac{\prod_{i=0}^k f(b+a(n+ic))} {f({\rm lcm}_{0\le i\le k} \{b+a(n+ic)\})}. \eqno (1.1) $$ One naturally asks the following interesting question.\\
\noindent{\bf Problem 1.1.} Let $f$ be a multiplicative function such that $f(n)\ne 0$ for all positive integers $n$. Is $g_{k,f}$ periodic, and if so, what is the smallest period of $g_{k, f}$?\\
As usual, for any prime number $p$, we let $v_{p}$ be the normalized $p$-adic valuation of ${\mathbb Q}$, i.e., $v_p(a)=s$ if $p^{s}\parallel a$. For any real number $x$, by $\lfloor x\rfloor$ we denote the largest integer no more than $x$. Evidently, $v_p(L_k)={\rm max}_{1\leq i\leq k}\{v_{p}(i)\}=\lfloor {\rm log}_{p}k\rfloor$ is the largest exponent of a power of $p$ that is at most $k$. We have the first main result of this paper which answers the first part of Problem 1.1.\\
\noindent{\bf Theorem 1.2.} {\it Let $k\ge 1, a\ge 1, b\ge 0$ and $c\ge 1$ be integers. If $f$ is a multiplicative function so that $f(n)\neq 0$ for all $n\in \mathbb{N}^*$, then the arithmetic function $g_{k,f}$ is periodic and $cL_k$ is its period.}\\
It seems to be difficult to answer completely the second part of Problem 1.1. We here are able to answer it for the Euler phi function $\varphi $ case. In fact, we first prove a generalization of Hua's identity and then use it to show that the arithmetic function $g_{k, \varphi}$ is periodic. Subsequently, we develop $p$-adic techniques to determine the exact value of the smallest period of $g_{k, \varphi}$. Note that it was proved by Farhi and Kane \cite{[FK]} that there is at most one prime $p\le k$ such that $v_p(k+1)\ge v_p(L_k)\ge 1$. We can now state the second main result of this paper as follows.\\
\noindent{\bf Theorem 1.3.} {\it Let $k\ge 1, a\ge 1, b\ge 0$ and $c\ge 1$ be integers. Let $d:={\rm gcd}(a,b)$ and $a':=a/d$. Then $g_{k,\varphi}$ is periodic, and its smallest period equals $Q_{k, a', c}$ except that $v_p(k+1)\ge v_p(L_k)\ge 1$ for at most one odd prime $p\nmid a'$, in which case its smallest period is equal to $\frac{Q_{k, a', c}}{p^{v_p(L_k)}}$, where $$Q_{k, a', c}:=\frac{cL_k}{\eta _{2,k,a',c}\prod_{{\rm prime}
\ q|a'}q^{v_{q}(cL_k)}},\eqno(1.2)$$ and \begin{align*} \eta_{2,k,a',c}:={\left\{ \begin{array}{rl} 2^{v_2(L_k)}, &\text{if} \ 2\nmid a' \ \text{and} \ v_2(k+1)\ge v_2(L_k)\ge 2, \\
2, &\text{if} \ 2\nmid a \ \text{and}\ v_2(cL_k)=1, \ \text{or} \ k=3, 2\nmid a\ \text{and} \ 2|c, \ \text{or} \ k=3, 2\nmid a'\ \text{and}\ 2|d,\\ 1, &\text{otherwise}. \end{array} \right.} \end{align*}} \\ So we answer the second part of Problem 1.1 for the Euler phi function.
The paper is organized as follows. In Section 2, we show that $g_{k,f}$ is periodic and $cL_k$ is its period. In Section 3, we provide a detailed $p$-adic analysis to the periodic arithmetic function $g_{k,\varphi}$, and finally we determine the smallest period of $g_{k,\varphi}$. The final section is devoted to the proof of Theorem 1.3.
\section{\bf Proof of Theorem 1.2}
In this section, we give the proof of Theorem 1.2. We begin with the following lemma.\\
\noindent{\bf Lemma 2.1.} {\it Let $A$ be any given totally ordered set, and $a_1, ..., a_n$ be any $n$ nonzero elements of $A$ (not necessarily different). If we can define formal multiplication and formal division for the set $A$, then we have $$\max(a_1, ..., a_n)=a_1\cdots a_n \prod_{r=2}^n\prod_ {1\le i_1<\cdots<i_r\le n}(\min(a_{i_1},\ldots,a_{i_r}))^{(-1)^{r-1}}.$$}
\begin{proof} Rearrange these $n$ elements $a_1, ..., a_n$ such that $a_{j_1}\ge \cdots\ge a_{j_n}$. For convenience, we let $b_i=a_{j_i}, \ i=1,2,\ldots,n$. Then the desired result in Lemma 2.1 becomes $$b_1=b_1\cdots b_n\prod_{r=2}^n\prod_{1\le i_1<\cdots<i_r\le n} (\min(b_{i_1},\ldots,b_{i_r}))^{(-1)^{r-1}}.\eqno (2.1)$$ To prove the result, it suffices to prove that for each $b_i$ the number of times that $b_i$ occurs on the left side of (2.1) equals the number of times that $b_i$ occurs on the right side of (2.1). We distinguish the following two cases.
{\sc Case 1.} If $b_1=b_2=\cdots =b_n$, then the number of times that $b_1$ occurs on the right side of (2.1) is $$n-{n\choose 2}+\cdots+(-1)^{n-1}{n\choose n}= -1+\sum_{r=1}^n(-1)^{r-1}{n\choose r}+1=-(1-1)^n+1=1.$$ Whereas, 1 is just the number of times that $b_1$ occurs on the left side of (2.1).
{\sc Case 2.} If there exists a positive integer $s< n$ such that $b_1=b_2=\cdots=b_s>b_{s+1}$, then the number of times $b_1$ occurs on the right side of (2.1) is: $s-{s\choose 2}+\cdots+(-1)^{s-1}{s\choose s}=1$. For any $j>s$, we can always assume that $b_{t+1},...,b_j,..., b_{t+l}$ are just the $l$ terms of the sequence $\{b_i\}_{i=1}^n$ such that $b_s>b_{t+1}=\cdots=b_j=\cdots=b_{t+l}$ for some $t\ge s$. Thus, the number of times that $b_j$ occurs on the right side of (2.1) is \begin{align*} &l-\Big({t+l\choose 2}-{t\choose 2}\Big)+\cdots+(-1)^{t-1}\Big({t+l\choose t}-{t\choose t}\Big)+(-1)^{t}{t+l\choose t+1}+\cdots+(-1)^{t+l-1}{t+l\choose t+l}\\ &=l+\sum_{r=2}^t(-1)^r{t\choose r}+\sum_{i=2}^{t+l}(-1)^{i-1}{t+l\choose i}\\ &=l+(1-1)^t+{t\choose 1}-{t\choose 0}-(1-1)^{t+l}+{t+l\choose 0}-{t+l\choose 1}=0. \end{align*} This completes the proof of Lemma 2.1. \end{proof}
In \cite{[Hu]}, Hua gave the following beautiful identity $$ {\rm lcm}(a_1, ..., a_n)=a_1\cdots a_n \prod_{r=2}^{n}\prod_{1\leq i_1<\cdots<i_{r}\leq n}({\rm gcd}(a_{i_1}, ..., a_{i_{r}}))^{(-1)^{r-1}},$$ where $a_1, ..., a_n$ are any given $n$ positive integers. In what follows, using Lemma 2.1, we generalize the above Hua's identity to the multiplicative function case.\\
\noindent{\bf Lemma 2.2.} {\it Let $f$ be a multiplicative function, and $a_1,a_2,\ldots,a_n$ be any $n$ positive integers. If $f(m)\ne 0$ for each $m\in \mathbb{N}^*$, then $$f({\rm lcm}(a_1,a_2,\ldots,a_n))=f(a_1)\cdots f(a_n)\cdot \prod_{r=2}^{n}\prod_{1\leq i_1<\cdots<i_{r}\leq n}\big(f({\gcd}(a_{i_1}, ..., a_{i_{r}}))\big)^{(-1)^{r-1}}.$$ }
\begin{proof}
Since $f$ is a multiplicative function, we have $$f({\rm lcm}(a_1,a_2,\ldots,a_n))=\prod_{p \ {\rm prime}} f(p^{\max(v_p(a_1),v_p(a_2),\ldots,v_p(a_n))})$$ and $$f(\gcd(a_{i_1},\ldots,a_{i_r}))=\prod_{p \ {\rm prime}} f(p^{\min (v_p(a_{i_1}),\ldots,v_p(a_{i_r}))}).$$ Thus it suffices to prove that \begin{align*} f(p^{\max(v_p(a_1),v_p(a_2),\ldots,v_p(a_n))})= \prod_{r=1}^{n}\prod_{1\leq i_1<\cdots<i_{r}\leq n}\big(f(p^{\min(v_p(a_{i_1}), ..., v_p(a_{i_{r}}))})\big)^{(-1)^{r-1}}\ (2.2)
\end{align*} for every prime $p$. Now we define an order $\succeq$ for the set $S=\{f(p^m): \ m\in \mathbb{N}\}$ according to the size of the power $m$ of the prime $p$. That is, $f(p^i)\succeq f(p^j)$ if $i\ge j$ and $f(p^i)\succ f(p^j)$ if $i> j$. It is easy to check that $S$ is a totally ordered set for the order $\succeq$. So the equality (2.2) follows immediately from Lemma 2.1 by letting $a_i$ be $f(p^{v_p(a_i)})$ for $1\le i\le n$ in Lemma 2.1. The proof of Lemma 2.2 is complete. \end{proof}
If ${\rm gcd}(a_{i}, a_{j})={\rm gcd}(b_{i }, b_{j})$ for any $1\leq i<j\le n$, then for any $t\ge 3$, one has ${\rm gcd}(a_{i_1},a_{i_2},\ldots,a_{i_t})={\rm gcd}(b_{i_1},b_{i_2},\ldots,b_{i_t})$ for any $1\leq i_1<\cdots<i_t\leq n$. Therefore we immediately derive the following result from Lemma 2.2.\\
\noindent{\bf Lemma 2.3.} {\it Let $a_1,a_2,\ldots,a_n$ and $b_1,b_2,\ldots,b_n$ be any $2n$ positive integers. Let $f$ be a multiplicative function with $f(n)\ne 0$ for all $n\in \mathbb{N}^*$. If ${\rm gcd}(a_{i}, a_{j})={\rm gcd}(b_{i }, b_{j})$ for any $1\leq i<j\leq n$, then we have \begin{align*} \frac{\prod_{1\le i\le n}f(a_i)}{f({\rm lcm}_{1\le i\le n}\{a_i\})} =\frac{\prod_{1\le i\le n}f(b_i)}{f({\rm lcm}_{1\le i\le n}\{b_i\})}. \end{align*}}\\
We are now in a position to show Theorem 1.2.\\ \\ {\it Proof of Theorem 1.2.} Let $n$ be a given positive integer. For any $0\leq i<j\leq k$, we have \begin{align*} {\rm gcd}(b+a(n+ic+cL_k),b+a(n+jc+cL_k))&={\rm gcd}(b+a(n+ic+cL_k),(j-i)ac)\\ &={\rm gcd}(b+a(n+ic),(j-i)ac)\\ &={\rm gcd}(b+a(n+ic),b+a(n+jc)). \end{align*} Thus by Lemma 2.3, we obtain that $g_{k,f}(n+cL_k)=g_{k,f}(n)$ for any positive integer $n$. Therefore $g_{k,f}$ is periodic and $cL_k$ is its period.
$\square$\\
Obviously, by Theorem 1.2, the arithmetic function $g_{k,\varphi}$ is periodic and $cL_k$ is its period. In the next section, we will provide detailed $p$-adic analysis to the arithmetic function $g_{k, \varphi}$ which leads us to determine the exact value of the smallest period of $g_{k, \varphi}$.
\section{\bf Local analysis of $g_{k,\varphi}$}
Throughout this section, we let $a'=a/d$ and $ b'=b/d$ with $d=\gcd(a, b)$. Then $\gcd(a',b')=1$. Let $$ S_{k,a',b',c}(n):=\{b'+a'n,b'+a'(n+c),\ldots,b'+a'(n+kc)\} $$ be the set consisting of $k+1$ consecutive jumping terms with gap $c$ in the arithmetic progression $\{b'+a'm\}_{m\in \mathbb{N}}$. For any given prime number $p$, define $g_{p,k,\varphi}$ for any $n\in \mathbb {N}^*$ by $g_{p,k,\varphi}(n):=v_{p}(g_{k,\varphi}(n))$. Let $P_{k, \varphi}$ be the smallest period of $g_{k,\varphi}$. Then $g_{p,k,\varphi}$ is a periodic function for each prime $p$ and $P_{k,\varphi}$ is a period of $g_{p,k,\varphi}$. Let $P_{p, k, \varphi}$ be the smallest period of $g_{p,k,\varphi}$. Since \begin{align*} \varphi(b+a(n+ic))&=\varphi(d(b'+a'(n+ic)))=\varphi\big(
\prod_{p|d}p^{v_p(b'+a'(n+ic))+v_p(d)}\prod_{p\nmid d}p^{v_p(b'+a'(n+ic))}\big)\\
&=\bigg(\prod_{p|d}p^{v_p(d)-1}(p-1)\bigg)\bigg
(\prod_{p|d}p^{v_p(b'+a'(n+ic))}\bigg)\bigg(\prod_{p\nmid d}\varphi\big(p^{v_p(b'+a'(n+ic))}\big)\bigg)\\
&=\varphi(d)\bigg(\prod_{p|d}p^{v_p(b'+a'(n+ic))}\bigg)\bigg(\prod_{p\nmid d}\varphi\big(p^{v_p(b'+a'(n+ic))}\big)\bigg), \end{align*} we have \begin{align*}
g_{k,\varphi}(n)&=\frac{\prod_{i=0}^k \varphi(b+a(n+ic))} {\varphi({\rm lcm}_{0\le i\le k} \{b+a(n+ic)\})}=\frac{\prod_{i=0}^k \varphi(d(b'+a'(n+ic)))} {\varphi(d\cdot {\rm lcm}_{0\le i\le k} \{b'+a'(n+ic)\})}\\
&= \frac{\prod_{i=0}^k\Big(\varphi(d)\Big(\prod_{p| d}
p^{v_p(b'+a'(n+ic))}\Big)\Big(\prod_{p\nmid d}\varphi(p^{v_p(b'+a'(n+ic))})\Big)\Big)}{\varphi(d)\Big(\prod_{p| d}p^{\max_{0\le i\le k}\{v_p(b'+a'(n+ic))\}}\Big)\Big(\prod_{p\nmid d}\varphi(p^{\max_{0\le i\le k}\{v_p(b'+a'(n+ic))\}})\Big)}\\
&=(\varphi(d))^k\frac{\prod_{i=0}^k \Big(\Big(\prod_{p| d}
p^{v_p(b'+a'(n+ic))}\Big)\Big(\prod_{p\nmid d}\varphi(p^{v_p(b'+a'(n+ic))})\Big)\Big)}{ \Big(\prod_{p| d}p^{\max_{0\le i\le k}\{v_p(b'+a'(n+ic))\}}\Big)\Big(\prod_{p\nmid d} \varphi(p^{\max_{0\le i\le k}\{v_p(b'+a'(n+ic))\}})\Big)}. \end{align*} Note that for any prime $q$, we have that for any positive integer $e$, $\varphi(q^e)=q^{e-1}(q-1)$. So when computing $p$-adic valuation of $g_{k, \varphi}(n)$, we not only need to compute $v_p(\varphi (p^{\alpha }))$ for $\alpha \ge 2$, but also need to consider $p$-adic valuation of $q-1$ for those primes $q$ with
$p|(q-1)$. By some computations, we obtain the following two equalities.
If $p\nmid d$, then \begin{align*} g_{p,k,\varphi}(n)&= \sum_{ m\in S_{k,a',b',c}(n)}\max(v_p(m)-1,0)- \max(\max_{ m\in S_{k,a',b',c}(n)}\{v_p(m)-1\},0)\\
& +\sum_{{\rm prime}\ q:\ q\nmid d, q\neq p}\max(0,\#\{m\in S_{k,a',b',c}(n): q| m\}-1)\cdot v_p(q-1)+kv_p(\varphi(d))\\
&= kv_p(\varphi(d))+\sum_{e\ge 2}{\max} (0, \#\{m\in S_{k,a',b',c}(n):p^e| m\}-1)\\
& \ \ +\sum_{{\rm prime} \ q:\ q\nmid d, p|(q-1)}\max(0,\#\{m\in S_{k,a',b',c}(n): q| m\}-1)\cdot v_p(q-1). \ \ \ \ \ \ \
\ (3.1) \end{align*}
If $p| d$, then \begin{align*} g_{p,k,\varphi}(n)&= kv_p(\varphi(d))+\sum_{i=0}^kv_p(b'+a'(n+ic))-\max_{0\le i\le k}\{v_p(b'+a'(n+ic))\}\\
& \ \ +\sum_{{\rm prime}\ q:\ q\nmid d, q\neq p}\max(0,\#\{m\in S_{k,a',b',c}(n): q| m\}-1)\cdot v_p(q-1)\\
&=kv_p(\varphi(d))+\sum_{e\ge 1}{\max} (0, \#\{m\in S_{k,a',b',c}(n):p^e| m\}-1)\\
& \ \ +\sum_{{\rm prime} \ q:\ q\nmid d, p|(q-1)}\max(0,\#\{m\in S_{k,a',b',c}(n): q| m\}-1)\cdot v_p(q-1). \ \ \ \ \ \
\ (3.2) \end{align*}
In order to analyze the function $g_{p,k,\varphi}$ in detail, we need the following results.\\
\noindent{\bf Lemma 3.1.} {\it Let $e$ and $m$ be positive integers. If $p\nmid a'$, then any $p^e$ consecutive terms in the arithmetic progression $\{b'+a'(m+ic)\}_{i\in \mathbb{N}}$ are pairwise incongruent modulo $p^{v_p(c)+e}$. In particular, there is at most one term divisible by $p^e$ in $S_{k,a',b',c}(n)$ for $e>v_p(cL_k)$.} \begin{proof} Suppose that there are two integers $i,j$ such that $0<j-i\le p^e-1$ and $b'+(m+ic)a'\equiv b'+(m+jc)a' \pmod {p^{v_p(c)+e}}$. Then $p^e\mid (j-i)a'$. Since ${\rm gcd}(p,a')=1$, we have $p^{e}\mid (j-i)$. This is a contradiction. \end{proof}
\noindent{\bf Lemma 3.2.} {\it Let $F$ be a positive rational-valued arithmetic function. For any prime $p$, define $F_p$ by $F_p(n):=v_p(F(n))$ for $n\in {\mathbb N}^*$. Then $F$ is periodic if and only if $F_p$ is periodic for each prime $p$ and ${\rm lcm}_{{\rm prime} \ p}\{T_{p,F}\}$ is finite, where $T_{p,F}$ is the smallest period of $F_p$. Furthermore, if $F$ is periodic, then the smallest period $T_F$ of $F$ is equal to ${\rm lcm}_{{\rm prime} \ p}\{T_{p,F}\}$.} \begin{proof}
$\Rightarrow)$ Since $F$ is periodic and $T_F$ is its smallest period, we have $F(n+T_F)=F(n)$ for any $n\in \mathbb{N}^*$, and hence $F_p(n+T_F)=v_{p}(F(n+T_F))=v_{p}(F(n))=F_p(n)$. In other words, $F_p$ is periodic and $T_F$ is a period of $F_p$ for every prime $p$. So we have ${\rm lcm}_{{\rm prime} \ p}\{T_{p,F}\}| T_{F}$ and ${\rm lcm}_{{\rm prime} \ p}\{T_{p,F}\}$ is finite.
$\Leftarrow)$ For any $n\in \mathbb{N}^*$, we have $v_{q}(F(n+{\rm lcm}_{{\rm prime} \ p \ }\{T_{p,F}\}))=v_{q}(F(n))$ for each prime $q$. Thus $F(n+{\rm lcm}_{{\rm prime} \ p}\{T_{p,F}\})=F(n)$ for any $n\in \mathbb{N}^*$. So $F$ is periodic and ${\rm lcm}_{{\rm prime} \ p}\{T_{p,F}\}$ is a period of it. Hence $T_{F}$ divides ${\rm lcm}_{{\rm prime} \ p}\{T_{p,F}\}$.
From the above discussion, we immediately derive that $T_{F}={\rm lcm}_{{\rm prime} \ p}\{T_{p,F}\}$ if $F$ is periodic. \end{proof}
For any prime $p\ge cL_k+1$, we have by Lemma 3.1 that there is at most one term divisible by $p$ in $S_{k,a',b',c}(n)$ and there is at most one element divisible by the prime $q$ satisfying $p| (q-1)$ in $S_{k,a',b',c}(n)$. Thus for any prime $p\ge cL_k+1$, we can get from (3.1) and (3.2) that $g_{p,k,\varphi}(n)=kv_p(\varphi(d))$ for every positive integer $n$. Namely, we have $P_{p,k,\varphi}=1$ for each prime $p$ such that $p\ge cL_k+1$. Thus by Lemma 3.2, we immediately have the following.\\
\noindent{\bf Lemma 3.3.} {\it We have} $$ P_{k,\varphi}={\rm lcm}_{{\rm prime} \ p\le cL_k} \{P_{p,k,\varphi}\}. $$\\
In what follows it is enough to compute $P_{p,k,\varphi}$ for every prime $p$ with $p\le cL_k$. First we need to simplify $g_{p,k,\varphi}$ for $p\le cL_k$. For any prime $q$ satisfying
$q\nmid cL_k$, we obtain by Lemma 3.1 that there is at most one term divisible by $q$ in $S_{k,a',b',c}(n)$. On the other hand, for any prime $q$ satisfying $q| a'$, we have $\gcd (q,b')=1$ since ${\rm gcd}(a',b')=1$. Thus for $0\leq i\leq k$, we have that $\gcd(q, b'+a'(n+ic))=1$ for all $n\in \mathbb{N}^*$. So there is no term divisible by any prime factor $q$ of $a'$ in $S_{k,a',b',c}(n)$. Thus from (3.1) and (3.2), we derive the following equality:\\ $$g_{p,k,\varphi}(n)=kv_p(\varphi(d))+\sum_{e=1}^{v_p(cL_k)}f_{e}(n)+
\sum_{{\rm prime} \ q: \ q| cL_k \atop p|(q-1), \ q\nmid a}h_q(n), \eqno (3.3) $$ where \begin{align*} f_{e}(n):={\left\{
\begin{array}{rl} 0, \quad&\text{if} \ p\nmid d\ \text{and}\ e= 1,\\
\max (0, \#\{m\in S_{k,a',b',c}(n):p^e| m\}-1), \quad&\text{otherwise}
\end{array} \right.} \end{align*}
and $$ h_q(n):=\max(0,\#\{m\in S_{k,a',b',c}(n): q| m\}-1)\cdot v_p(q-1).$$ For any positive integer $n$, it is easy to check that $f_{e}(n+p^{v_p(cL_k)})=f_{e}(n)$ for each $1\le e\le v_p(cL_k)$ and
$h_q(n+q)=h_q(n)$ for each prime $q$ such that $q\nmid a, \ q|
cL_k$ and $p| (q-1)$. Consequently, we obtain that
$p^{v_p(cL_k)}\prod_{{\rm prime}\ q:\ q| cL_k \atop q\nmid a, \ p| (q-1)}q$ is a period of the function $g_{p,k,\varphi}$. To get the smallest period of $g_{p,k,\varphi}$ for each prime $p\le cL_k$, we need to make more detailed $p$-adic analysis about $g_{p,k,\varphi}$. We divide it into the following four cases.\\
\noindent{\bf Lemma 3.4.} {\it Let $p$ be a prime such that $p\le cL_k$ and $p\nmid cL_k$. Then \begin{align*}
P_{p,k,\varphi}= \prod _{{\rm prime} \ q :\ q| c \atop q\nmid a, \
p| (q-1)} q. \end{align*}
} \begin{proof} Since $p\le cL_k$ and $p\nmid cL_k$, we have $k<p\le cL_k$ and $v_p(cL_k)=0$. Hence we have that
$g_{p,k,\varphi}(n)=kv_p(\varphi(d))+\sum_{{\rm prime}\ q: \ q| cL_k
\atop p| (q-1), \ q\nmid a}h_q(n)$ by (3.3). If there is no prime
$q$ satisfying $q| cL_k$, $p| (q-1)$ and $q\nmid a$, then we have
$g_{p,k,\varphi}(n)=kv_p(\varphi(d))$ for any positive integer $n$, and hence $P_{p,k,\varphi}=1$ for such primes $p$. If there is a prime $q$ satisfying $q| cL_k$, $p| (q-1)$ and $q\nmid a$, then we must have $q| c$, since such $q$ must satisfy $q\ge p+1>k$. So by the argument before Lemma 3.4, we have that $A:=\prod_{ \ {\rm prime} \
q : \ q| c\atop q\nmid a, \ p| (q-1)}q$ is a period of $g_{p,k,\varphi}$.
Now it remains to prove that $A$ is the smallest period of $g_{p,k,\varphi}$. For any prime factor $q$ of $A$, we can choose a positive integer $n_0$ such that $v_{q}(b'+a'n_0)\ge 1$
because $q\nmid a$. Since $q>k$ and $q| c$, we have that $v_q(b'+a'(n_0+ic))\ge 1$ and $v_q(b'+a'(n_0+ic+A/q))=v_q(a'A/q)=0$ for each $0\le i\le k$. Hence there is no term divisible by $q$ in $S_{k,a',b',c}(n_0+A/q)$. Thus $h_q(n_0)=kv_p(q-1)\ge k>0=h_q(n_0+A/q)$. On the other hand, $h_{q'}(n_0)=h_{q'}(n_0+A/q)$ for any other prime factors $q'\ne q$ of $A$. It then follows that $g_{p,k,\varphi}(n_0)\ne g_{p,k,\varphi}(n_0+A/q)$. Therefore $A$ is the smallest period of $g_{p,k,\varphi}$. This completes the proof of Lemma 3.4. \end{proof}
\noindent{\bf Lemma 3.5.} {\it Let $p$ be a prime such that $p|
cL_k$ and $p|a'$. Then \begin{align*}
P_{p,k,\varphi}=\bigg(\prod_{{\rm prime} \ q:\ q\nmid ac, \ q| L_k
\atop p| (q-1), \ k+1\not\equiv 0\pmod {q}} \ q
\bigg)\bigg(\prod_{{\rm prime}\ q:\ q\nmid a,\atop q| c,\
p|(q-1)}q\bigg). \end{align*} } \begin{proof} For convenience, we let $A$ denote the number on the right side of the equality in Lemma 3.5. First, we prove that $A$ is a period of $g_{p,k,\varphi}$. Since there is no term divisible by $p$ in
$S_{k,a',b',c}(n)$ if $p| a'$, we have that $f_e(n)=0$ for any positive integer $n$ and for each $1\le e\le v_p(cL_k)$. Hence we have $g_{p,k,\varphi}(n)=kv_p(\varphi(d))+\sum_{{\rm prime}\ q:\ q|
cL_k \atop p| (q-1), \ q\nmid a}h_q(n)$. If there is no prime $q$
such that $q| cL_k$, $p| (q-1)$ and $q\nmid a$, then
$g_{p,k,\varphi}(n)=kv_p(\varphi(d))$ for any positive integer $n$, and hence $P_{p,k,\varphi}=1$ for such primes $p$. If $q\nmid ac, q|L_k$ and $k+1\equiv0\pmod{q}$, then any $q$ consecutive terms in the arithmetic progression $\{b'+a'(m+ic)\}_{i\in \mathbb{N}}$ are pairwise incongruent modulo $q$ by Lemma 3.1. Therefore, we have $h_q(n)=h_q(n+1)=v_p(q-1)(\frac{k+1}{q}-1)$ for any positive integer $n$. Namely, 1 is a period of $h_q$ for such primes $q$. Since $q$
is a period of $h_q$ for any other primes $q$ such that $q| cL_k, q\nmid a$ and $ p| (q-1)$, we have that $A$ is a period of the function $g_{p,k,\varphi}$. To prove that $A$ is the smallest period of $g_{p,k,\varphi}$, it is enough to show that $A/q$ is not the period of $g_{p,k,\varphi}$ for every prime factor $q$ of $A$. We divide the prime factors of $A$ into the following two cases.
{\sc Case 1.} $q$ is a prime factor of $A$ such that $q\nmid ac$,
$q| L_k$, $p| (q-1)$ and $k+1\not\equiv0\pmod{q}$. To prove $A/q$ is not the period of $g_{p,k,\varphi}$, it suffices to prove that $A/q$ is not the period of the function $h_q$ since $A/q$
is a period of $h_{q'}$ for any other primes $q'\ne q$ such that $q'|
cL_k, p| (q'-1)$ and $q'\nmid a$. Since $\gcd(A/q, q)=1$, there exists a positive integer $r_0$ such that $r_0A/q\equiv 1\pmod{q}$. We pick a positive integer $n_0$ so that $v_{q}(b'+a'n_0)\ge 1$ since $q\nmid a$. So we have that the terms divisible by $q$ in the arithmetic progression $\{b'+a'(n_0+ic)\}_{i\in \mathbb{N}}$ must be of the form $b'+a'(n_0+tcq)$ for some $t\in \mathbb{N}$, and there are at least two terms divisible by $q$ in $S_{k,a',b',c}(n_0)$
since $q|L_k$. Comparing $S_{k,a',b',c}(n_0)$ with $S_{k,a',b',c}(n_0+r_0A/q)$, we obtain that $b'+a'(n_0+j)\equiv b'+a'(n_0+r_0A/q+j-1) \pmod{q} \ {\rm for \ each} \ 1\le j\le k$ while $b'+a'(n_0+r_0A/q+k)\equiv b'+a'(n_0+k+1)\not\equiv b'+a'n_0\equiv 0\pmod{q}$. Thus the number of terms divisible by $q$ in $S_{k,a',b',c}(n_0+r_0A/q)$ equals the number of terms divisible by $q$ in $S_{k,a',b',c}(n_0)$ minus one, which means that $h_q(n_0+r_0A/q)= h_q(n_0)-v_p(q-1)$. Therefore, $A/q$ is not the period of $g_{p,k,\varphi}$ in this case.
{\sc Case 2.} $q$ is a prime factor of $A$ satisfying $q\nmid a$,
$q| c$, and $p| (q-1)$. As above, we select two positive integers $r_0$ and $n_0$ such that $r_0A/q\equiv1\pmod{q}$ and $v_{q}(b'+a'n_0)\ge 1$. So we obtain that $v_q(b'+a'(n_0+ic))\ge 1$ and $v_q(b'+a'(n_0+r_0A/q+ic))=v_q(a'r_0A/q)=0$ for each $0\le i\le k$. In other words, all the $k+1$ terms are divisible by $q$ in $S_{k,a',b',c}(n_0)$ while no term is divisible by $q$ in $S_{k,a',b',c}(n_0+r_0A/q)$. Thus $h_q(n_0)=kv_p(q-1)\ge k>0=h_q(n_0+r_0A/q)$. It follows immediately that $A/q$ is not the period of $g_{p,k,\varphi}$ in this case.
So $A$ is the smallest period of $g_{p,k,\varphi}$. The proof of Lemma 3.5 is complete. \end{proof}
\noindent{\bf Lemma 3.6.} {\it Let $p$ be a prime such that $p| cL_k$, $p\nmid a'$ and $p\nmid d$. Then \begin{align*}
P_{p,k,\varphi}= p^{e(p,k)}\bigg(\prod_{{\rm prime} \ q:\ q\nmid ac, \ q| L_k \atop p| (q-1), \ k+1\not\equiv 0\pmod {q}} \ q
\bigg)\bigg( \prod_{{\rm prime}\ q: \ q\nmid a,\atop q| c, \ p| (q-1)}q\bigg), \end{align*} where \begin{align*} e(p,k):={\left\{
\begin{array}{rl} 0, \quad&\text{if} \ v_p(cL_k)=1,\\ v_p(c), \quad&\text{if} \ v_{p}(k+1)\geq v_p(L_k) \ \text{and} \ v_p(cL_k)\ge 2,\\ v_p(cL_k), \quad&\text{if} \ v_p(k+1)< v_p(L_k) \ \text{and} \ v_p(cL_k)\ge 2.
\end{array} \right.} \end{align*} } \begin{proof} From (3.3), we get that $$g_{p,k,\varphi}(n)=kv_p(\varphi(d))+\sum_{e= 2}^{v_p(cL_k)}f_e(n)+
\sum_{{\rm prime} \ q: \ q| cL_k \atop p| (q-1), \ q\nmid a}h_q(n).
\eqno (3.4)$$ Let $A$ denote the number $p^{e(p,k)}\big(\prod_{{\rm prime} \ q:\ q\nmid ac, \ q| L_k \atop p| (q-1), \ k+1\not\equiv 0\pmod {q}} q\big)\big(\prod_{{\rm prime}\ q: \ q\nmid a\atop q| c, \ p| (q-1)}q\big)$. We distinguish the following three cases.
{\sc Case 1.} $v_p(cL_k)= 1$. Since $v_p(cL_k)= 1$, we have $$
g_{p,k,\varphi}(n)=kv_p(\varphi(d))+\sum_{{\rm prime}\ q: \ q| cL_k
\atop p| (q-1), \ q\nmid a}h_q(n) $$ for every positive integer $n$ by (3.4). The process of proving that $A$ is the smallest period of $g_{p,k,\varphi}$ is the same as in the proof of Lemma 3.5; one can easily check it.
{\sc Case 2.} $v_p(k+1)\ge v_p(L_k)$ and $v_p(cL_k)\ge 2$. We consider the following two subcases.
{\sc Subcase 2.1.} $p>k$, $v_p(k+1)\ge v_p(L_k)$ and $ v_p(cL_k)\ge 2$. In this case, we have $v_p(cL_k)=v_p(c)\ge 2$. So we obtain that $p^{v_p(c)}$ is a period of $f_e$ for each $2\le e\le v_p(cL_k)=v_p(c)$. By the same method as in the proof of Lemma 3.5, we can derive that $A/p^{v_p(c)}$ is a period of $\sum_{{\rm prime}
\ q:\ q| cL_k \atop p| (q-1), \ q\nmid a}h_q(n)$, and hence by (3.4) $A$ is a period of $g_{p,k,\varphi}$. Now it suffices to prove that $A/P$ is not the period of $g_{p,k,\varphi}$ for any prime factor $P$ of $A$.
For the prime $p$, we have by (3.4) that $A/p$ is a period of $f_e$
for each $2\le e\le v_p(c)-1$ and is also a period of $h_q$ for each prime $q$ such that $q\nmid a, \ q| cL_k$ and $ p| (q-1)$. So it is enough to prove that $A/p$ is not the period of $f_{v_p(c)}$.
Since $p\nmid a'$, we can choose a positive integer $n_0$ such that
$v_p(b'+a'n_0)=v_p(c).$ It is easy to see that $p^{v_p(c)}| b'+a'(n_0+ic)$ and $p^{v_p(c)}\nmid b'+a'(n_0+A/p+ic)$ for each $0\le i\le k$. Thus comparing the two sets $S_{k,a',b',c}(n_0)=\{ b'+a'(n_{0}+ic)\}_{0\le i\le k}$ and $S_{k,a',b',c}(n_0+A/p)=\{b'+a'(n_0+A/p+ic)\}_{0\le i\le k}$, we obtain that
$$f_{v_p(c)}(n_0)= \max (0, \#\{m\in S_{k,a',b',c}(n_0):p^{v_p(c)}| m\}-1)=k,$$ \begin{align*}
f_{v_p(c)}(n_0+A/p)&= \max (0, \#\{m\in S_{k,a',b',c}(n_0+A/p):p^{v_p(c)}| m\}-1)=0. \end{align*} Therefore, $A/p$ is not the period of $g_{p,k,\varphi}$.
For any prime factor $q$ of $A$ such that $q| cL_k$, $p| (q-1)$ and $q\nmid a$, it is easy to see that $A/q$ is a period of $\sum_{e=
2}^{v_p(cL_k)}f_e(n)$ and $ h_{q'}(n)$ for each prime factor $q'\ne q$ of $A$ satisfying $ q'|cL_k, p|(q'-1)$ and $q'\nmid a$. Similarly to the proof of Lemma 3.5, we can deduce that $A/q$ is not the period of $h_q$, and hence $A/q$ is not the period of $g_{p,k,\varphi}$. Therefore, $A$ is the smallest period of $g_{p,k,\varphi}$ in this subcase.
{\sc Subcase 2.2.} $p\le k$, $v_p(k+1)\ge v_p(L_k)$ and $ v_p(cL_k)\ge 2$. To prove that $A$ is a period of $g_{p,k,\varphi}$, it suffices to prove that $p^{v_p(c)}$ is a period of $f_e$ for each $1\le e\le v_p(cL_k)$ by the argument in Subcase 2.1 of this proof. For any given positive integer $n$, comparing the two sets $S_{k,a',b',c}(n)=\{ b'+a'(n+ic)\}_{0\le i\le k}$ and $S_{k,a',b',c}(n+c)=\{ b'+a'(n+ic)\}_{1\le i\le k+1}$, we find that their distinct terms are $b'+a'n$ and $b'+a'(n+(k+1)c)$. From $v_p(k+1)\ge v_p(L_k)$ we deduce that $$b'+a'n\equiv b'+a'(n+(k+1)c)\pmod {p^{v_p(cL_k)}}.$$ Therefore we obtain that $f_e(n)=f_e(n+c)$ for each $e\in \{2,\ldots, v_p(cL_k)\}$. Since $\gcd(c/p^{v_p(c)},p)=1$, we can always find two integers $t,t_1$ such that $tc/p^{v_p(c)}=t_1{p^{v_p(L_k)}}+1$. Note that $p^{v_p(cL_k)}$ is a period of $f_e$ for each $2\le e\le v_p(cL_k)$. Therefore, we have $f_e(n+p^{v_p(c)})=f_e(n +p^{v_p(c)}+t_1p^{v_p(cL_k)})=f_e(n+tcp^{v_p(c)}/p^{v_p(c)})=f_e(n+tc)=f_e(n)$ for each positive integer $n$ and each $2\le e\le v_p(cL_k)$.
Thus $A$ is a period of $g_{p,k,\varphi}$ as required.
Now we only need to prove that $A/P$ is not the period of $g_{p,k,\varphi}$ for any prime factor $P$ of $A$. For any prime factor $q$ of $A$ such that
$q| cL_k$, $p| (q-1)$ and $q\nmid a$, the proof is similar to Subcase 2.1. If $v_p(c)=0$, then $p$ is not a prime factor of $A$, and the proof of this case is complete. In the following, we need to prove that $A/p$ is not the period of $g_{p,k,\varphi}$ if
$v_p(c)\ge 1$. Since $A/p$ is a period of $h_q$ for each $q$ such that $q\nmid a, \ q| cL_k$ and $ p| (q-1)$, it is enough to prove that $A/p$ is not the period of the function $\sum_{e=2}^{v_p(cL_k)}f_{e}(n)$.
If $v_p(c)\ge 2$, we choose $n_0\in \mathbb{N}^*$ such that $v_p(b'+a'n_0)=v_p(c)$. Comparing $S_{k,a',b',c}(n_0)=\{ b'+a'(n_{0}+ic)\}_{0\le i\le k}$ with $ S_{k,a',b',c}(n_0+A/p)=\{b'+a'(n_0+A/p+ic)\}_{0\le i\le k}$, we obtain that each term of $S_{k,a',b',c}(n_0)$ is divisible by $p^{v_p(c)}$, while there is no term divisible by $p^{v_p(c)}$ in $S_{k,a',b',c}(n_0+A/p)$ since $v_p(b'+a'(n_0+A/p+ic))=\min\big( v_p(b'+a'(n_0+ic)), v_p(a'A/p)\big)=v_p(c)-1$ for each $0\le i\le k$. So
$$\sum_{e=2}^{v_p(cL_k)}f_{e}(n_0)\ge k(v_p(c)-1)>k(v_p(c)-2) =\sum_{e=2}^{v_p(cL_k)}f_{e}(n_0+A/p).$$
If $v_p(c)=1$, then $v_p(A/p)=0$. Choosing $n_0\in \mathbb{N}^*$ such that $v_p(b'+a'n_0)=v_p(cL_k)$, we have that there are at least two terms $b'+a'n_0$ and $b'+a'(n_0+p^{v_p(L_k)}c)$ divisible by $p^{v_p(cL_k)}$ in $S_{k,a',b',c}(n_0)$ but no term is divisible by $p$ in $S_{k,a',b',c}(n_0+A/p)$ since $v_p(b'+a'(n_0+A/p+ic))=0$ for all $0\le i\le k$. Therefore, we have $$\sum_{e=2}^{v_p(cL_k)}f_{e}(n_0)\ge v_p(cL_k)-1>0 =\sum_{e=2}^{v_p(cL_k)}f_{e}(n_0+A/p).$$ Thus $A/p$ is not the period of $g_{p,k,\varphi}$ in this case.
{\sc Case 3.} $p\le k$, $v_p(k+1)< v_p(L_k)$ and $v_p(cL_k)\ge 2$. By the discussion before Lemma 3.4, it is easy to get that $A$ is a period of $g_{p,k,\varphi}$. As above, it suffices to prove that $A/P$ is not the period of $g_{p,k,\varphi}$ for any prime factor $P$ of $A$ in the following. By a similar argument as in Subcase 2.1, we now only need to show that $A/p$ is not the period of $f_{v_p(cL_k)}$. Since $v_p(A/p)=v_p(cL_k)-1$, we can select a positive integer $r_0$ such that $r_0A/p\equiv p^{v_p(cL_k)-1}\pmod{p^{v_p(cL_k)}}$. In the following, we prove that $p^{v_p(cL_k)-1}$ is not the period of $f_{v_p(cL_k)}$, from which we can deduce that $A/p$ is not the period of $g_{p,k,\varphi}$. Since $v_{p}(k+1)<v_p(L_k)$, we can always suppose that $k+1\equiv r\pmod {p^{v_p(L_k)}} \ {\rm for \ some}\ 1\leq r\leq p^{v_p(L_k)}-1.$ We distinguish the following two subcases.
{\sc Subcase 3.1.} $1\le r\le p^{v_p(L_k)}-p^{v_p(L_k)-1}$. Choose a positive integer $n_0$ such that $v_p(b'+a'n_0)\ge v_p(cL_k)$. Compare the number of terms divisible by $p^{v_p(cL_k)}$ in the two sets $S_{k,a',b',c}(n_0)=\{b'+a'(n_0+ic)\}_{0\le i\le k}$ and $ S_{k,a',b',c}(n_0+p^{v_p(L_k)-1}c)=\{b'+a'(n_0+(p^{v_p(L_k)-1}+i)c)\}_{0\le i \le k}$. Since $\{b'+a'(n_0+p^{v_p(L_k)-1}c),\ldots, b'+a'(n_0+kc)\}$ is the intersection of $S_{k,a',b',c}(n_0)$ and $S_{k,a',b',c}(n_0+p^{v_p(L_k)-1}c)$, it suffices to compare the set $\{b'+a'n_0,\ldots,b'+a'(n_0+(p^{v_p(L_k)-1}-1)c)\}$ with the set $\{b'+a'(n_{0}+(k+1)c),\ldots,b'+a'(n_0+(k+p^{v_p(L_k)-1})c)\}$. By Lemma 3.1, we know that the terms divisible by $p^{v_p(cL_k)}$ in the arithmetic progression $\{b'+a'(n_0+ic)\}_{i\in \mathbb{N}}$ are of the form $b'+a'(n_0+tp^{v_p(L_k)}c), \ t\in \mathbb{N}$. Since $k+1\equiv r\pmod {p^{v_p(L_k)}}$ and $1\leq r\leq p^{v_p(L_k)}-p^{v_p(L_k)-1}$, we have $k+j\equiv r+j-1\not\equiv 0 \pmod {p^{v_p(L_k)}}$ for all $1\leq j\leq p^{v_p(L_k)-1}$. Hence $p^{v_p(cL_k)}\nmid (b'+a'(n_0+(k+j)c))$ for all $1\leq j\leq p^{v_p(L_k)-1}$. Whereas, $b'+a'n_0$ is the only term in the set $\{b'+a'n_0,b'+a'(n_{0}+c),\ldots,b'+a'(n_{0}+(p^{v_p(L_k)-1}-1)c)\}$ which is divisible by $p^{v_p(cL_k)}$. Therefore we have $$ f_{v_p(cL_k)}(n_0+p^{v_p(L_k)-1}c)=f_{v_p(cL_k)}(n_0)-1.$$
{\sc Subcase 3.2.} $p^{v_p(L_k)}-p^{v_p(L_k)-1}<r\leq p^{v_p(L_k)}-1$. Pick a positive integer $n_0$ such that $v_p(b'+a'(n_0+(p^{v_p(L_k)-1}-1)c))\ge v_p(cL_k)$. Then the terms divisible by $p^{v_p(cL_k)}$ in the arithmetic progression $\{b'+a'(n_0+ic)\}_{i\in \mathbb{N}}$ should be of the form $b'+a'(n_0+(p^{v_p(L_k)-1}-1+tp^{v_p(L_k)})c)$, where $t\in \mathbb{N}$. As in the discussion of Subcase 3.1, it is sufficient to compare $\{b'+a'n_0,\ldots,b'+a'(n_0+(p^{v_p(L_k)-1}-1)c)\}$ with $\{b'+a'(n_0+(k+1)c),\ldots,b'+a'(n_0+(k+p^{v_p(L_k)-1})c)\}$. By comparison, we obtain that $p^{v_p(cL_k)}\nmid (b'+a'(n_0+(k+j)c))$ for all $1\leq j\leq p^{v_p(L_k)-1}$, while the term $b'+a'(n_0+(p^{v_p(L_k)-1}-1)c)$ is the only term divisible by $p^{v_p(cL_k)}$ in the set $\{b'+a'n_0,\ldots,b'+a'(n_0+(p^{v_p(L_k)-1}-1)c)\}$. Hence we have $f_{v_p(cL_k)}(n_0+p^{v_p(L_k)-1}c)=f_{v_p(cL_k)}(n_0)-1$. From the argument in the above two subcases, we deduce that $p^{v_p(L_k)-1}c$ is not the period of $f_{v_p(cL_k)}$, which shows that $A/p$ is not the period of $g_{p,k,\varphi}$ in Case 3.
Thus $A$ is the smallest period of $g_{p,k,\varphi}$ as desired. This completes the proof of Lemma 3.6. \end{proof}
\noindent{\bf Lemma 3.7.} {\it Let $p$ be a prime such that $p|
cL_k$, $p\nmid a'$ and $p| d$. Then \begin{align*}
P_{p,k,\varphi}= p^{e(p,k)}\bigg(\prod_{{\rm prime} \ q:\ q\nmid ac, \ q| L_k\atop p| (q-1), \ k+1\not\equiv 0\pmod {q}} \
q\bigg)\bigg(\prod_{{\rm prime} \ q:\ q\nmid a,\atop q| c, \ p| (q-1)}q\bigg), \end{align*}
where \begin{align*} e(p,k):={\left\{
\begin{array}{rl} v_p(c), \quad&\text{if} \ v_{p}(k+1)\geq v_p(L_k),\\ v_p(cL_k), \quad&\text{if} \ v_p(k+1)< v_p(L_k).
\end{array} \right.} \end{align*} }
\begin{proof} Similarly to the proof of Lemma 3.6, it is enough to show that $p^{e(p,k)}$ is the smallest period of $\sum_{e=1}^{v_p(cL_k)}f_e(n)$ by (3.3). We divide the proof into the following two cases.
{\sc Case 1.} $v_p(k+1)\ge v_p(L_k)$. As in the proof of Subcase 2.2
in Lemma 3.6, since $b'+a'n\equiv b'+a'(n+(k+1)c)\pmod{p^{v_p(cL_k)}}$, we can obtain that $p^{v_p(c)}$ is a period of $\sum_{e=1}^{v_p(cL_k)}f_e(n)$. If $v_p(c)=0$, the proof is complete. If $v_p(c)\ge 1$, then choosing a positive integer $n_0$ such that $v_p(b'+a'n_0)=v_p(c)$, we can show that $p^{v_p(c)-1}$ is not the period of $\sum_{e=1}^{v_p(cL_k)}f_e(n)$ using a similar method as in the proof of Case 2 in Lemma 3.6.
{\sc Case 2.} $v_p(k+1)< v_p(L_k)$. Using the same way as the proof of Case 3 in Lemma 3.6, one can easily check that $p^{v_p(cL_k)-1}$ is not the period of $\sum_{e=1}^{v_p(cL_k)}f_e(n)$. The proof of Lemma 3.7 is complete. \end{proof}
\section{\bf Proof of Theorem 1.3 and examples}
In this section, we first use the results presented in the previous section to show Theorem 1.3. \\ \\ {\it Proof of Theorem 1.3.} By Theorem 1.2, we know that $g_{k,
\varphi}$ is periodic and $P_{k, \varphi}|cL_k$. To determine the exact value of $P_{k, \varphi}$, it is sufficient to determine the $p$-adic valuation of $P_{k, \varphi}$ for each prime $p$. By Lemma 3.3, we have $P_{k, \varphi}={\rm lcm}_{{\rm prime}\ p\le cL_k} \{P_{p, k,\varphi}\}$. So it is enough to compute $\max_{{\rm prime}\ q\le cL_k} \{v_p(P_{q, k, \varphi})\}$ for each prime $p$. We consider the following four cases.
{\sc Case 1.} $p\nmid cL_k$. Since $P_{k, \varphi}|cL_k$, it is clear that $v_p(P_{k, \varphi})=v_p(cL_k)=0$.
{\sc Case 2.} $p| cL_k$ and $p|a'$. Observe from Lemmas 3.4-3.7 that $v_p(P_{q, k, \varphi})=0$ for each prime $q\le cL_k$. So we have $v_p(P_{k, \varphi})=0$.
{\sc Case 3.} $p=2$. From the discussion in Case 1 and Case 2, we know that $v_2(P_{k,\varphi})=0$ if $2|cL_k$ and $2| a'$ or if
$2\nmid cL_k$. It remains to consider the case $2|cL_k$ and $2\nmid a'$. By Lemmas 3.4-3.7, we know that $v_2(P_{p, k, \varphi})=0$ for all odd primes $p$. So we only need to compute $v_2(P_{2,k,\varphi})$. We now distinguish the following four subcases.
{\sc Subcase 3.1.} $2\nmid a$ and $v_2(cL_k)=1$. In this case, by Lemma 3.6, one has $v_2(P_{k, \varphi})=0=v_2(cL_k)-1$.
{\sc Subcase 3.2.} $2\nmid a$, $v_2(cL_k)\ge 2$ and $v_2(k+1)\ge v_2(L_k)=1$, or $2\nmid a', 2|d$ and $v_2(k+1)\ge v_2(L_k)=1$. Since $v_2(k+1)\ge v_2(L_k)$ and $v_2(L_k)=1$, we get $k=3$. Thus by Lemmas 3.6 and 3.7, we have that if $k=3$, $2\nmid a$ and $v_2(c)=v_2(cL_k)-v_2(L_k)\ge 2-1=1$, or if $k=3$, $2\nmid a'$ and
$2|d$, then $v_2(P_{k, \varphi})=v_2(c)=v_2(cL_k)-1$.
{\sc Subcase 3.3.} $v_2(k+1)\ge v_2(L_k)\ge 2$. Using Lemmas 3.6 and 3.7, we obtain that $v_2(P_{k, \varphi})=v_2(c)=v_2(cL_k)-v_2(L_k)$.
{\sc Subcase 3.4.} $2\nmid a, v_2(L_k)=0$ and $v_2(cL_k)\ge 2$, or
$2\nmid a', 2|d$ and $v_2(L_k)=0$, or $2\nmid a$, $v_2(cL_k)\ge 2$
and $v_2(k+1)<v_2(L_k)$, or $2\nmid a'$, $2|d$ and
$v_2(k+1)<v_2(L_k)$. If $2\nmid a, v_2(L_k)=0$ and $v_2(cL_k)\ge 2$, or if $2\nmid a', 2|d$ and $v_2(L_k)=0$, we get $v_2(P_{2, k,\varphi})=v_2(c)=v_2(cL_k)$. So we have $v_2(P_{k, \varphi})=v_2(cL_k)$ in this case.
Combining all the above information on $v_2(P_{k,\varphi})$, we have \begin{align*} v_2(P_{k, \varphi})={\left\{ \begin{array}{rl}
0, &\text{if} \ 2| a', \\ v_2(cL_k)-v_2(L_k), &\text{if} \ 2\nmid a' \ {\rm and} \ v_2(k+1)\ge v_2(L_k)\ge 2, \\ v_2(cL_k)-1, &\text{if} \ 2\nmid a \ \text{and}\ v_2(cL_k)=1,
\text{or} \ k=3, 2\nmid a\ {\rm and} \ 2|c,\\ &\quad {\rm or} \
k=3, 2\nmid a'\ {\rm and}\ 2|d,\\ v_2(cL_k), &\text{otherwise}.
\end{array} \right.}(4.1) \end{align*}
{\sc Case 4.} $p\ne 2, p|cL_k$ and $p\nmid a'$. Note that $2|(p-1)$
for each odd prime $p$. Evidently, if $2\nmid cL_k$, then $k=1$. So there is no odd prime $p$ so that $p| L_k$ if $2\nmid cL_k$. Thus by Lemmas 3.4-3.7, for all odd prime factors $p$ of $cL_k$, we obtain that $ v_p(P_{2, k, \varphi})=1$ except that
either $p\nmid ac$, $p|L_k$ and $k+1\equiv0\pmod p$, or $p|d$, in which case $v_p(P_{2, k, \varphi})=0$. On the other hand, for all odd primes $q$ such that $q\ne p$ and $q\le cL_k$, we have by Lemmas 3.4-3.7 that $v_p(P_{q, k, \varphi})=0$
if $p|d$ or if $p\nmid ac$, $p|L_k$ and $k+1\equiv0\pmod p$, and $v_p(P_{q, k, \varphi})\le 1$ otherwise. Hence $v_p(P_{2,k,\varphi})\ge v_p(P_{q, k, \varphi})$ for all odd primes $q$ such that $q\ne p$ and $q\le cL_k$. Therefore we deduce immediately that $$ v_p(P_{k, \varphi})=\max_{{\rm prime}\ q\le cL_k} \{v_p(P_{q, k, \varphi})\}=\max( v_p(P_{2, k, \varphi}), v_p(P_{p, k, \varphi})).\eqno(4.2) $$ Using Lemmas 3.6 and 3.7 to compute $v_p(P_{p,k,\varphi})$, we get that \begin{align*} v_p(P_{p, k, \varphi})={\left\{
\begin{array}{rl} 0, &\text{if} \ v_p(cL_k)=1\ \mbox{and}\ p\nmid d,\\ v_p(c), &\text{if} \ v_{p}(k+1)\ge v_p(L_k), p\nmid d\ \mbox{and}\ v_p(cL_k)\ge 2,\\
&\ \mbox{or\ if}\ v_{p}(k+1)\ge v_p(L_k)\ \mbox{and}\ p|d,\\ v_p(cL_k), &\text{if} \ v_{p}(k+1)< v_p(L_k), p\nmid d\ \mbox{and}\ v_p(cL_k)\ge 2,\\
&\ \mbox{or\ if}\ \ v_{p}(k+1)< v_p(L_k)\ \mbox{and}\ p|d.
\end{array} \right.}\quad\quad\quad \quad\quad(4.3) \end{align*}
For all the primes $p$ such that $p\nmid d$ and $v_p(cL_k)=1$, we have by the above discussion that $v_p(P_{2, k, \varphi})=0$ only if $v_p(c)=0$, $v_p(L_k)=1$ and $k+1\equiv0\pmod p$. Equivalently, $v_p(P_{2, k, \varphi})=v_p(c)=v_p(cL_k)-v_p(L_k)$ if $v_p(k+1)\ge v_p(L_k)
\ge 1$, and $v_p(P_{2, k, \varphi})=v_p(cL_k)$ otherwise. Therefore, for all the odd primes $p$ satisfying $p|cL_k$ and $p\nmid a'$, we derive from (4.2) and (4.3) that $$ v_p(P_{k, \varphi})={\left\{
\begin{array}{rl} v_p(cL_k)-v_p(L_k), &\text{if}\ v_{p}(k+1)\geq v_p(L_k)\ge 1,\\ v_p(cL_k), &\text{otherwise}.
\end{array} \right.}\eqno(4.4) $$
Now putting all the above cases together, we get \begin{align*} P_{k, \varphi}&=2^{v_2(P_{k, \varphi})}\bigg(\prod_{{\rm prime}\ p:\
p\ne2 \atop p|a',\ p|cL_k}p^{v_p(P_{k,
\varphi})}\bigg)\bigg(\prod_{{\rm prime}\ p:\ p \ne 2 \atop p\nmid a',\ p|cL_k}p^{v_p(P_{k,\varphi})}\bigg)\\
&=\frac{cL_k}{2^{v_2(cL_k)-v_2(P_{k, \varphi})}\bigg(\prod_{{\rm prime}\ p:\ p\ne 2\atop p|a',\ p|cL_k}p^{v_p(cL_k)-v_p(P_{k, \varphi})}\bigg)\bigg(\prod_{{\rm prime}\ p:\ p\ne 2
\atop p\nmid a',\ p|cL_k}p^{v_p(cL_k)-v_p(P_{k,\varphi})}\bigg)}\\
&=\frac{cL_k}{2^{v_2(cL_k)-v_2(P_{k,\varphi})}\bigg(\prod_{{\rm prime}\ q:\ q\ne 2,\ q|a'}q^{v_q(cL_k)}\bigg)\bigg(\prod_{{\rm prime}\ q:\ q\ne 2\atop q\nmid a',\ q|cL_k}q^{v_q(cL_k)-v_q(P_{k,\varphi})}\bigg)}\\ &=\frac{cL_k}{2^{\delta_{2, k,\varphi}}\bigg(\prod_{{\rm prime}\
q|a'}q^{v_q(cL_k)}\bigg)\bigg(\prod_{{\rm prime}\ q:\ q\ne 2 \atop q\nmid a',\ q|cL_k}q^{v_q(cL_k)-v_q(P_{k,\varphi})}\bigg)}, \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ (4.5) \end{align*} where \begin{align*} \delta_{2, k,\varphi}:={\left\{ \begin{array}{rl} v_2(cL_k)-v_2(P_{k, \varphi }), &\text{\rm if} \ 2\nmid a',\\
0, &\text{\rm if} \ 2|a'.
\end{array} \right.} \end{align*} It then follows from (4.1) that \begin{align*} \delta_{2, k,\varphi}={\left\{ \begin{array}{rl} v_2(L_k), &\text{\rm if} \ 2\nmid a' \ {\rm and} \ v_2(k+1)\ge v_2(L_k)\ge 2, \\ 1, &\text{\rm if} \ 2\nmid a \ {\rm and}\ v_2(cL_k)=1, {\rm or} \
k=3, 2\nmid a\ {\rm and} \ 2|c, {\rm or} \ k=3, 2\nmid a'\ {\rm and}\ 2|d,\\ 0, &\text{\rm otherwise,}
\end{array} \right.} \end{align*} which implies that $\eta_{2,k, a', c}=2^{\delta_{2,k,\varphi}}$. Hence by (1.2) we get
$$Q_{k, a', c}=\frac{cL_k}{2^{\delta_{2, k,\varphi}}\prod_{{\rm prime}\ q|a'}q^{v_q(cL_k)}}. \eqno(4.6)$$
Since there is at most one odd prime $p\le k$ such that $v_p(k+1)\ge v_p(L_k)\ge 1$ (see \cite{[FK]}), we derive from (4.4) that \begin{align*} \prod_{{\rm prime}\ q:\ q \ne 2,\atop q\nmid a',\
q|cL_k}q^{v_q(cL_k)-v_q(P_{k,\varphi})}={\left\{
\begin{array}{rl} p^{v_p(L_k)}, &\text{if} \ v_p(k+1)\geq v_p(L_k)\ge 1 \ \mbox{for an odd prime}\ p\nmid a',\\ 1, &\text{otherwise.}
\end{array} \right.} \end{align*} Thus it follows from (4.5) and (4.6) that $P_{k,\varphi}$ is equal to $Q_{k, a', c}$ except when $v_p(k+1)\ge v_p(L_k)\ge 1$ for some odd prime $p\nmid a'$ (there is at most one such prime), in which case $P_{k, \varphi}$ equals $\frac{Q_{k, a', c}}{p^{v_p(L_k)}}$.
The proof of Theorem 1.3 is complete.
$\square$\\
Now we give some examples to illustrate Theorem 1.3.\\ \\ {\bf Example 4.1.} Let $ a\ge 1, b\ge 0$ and $c\ge 1$ be integers, and let $a':=a/\gcd(a, b)$ be odd. Let $k=2^t-1$, where $t\in \mathbb{N}$ and $t\ge 3$. Since $v_2(k+1)=t>v_2(L_k)=t-1\ge 2$, we obtain by Theorem 1.3 that $\eta_{2,k,a',c}=2^{v_2(L_k)}$. On the other hand, there is no odd prime $p$ satisfying $v_p(k+1)\ge v_p(L_k)\ge 1$. Thus we have $$ P_{k, \varphi}=\frac{cL_k}{2^{v_2(L_k)}
\prod_{{\rm prime} \ q|a'}q^{v_q(cL_k)}}. $$\\ \\ {\bf Example 4.2.} Let $ a\ge 1, b\ge 0$ and $c\ge 1$ be integers, and let $a':=a/{\rm gcd}(a, b)$. Let $p$ be any given odd prime with $p\nmid a'$, and let $k=p^{\alpha}-1$ for some integer $\alpha\ge 2$. Since $k=p^{\alpha}-1>3$ and $v_2(k+1)=v_2(p^{\alpha})=0$, we have $\eta_{2,k, a',c}=1$. The odd prime $p$ satisfies that $v_p(k+1)=\alpha>\alpha-1=v_p(L_k)\ge 1$. Hence we get by Theorem 1.3 that $$ P_{k, \varphi}=\frac{cL_k}{p^{v_p(L_k)}\prod_{{\rm prime} \
q|a'}q^{v_q(cL_k)}}. $$\\ \\ {\bf Example 4.3.} Let $ a\ge 1, b\ge 0$ and $c\ge 1$ be integers, and let $a':=a/{\rm gcd}(a, b)$. If $k$ is an integer of the form $35^{\alpha}-1$ with $\alpha\ge 2$ and $\alpha\in \mathbb{N}$, then $$
P_{k, \varphi}=\frac{cL_k}{\prod_{{\rm prime} \ q|a'}q^{v_q(cL_k)}}. \eqno(4.7) $$ Actually, since $35^{\alpha}-1>3$ and $v_2(35^{\alpha})=0<v_2(L_k)$, we obtain by Theorem 1.3 that $\eta_{2, k,a',c}=1$. On the other hand, we have that $v_5(k+1)=\alpha<v_5(L_k)$, $v_7(k+1)=\alpha<v_7(L_k)$ and $v_q(k+1)=0$ for any other odd prime $q$. Hence we get $P_{k,\varphi}$ as in (4.7).
Furthermore, if $a|b$ or $a'$ is a prime greater than $cL_k$, then there is no prime factor of $a'$ dividing $cL_k$. Therefore for any $k=35^{\alpha}-1$ with $\alpha\ge 2$ and $\alpha\in \mathbb{N}$, one has $P_{k, \varphi}=cL_k$.\\
Finally, by Theorem 1.3, we only need to compute the first $P_{k, \varphi}$ values of $g_{k,\varphi}$ so that we can estimate the difference between $\prod_{0\le i\le k} \varphi(b+a(n+ic))$ and $\varphi({\rm lcm}_{0\le i\le k}\{b+a(n+ic)\})$ for large $n$. In other words, we have $$\min_{1\le m\le P_{k,\varphi}}\{g_{k,\varphi}(m)\} \le \frac{\prod_{0\le i\le k} \varphi(b+a(n+ic))} {\varphi({\rm lcm}_{0\le i\le k}\{b+a(n+ic)\})}=g_{k,\varphi}\big(\langle n\rangle_{P_{k,\varphi}}\big)\le \max_{1\le m\le P_{k,\varphi}}\{g_{k,\varphi}(m)\},$$ where $\langle n\rangle_{P_{k,\varphi}}$ means the integer between $1$ and $P_{k,\varphi}$ such that $n\equiv\langle n\rangle_{P_{k,\varphi}}\pmod{P_{k,\varphi}}$.
On the other hand, estimating the difference between $\prod_{0\le i\le k}\varphi(b+a(n+ic))$ and ${\rm lcm}_{0\le i\le k}\{\varphi(b+a(n+ic))\}$ is also an interesting problem. For this purpose, we define the arithmetic function $G_{k, \varphi}$ for any positive integer $n$ by $$ G_{k, \varphi}(n):=\frac{\prod_{i=0}^k \varphi(b+a(n+ic))} {{\rm lcm}_{0\le i\le k} \{\varphi(b+a(n+ic))\}}. $$ Unfortunately, $G_{k, \varphi}$ may not be periodic. For instance, taking $a=1, b=0$ and $c=1$, then the arithmetic function $\bar G_{k, \varphi}$ defined by $\bar G_{k, \varphi}(n):=\frac{\prod_{i=0}^k \varphi(n+i)}{{\rm lcm}_{0\le i\le k}\{\varphi(n+i)\}}$ for $n\in \mathbb{N}^*$ is not periodic. Indeed, for any given positive integer $M$, we can always choose a prime $p>M$ since there are infinitely many primes. By Dirichlet's theorem, we know that there exists a positive integer $m$ such that the term $mp^2+1$ is a prime in the arithmetic progression $\{np^2+1\}_{n\in \mathbb{N}^*}$. Letting $n_0=mp^2$ gives us that
$p|\varphi(n_0)$ and $p| \varphi(n_0+1)=\varphi(mp^2+1)=mp^2$. Thus
$p|\bar G_{k, \varphi}(n_0)$ and $\bar G_{k, \varphi}(n_0)\ge p>M$. That is, $\bar G_{k, \varphi}$ is unbounded, which implies that $\bar G_{k, \varphi}$ is not periodic. Applying Theorem 1.3, we can give a nontrivial upper bound about the integer ${\rm lcm}_{0\le i\le k}\{\varphi(b+a(n+ic))\}$ as follows.\\ \\ \noindent{\bf Proposition 4.4.} {\it Let $k\ge 1, a\ge 1, b\ge 0$ and $c\ge 1$ be integers. Then for any positive integer $n$, we have \begin{align*} {\rm lcm}_{0\le i\le k}\{\varphi(b+a(n+ic))\} \le \frac{\prod_{i=0}^k\varphi(b+a(n+ic))}{g_{k,\varphi}\big(\langle n\rangle_{P_{k,\varphi}}\big)} \end{align*} with $\langle n\rangle_{P_{k,\varphi}}$ being defined as above.} \begin{proof}
For each $0\le i\le k$, since $\varphi$ is multiplicative and $b+a(n+ic)| {\rm lcm}_{0\le j\le k}\{b+a(n+jc)\}$, we have $$
\varphi(b+a(n+ic))\big| \varphi({\rm lcm}_{0\le j\le k}\{b+a(n+jc)\}). $$
So we get ${\rm lcm}_{0\le i\le k}\{\varphi(b+a(n+ic))\}\big| \varphi({\rm lcm}_{0\le i\le k}\{b+a(n+ic)\})$. Thereby $$g_{k,\varphi}\big(\langle n\rangle_{P_{k, \varphi}}\big)=g_{k,\varphi}(n)\le \frac{\prod_{0\le i\le k}\varphi(b+a(n+ic))}{{\rm lcm}_{0\le i\le k}\{\varphi(b+a(n+ic))\}} $$ for any positive integer $n$. The desired result then follows immediately. \end{proof}
Theorem 1.3 answers the second part of Problem 1.1 for the Euler phi function. However, the smallest period problem remains open for all other multiplicative functions $f$ with $f(n)\ne 0$ for all positive integers $n$. For example, if one picks $f=\sigma _{\alpha}$ with
$\sigma _{\alpha}(n):=\sum_{d|n\atop d\ge 1}d^{\alpha }$ for $\alpha \in \mathbb {N}$, then what is the smallest period of $g_{k, f}$? If $f=\xi _{\varepsilon}$ with $\xi _{\varepsilon}(n):=n^\varepsilon$ for $\varepsilon \in \mathbb{R}$, then what is the smallest period of $g_{k, f}$?\\
\begin{center} {\bf Acknowledgements} \end{center} The authors are grateful to the anonymous referees for careful reading of the manuscript and for helpful comments and suggestions.
\end{document}
\begin{document}
\dedicatory{Dedicated to Adriano Garsia on the occasion of his 90th birthday}
\title{$e$-positivity of vertical strip LLT polynomials}
\begin{abstract} In this article we prove the $e$-positivity of $G_{\mathbf{\nu}}[X;q+1]$ when $G_{\mathbf{\nu}}[X;q]$ is a vertical strip LLT polynomial. This property has been conjectured in \cite{Alexandersson_Panova_cycles} and \cite{Garsia_Haglund_Qiu_Romero}, and it implies several $e$-positivities conjectured in those references and in \cite{Bergeron_Open_Questions}.
We make use of a result of Carlsson and Mellit \cite{Carlsson-Mellit-ShuffleConj-2015} that shows that a vertical strip LLT polynomial can be obtained by applying certain compositions of operators of the Dyck path algebra to the constant $1$. Our proof gives in fact an algorithm to expand these symmetric functions in the elementary basis, and it shows, as a byproduct, that these compositions of operators are actually multiplication operators. \end{abstract}
\tableofcontents
\section*{Introduction}
In \cite{Bergeron_Open_Questions} Bergeron conjectured that several symmetric functions $G[X;q]$ arising from the theory of Macdonald polynomials have the property that $G[X;q+1]$ is \emph{$e$-positive}, i.e.\ the coefficients of the expansion of $G[X;q+1]$ in the elementary symmetric function basis are in $\mathbb{N}[q,t]$. Later similar conjectures have been made in \cite{Garsia_Haglund_Qiu_Romero}. Many of these conjectures would follow easily if that same property were true for vertical strip LLT polynomials. This last property has been conjectured both in \cite{Alexandersson_Panova_cycles} and \cite{Garsia_Haglund_Qiu_Romero}, and it was observed in \cite{Alexandersson_Panova_cycles} that it has an interesting parallel with a famous $e$-positivity conjecture about the \emph{chromatic symmetric functions} introduced by Shareshian and Wachs in \cite{Shareshian_Wachs_Original}. Notice also that in \cite{Alexandersson_Panova_cycles}, \cite{Garsia_Haglund_Qiu_Romero} and \cite{Alexandersson_lollipop} some special cases have been proved.
The LLT polynomials were introduced by Lascoux, Leclerc and Thibon in \cite{LLT_Original}, and they can be seen as a $q$-deformation of products of skew Schur functions. These symmetric functions $G_{\mathbf{\nu}}[X;q]$ are associated to a tuple $\mathbf{\nu}$ of skew Young diagrams. It turns out that many of the symmetric functions arising in the study of modified Macdonald polynomials exhibit a natural expansion in LLT polynomials with coefficients in $\mathbb{N}[q,t]$. In fact, a lot of these expansions only involve vertical strip LLT polynomials.
When all the skew shapes in $\mathbf{\nu}$ are (continuous) vertical strips of cells, we say that $G_{\mathbf{\nu}}[X;q]$ is a \emph{vertical strip LLT polynomial}. The main result of this article is the proof of the $e$-positivity of $G_{\mathbf{\nu}}[X;q+1]$ when $G_{\mathbf{\nu}}[X;q]$ is a vertical strip LLT polynomial. In fact our proof gives an algorithm to compute the expansion of vertical strip LLT polynomials in the elementary symmetric function basis.
The main ingredient of our proof is a result of Carlsson and Mellit in \cite{Carlsson-Mellit-ShuffleConj-2015}, which states that these symmetric functions can be computed by applying a certain composition of operators coming from their \emph{Dyck path algebra} to the constant function $1$. As a corollary of our proof, we get that these compositions of operators are actually multiplication operators. We notice here that this fact is probably related to the same property conjectured by Bergeron in \cite{Bergeron_Open_Questions} for the specialization at $t=1$ of related symmetric functions, though we do not discuss this further in this article.
As a corollary of our main result, we get many of the $e$-positivities conjectured in \cite{Bergeron_Open_Questions}, \cite{Alexandersson_Panova_cycles} and \cite{Garsia_Haglund_Qiu_Romero}. But we want to stress here that not all the $e$-positivities observed in \cite{Bergeron_Open_Questions}, \cite{Alexandersson_Panova_cycles} and \cite{Garsia_Haglund_Qiu_Romero} follow from our results. Moreover, in \cite{Alexandersson_lollipop} and \cite{Garsia_Haglund_Qiu_Romero} the authors conjecture explicit formulas for the expansion in the elementary symmetric functions basis of $G_{\mathbf{\nu}}[X;q+1]$ when $G_{\mathbf{\nu}}[X;q]$ is a vertical strip LLT polynomial. We do not say anything about these formulas, though it is conceivable that a deeper analysis of our algorithmic proof could yield new insights into these problems. We leave these considerations for future investigations.
The paper is organized in the following way. In Section~1 we introduce some notation from symmetric function theory, while in Section~2 we define vertical strip LLT polynomials and we associate to them certain Schr\"{o}der paths. In Section~3 we introduce the operators of the Dyck path algebra, and we collect some identities that we are going to use in the sequel. In Section~4 we define the path operators corresponding to the Schr\"{o}der paths defined in Section~2, and we state the theorem of Carlsson and Mellit. In Section~5 we prove our main results. Finally in Section~6 we discuss some consequences.
\section{Symmetric function notations}
In this section we limit ourselves to introduce the necessary notation to state our results. We refer to \cite{Stanley-Book-1999} and \cite{Haglund-Book-2008} for the basic symmetric function theory tools that we use freely in the sequel.
We denote by $\Lambda$ the algebra of symmetric functions in the variables $x_1,x_2,\dots$ with coefficients in the field $\mathbb{Q}(q,t)$. We denote by $e_k$ the \emph{elementary symmetric function} of degree $k$, and given a partition $\mu=(\mu_1,\mu_2,\dots)$ we set $e_\mu=e_{\mu_1}e_{\mu_2}\cdots$. We use similar notations for the \emph{power symmetric functions} $p_k$'s.
It is well-known that both $\{p_\mu\}_\mu$ and $\{e_\mu\}_\mu$ are bases of $\Lambda$.
\begin{definition}
We say that a symmetric function $f\in \Lambda$ is \emph{$e$-positive} if the coefficients of the expansion of $f$ in the basis $\{e_\mu\}_\mu$ are all in $\mathbb{N}[q,t]$. \end{definition}
We will make also use of the \emph{plethystic notation}. With this notation we will be able to add and subtract alphabets, which will be represented as sums of monomials $X = x_1 + x_2 + x_3+\cdots $. Then, given a symmetric function $f$, we denote by $f[X]$ the expansion of $f$ in the basis $\{p_\mu\}_\mu$ with $p_k$ replaced by $x_{1}^{k}+x_{2}^{k}+x_{3}^{k}+\cdots$, for all $k$. More generally, given any expression $Q(z_1,z_2,\dots)$, we define the plethystic substitution $f[Q(z_1,z_2,\dots)]$ to be the expansion of $f$ in the basis $\{p_\mu\}_\mu$ with $p_k$ replaced by $Q(z_1^k,z_2^k,\dots)$.
\section{Vertical strip LLT polynomials} \label{sec:LLT}
In this section we define the main characters of this article, i.e.\ the vertical strip LLT polynomials.
We use standard definitions and French notations for Young (Ferrers) diagrams and partitions: e.g.\ see \cite{Haglund-Book-2008}.
\subsection{Definition of LLT polynomials}
We identify a partition $\lambda=(\lambda_1,\lambda_2,\dots,\lambda_k)$ with its \emph{Ferrers diagram}, i.e.\ the set of unit squares in $\mathbb{R}^2$, called \emph{cells}, with centers $\{(i,j)\in \mathbb{Z}^2\mid 1\leq i\leq \lambda_j, 1\leq j\leq k \}$ (we identify these with the corresponding cells). We will consider also \emph{skew diagrams} $\lambda/\mu$ with $\mu\subseteq \lambda$, and \emph{semi-standard Young tableaux} of \emph{shape} $\lambda/\mu$, i.e.\ fillings $T:\lambda/\mu\to \mathbb{Z}_{>0}$ such that $T((i,j))<T((i+1,j))$ whenever $(i,j),(i+1,j)\in \lambda/\mu$, and $T((i,j))\leq T((i,j+1))$ whenever $(i,j),(i,j+1)\in \lambda/\mu$. We denote by $SSYT(\lambda/\mu)$ the set of skew semi-standard Young tableaux of shape $\lambda/\mu$. Given $T\in SSYT(\lambda/\mu)$ we set $\mathbf{x}^T:=\prod_{c\in \lambda/\mu}x_{T(c)}$.
We define the LLT polynomials as in \cite{HHL_JAMS}.
\begin{definition}
Given a $k$-tuple $\mathbf{\nu}=(\mathbf{\nu}^1,\dots,\mathbf{\nu}^k)$ of skew Young diagrams, we let $SSYT(\mathbf{\nu})=SSYT(\mathbf{\nu}^1)\times \cdots \times SSYT(\mathbf{\nu}^k)$. Given $T=(T^1,\dots,T^k)\in SSYT(\mathbf{\nu})$, let $\mathbf{x}^T$ denote the product $\mathbf{x}^{T^1}\cdots \mathbf{x}^{T^k}$. We say that $T^i(u)<T^j(v)$ form an \emph{inversion} if either
\begin{itemize}
\item $i<j$ and $c(u)=c(v)$, or
\item $i>j$ and $c(u)=c(v)+1$,
\end{itemize}
where $c(u)$ is the \emph{content} of $u$: if $u$ has (row, column) coordinates $(i,j)$, then $c(u):=i-j$.
We call $\mathsf{inv}(T)$ the total number of inversions occurring in $T$. \end{definition} \begin{remark}
Notice that our convention differs from the one in \cite{HHL_JAMS}: to go from one convention to the other it is enough to reverse the order of the components of the tuple $\mathbf{\nu}$. \end{remark} We define the \emph{LLT polynomial} \begin{equation} G_{\mathbf{\nu}}[X;q]:=\sum_{T\in SSYT(\mathbf{\nu})}q^{\mathsf{inv}(T)}\mathbf{x}^T. \end{equation}
It turns out that the $G_{\mathbf{\nu}}[X;q]$'s are symmetric functions, i.e.\ elements of $\Lambda$, though this is far from obvious from their definition: see \cite{HHL_JAMS} for an elementary proof of this fact.
A \emph{vertical strip} (or \emph{column}) \emph{LLT polynomial} is a $G_{\mathbf{\nu}}[X;q]$ where each skew diagram of $\mathbf{\nu}$ consists of a (continuous) \emph{vertical strip} of cells in the same column.
To visualize a tuple of skew Young diagrams, it is customary to arrange them in the first quadrant, in such a way that cells with the same content appear on the same diagonal in a non-overlapping fashion: we will call this an \emph{LLT diagram} associated to the tuple of skew Young diagrams, or skew Young tableaux.
As an example, the LLT diagram associated to a tuple of vertical strips is shown on the left of Figure~\ref{fig:LLT}, with letters inside the cells. Notice that, if this were the LLT diagram of a tuple of skew Young tableaux, then for example the pair $b,c$ would give an inversion if $b<c$, the pair $c,d$ would give an inversion if $d>c$, we must have $b<d$, and there is no way in which $d$ and $h$ can create an inversion.
\begin{figure*}\end{figure*}
\subsection{Schr\"{o}der paths associated to vertical strip LLT diagrams} \label{Sec:Schroeder_LLT}
In order to state (later in this article) a result of Carlsson and Mellit, we describe now a procedure to associate a \emph{Schr\"{o}der path} to a \emph{vertical strip LLT diagram} (i.e.\ the LLT diagram of a tuple of vertical strips).
We borrow the notation for the pictures from \cite{Garsia_Haglund_Qiu_Romero}, in which more details can be found.
We start by labelling with $a,b,c,d,\dots$ the cells of the vertical strip LLT diagram in \emph{reading order}, i.e.\ from left to right along the diagonals with constant content, starting from the lowest diagonal and moving upward: see Figure~\ref{fig:LLT} on the left for an example. Then we place the letters $a,b,c,d,\dots$ in this order along the diagonal of a square grid: see Figure~\ref{fig:LLT} on the right for an example.
Then for every pair of letters $(p,q)$ with $p$ to the left of $q$ we draw a blue dot in the cell at the intersection of the column containing the letter $p$ with the row containing $q$ if and only if $p$ and $q$ can potentially create an inversion in the vertical strip diagram. It is easy to see that these blue dots will always determine a \emph{Dyck path}, like the red one in Figure~\ref{fig:LLT}. Moreover, for every pair of letters $(p,q)$ with $p$ to the left of $q$ we draw a blue cross if and only if $q$ is in the cell right above the cell containing $p$ in the LLT diagram (therefore we must have $p<q$). It is easy to see that these blue crosses always end up decorating a \emph{valley} that is not on the main diagonal $x=y$ of the aforementioned Dyck path, i.e.\ in the cells immediately east and immediately south of the cell containing the blue cross there are always blue dots: see Figure~\ref{fig:LLT} on the right for an example.
Now replacing the valleys with the blue crosses (together with the two adjacent steps of the Dyck path) by diagonal edges we get a \emph{Schr\"{o}der path} that does not have diagonal steps along the diagonal $x=y$: see Figure~\ref{fig:LLT2} for an example.
\begin{figure*}
\caption{This is the Schr\"{o}der path corresponding to the diagram on the right in Figure~\ref{fig:LLT}}
\label{fig:LLT2}
\end{figure*}
It is not hard to see that any such Schr\"{o}der path comes from a vertical strip LLT diagram via this construction. For more details on all this, see \cite{Garsia_Haglund_Qiu_Romero} or \cite{Alexandersson_Panova_cycles}.
We call $\mathcal{P}$ the set of these paths. More precisely, we denote by $\mathcal{P}$ the family of \emph{paths} $P$ going from $(0,0)$ to $(n,n)$ for some $n\in \mathbb{N}$ which consist of vertical north steps, obtained by adding $(0,1)$, horizontal east steps, obtained by adding $(1,0)$, and diagonal northeast steps, obtained by adding $(1,1)$, with the property that $P$ always stays weakly above the \emph{base diagonal} $y=x$, and no diagonal step can take place along that line.
Given a tuple $\mathbf{\nu}$ of vertical strips, we call $P_{\mathbf{\nu}}$ the element of $\mathcal{P}$ associated to $\mathbf{\nu}$ via the construction that we explained in this subsection. \begin{remark} As we will only use this construction to state Theorem~\ref{thm:CM_LLT}, we omit the discussion of the classical combinatorial bijection (sending $(\mathsf{dinv},\mathsf{area})$ into $(\mathsf{area}',\mathsf{bounce})$) lying behind it: see \cite{Carlsson-Mellit-ShuffleConj-2015} or \cite{Garsia_Haglund_Qiu_Romero} for more on this. \end{remark}
\section{Dyck path algebra operators}
In this section we introduce the operators of the Dyck path algebra of Carlsson and Mellit, and we collect some basic properties that we are going to use in the sequel.
We start by recalling some definitions from \cite[Section~4]{Carlsson-Mellit-ShuffleConj-2015}.
Given a polynomial $P$ depending on variables $u,v$, define the operator $\Upsilon_{uv}$ as
\begin{align*}
(\Upsilon_{uv} P)(u,v) & \coloneqq \frac{(q-1)vP(u,v) + (v-qu)P(v,u)}{v-u}. \end{align*}
In \cite{Carlsson-Mellit-ShuffleConj-2015} these operators are called $\Delta_{uv}$, but we changed the notation in order to avoid confusion with the Delta operators $\Delta_f$ defined on $\Lambda$.
For $k \in \mathbb{N}$, define $V_k \coloneqq \Lambda[y_1, \dots, y_k]=\Lambda\otimes \mathbb{Q}[y_1,\dots,y_k]$, so that $V_0=\Lambda$. For $k\geq 2$ and $1 \leq i \leq k-1$, let \[T_i \coloneqq \Upsilon_{y_i y_{i+1}} \colon V_k \rightarrow V_k.\] Notice that the $T_i$ are invertible operators (see \cite[Section~4]{Carlsson-Mellit-ShuffleConj-2015} for an explicit formula of the inverse).
For $k\geq 0$, we define the operators $d_+ \colon V_k \rightarrow V_{k+1}$ as \begin{align*}
(d_+ F)[X] & \coloneqq T_1 T_2 \cdots T_k (F[X + (q-1) y_{k+1}])\qquad \text{ for any }F[X]\in V_k, \end{align*} while for $k\geq 1$ we define the operators $d_- \colon V_k \rightarrow V_{k-1}$ as \begin{align*}
(d_- F)[X] & \coloneqq -F[X - (q-1)y_k] \sum_{i\geq 0} \left. (-1/y_k )^{i}e_i[X] \right|_{{y_k}^{-1}}\qquad \text{ for any }F[X]\in V_k. \end{align*} Finally, for $k\geq 1$ we define the operators $\varphi:V_k\to V_k$ as \begin{equation} \label{eq:phi_def} \varphi:=\frac{1}{q-1}(d_-d_+-d_+d_-). \end{equation}
Notice that, following Carlsson and Mellit, in the notation of the operators $T_i$, $d_-$, $d_+$ and $\varphi$, the $k$ indicating the domain $V_k$ does not appear. To keep track of this in our arguments, when the domain of (compositions of) such operators is $V_k$ we say that they have \emph{degree} $k$.
We record here a few identities satisfied by these operators that we are going to need. A proof of these identities can be found in \cite[Lemma~5.3]{Carlsson-Mellit-ShuffleConj-2015} and \cite[Section~3.1]{Carlsson_Gorsky_Mellit}. \begin{proposition} In all the following identities, all sides have degree $k$: we have \begin{align} \label{eq:Ti_rel} (T_i-1)(T_i+q)& =0 \\ \label{eq:Ti_phi} \varphi T_i & = T_{i+1} \varphi\quad (i\leq k-2)\\ \label{eq:phi2} \varphi^2 T_{k-1} & = T_1 \varphi^2\\ \label{eq:Ti_dm} d_-T_i & =T_id_-\quad (1\leq i\leq k-2)\\ \label{eq:dm2} d_-^2T_{k-1} & =d_-^2\\ \label{eq:dphiT} d_-\varphi T_{k-1} & =q\varphi d_-\\ \label{eq:dplusphi} T_1\varphi d_+ & = q d_+\varphi \, . \end{align} \end{proposition} From these we can deduce the following identities, which will be useful in the sequel. \begin{proposition} In degree $k\geq 2$ we have \begin{align} \label{eq:auxdplus} \varphi d_+ & = T_1d_+\varphi +(q-1) d_+\varphi \\ \label{eq:auxdminus} \varphi d_-T_{k-1} & =d_-\varphi - (q-1)\varphi d_- \, . \end{align} \end{proposition} \begin{proof} Observe that for any $i$ \begin{align*}
(T_{i}-1)(T_{i}+q) = T_{i}^2+(q-1)T_{i}-q \end{align*} so that, multiplying by $T_{i}^{-1}$, we can rewrite \eqref{eq:Ti_rel} as \begin{equation} \label{eq:auxiliar} T_{i}=qT_{i}^{-1} -(q-1). \end{equation} Now multiplying \eqref{eq:dplusphi} by $T_1^{-1}$ we get \begin{align*}
\varphi d_+ & = q T_1^{-1} d_+\varphi\\
\text{(using \eqref{eq:auxiliar})} & = T_1 d_+\varphi + (q-1) d_+\varphi \end{align*} giving \eqref{eq:auxdplus}.
On the other hand, using again \eqref{eq:auxiliar} we get \begin{align*}
\varphi d_-T_{k-1} & = q\varphi d_- T_{k-1}^{-1} -(q-1) \varphi d_-\\ \text{(using \eqref{eq:dphiT})} & = d_-\varphi T_{k-1} T_{k-1}^{-1} -(q-1) \varphi d_- \\ & = d_-\varphi -(q-1) \varphi d_- \end{align*} giving \eqref{eq:auxdminus}. \end{proof}
Moreover, by \cite[Lemma~5.4]{Carlsson-Mellit-ShuffleConj-2015}, for $F\in V_k$ \begin{equation} \label{eq:phi_yk} \varphi F=T_1T_2\cdots T_{k-1}(-y_kF). \end{equation}
\section{Path operators}
Consider the family $\mathcal{P}$ of \emph{paths} defined in Section~\ref{sec:LLT}, i.e.\ Schr\"{o}der paths with no diagonal steps on the main diagonal $x=y$.
\emph{As we will only use paths coming from $\mathcal{P}$, in the rest of this article the word ``path'' without further specifications will be used to mean ``path in $\mathcal{P}$''.}
It will be convenient to encode such paths with \emph{words} in the alphabet $\{-,0,+\}$ where a north step corresponds to a `$-$', a northeast step to a `$0$', and an east step to a `$+$'. For example the path in Figure~\ref{fig:LLT2} gets encoded by $(-,-,0,0,-,+,-,-,+,+,0,0,-,+,+,+)$.
The \emph{degree} of a step of a path $P\in \mathcal{P}$ is the ordinate of the highest rightmost point of the step. Equivalently, it is the number of `$-$' minus the number of `$+$' occurring in the word encoding $P$ weakly to the left of the considered step. Observe that, by definition of $\mathcal{P}$, every `$0$' has positive degree, while the highest rightmost step of a path has always degree $0$.
A \emph{partial path} is simply a prefix of a path $P\in \mathcal{P}$, i.e.\ it consists of the first few consecutive steps of a path in $\mathcal{P}$ (so that its highest rightmost step can possibly have positive degree). The \emph{degree} of a partial path is defined to be the degree of its highest rightmost step.
A path is said to be \emph{non-touching} if it touches the base line only at the beginning and at the end, equivalently if its only step of degree $0$ is its rightmost one.
Observe that any path $P$ can be decomposed in a unique way as the concatenation $P=P_1P_2\cdots P_k$ of non-touching paths $P_i$. \begin{definition} We associate to each path $P\in \mathcal{P}$ a \emph{path operator} $d_P$ on $V_0=\Lambda$, by replacing each `$-$' with a $d_-$, each `$0$' with a $\varphi$, and each `$+$' with a $d_+$.
We define similarly \emph{partial path operators} $d_P$ associated to partial paths $P$, where now $d_P:V_k\to V_0$, where $k$ is the degree of $P$. \end{definition} For example, if $P$ is encoded by $(-,-,0,0,+,0,+)$, then we get $d_P=d_-d_-\varphi \varphi d_+ \varphi d_+$.
The main result that we are going to need about path operators is the following theorem of Carlsson and Mellit, which is implicit in \cite{Carlsson-Mellit-ShuffleConj-2015}, and made explicit in \cite{Garsia_Haglund_Qiu_Romero}. \begin{theorem} \label{thm:CM_LLT} Let $\mathbf{\nu}$ be a tuple of vertical strips. Then \begin{equation} G_{\mathbf{\nu}}[X;q]=d_{P_{\mathbf{\nu}}}(1), \end{equation} where $P_{\mathbf{\nu}}$ is defined in Section~\ref{Sec:Schroeder_LLT}. \end{theorem} Thanks to this theorem, we can work with path operators in order to prove the main result of this article.
\section{$e$-positivity}
In this section we prove the main result of this article, i.e.\ the $e$-positivity of $G_{\mathbf{\nu}}[X;q+1]$ where $G_{\mathbf{\nu}}[X;q]$ is a vertical strip LLT polynomial.
We start with a lemma. \begin{lemma} \label{lem:ekoperator}
For $m\geq 0$, the path operator $d_-\varphi^m d_+$ equals the operator of multiplication by $e_{m+1}$. \end{lemma} \begin{proof} Using \eqref{eq:phi_yk} in degree $1$, we get the equality in degree $0$ \[d_-\varphi^m d_+=(-1)^m d_-y_1^m d_+.\]
For any $F[X]\in \Lambda=V_0$ we compute \begin{align*}
d_-\varphi^m d_+F[X] & = (-1)^md_-y_1^m d_+F[X] \\
& = (-1)^md_-y_1^m F[X + (q-1) y_{1}]\\
& = (-1)^{m+1} y_1^m F[X + (q-1) y_{1}- (q-1)y_1] \sum_{i\geq 0} \left. (-1/y_1 )^{i}e_i[X] \right|_{{y_1}^{-1}} \\
& = F[X] \sum_{i\geq 0} \left. (-1)^{m+i+1} (y_1)^{m-i}e_i[X] \right|_{{y_1}^{-1}}\\
& =e_{m+1}F[X]. \end{align*} \end{proof}
The key ingredient of our main result is the following lemma.
\begin{lemma} \label{lem:crux}
Consider a partial path operator $d_P$ of the form
\[d_P=d_-\varphi^{a_1} d_-\varphi^{a_2} d_-\cdots d_-\varphi^{a_{s-1}} d_-\varphi^{a_s} d_+\]
with $s\geq 2$, and $a_i\geq 0$ for all $i=1,2,\dots,s$.
If $a_s=0$ then
\begin{equation} \label{eq:case0}
d_P=d_-\varphi^{a_1} d_-\cdots d_-\varphi^{a_{s-1}} d_+ d_- +(q-1)d_-\varphi^{a_1} d_-\cdots d_-\varphi^{a_{s-1}} \varphi.
\end{equation}
If $a_s>0$ then either
\begin{equation} \label{eq:case1}
d_P=qd_-\varphi^{a_1} d_-\cdots d_-\varphi^{a_{s-1}} d_-\varphi^{a_{s}-1} d_+ \varphi ,
\end{equation}
or
\begin{align}\label{eq:case2} d_P & =q^\epsilon d_-\varphi^{a_1} d_-\cdots d_-\varphi^{a_{s-1}-1} d_-\varphi^{a_{s}} d_+ \varphi \\
\notag & + (q^\epsilon-1)d_-\varphi^{a_1} d_-\cdots d_-\varphi^{a_{s-1}} d_-\varphi^{a_{s}-1} d_+ \varphi ,
\end{align}
with $a_{s-1}>0$ and $\epsilon\in \{0,1\}$, or
\begin{align} \label{eq:lem_case3}
d_P & =q^\epsilon d_-\varphi^{a_1} d_-\cdots d_-\varphi^{a_i-1} d_- \varphi^{a_{i+1}+1} d_-\cdots d_-\varphi^{a_{s-1}} d_-\varphi^{a_{s}-1} d_+ \varphi\\
\notag & + (q^\epsilon-1)d_-\varphi^{a_1} d_-\cdots d_-\varphi^{a_{s-1}} d_-\varphi^{a_{s}-1} d_+ \varphi
\end{align}
for some $1\leq i\leq s-2$ for which $a_i>0$, and $\epsilon\in \{0,1\}$. \end{lemma}
\begin{proof}
If $a_s=0$, then we can use the definition \eqref{eq:phi_def} of $\varphi$ to get
\begin{align*}
& \hspace{-0.5cm} d_-\varphi^{a_1} d_-\cdots d_-\varphi^{a_{s-1}} d_-\varphi^{a_s} d_+ =\\
& = d_-\varphi^{a_1} d_-\cdots d_-\varphi^{a_{s-1}} d_- d_+ \\
& = d_-\varphi^{a_1} d_-\cdots d_-\varphi^{a_{s-1}} (d_- d_+ - d_+d_-) +d_-\varphi^{a_1} d_-\cdots d_-\varphi^{a_{s-1}} d_+ d_- \\
& = (q-1)d_-\varphi^{a_1} d_-\cdots d_-\varphi^{a_{s-1}} \varphi +d_-\varphi^{a_1} d_-\cdots d_-\varphi^{a_{s-1}} d_+ d_-,
\end{align*}
which gives \eqref{eq:case0}.
If $a_s>0$, then we can use \eqref{eq:auxdplus} to get
\begin{align*}
& \hspace{-0.5cm} d_-\varphi^{a_1} d_-\cdots d_-\varphi^{a_{s-1}} d_-\varphi^{a_s} d_+ =\\
& = d_-\varphi^{a_1} d_-\cdots d_-\varphi^{a_{s-1}} d_-\varphi^{a_{s}-1} \textcolor{red}{\varphi d_+} \\
& = d_-\varphi^{a_1} d_-\cdots d_-\varphi^{a_{s-1}} d_-\varphi^{a_{s}-1} \textcolor{blue}{T_1d_+ \varphi } + \textcolor{blue}{(q-1)}d_-\varphi^{a_1} d_-\cdots d_-\varphi^{a_{s-1}} d_-\varphi^{a_{s}-1} \textcolor{blue}{d_+ \varphi} .
\end{align*}
Now, in order to get rid of $T_1$ in the first summand, we can use the commutation relations \eqref{eq:Ti_phi}, \eqref{eq:phi2} and \eqref{eq:Ti_dm}, i.e.\
\[ \varphi T_i = T_{i+1} \varphi\quad (i\leq k-2),\qquad \varphi^2 T_{k-1} = T_1 \varphi^2, \qquad d_-T_i=T_id_-\quad (1\leq i\leq k-2) \]
to commute the $T_1$ to the left, possibly changing it into another $T_i$, until we reach one of the following situations:
\begin{enumerate}
\item in degree $k\geq 2$, we reach $\cdots d_-^2T_{k-1}$: in this case the relation \eqref{eq:dm2}, i.e.\ $d_-^2T_{k-1}=d_-^2$, allows us to simply remove the $T_{k-1}$;
\item in degree $k\geq 2$, we reach $\cdots d_-\varphi T_{k-1}$: in this case the relation \eqref{eq:dphiT}, i.e.\ $d_-\varphi T_{k-1}=q\varphi d_-$, allows us again to remove the $T_{k-1}$, getting a factor $q$;
\item in degree $k\geq 3$, we reach $\cdots \varphi d_- T_{k-1}$: in this case the relation \eqref{eq:auxdminus}, i.e.\ \[ \varphi d_- T_{k-1}= d_- \varphi -(q-1) \varphi d_-, \]
will give a summand that cancels the one that we left out before, leaving us again with a summand without $T_{k-1}$.
\end{enumerate}
In situation (1) we get \eqref{eq:case1}, while in situations (2) and (3) we get the remaining claimed possibilities, i.e. \eqref{eq:case2} and \eqref{eq:lem_case3}, with $\epsilon=1$ and $\epsilon=0$ respectively. \end{proof}
\begin{example} \label{ex:lem} Consider the path $P\in \mathcal{P}$ encoded by $(-,0,-,-,0,0,0,0,+,+,+)$. We compute \begin{align*}
d_P & = d_-\varphi d_-^2\varphi^4 d_+^3\\
& = d_-\varphi d_-^2\varphi^3 \textcolor{red}{\varphi d_+}d_+^2\\ \text{(using \eqref{eq:auxdplus})} & = d_-\varphi d_-^2\varphi^3 \textcolor{blue}{T_1 d_+ \varphi}d_+^2 +\textcolor{blue}{(q-1)} d_-\varphi d_-^2\varphi^3 \textcolor{blue}{d_+ \varphi}d_+^2\\
& = d_- \varphi d_-^2\varphi^2\textcolor{red}{\varphi T_1} d_+ \varphi d_+^2 + (q-1) d_-\varphi d_-^2\varphi^3 d_+ \varphi d_+^2\\ \text{(using \eqref{eq:Ti_phi})} & = d_-\varphi d_-^2\varphi^2\textcolor{blue}{T_2 \varphi} d_+ \varphi d_+^2 + (q-1) d_-\varphi d_-^2\varphi^3 d_+ \varphi d_+^2 \\ & = d_-\varphi d_-^2\textcolor{red}{\varphi^2 T_2 } \varphi d_+ \varphi d_+^2 + (q-1) d_-\varphi d_-^2\varphi^3 d_+ \varphi d_+^2 \\ \text{(using \eqref{eq:phi2})} & = d_-\varphi d_-^2\textcolor{blue}{T_1\varphi^2 } \varphi d_+ \varphi d_+^2 + (q-1) d_-\varphi d_-^2\varphi^3 d_+ \varphi d_+^2 \\ & = d_-\varphi d_- \textcolor{red}{d_-T_1}\varphi^3 d_+ \varphi d_+^2 + (q-1) d_-\varphi d_-^2\varphi^3 d_+ \varphi d_+^2\\ \text{(using \eqref{eq:Ti_dm})}& = d_-\varphi d_- \textcolor{blue}{T_1 d_-}\varphi^3 d_+ \varphi d_+^2 + (q-1) d_-\varphi d_-^2\varphi^3 d_+ \varphi d_+^2\\ & = d_-\textcolor{red}{\varphi d_- T_1} d_- \varphi^3 d_+ \varphi d_+^2 + (q-1) d_-\varphi d_-^2\varphi^3 d_+ \varphi d_+^2\\ \text{(using \eqref{eq:auxdminus})}& = d_-\textcolor{blue}{d_- \varphi } d_- \varphi^3 d_+ \varphi d_+^2 \textcolor{blue}{-(q-1)}d_-\textcolor{blue}{\varphi d_- } d_- \varphi^3 d_+ \varphi d_+^2\\ & + (q-1) d_-\varphi d_-^2\varphi^3 d_+ \varphi d_+^2\\ & = d_-^2 \varphi d_- \varphi^3 d_+ \varphi d_+^2\, , \end{align*} which gives the case \eqref{eq:lem_case3} with $\epsilon=0$. \end{example}
We are now able to prove our main result.
\begin{theorem} \label{thm:main}
If $d_P$ is a path operator, then the symmetric function $\left. d_P(1)\right|_{q=q+1}$ is $e$-positive. \end{theorem} \begin{proof} Let $P\in \mathcal{P}$, and consider its decomposition $P=P_1P_2\cdots P_k$ into the concatenation of non-touching paths $P_i$. Clearly $d_P=d_{P_1}d_{P_2}\cdots d_{P_k}$. Consider the leftmost $d_+$ of degree $\geq 1$ in $d_P$, and suppose that it occurs in $d_{P_i}$ with $1\leq i\leq k$. By construction we have that all the $d_{P_j}$ with $j<i$ are of the form $d_-\varphi^{m_j}d_+$ for some $m_j\geq 0$. The prefix of $d_{P_i}$ that is cut by our $d_+$ (included) is a partial path operator of the form in the hypothesis of Lemma~\ref{lem:crux}. Therefore we can apply the lemma to make a substitution and replace our $d_P$ with a sum of one or two new path operators, each multiplied by a coefficient in $\{1,q,q-1\}$. The new path operators that occur all have one of the following three properties: \begin{enumerate}
\item there is one $d_+$ less occurring, as it got replaced together with a $d_-$ by a $\varphi$;
\item the $d_+$ occurs one place to the left of what it used to, in the same degree, as it passed by a $\varphi$;
\item the $d_+$ occurs in a lower degree, as it passed by a $d_-$. \end{enumerate} Notice that nothing before $d_{P_i}$ and nothing after our $d_+$ got modified in our new path operators.
Therefore, iterating this process on the new path operators, in finitely many steps either our $d_+$'s disappear as in (1), or they reach degree $0$, in which case they cut a prefix of the form $d_-\varphi^{m_i}d_+$ with $m_i\geq 0$.
Hence, as long as there are $d_+$'s of degree $\geq 1$ among our path operators, we can iterate this whole process.
At the end, in finitely many steps, we will have reached a combination of path operators in which every $d_+$ has degree $0$, i.e.\ these operators are compositions of operators of the form $d_-\varphi^{m}d_+$ with $m\geq 0$, with coefficients in $\mathbb{N}[q,q-1]=\mathbb{N}[q-1]$.
By Lemma~\ref{lem:ekoperator} the operators of the form $d_-\varphi^{m}d_+$ with $m\geq 0$ are multiplications by $e_{m+1}$, so in the end, applying the resulting formula for $d_P$ to the constant $1$ we ended up expanding the resulting symmetric function in the elementary symmetric function basis $\{e_\mu\}_\mu$ with coefficients in $\mathbb{N}[q-1]$. The substitution $q\mapsto q+1$ will therefore result in the $e$-positivity, as claimed. \end{proof} \begin{remark}
Observe that our proof gives actually an algorithm to expand $d_P(1)$ (and hence $\left.d_P(1)\right|_{q=q+1}$) in the elementary symmetric function basis. \end{remark} \begin{example} Consider the path $P\in \mathcal{P}$ encoded by $(-,0,-,0,+,+)$. We start with the leftmost $d_+$, which appears in degree $1$: we compute \begin{align*}
d_P & = d_- \varphi d_- \varphi d_+^2\\
\text{(using \eqref{eq:auxdplus})} & = d_- \varphi d_- \textcolor{blue}{T_1d_+ \varphi} d_+ +\textcolor{blue}{(q-1)} d_- \varphi d_- \textcolor{blue}{d_+ \varphi} d_+ \\
\text{(using \eqref{eq:auxdminus})} & = d_- \textcolor{blue}{d_- \varphi} d_+ \varphi d_+ \textcolor{blue}{-(q-1)} d_- \textcolor{blue}{\varphi d_-} d_+ \varphi d_+ + (q-1) d_- \varphi d_- d_+ \varphi d_+ \\
& = d_-^2 \varphi d_+ \varphi d_+ \end{align*} which is the case \eqref{eq:case2} with $\epsilon=0$. We can iterate on the leftmost $d_+$, which is still in degree $1$, to get \begin{align*}
d_P& = d_-^2 \varphi d_+ \varphi d_+ \\
\text{(using \eqref{eq:auxdplus})}& = d_-^2 \textcolor{blue}{T_1 d_+ \varphi} \varphi d_+ + \textcolor{blue}{(q-1)}d_-^2 \textcolor{blue}{d_+ \varphi} \varphi d_+\\
\text{(using \eqref{eq:dm2})}& = d_-^2 d_+ \varphi^2 d_+ + (q-1) d_-^2 d_+ \varphi^2 d_+\\
& =q d_-^2 d_+ \varphi^2 d_+ \end{align*} which is the case \eqref{eq:case1}.
Iterating again on the leftmost $d_+$, which is in degree $1$, we get \begin{align*}
d_P&=q d_-^2 d_+ \varphi^2 d_+ \\
\text{(using \eqref{eq:phi_def})}& =q d_- \textcolor{blue}{d_+ d_-} \varphi^2 d_+ + q\textcolor{blue}{(q-1)} d_-\textcolor{blue}{\varphi} \varphi^2 d_+\\
& =q d_- d_+ d_- \varphi^2 d_+ + q (q-1) d_- \varphi^3 d_+, \end{align*} which is the case \eqref{eq:case0}.
Now all the $d_+$'s appear in degree $0$, so, applying Lemma~\ref{lem:ekoperator}, we finally get \begin{align*}
d_P(1)& =q d_- d_+ d_- \varphi^2 d_+(1) + q (q-1) d_- \varphi^3 d_+(1) \\
& =qe_1e_3+q(q-1)e_4. \end{align*} \end{example}
The following corollary is now immediate. \begin{corollary}
If $G_{\mathbf{\nu}}[X;q]$ is a vertical strip LLT polynomial, then $G_{\mathbf{\nu}}[X;q+1]$ is $e$-positive. \end{corollary} \begin{proof}
The statement follows by combining Theorem~\ref{thm:CM_LLT} and the previous theorem. \end{proof}
\section{Some consequences}
As we already mentioned in the introduction, our result implies several $e$-positivities conjectured in \cite{Alexandersson_Panova_cycles}, \cite{Bergeron_Open_Questions} and \cite{Garsia_Haglund_Qiu_Romero}. For example, we have the following corollary. \begin{corollary} All the conjectured $e$-positivities left open in the introduction of \cite{Garsia_Haglund_Qiu_Romero} (item (7) being conditional on the validity of the Delta conjecture) are true. \end{corollary} \begin{proof}[Sketch of the proof] All the occurring symmetric functions are either (special cases of) vertical strip LLT polynomials or positive combinations (i.e.\ with coefficients in $\mathbb{N}[q,t]$) of them: see for example \cite{Haglund-Book-2008} and \cite{BGSX_Rational_Shuffle} for more information on these decompositions. \end{proof}
In a different direction, the following corollary is an immediate consequence of the proof of Theorem~\ref{thm:main}. \begin{corollary}
The path operator $d_P$ is simply a multiplication operator by $d_P(1)$. \end{corollary} \begin{proof} In the proof of Theorem~\ref{thm:main} we showed that $d_P$ is a linear combination of compositions of path operators of the form $d_-\varphi^{m}d_+$ with $m\geq 0$, which are multiplication operators by Lemma~\ref{lem:ekoperator}. Therefore $d_P$ is itself a multiplication operator, obviously by $d_P(1)$. \end{proof} \begin{remark} This fact is probably related to the same property conjectured by Bergeron in \cite{Bergeron_Open_Questions} for the specialization at $t=1$ of related symmetric functions. We do not discuss this further in this article. \end{remark} An interesting consequence of the last corollary, which is not at all clear from the definitions, is that the path operators actually commute with each other.
\section*{Acknowledgment}
We thank Adriano Garsia for attracting our attention to this topic in general, and to this specific problem in particular.
\end{document} |
\begin{document}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{prop}{Proposition}[section] \newtheorem{lemma}{Lemma}[section] \newtheorem{defin}{Definition}[section] \newtheorem{rem}{Remark}[section] \newtheorem{example}{Example}[section] \newtheorem{corol}{Corollary}[section]
\title{Lagrange geometry on tangent manifolds} \author{{\small by}
\\Izu Vaisman} \date{} \maketitle {\def*{*}\footnotetext[1] {{\it Mathematics Subject Classification:} 53C15, 53C60. \newline\indent{\it Key words and phrases}: Tangent manifold. Locally Lagrange metric.}} \begin{center} \begin{minipage}{12cm} A{\footnotesize BSTRACT. Lagrange geometry is the geometry of the tensor field defined by the fiberwise Hessian of a non degenerate Lagrangian function on the total space of a tangent bundle. Finsler geometry is the geometrically most interesting case of Lagrange geometry. In this paper, we study a generalization, which consists of replacing the tangent bundle by a general tangent manifold, and the Lagrangian by a family of compatible, local, Lagrangian functions. We give several examples, and find the cohomological obstructions to globalization. Then, we extend the connections used in Finsler and Lagrange geometry, while giving an index free presentation of these connections.} \end{minipage} \end{center}
\section{Preliminaries} Lagrange geometry \cite{{Kern},{M1},{M2}} is the extension of Finsler geometry (e.g., \cite{Bao}) to transversal ``metrics" (non degenerate quadratic forms) of the vertical foliation (the foliation by fibers) of a tangent bundle, which are defined as the Hessian of a non degenerate Lagrangian function. In the present paper, we study the generalization of Lagrange geometry to arbitrary tangent manifolds \cite{BC}. The locally Lagrange-symplectic manifolds \cite{V1} are an important particular case. In this section, we recall various facts about the geometric structures that we need for the generalization. Our framework is the $C^\infty$-category, and we will use the Einstein summation convention, where convenient.
First, a {\it leafwise locally affine} foliation is a foliation such that the leaves have a given locally affine structure that varies smoothly with the leaf. In a different formulation \cite{V2}, if $M$ is a manifold of dimension $m=p+q$, a $p$-dimensional locally leafwise affine foliation $ \mathcal{F}$ on $M$ is defined by a maximal, differential, {\it affine atlas} $\{U_\alpha\}$, with local coordinates $(x^a_\alpha,y^u_\alpha)$ $(a=1,...,q;\,u=1,...,p)$, and transition functions of the local form \begin{equation} \label{trfunct} x^a_\beta=x^a_\beta(x^b_\alpha), \;y^u_\beta=\sum_{v=1}^pA^u_{(\alpha\beta)v}(x^b_\alpha)y^v_\alpha +B^u_{(\alpha\beta)}(x^b_\alpha)\end{equation} on $U_\alpha\cap U_\beta$. Then, the leaves of $ \mathcal{F}$ are locally defined by $x^a=const.$, and their local parallelization is defined by the vector fields $\partial/\partial y^u$. Furthermore, if the atlas that defines a locally leafwise affine foliation has a subatlas such that $B_{(\alpha\beta)}^u=0$ for its transition functions, the foliation, with the structure defined by the subatlas, will be called a {\it vector bundle-type foliation}. Notice that, if one such subatlas exists, similar ones are obtained by coordinate changes of the local form \begin{equation} \label{translations} \tilde x^a_\alpha=\tilde x^a_\alpha(x^b_\alpha),\;\tilde y_\alpha = y^a_\alpha+\xi_{(\alpha\beta)}^a(x^b_\alpha). \end{equation}
For any foliation $ \mathcal{F}$, geometric objects of $M$ that either project to the space of leaves or, locally, are pull-backs of objects on the latter are said to be {\it projectable} or {\it foliated} \cite{{Mol},{V3}}. In particular, a foliated bundle is a bundle over $M$ with a locally trivializing atlas with foliated transition functions. The transversal bundle $\nu\mathcal{F}=TM/T\mathcal{F}$ is foliated. Formulas (\ref{trfunct}) show that for a locally leafwise affine foliation $ \mathcal{F}$ the tangent bundles $T\mathcal{F}$, $TM$ are foliated bundles as well. For a foliated bundle, we can define foliated cross sections. Notice that, if $ \mathcal{F}$ is a locally leafwise affine foliation, a vector field on $M$ which is tangent to $ \mathcal{F}$ is foliated as a vector field, since it projects to $0$, but, it may not be a foliated cross section of $T\mathcal{F}$!
Furthermore, for a locally leafwise affine foliation one also has {\it leafwise affine} objects, which have an affine character with respect to the locally affine structure of the leaves. For instance, a locally leafwise affine function is a function $f\in C^\infty(M)$ such that $Yf$ is foliated for any local parallel vector field $Y$ along the leaves of $ \mathcal{F}$. With respect to the affine atlas, a locally leafwise affine function has the local expression \begin{equation} \label{affunct} f=\sum_{u=1}^p \alpha_u(x^a)y^u+\beta(x^a). \end{equation} A locally leafwise affine $k$-form is a $k$-form $\lambda$ such that $i(Z)\lambda=0$ for all the tangent vector fields $Z$ of $ \mathcal{F}$ and $L_Y\lambda$ is a foliated $k$-form for all the parallel fields $Y$. Then, $\lambda$ has an expression of the form (\ref{affunct}) where $\alpha_u,\beta$ are foliated $k$-forms. A locally leafwise affine vector field is an infinitesimal automorphism of the foliation and of the leafwise affine structure, and has the local expression \begin{equation}\label{afvect} X=\sum_{a=1}^q\xi^a(x^b)\frac{\partial}{\partial x^a}+\sum_{u=1}^p[\sum_{v=1}^p \lambda^u_v(x^b)y^v+\mu^u(x^b)]\frac{\partial}{\partial y^u}. \end{equation} Etc. \cite{V2}
Any foliated vector bundle $V\rightarrow M$ produces a sheaf $\underline{V}$ of germs of differentiable cross sections, and a sheaf $\underline{V}_{pr}$ of germs of foliated cross sections. The corresponding cohomology spaces $H^k(M,\underline{V}_{pr})$ may be computed by a de Rham type theorem \cite{V3}. Namely, let $N\mathcal{F}$ be a complementary (normal) distribution of $T\mathcal{F}$ in $TM$. The decomposition $TM=N\mathcal{F}\oplus T\mathcal{F}$ yields a bigrading of differential forms and tensor fields, and a decomposition of the exterior differential as \begin{equation} \label{decompd} d=d'_{(1,0)}+d''_{(0,1)}+\partial_{(2,-1)}.\end{equation} The operator $d''$ is the exterior differential along the leaves of $ \mathcal{F}$, it has square zero and satisfies the Poincar\'e lemma. Accordingly, \begin{equation}\label{resol} 0\rightarrow \underline{V}_{pr}\stackrel{\subseteq}{\rightarrow}\underline{V}_{pr}\otimes_\Phi \underline{\Omega}^{(0,0)} \stackrel{d''}{\rightarrow}\underline{V}_{pr}\otimes_\Phi \underline{\Omega}^{(0,1)}\stackrel{d''}{\rightarrow}...,\end{equation} where $\Omega$ denotes spaces of differential forms, $\underline\Omega$ is the corresponding sheaf of differentiable germs and $\Phi$ is the sheaf of germs of foliated functions, is a fine resolution of $ \underline{V}_{pr}$.
Furthermore, if $ \mathcal{F}$ is locally leafwise affine, one also has the spaces $A^k(M,\mathcal{F})$ of locally leafwise affine $k$-forms and the corresponding sheaves of germs $\underline{A} ^k(M,\mathcal{F})$. These sheaves define interesting cohomology spaces, which may be studied by means of the exact sequences \cite{V2} \begin{equation} \label{exact1} 0\rightarrow \underline{\Omega}^{(k,0)}_{pr}\stackrel{\subseteq}{\rightarrow} \underline{A}^k(M,\mathcal{F})\stackrel{\pi}{\rightarrow} \underline{\Omega}^{(k,0)}_{pr}\otimes_\Phi \underline{T^*\mathcal{F}}_{pr} \rightarrow0,\end{equation} where, for $f$ defined by (\ref{affunct}), $\pi(f)=\alpha_u\otimes[dy^u]$, $[dy^u]$ being the projections of $dy^u$ on $T^*\mathcal{F}$.
It is important to recognize the vector bundle-type foliations among the locally leafwise affine foliations. First, notice that a vector bundle-type foliation possesses a global vector field, which may be seen as the leafwise infinitesimal homothety namely, \begin{equation} \label{homothety} E=\sum_{u=1}^p y^u\frac{\partial}{\partial y^u}, \end{equation} called the {\it Euler vector field}. In the general locally leafwise affine case, (\ref{homothety}) only defines local vector fields $E_\alpha$ on each coordinate neighborhood $U_\alpha$, and the differences $E_\beta-E_\alpha$ yield a cocycle and a cohomology class $[E](\mathcal{F})\in H^1(M,\underline{T\mathcal{F}}_{pr})$, called the {\it linearity obstruction} \cite{V2}. It follows easily that the locally leafwise affine foliation $ \mathcal{F}$ has a vector bundle-type structure iff $[E](\mathcal{F})=0$ \cite{V2}. With a normal distribution $N\mathcal{F}$, we may use the foliated version of de Rham's theorem, and $[E](\mathcal{F})$ will be represented by the global $T\mathcal{F}$-valued $1$-form $\{d''E_\alpha\}$. Accordingly, $[E](\mathcal{F})=0$ iff there exists a global vector field $E$ on $M$, which is tangent to the leaves of $ \mathcal{F}$ and such that $\forall\alpha$, \begin{equation} \label{genEuler}
E|_{U_\alpha}=E_\alpha+Q_\alpha, \end{equation} where $Q_\alpha$ are projectable. $E$ is defined up to the addition of a global, projectable, cross section of $T\mathcal{F}$, and these vector fields $E$ will be called {\it Euler vector fields}. The choice of a Euler vector field $E$ is equivalent with the choice of the vector bundle-type structure of the foliation.
We also recall the following result \cite{V2}: the vector bundle-type foliation $ \mathcal{F}$ on $M$ is a vector bundle fibration $M\rightarrow N$ iff the leaves are simply connected and the flat connections defined by the locally affine structure of the leaves are complete. \begin{example}\label{example0} {\rm On the torus $ \mathbb{T}^{p+q}$ with the Euclidean coordinates $(x^a,y^u)$ defined up to translations $$\tilde x^a=x^a+h^a,\,\tilde y^u=y^u+k^u,\hspace{5mm}h^a,k^u\in \mathbb{Z},$$ the foliation $x^a=const.$ is locally leafwise affine and has the normal bundle $dy^u=0$. The linearity obstruction $[E]$ is represented by the form $\sum_{u=1}^p dy^u\otimes(\partial/\partial y^u)$, which is not $d''$-exact. Therefore, $[E]\neq0$, and $\mathcal{F}$ is not a vector bundle-type foliation.}\end{example} \begin{example}\label{example1} {\rm Consider the compact nilmanifold $M(1,p)=\Gamma(1,p)\backslash H(1,p)$ where \begin{equation} \label{Heisenberg} H(1,p)=\left\{\left(\begin{array}{ccc} Id_p&X&Z\\ 0&1&y\\0&0&1 \end{array}\right)\,/\,X,Z\in \mathbb{R}^p,\,y\in \mathbb{R}\right\}\end{equation} is the generalized Heisenberg group, and $\Gamma(1,p)$ is the subgroup of matrices with integer entries. $M(1,p)$ has an affine atlas with the transition functions \begin{equation} \label{trHeis} \tilde x^i=x^i+a^i,\,\tilde y=y+b,\, \tilde z^i=z^i+a^iy+c^i,\end{equation} where $x^i,z^i$ $(i=1,...,p)$ are the entries of $X,Z$, respectively, and $a^i,b,c^i$ are integers. Accordingly, the local equations $x^i=const.,\, y=const.$ define a locally leafwise affine foliation $ \mathcal{F}$ of $M$, which, in fact, is a fibration by $p$-dimensional tori over a $(p+1)$-dimensional torus. 
The manifold $M$ is parallelizable by the global vector fields \begin{equation}\label{paralHeis} \frac{\partial}{\partial x^i}, \frac{\partial}{\partial y}+\sum_{i=1}^px^i\frac{\partial}{\partial z^i},\frac{\partial}{\partial z^i},\end{equation} and the global $1$-forms \begin{equation}\label{paralHeis2} dx^i,dz^i-x^idy,dy, \end{equation} and we see that $$span\left\{\frac{\partial}{\partial x^i}, \frac{\partial}{\partial y}+\sum_{i=1}^px^i\frac{\partial}{\partial z^i}\right\}$$ may serve as a normal bundle of $ \mathcal{F}$. It follows that the linearity obstruction is represented by $$\sum_{i=1}^p (dz^i-x^idy)\otimes\frac{\partial}{\partial z^i},$$ which is not $d''$-exact. Therefore, $ \mathcal{F}$ is not a vector bundle-type foliation.} \end{example} \begin{example}\label{example2} {\rm Take the {\it real Hopf manifold} $H^{(p+q)}=S^{p+q-1}\times S^1$ seen as $ (\mathbb{R}^q\times \mathbb{R}^p\,\backslash\,\{0\})\,/\,G_\lambda$, where $\lambda\in(0,1)$ is constant and $G_\lambda$ is the group \begin{equation}\label{groupG} \tilde x^a=\lambda^nx^a, \tilde y^u=\lambda^ny^u,\hspace{5mm}n\in\mathbb{Z},\end{equation} where $x^a,y^u$ are the natural coordinates of $ \mathbb{R}^q$ and $ \mathbb{R}^p$, respectively. Then, the local equations $x^a=const.$ define a vector bundle-type foliation, which has the global Euler field $E=\sum_{u=1}^py^u(\partial/\partial y^u)$. This example shows that compact manifolds may have vector bundle-type foliations.}\end{example} \begin{example}\label{example3} {\rm Consider the manifold \begin{equation}\label{example31} M^{2n}=[( \mathbb{R}^n\,\backslash\,\{0\})\times \mathbb{R}^n] \,/\,K_\lambda,\end{equation} where $\lambda\in(0,1)$ and $K_\lambda$ is the cyclic group generated by the transformation \begin{equation} \label{eqexample3} \tilde x^i=\lambda x^i,\, \tilde y^i=\lambda y^i+(1-\lambda)\frac{x^i}{\sqrt{\sum_{j=1}^n(x^j)^2}} \end{equation} $(i=1,...,n)$. 
It is easy to check that the equality $$E=\sum_{i=1}^n y^i \frac{\partial}{\partial y^i}-\sum_{i=1}^n\frac{x^i}{\sqrt{\sum_{j=1}^n(x^j)^2}}\frac{\partial}{\partial y^i}$$ defines a global vector field on $M$, which has the property of the Euler field for the foliation $x^i=const.$ Therefore, the latter is a vector bundle-type foliation. The change of coordinates $$x'^i=x^i,\,y'^i=y^i-\frac{x^i}{\sqrt{\sum_{j=1}^n(x^j)^2}}$$ provides a vector bundle-type atlas, and (\ref{eqexample3}) becomes $$\tilde x'^i=\lambda x'^i,\,\tilde y'^i=\lambda y'^i.$$ This shows that $M$ is the tangent bundle of the Hopf manifold $H^n$ defined in Example \ref{example2}}. \end{example}
Now, let us recall the basics of tangent manifolds \cite{BC}. An {\it almost tangent structure} on a manifold $M$ is a tensor field $S\in \Gamma End(TM)$ such that \begin{equation}\label{almosttg} S^2=0,\;im\,S=ker\,S.\end{equation} In particular, the dimension of $M$ must be even, say $2n$, and $rank\,S=n$. Furthermore, $S$ is a {\it tangent structure} if it is integrable i.e., locally, $S$ looks like the vertical twisting homomorphism of a tangent bundle. This means that there exists an atlas with local coordinate $(x^i,y^i)$ $(i=1,...,n)$ such that \begin{equation}\label{strtg} S\left(\frac {\partial}{\partial x^i}\right)=\frac{\partial}{\partial y^i},\, S\left(\frac {\partial}{\partial y^i}\right)=0. \end{equation} The integrability property is equivalent with the annulation of the Nijenhuis tensor \begin{equation} \label{NijS} \mathcal{N}_S(X,Y)= [SX,SY]-S[SX,Y]-S[X,SY]+S^2[X,Y]=0. \end{equation} A pair $(M,S)$, where $S$ is a tangent structure, is called a {\it tangent manifold}.
On a tangent manifold $(M,S)$, the distribution $im\,S$ is integrable, and defines the {\it vertical foliation} $ \mathcal{V}$ with $T\mathcal{V}=im\,S$. It is easy to see that the transition functions of the local coordinates of (\ref{strtg}) are of the local form (\ref{trfunct}) with $q=p=n$ and \begin{equation}\label{transtg} A^i_{(\alpha\beta)j}=\frac{\partial x^i_\beta}{\partial x^j_\alpha}. \end{equation} Therefore, $ \mathcal{V}$ is a locally leafwise affine foliation, and the local parallel vector fields along the leaves are the vector fields of the form $SX$, where $X$ is a foliated vector field. In particular, a tangent manifold has local Euler fields $E_\alpha$, and a linearity obstruction $[E]\in H^1(M, \underline{T\mathcal{V}}_{pr})$. If $[E]=0$, the foliation $ \mathcal{V}$ will be a vector bundle-type foliation, and $M$ has global Euler vector fields $E$ defined up to the addition of a foliated cross section of $T\mathcal{V}$. Furthermore, if we fix the vector-bundle type structure by fixing a Euler vector field $E$, the triple $(M,S,E)$ will be called a {\it bundle-type tangent manifold}.
Using the general result of \cite{V2}, we see that a tangent manifold is a tangent bundle iff it is a bundle-type tangent manifold and the vertical foliation has simply connected, affinely complete leaves. \begin{example}\label{example4}{\rm The Hopf manifold $H^{2n}$ of Example \ref{example2} with $q=p=n$ and $S$ defined by (\ref{strtg}) is a compact, bundle-type, tangent manifold.}\end{example} \begin{example}\label{example5} {\rm The torus of Example \ref{example0} with $q=p$ and $S$ of (\ref{strtg}) is a compact non bundle-type tangent manifold.}\end{example} \begin{example}\label{example6} {\rm The manifold $M(1,p)\times( \mathbb{R}/\mathbb{Z})$, with the coordinates of Example \ref{example1} and a new coordinate $t$ on $ \mathbb{R}$, and with $S$ defined by \begin{equation} \label{ex1tg} S\left(\frac {\partial}{\partial x^i}\right)=\frac{\partial}{\partial z^i},\, S\left(\frac {\partial}{\partial y}\right)=\frac{\partial}{\partial t},\, S\left(\frac {\partial}{\partial z^i}\right)=0,\, S\left(\frac {\partial}{\partial t}\right)=0 \end{equation} is a compact non bundle-type tangent manifold. The linearity obstruction $[E]$ of this manifold is represented by $$\sum_{i=1}^p (dz^i-x^idy)\otimes\frac{\partial}{\partial z^i} +dt\otimes\frac{\partial}{\partial t},$$ and $[E]\neq0$.} \end{example}
Tangent bundles possess {\it second order vector fields} ({\it semisprays} in \cite{M1}), so called because they may be locally expressed by a system of second order, ordinary, differential equations. A priori, such vector fields may be defined on any tangent manifold \cite{V4}, namely, the vector field $X\in \Gamma TM$ ($\Gamma$ denotes the space of global cross sections) is of the {\it second order} if $SX|_{U_\alpha}-E_\alpha$ is foliated for all $\alpha$. But, this condition means that $SX$ is a global Euler vector field, hence, only the bundle-type tangent manifolds can have global second order vector fields.
It is important to point out that, just like on tangent bundles (e.g., \cite{{Kern},{M1},{V5}}), if $(M,S,E)$ is a bundle-type tangent manifold, and $X$ is a second order vector field on $M$, the Lie derivative $F=L_XS$ defines an almost product structure on $M$ ($F^2=Id$), with the associated projectors \begin{equation} \label{almprod}V=\frac{1}{2}(Id+F),\;H=\frac{1}{2}(Id-F),\end{equation} such that $im\,V=T\mathcal{V}$ and $im\,H$ is a normal distribution $N\mathcal{V}$ of the vertical foliation $ \mathcal{V}$.
Finally, we give \begin{defin}\label{tangautomorf} {\rm A vector field $X$ on a tangent manifold $(M,S)$ is a {\it tangential infinitesimal automorphism} if $L_XS=0$ ($L$ denotes the Lie derivative).} \end{defin}
Obviously, a tangential infinitesimal automorphism $X$ preserves the foliation $ \mathcal{V}$ and its leafwise affine structure. Therefore, $X$ is a leafwise affine vector field with respect to $ \mathcal{V}$. Furthermore, in the bundle-type case, if $E$ is a Euler vector field, $[X,E]$ is a foliated cross section of $T\mathcal{V}$. \section{Locally Lagrange spaces} Lagrange geometry is motivated by physics and, essentially, it is the study of geometric objects and constructions that are transversal to the vertical foliation of a tangent bundle and are associated with a {\it Lagrangian} (a name taken from Lagrangian mechanics) i.e., a function on the total space of the tangent bundle. (See \cite{M1} and the $d$-objects defined there.) Here, we use the same approach for a general tangent manifold $(M,S)$, and we refer to functions on $M$ as {\it global Lagrangians} and to functions on open subsets as {\it local Lagrangians}.
If $ \mathcal{L}$ is a Lagrangian, the derivatives in the vertical directions yield symmetric tensor fields of $M$ defined by \begin{equation} \label{Hessk} (Hess_{(k)}\mathcal{L})_x(X_1,...,X_k)
=(S\tilde X_k)\cdots(S\tilde X_1)\mathcal{L}|_x,\;x\in M,\; X_i\in T_xM,\end{equation} where $\tilde X_i$ $(i=1,...,k)$ are extensions of $X_i$ to local, $ \mathcal{V}$-foliated, vector fields on $M$. (Of course, the result does not depend on the choice of the extensions $\tilde X_i$.) $Hess_k\mathcal{L}$ is called the $k$-{\it Hessian} of $ \mathcal{L}$. Notice that definition (\ref{Hessk}) may also be replaced by the recurrence formula \begin{equation}\label{Hessrec} (Hess_{(k)}\mathcal{L})_x(\tilde X_1,...,\tilde X_k)= [L_{S\tilde X_k}(Hess_{k-1} \mathcal{L})]_x(\tilde X_1,...,\tilde X_{k-1}), \end{equation} where the arguments are foliated vector fields.
It is worthwhile to notice the following general property \begin{prop} \label{HessX} For any function $ \mathcal{L}\in C^\infty(M)$, any tangential infinitesimal automorphism $X$ of the tangent manifold $(M,S)$, and any $k=1,2,...$, one has \begin{equation} \label{LieHessX} Hess_k(X\mathcal{L})=L_X(Hess_k\mathcal{L}). \end{equation} \end{prop} \noindent{\bf Proof.} Proceed by induction on $k$, while evaluating the Hessian of $X\mathcal{L}$ on foliated arguments and using the recurrence formula (\ref{Hessrec}). Q.e.d.
For $k=1$, we get a $1$-form, say $\theta_\mathcal{L}$, and for $k=2$, we get the usual Hessian of $ \mathcal{L}$ with respect to the affine vertical coordinates $y^i$ (see Section 1), hereafter to be denoted by either $Hess\,\mathcal{L}$ or $g_\mathcal{L}$. Obviously, $g_\mathcal{L}$ vanishes whenever one of the arguments is vertical; hence, it yields a well defined cross section of the symmetric tensor product $\odot^2\nu^*\mathcal{V}$ $(\nu\mathcal{V}=TM/T\mathcal{V})$, which we continue to denote by $g_\mathcal{L}$. If $g_\mathcal{L}$ is non degenerate on the transversal bundle $\nu\mathcal{V}$, the Lagrangian $ \mathcal{L}$ is said to be {\it regular} and $g_\mathcal{L}$ is called a {\it (local) Lagrangian metric}. We note that if the domain of $ \mathcal{L}$ is connected, the regularity of $ \mathcal{L}$ also implies that $g_\mathcal{L}$ is of a constant signature. With respect to the local coordinates of (\ref{strtg}), one has \begin{equation}\label{Hesslocal} \theta_{ \mathcal{L}}= \frac{\partial\mathcal{L}}{\partial y^i}dx^i,\;g_{ \mathcal{L}}= \frac{1}{2}\frac{\partial^2\mathcal{L}}{\partial y^i\partial y^j}dx^i\odot dx^j.\end{equation}
Lagrangian mechanics shows the interest of one more geometric object related to a Lagrangian namely, the differential $2$-form \begin{equation}\label{formasimpl} \omega_\mathcal{L}=d\theta_\mathcal{L}= \frac{\partial^2\mathcal{L}}{\partial x^i\partial y^j}dx^i \wedge dx^j + \frac{\partial^2\mathcal{L}}{\partial y^i\partial y^j}dy^i\wedge dx^j.\end{equation} If $ \mathcal{L}$ is a regular Lagrangian $\omega_\mathcal{L}$ is a symplectic form, called the {\it Lagrangian symplectic form}.
In \cite{{V1},{V4}}, we studied particular symplectic forms $\Omega$ on a tangent manifold $(M,S)$ that are {\it compatible with the tangent structure} $S$ in the sense that \begin{equation} \label{symplcomp} \Omega(X,SY)=\Omega(Y,SX).\end{equation} If this happens, $\Omega$ is called a {\it locally Lagrangian-symplectic form} since the compatibility property is equivalent with the existence of an open covering $M=\cup U_\alpha$, and of local regular Lagrangian functions $ \mathcal{L}_\alpha$ on $U_\alpha$, such that,
$\Omega|_{U_\alpha}=\omega_{ \mathcal{L}_\alpha}$ for all $\alpha$. On the intersections $U_\alpha\cap U_\beta$ the local Lagrangians satisfy a compatibility relation of the form \begin{equation} \label{Lrel} \mathcal{L}_\beta-\mathcal{L}_\alpha=a(\varphi_{(\alpha\beta)}) +b_{(\alpha\beta)},\end{equation} where $\varphi_{(\alpha\beta)}$ is a closed, foliated $1$-form, $b_{(\alpha\beta)}$ is a foliated function, and $a(\varphi)=\varphi_iy^i$ where the local coordinates and components are taken either in $U_\alpha$ or in $U_\beta$. Furthermore, if it is possible to find a compatible (in the sense of (\ref{Lrel})) global Lagrangian $ \mathcal{L}$, $\Omega$ is a {\it global Lagrangian symplectic form}. Conditions for the existence of a global Lagrangian were given in \cite{{V1},{V4}}. In particular, a globally Lagrangian-symplectic manifold $M^{2n}$ cannot be compact since it has the exact volume form $\omega_{ \mathcal{L}}^n$.
Following the same idea, we give \begin{defin} \label{varlocLagr} {\rm Let $(M^{2n},S)$ be a tangent manifold, and $g\in\Gamma\odot^2\nu^*\mathcal{V}$ a non degenerate tensor field. Then $g$ is a {\it locally Lagrangian metric (structure)} on $M$ if there exists an open covering $M=\cup U_\alpha$ with local regular Lagrangian functions $ \mathcal{L}_\alpha$ on $U_\alpha$
such that $g|_{U_\alpha}=g_{ \mathcal{L}_\alpha} =Hess\,\mathcal{L}_\alpha$ for all $\alpha$. The triple $(M,S,g)$ will be called a {\it locally Lagrange space or manifold}.}\end{defin}
It is easy to see that the local Lagrangians $ \mathcal{L}_\alpha$ of a locally Lagrange space must again satisfy the compatibility relations (\ref{Lrel}), where the $1$-forms $\varphi_{(\alpha\beta)}$ may not be closed. In particular, we see that a locally Lagrangian-symplectic manifold is a locally Lagrange space with the metric defined by \cite{V1} \begin{equation}\label{metricoflocally Lagrangian-symplectic} g([X],[Y])=\Omega(SX,Y),\end{equation} where $X,Y\in\Gamma TM$ and $[X],[Y]$ are the corresponding projections on $\nu\mathcal{V}$. Furthermore, if there exists a global Lagrangian $ \mathcal{L}$ that is related by (\ref{Lrel}) with the local Lagrangians of the structure, $(M,S,g,\mathcal{L})$ will be a {\it globally Lagrange space}. A globally Lagrange space also is a globally Lagrangian-symplectic manifold; hence, it cannot be compact.
We can give a global characterization of the locally Lagrange metrics. First, we notice that the bundles $\otimes^k\nu^*\mathcal{V}$ of covariant tensors transversal to the vertical foliation $ \mathcal{V}$ of a tangent manifold $(M,S)$ may also be seen as the bundles of covariant tensors on $M$ that vanish if evaluated on arguments one of which belongs to $im\,S$. (This holds because $\nu^*\mathcal{V}\subseteq T^*M$.) In particular, a transversal metric $g$ of $ \mathcal{V}$ may be seen as a symmetric $2$-covariant tensor field $g$ on $M$ which is annihilated by $im\,S$. With $g$, one associates a $3$-covariant tensor, called the {\it derivative} or {\it Cartan tensor} \cite{{Bao},{M1},{M2}} defined by \begin{equation}\label{defC} C_x(X,Y,Z)=(L_{S\tilde X}g)_x (Y,Z),\;x\in M,\;X,Y,Z\in T_xM,\end{equation} where $\tilde X$ is a foliated extension of $X$. Obviously, $C\in\Gamma\otimes^3\nu^*\mathcal{V}$. Then, we get \begin{prop}\label{propos25} The transversal metric $g$ of the vertical foliation $ \mathcal{V}$ of a tangent manifold $(M,S)$ is a locally Lagrange metric iff the tensor field $C$ is totally symmetric. \end{prop} \noindent {\bf Proof.} Since $$C_{ijk}=C(\frac{\partial}{\partial x^i},\frac{\partial}{\partial x^j}, \frac{\partial}{\partial x^k})=\frac{\partial g_{jk}}{\partial y^i},$$ the symmetry of $C$ is equivalent with the existence of the required local Lagrangians $ \mathcal{L}$. Q.e.d.
We give a number of examples of locally Lagrange manifolds. \begin{example}\label{example21} {\rm Consider the torus of Example \ref{example5}. Then $$ \mathcal{L}=\frac{1}{2}\sum_{i=1}^n(y^i)^2 $$ defines compatible local Lagrangians with the corresponding Lagrange metric $\sum_{i=1}^n(dx^i)^2$. (Notice also the existence of the locally Lagrange symplectic form $\Omega=\sum_{i=1}^{n}dx^i \wedge dy^i$.)} \end{example} \begin{example}\label{example22}{\rm Consider the tangent manifold $M(1,p)\times( \mathbb{R}/\mathbb{Z})$ of Example \ref{example6}, with the tangent structure defined by (\ref{ex1tg}). The $ \mathcal{V}$-transversal metric $$\sum_{i=1}^p(dx^i)^2+(dy)^2$$ is the Lagrange metric of the local compatible Lagrangians $$\frac{1}{2}(\sum_{i=1}^p(z^i)^2+t^2).$$ (In this example the forms $\varphi_{(\alpha\beta)}$ of (\ref{Lrel}) are not closed.)} \end{example}
Examples \ref{example21}, \ref{example22} are interesting because the manifolds involved are compact manifolds. \begin{example}\label{example23} {\rm The manifold $M^{2n}$ of Example \ref{example3} is diffeomorphic with the tangent bundle $TH^n$. With the coordinates $(x'^i,y'^i)$ (see Example \ref{example3}), we see that the function $$ \mathcal{L}=\frac{\sum_{i=1}^{n}(y'^i)^2}{2\sum_{i=1}^{n}(x'^i)^2}$$ is a global, regular Lagrangian, and it produces a positive definite Lagrange metric.}\end{example} \begin{example}\label{example24} {\rm Consider the Hopf manifold $H^{2n}$ of Example \ref{example4} with the tangent structure (\ref{strtg}), and define the local compatible Lagrangians \begin{equation}\label{metricaln} \mathcal{L}=\frac{1}{2}\ln{\rho},\;\;\rho=\sum_{i=1}^n[(x^i)^2+(y^i)^2]. \end{equation} An easy computation yields \begin{equation} \label{Hessln} \frac{\partial^2\mathcal{L}}{\partial y^i\partial y^j} =-\frac{1}{\rho^2}(2y^iy^j-\rho\delta_{ij}).\end{equation} The determinant of the Hessian (\ref{Hessln}) can be easily computed as a characteristic polynomial and we get $$det\left(\frac{\partial^2\mathcal{L}}{\partial y^i\partial y^j}\right)= \frac{\sum_{i=1}^n[(x^i)^2-(y^i)^2]} {\{\sum_{i=1}^n[(x^i)^2+(y^i)^2]\}^{n+1}}.$$ Now, the local equation $$\sum_{i=1}^n(x^i)^2=\sum_{i=1}^n(y^i)^2$$ defines a global hypersurface $\Sigma$ of $H^{2n}$, and (\ref{Hessln}) provides a locally Lagrange metric structure on $H^{2n}\backslash\Sigma$.} \end{example} \begin{example} \label{example26} {\rm On any tangent manifold $(M,S)$, any non degenerate, foliated, transversal metric $g$ of the vertical foliation (if such a metric exists \cite{Mol}) is locally Lagrange. Indeed, this kind of metric is characterized by $C=0$, and the result follows from Proposition \ref{propos25}.}\end{example}
A natural question implied by Definition \ref{varlocLagr} is: assume that $(M,S,g,\mathcal{L}_\alpha)$ is a locally Lagrange space; what conditions ensure the existence of a global compatible, regular Lagrangian?
The compatibility relations (\ref{Lrel}) endow $M$ with an $\underline{A}^0$-valued $1$-cocycle defined by any of the members of equation (\ref{Lrel}), hence, with a cohomology class $ \mathcal{G}\in H^1(M,\underline{A}^0)$, which we call the {\it total Lagrangian obstruction}, and it is obvious that $ \mathcal{G}=0$ iff the manifold $M$ with the indicated structure is a globally Lagrange space.
Furthermore, the total Lagrangian obstruction may be decomposed into two components determined by the exact sequence (\ref{exact1}) with $k=0$, which in our case becomes \begin{equation} \label{exact3} 0\rightarrow\Phi\stackrel{\subseteq}{\rightarrow}\underline{A}^0 (M,\mathcal{V})\stackrel{\pi'}{\rightarrow}\underline{\Omega}^{(1,0)}_{pr} \rightarrow0,\end{equation} where $\pi'$ is the composition of the projection $\pi$ of (\ref{exact1}) by $S$.
It is easy to see that the connecting homomorphism of the exact cohomology sequence of (\ref{exact3}) is zero in dimension $0$. Accordingly, we get the exact sequence \begin{equation} \label{exactobstruction} 0\rightarrow H^1(M,\Phi)\stackrel{\iota^*}{\rightarrow}H^1(M,\underline{A}^0 )\stackrel{\pi^*}{\rightarrow}H^1(M,\underline{\Omega}^{(1,0)}_{pr}) \stackrel{\partial}{\rightarrow}H^2(M,\Phi)\rightarrow\cdots,\end{equation} where $\iota^*,\pi^*$ are induced by the inclusion and the homomorphism $\pi'$ of (\ref{exact3}). Accordingly, we get the cohomology class $ \mathcal{G}_1=\pi^*( \mathcal{G})\in H^1(M,\underline{\Omega}^{(1,0)}_{pr})$, and we call it the {\it first Lagrangian obstruction}. $ \mathcal{G}_1=0$ is a necessary condition for $M$ to be a globally Lagrange space. Furthermore, if $ \mathcal{G}_1=0$, the exact sequence (\ref{exactobstruction}) tells us that there exists a unique cohomology class $ \mathcal{G}_2\in H^1(M,\Phi)$ such that $ \mathcal{G}=\iota^*( \mathcal{G}_2)$. We call $ \mathcal{G}_2$ the {\it second Lagrangian obstruction} of the given structure, and $ \mathcal{G}=0$ iff $ \mathcal{G}_1=0$ and $ \mathcal{G}_2=0$.
We summarize the previous analysis in \begin{prop} \label{propos21} The locally Lagrange space $(M,S,g,\mathcal{L}_\alpha)$ is a globally Lagrange space iff both the first and the second Lagrangian obstructions exist and are equal to zero. \end{prop}
Let us assume that a choice of a normal bundle $N\mathcal{V}$ has been made. Then we can use the de Rham theorem associated with the relevant resolution (\ref{resol}) in order to get a representation of the Lagrangian obstructions. The definition of $ \mathcal{G}_1$ shows that the first Lagrangian obstruction is represented by the cocycle $\{\theta_{ \mathcal{L}_\beta}-\theta_{ \mathcal{L}_\alpha}\}$. Accordingly, $ \mathcal{G}_1$ may be seen as the $d''$-cohomology class of the global form $\Theta$ of type $(1,1)$ defined by gluing up the local forms $\{d''\theta_{ \mathcal{L}_\alpha}\}$. If we follow the notation of \cite{V3} and take bases \begin{equation} \label{bases} N\mathcal{V}=span\left\{X_i=\frac{\partial}{\partial x^i}-t^j_i \frac{\partial}{\partial y^j}\right\},\;T\mathcal{V}=span\left\{ Y_i=\frac{\partial}{\partial y^i}\right\},\end{equation} with the dual cobases \begin{equation}\label{cobases} \begin{array}{l} N^*\mathcal{V}=ann(T\mathcal{V})=span\{dx^i\},
\\ T^*\mathcal{V}=ann(N \mathcal{V})=span\{\vartheta^i=dy^i+t^i_jdx^j\},\end{array}\end{equation} where $t^i_j(x^i,y^i)$ are local functions, we get \begin{equation}\label{formagamma}\Theta= \frac{\partial^2 \mathcal{L}_\alpha}{\partial y^i\partial y^j}\vartheta^i\wedge dx^j.\end{equation} The result may be written as \begin{prop} \label{propos22} Let $(M,S,g,\mathcal{L}_\alpha)$ be a locally Lagrange space. Then, each choice of a normal bundle $N \mathcal{V}$ defines an almost symplectic structure of $M$, given by the non degenerate $d''$-closed $2$-form $\Theta$. The first Lagrangian obstruction $ \mathcal{G}_1$ vanishes iff the form $\Theta$ is $d''$-exact. \end{prop} \begin{corol}\label{corolar20} A compact, connected, bundle-type, tangent manifold, with the Euler vector field $E$ has no locally Lagrange metric $g$ such that $L_Eg=sg$ where $s$ is a function that never takes the value $-1$. \end{corol} \noindent{\bf Proof.} Essentially, the hypothesis on $E$ means $E$ cannot be a conformal infinitesimal automorphism of $g$. From (\ref{formagamma}) we get \begin{equation}\label{eqPsi} \Psi=\frac{1}{n!}\Theta^n= (-1)^{\frac{n(n+1)}{2}}det\left(\frac{\partial^2 \mathcal{L}_\alpha}{\partial y^i\partial y^j}\right) \end{equation} $$\cdot dx^1\wedge\ldots\wedge dx^n\wedge dy^1\wedge\ldots \wedge dy^n$$ and \begin{equation} \label{Liegamman} L_E\Psi=(-1)^{\frac{n(n+1)}{2}}\left[E\, det\left(\frac{\partial^2 \mathcal{L}_\alpha}{\partial y^i\partial y^j}\right)+n\,det\left(\frac{\partial^2 \mathcal{L}_\alpha}{\partial y^i\partial y^j}\right)\right] \end{equation} $$\cdot dx^1\wedge\ldots\wedge dx^n\wedge dy^1\wedge\ldots \wedge dy^n,$$ where the local coordinates belong to an affine atlas where $E=y^i(\partial/\partial y^i)$. If $M$ is compact, $\int_M L_E\Psi=0$, and the coefficient of the right hand side of (\ref{Liegamman}) cannot have a fixed sign. But, the latter property holds under the hypothesis of the corollary. Q.e.d.
For instance, the Hopf manifold $H^n$ has no locally Lagrange metric with Lagrangians $ \mathcal{L}_\alpha$ that are homogeneous with respect to the coordinates $(y^i)$. Indeed, homogeneity of degree $s\neq-1$ is impossible because of the previous corollary, and homogeneity of degree $-1$ contradicts the transition relations (\ref{Lrel}). \begin{rem}\label{remexHopf} {\rm Because of Corollary \ref{corolar20}, we conjecture that a compact, bundle-type, tangent manifold cannot have a locally Lagrange metric.} \end{rem} \begin{prop}\label{corolar21} The first Lagrangian obstruction of a locally Lagrange metric structure of $M$ with the local Lagrangians $\{ \mathcal{L}_\alpha\}$ vanishes iff there exists a subordinated structure $\{\tilde{ \mathcal{L}}_\alpha\}$ such that the $1$-forms $\theta_{\tilde{\mathcal{L}}_\alpha}$ glue up to a global $1$-form. This subordinated structure defines a locally Lagrangian-symplectic structure on the manifold $M$. Furthermore, in this case the second Lagrangian obstruction $ \mathcal{G}_2$ is represented by the global $d''$-closed form $\kappa$ of type $(0,1)$ defined by gluing up the local forms $\{ d''\tilde{ \mathcal{L}}_\alpha\}$.\end{prop} \noindent{\bf Proof.} Under the hypothesis, there exists a global form $\lambda$ of type $(1,0)$ such that $\Theta=d''\theta_{ \mathcal{L}_\alpha}=d''\lambda$, therefore, $\theta_{
\mathcal{L}_\alpha}=\lambda|_{U_\alpha}+\xi_\alpha$, with some local foliated $1$-forms $\xi_\alpha=\xi_{\alpha,i}(x^j)dx^i$. Accordingly, we get \begin{equation}\label{eqcorol} \frac{\partial( \mathcal{L}_\beta-\mathcal{L}_\alpha)}{\partial y^i} =\xi_{\beta,i}-\xi_{\alpha,i}, \end{equation} whence $$ \mathcal{L}_\beta- \mathcal{L}_\alpha=a(\xi_\beta-\xi_\alpha)+b_{(\alpha\beta)},$$ where $a$ has the same meaning as in (\ref{Lrel}) and $b_{(\alpha\beta)}$ are foliated functions. Now, if we define \begin{equation} \label{eq2corol} \tilde{\mathcal{L}}_\alpha=\mathcal{L}_\alpha-a(\xi_\alpha) \end{equation} we are done. The last assertion follows from the definition of $ \mathcal{G}_2$. Q.e.d. \begin{corol} \label{corolar211} The locally Lagrange metric of Proposition \ref{corolar21} is defined by a global Lagrangian iff $\kappa=d''k$ for a function $k\in C^\infty(M)$. \end{corol}
In order to give an application of this result we recall \begin{lemma}\label{contractibility} For the vertical foliation $ \mathcal{V}$ of a tangent bundle $TN$, one has $H^k(TN,\Phi)=0$ for any $k>0$.\end{lemma} \noindent{\bf Proof.} Use a normal bundle $N\mathcal{V}$, and let $\lambda$ be a $d''$-closed form of type $(p,q)$ on $TN$. Since the fibers of $TN$ are contractible, if $N=\cup U_\alpha$ is a covering by small enough, $TN$-trivializing neighborhoods, we have
$\lambda|_{p^{-1}(U_\alpha)}=d''\mu_\alpha$ ($p:TN\rightarrow N$) for some local forms $\mu_\alpha$ of type $(p,q-1)$. The local forms $\mu_\alpha$ can be glued up to a global form $\mu$ by means of the pullback to $TN$ of a partition of unity on $N$, i.e., by means of foliated functions. Accordingly, we will have $\lambda=d''\mu$. Q.e.d.
From Corollary \ref{corolar211} and Lemma \ref{contractibility} we get \begin{prop}\label{propos23} Any locally Lagrange metric of a tangent bundle $TN$ is a globally Lagrange metric. \end{prop} \begin{rem} \label{observatie} {\rm Propositions \ref{propos25}, \ref{propos23} imply that, in the case of a tangent bundle $M=TN$, the symmetry of $C$ is a necessary and sufficient condition for $g$ to be a global Lagrangian metric. It was well known that this condition is necessary \cite{M1}. On the other hand the metrics of \cite{M1} usually are differentiable only on the complement of the zero section of $TN$, where Proposition \ref{propos23} does not hold, hence, the condition is not a sufficient one.} \end{rem}
We also mention the inclusion $\sigma:\underline{Z}^{(1,0)}_{pr}\rightarrow\underline{\Omega}^{(1,0)}_{pr}$, where $Z$ denotes spaces of closed forms, and the obvious \begin{prop}\label{propos24} The locally Lagrange metric structure defined by $\{ \mathcal{L}_\alpha\}$ is reducible to a locally Lagrangian-symplectic structure iff $ \mathcal{G}_1\in im\,\sigma^*$, where $\sigma^*$ is induced by $\sigma$ in cohomology. \end{prop}
Other important notions are defined by \begin{defin} \label{automorfLagr} {\rm Let $(M,S,g)$ be a locally Lagrange space, and $X\in\Gamma TM$. Then: i) $X$ is a {\it Lagrange infinitesimal automorphism} if $L_Xg=0$, where $g$ is seen as a $2$-covariant tensor field on $M$; ii) $X$ is a {\it strong Lagrange infinitesimal automorphism} if it is a Lagrange and a tangential infinitesimal automorphism of $(M,S)$, simultaneously.} \end{defin}
Notice that \begin{equation} \label{eqptderivLie} (L_Xg)(Y,SZ)=-g(Y,[X,SZ])\hspace{5mm}(X,Y,Z\in\Gamma TM). \end{equation} From (\ref{eqptderivLie}) and the non degeneracy of $g$ on $\nu\mathcal{V}$ it follows that a Lagrange infinitesimal automorphism necessarily is a $\mathcal{V}$-projectable vector field. But, it may not be locally leafwise affine. Indeed, if $g$ is a foliated metric of $\nu\mathcal{V}$ (Example \ref{example26}) every tangent vector field of $\mathcal{V}$ is a Lagrange infinitesimal automorphism, even if it is not locally leafwise affine.
We finish this section by considering a more general structure. \begin{defin} \label{def22} {\rm Let $(M,S)$ be a tangent manifold. A {\it locally conformal Lagrange structure} on $M$ is a maximal open covering $M=\cup U_\alpha$ with local regular Lagrangians $ \mathcal{L}_\alpha$ such that, over the intersections $U_\alpha\cap U_\beta$, the local Lagrangian metrics satisfy a relation of the form \begin{equation}\label{conformality} g_{\mathcal{L}_\beta}=f_{(\alpha\beta)} g_{\mathcal{L}_\alpha}, \end{equation} where $ f_{(\alpha\beta)}>0$ are foliated functions. A tangent manifold endowed with this type of structure is a {\it locally conformal Lagrange space or manifold}.}\end{defin}
Clearly, condition (\ref{conformality}) is equivalent with the transition relations \begin{equation}\label{conform2} \mathcal{L}_\beta=f_{(\alpha\beta)} \mathcal{L}_\alpha+a(\varphi_{(\alpha\beta)})+b_{(\alpha\beta)},\end{equation} where the last two terms are like in (\ref{Lrel}). On the other hand, $ \{\ln{ f_{(\alpha\beta)}}\}$ is a $\Phi$-valued
$1$-cocycle, and may be written as $\ln f_{(\alpha\beta)}=\psi_\beta-\psi_\alpha$ where $\psi_\alpha$ is a differentiable function on $U_\alpha$ (which may be assumed projectable only if the cocycle is a coboundary). Accordingly the formula \begin{equation}\label{gconformal}g|_{U_\alpha}= e^{-\psi_\alpha}g_{ \mathcal{L}_\alpha}\end{equation} defines a global transversal metric of the vertical foliation, which is locally conformal with local Lagrange metrics. As a matter of fact, we have \begin{prop}\label{propos26} Let $(M^{2n},S)$ be a tangent manifold, and $n>1$. Then, $M$ is locally conformal Lagrange iff $M$ has a global transversal metric $g$ of the vertical foliation, which is locally conformal with local Lagrange metrics. \end{prop} \noindent {\bf Proof.} We still have to prove that the existence of the metric $g$ that satisfies (\ref{gconformal}) implies (\ref{conformality}), which is clear, except for the fact that the functions $ f_{(\alpha\beta)}=e^{\psi_\beta-\psi_\alpha}$ are projectable. This follows from the Lagrangian character of the metrics $g_{ \mathcal{L}_\alpha}$. Indeed, with the usual local coordinates $(x^i,y^i)$, the symmetry of the derivative tensors $C$ of $g_{ \mathcal{L}_\alpha},g_{ \mathcal{L}_\beta}$ implies $$\frac{\partial f_{(\alpha\beta)}}{\partial y^k}(g_{ \mathcal{L}_\alpha})_{ij}= \frac{\partial f_{(\alpha\beta)}}{\partial y^i}(g_{ \mathcal{L}_\alpha})_{kj},$$ and a contraction by $(g_{ \mathcal{L}_\alpha})^{ij}$ yields $\partial f_{(\alpha\beta)}/\partial y^k=0$. Q.e.d.
The cohomology class $\eta=[\ln{f_{(\alpha\beta)}}]\in H^1(M,\Phi)$ will be called the {\it complementary class} of the metric $g$, and the locally conformal Lagrange metric $g$ is a locally Lagrange metric iff $\eta=0$. Indeed, if $\eta=0$, we may assume that the functions $\psi_\alpha$ are foliated, and the derivative tensor $C$ of $g=e^{-\psi_\alpha} g_{ \mathcal{L}_\alpha}$ is completely symmetric.
Furthermore, using a normal bundle $N\mathcal{V}$ and the leafwise version of the de Rham theorem, the complementary class may be seen as the $d''$-cohomology class of the global, $d''$-closed {\it complementary form} $\tau$ obtained by gluing up the local forms $\{d''\psi_\alpha\}$. In particular, Lemma \ref{contractibility} and Proposition \ref{propos23} imply that any locally conformal Lagrange metric $g$ of a tangent bundle must be a locally, therefore, a globally Lagrange metric. \begin{example}\label{example25} {\rm Consider the Hopf manifold $H^{2n}$ of Example \ref{example4}. The local functions $ \sum_{i=1}^n(y^i)^2$ define a locally conformal Lagrange structure on $H^{2n}$, and $$g=\frac{\sum_{i=1}^n(y^i)^2}{\sum_{i=1}^n[(x^i)^2+(y^i)^2]}$$ is a corresponding global metric, which, with the previously used notation, corresponds to $$\psi_\alpha=\ln{\{\sum_{i=1}^n[(x^i)^2+ (y^i)^2]\}}.$$ The corresponding complementary form is $$\tau=\frac{2\sum_{i=1}^n y^idy^i}{\sum_{i=1}^n[(x^i)^2+ (y^i)^2]}.$$} \end{example} \begin{prop}\label{propos27} Let $(M,S)$ be a tangent manifold and $g$ a global transversal metric of the vertical foliation $
\mathcal{V}$ of $S$. Then, $g$ is locally conformal Lagrange iff there exists a $d''$-closed form $\tau$ of type $(0,1)$ such that the tensor $\tilde C=C-(\tau\circ S)\otimes g$, where $C$ is the derivative tensor of $g$, is a completely symmetric tensor.\end{prop} \noindent {\bf Proof.} Define $\tilde g=e^{-\psi_\alpha}g$, where $\tau|_{U_\alpha}=d''\psi_\alpha$ for a covering $M=\cup U_\alpha$. Then, $e^{-\psi_\alpha}\tilde C$ is the derivative tensor of $\tilde g$, and the result follows from Proposition \ref{propos25}. Q.e.d. \section{Transversal Riemannian geometry} The aim of this section is to give an index free presentation of the connections used in Finsler and Lagrange geometry \cite{{Bao},{M1},{M2}}, while also extending these connections to tangent manifolds.
Let $(M,S)$ be a tangent manifold and $g$ a metric of the transversal bundle of the vertical foliation $ \mathcal{V}$ ($ T\mathcal{V}=im\,S$). (The metrics which we consider are non degenerate, but may be indefinite.) We do not get many interesting differential-geometric objects on $M$, unless we fix a normal bundle $N\mathcal{V}$, also called the {\it horizontal bundle}, i.e., we decompose \begin{equation} \label{normal} TM=N\mathcal{V}\oplus T\mathcal{V}.\end{equation} We will say that $N\mathcal{V}$ is a {\it normalization}, and $(M,S,N\mathcal{V})$ is a {\it normalized tangent manifold}. Where necessary, we shall use the local bases (\ref{bases}), (\ref{cobases}). The projections on the two terms of (\ref{normal}) will be denoted by $p_N$, $p_T$, respectively, and $P=p_N-p_T$ is an almost product structure tensor that has the horizontal and vertical distribution as $\pm 1$-eigendistributions, respectively.
For a normalized tangent manifold, the following facts are well known: {\it i)} $S|_{N\mathcal{V}}$ is an isomorphism $Q:N\mathcal{V}\rightarrow T\mathcal{V}$, {\it ii)} $S=Q\oplus0$, {\it iii)} $S'=0\oplus Q^{-1}$ is an almost tangent structure, {\it iv)} $F=S'+S$ is an almost product structure, {\it v)} $J=S'-S$ is an almost complex structure on $M$.
On a normalized tangent manifold $(M,S,N\mathcal{V})$, a pseudo-Riemannian metric $\gamma$ is said to be a {\it compatible metric} if the subbundles $T\mathcal{V},N\mathcal{V}$ are orthogonal with respect to $\gamma$ and \begin{equation} \label{compatiblemetric} \gamma(SX,SY)=\gamma(X,Y),\hspace{5mm} \forall X,Y\in\Gamma N\mathcal{V}.\end{equation} It is easy to see that these conditions imply the compatibility of $\gamma$ with the structures $J$ and $F$ i.e., \begin{equation} \label{compatiblemetric2} \gamma(JX,JY)=\gamma(X,Y),\, \gamma(FX,FY)=\gamma(X,Y), \hspace{5mm} \forall X,Y\in\Gamma TM.\end{equation}
Furthermore, if $(M,S)$ is a tangent manifold and $\gamma$ is a pseudo-Riemannian metric on $M$, we will say that $\gamma$ is compatible with the tangent structure $S$ if the $\gamma$-orthogonal bundle $N\mathcal{V}$ of $im\,S$ is a normalization, and $\gamma$ is compatible for the normalized tangent manifold $(M,S,N\mathcal{V})$.
The following result is obvious \begin{prop} \label{proposit31} On a normalized, tangent manifold, any transversal metric $g$ of the vertical foliation defines a unique compatible metric $\gamma$, such that
$\gamma|_{N\mathcal{V}}=g$. \end{prop}
In what follows, we will refer to the metric $\gamma$ as the {\it canonical extension} of the transversal metric $g$. On the other hand, a pseudo-Riemannian metric $\gamma$ of a tangent manifold $(M,S)$ which is the canonical extension of a locally Lagrange metric $g$ will be called a {\it locally Lagrange-Riemann metric}. This means that the restriction of $\gamma$ to the $\gamma$-orthogonal subbundle $N\mathcal{V}$ of the vertical foliation $ \mathcal{V}$ of $S$ is a locally Lagrange metric $g= g_{\mathcal{L}_\alpha}$, and $\gamma$ is compatible with $(M,S,N\mathcal{V})$. Then, $(M,S,\gamma)$ will be called a {\it locally Lagrange-Riemann manifold}. Notice that, since the induced metric of $N\mathcal{V}$ is non degenerate, $N\mathcal{V}$ is a normalization of the vertical foliation, and the compatibility condition of the definition makes sense. Thus, any normalized locally Lagrange space with the canonical extension $\gamma$ of the Lagrange metric $g$ is a locally Lagrange-Riemann manifold, and conversely. \begin{example} \label{example32} {\rm The Euclidean metric $\sum_{i=1}^n[(dx^i)^2+(dy^i)^2]$ is the canonical extension of the locally Lagrange metric defined in Example \ref{example21} on the torus $ \mathbb{T}^{2n}$.}\end{example} \begin{example}\label{example33} {\rm The metric $$\sum_{i=1}^n(dx^i)^2+(dy)^2+\sum_{i=1}^n(dz^i-x^idy)^2+(dt)^2$$ is the canonical extension of the locally Lagrange metric defined in Example \ref{example22} on $M(1,p)\times( \mathbb{R}/\mathbb{Z})$.}\end{example}
Now, let $(M,S,N\mathcal{V},g)$ be a normalized tangent manifold with a transversal metric of the vertical foliation $ \mathcal{V}$ and let $\nabla$ be the Levi-Civita connection of the canonical extension $\gamma$ of $g$.
We are going to define a general connection that includes the connections used in Finsler and Lagrange geometry \cite{{Bao},{M1},{M2}} as particular cases determined by specific normalizations. This will be the so-called {\it second canonical connection} $D$ of a foliated, pseudo-Riemannian manifold $(M,\gamma)$, defined by the following conditions \cite{V3}: i) $N\mathcal{V}$ and $T\mathcal{V}$ are parallel, ii) the restrictions of the metric to $N\mathcal{V}$ and $T\mathcal{V}$ are preserved by parallel translations along curves that are tangent to $N\mathcal{V},T\mathcal{V}$, respectively, iii) the $ \mathcal{V}$-normal, respectively $ \mathcal{V}$-tangent, component of the torsion $T_D(X,Y)$ vanishes if one of the arguments is normal, respectively tangent, to $ \mathcal{V}$. This connection is given by \begin{equation} \label{secondc} \begin{array}{ll} D_{Z_1}Z_2 =p_N\nabla_{Z_1}Z_{2},&D_{Y_1}Y_2 = p_T\nabla_{Y_1}Y_{2},
\\ D_{Y_1}Z_2 =p_N[{Y_1},Z_{2}],&D_{Z_1}Y_2 =p_T[{Z_1},Y_{2}], \end{array} \end{equation} where $Y_1,Y_2\in\Gamma T\mathcal{V}$ and $Z_1,Z_2\in\Gamma N\mathcal{V}$. We will say that $D$ is the {\it canonical connection}, and the connection induced by $D$ in the normal bundle $N\mathcal{V}$, or, equivalently, in the transversal bundle $\nu\mathcal{V}=TM/T\mathcal{V}$, will be called the {\it canonical transversal connection}. The canonical, transversal connection is a Bott (basic) connection \cite{Mol}. The total torsion of the connection $D$ is not zero, namely one has \begin{equation} \label{torsionD} T_D(X,Y)=-p_T[p_NX,p_NY], \hspace{5mm}\forall X,Y\in\Gamma TM. \end{equation}
\begin{prop}\label{propos33} Let $(M,S,g)$ be a locally Lagrange manifold, and $\gamma$ the canonical extension of $g$. Then, the derivative tensor field of $g$ has the following expressions \begin{equation} \label{CRiemann} \begin{array}{l}C(X,Y,Z)=(D_{SX}g)(Y,Z) =(D_{SX}\gamma)(Y,Z)
\\ =\gamma(\nabla_Y(SX),Z) +\gamma(Y,\nabla_Z(SX)),\end{array} \end{equation} where $X,Y,Z\in\Gamma N\mathcal{V}$. \end{prop} \noindent{\bf Proof.} Of course, in (\ref{CRiemann}), $g$ is seen as a $2$-covariant tensor field on $M$ (see Section 2). First, we refer to the first two equalities (\ref{CRiemann}). These are pointwise relations, hence, it will be enough to prove these equalities for foliated cross sections of the normal bundle $N\mathcal{V}$. Indeed, a tangent vector at a point can always be extended to a projectable vector field on a neighborhood of that point. But, in this case, the first and second equalities are straightforward consequences of the definitions of the tensor field $C$ and of the connection $D$. Then, since $\nabla$ has no torsion, (\ref{secondc}) implies \begin{equation} \label{D-nabla} D_{SX}Y=\nabla_{SX}Y-p_T\nabla_{SX}Y-p_N\nabla_Y(SX), \end{equation} and, also using $\nabla\gamma=0$, we get the required result. Q.e.d.
The first two expressions of $C$ actually hold for any vector fields $X,Y,Z\in\Gamma TM$. \begin{corol} \label{corolar32} The canonical extension $\gamma$ of a transversal metric $g$ is a locally Lagrange-Riemann metric iff one of the following two equivalent relations holds \begin{equation}\label{simetria1}\begin{array}{rcl} (D_{SX}\gamma)(Y,Z)& =&(D_{SY}\gamma)(X,Z),
\\ \gamma(\nabla_Y(SX),Z) &+& \gamma(Y,\nabla_Z(SX))
\\ = \gamma(\nabla_X(SY),Z)&+& \gamma(X,\nabla_Z(SY)), \end{array} \end{equation} where $X,Y,Z\in\Gamma N\mathcal{V}$. \end{corol} \begin{corol}\label{corolar31} On a tangent manifold, if $\gamma$ is a compatible pseudo-Rieman\-nian metric such that $\nabla S=0$, then $\gamma$ is a projectable, locally Lagrange-Riemann metric.\end{corol} \noindent {\bf Proof.} If $\nabla S=0$, the third equality (\ref{CRiemann}) yields $C=0$, which is the characterization of this type of metrics. Q.e.d.
Now we consider the curvature of $D$. The curvature is a tensor, and it suffices to evaluate it pointwisely. For this reason, whenever we need an evaluation of the curvature (as well as of any other tensor) that involves vector fields, it will suffice to make that evaluation on $\mathcal{V}$-projectable vector fields. \begin{prop} \label{valoricurb} The curvature $R_D$ of the canonical connection has the following properties \begin{equation} \label{Bott1} R_{D}(SX,SY)Z=0,\end{equation} \begin{equation}\label{Bott2}R_{D}(SX,Y)Z=p_N[SX,D_YZ],\end{equation} \begin{equation}\label{Bott3} R_{D}(X,Y)(SZ)=-D_{SZ}(p_T[X,Y]), \end{equation} \begin{equation}\label{Bott4} R_{D}(SX,Y)Z=R_{D}(SX,Z)Y, \end{equation} for any foliated vector fields $X,Y,Z\in\Gamma N\mathcal{V}$. Moreover, formulas (\ref{Bott1}), (\ref{Bott3}), and (\ref{Bott4}) hold for any arguments $X,Y,Z\in\Gamma N\mathcal{V}$.\end{prop} \noindent{\bf Proof.} Equality (\ref{Bott1}) is in agreement with the fact that $D$ is a Bott connection \cite{Mol}. Formulas (\ref{Bott1})-(\ref{Bott3}) follow from (\ref{secondc}) and (\ref{torsionD}). Formula (\ref{Bott4}) is a consequence of (\ref{Bott2}). In the computation, one will take into account the fact that for any foliated vector field $X\in\Gamma TM$ and any vector field $Y\in\Gamma T\mathcal{V}$ one has $[X,Y]\in\Gamma T\mathcal{V}$ \cite{Mol}. Q.e.d. \begin{prop} \label{proposit30} For the canonical connection $D$, the first Bianchi identity is equivalent to the following equalities, where $X,Y,Z\in\Gamma N\mathcal{V}$ \begin{equation}\label{Bianchi0} \sum_{Cycl(X,Y,Z)} R_D(SX,SY)(SZ)=0, \end{equation} \begin{equation}\label{Bianchi1} R_{D}(SX,Z)SY=R_{D}(SY,Z)SX, \end{equation} \begin{equation}\label{Bianchi3} \sum_{Cycl(X,Y,Z)} R_D(X,Y)Z=0.\end{equation} \end{prop} \noindent{\bf Proof.} Write down the general expression of the Bianchi identity of a linear connection with torsion (e.g., \cite{KN}) for arguments tangent and normal to $ \mathcal{V}$. 
Then, compute using (\ref{secondc}), (\ref{torsionD}) and projectable vector fields as arguments. The fourth relation included in the Bianchi identity reduces to (\ref{Bott3}). Q.e.d. \begin{prop} \label{proposit39} For the canonical connection $D$, the second Bianchi identity is equivalent to the following equalities, where $X,Y,Z\in\Gamma N\mathcal{V}$, \begin{equation} \label{Bianchi23} \sum_{Cycl(X,Y,Z)}(D_{SX}R_{D})(SY,SZ)=0.\end{equation} \begin{equation}\label{Bianchi21} (D_{SX}R_{D})(SY,Z)-(D_{SY}R_{D})(SX,Z) =(D_ZR_D)(SX,SY), \end{equation} \begin{equation} \label{Bianchi22} (D_{X}R_{D})(Y,SZ)-(D_{Y}R_{D})(X,SZ)+(D_{SZ}R_{D})(X,Y) \end{equation}$$=R_D(p_T[X,Y],SZ),$$ \begin{equation}\label{Bianchi20} \sum_{Cycl(X,Y,Z)}(D_{X}R_{D})(Y,Z)= \sum_{Cycl(X,Y,Z)}R_D(p_T[X,Y],Z),\end{equation} \end{prop} \noindent{\bf Proof.} This is just a rewriting of the classical second Bianchi identity \cite{KN} that uses (\ref{torsionD}). Q.e.d.
Like in Riemannian geometry, we also define a covariant curvature tensor \begin{equation} \label{covarcurb} R_D(X,Y,Z,U)=\gamma(R_D(Z,U)Y,X),\hspace{5mm}X,Y,Z,U\in\Gamma TM. \end{equation} In particular, we have \begin{prop} \label{curb+C} \begin{equation} \label{cocurb} R_{D}(U,Z,SX,Y)=g([SX,D_YZ],U)\end{equation} $$=(SX)(g(D_YZ,U))-C(X,D_YZ,U),$$ where the arguments are foliated vector fields in $\Gamma N\mathcal{V}$, and $g$ is seen as a tensor on $M$. \end{prop}
Formula (\ref{Bianchi3}) yields the Bianchi identity \begin{equation}\label{Bianchicovar1} \sum_{Cycl(X,Y,Z)}R_D(U,X,Y,Z)=0,\hspace{5mm}\forall X,Y,Z\in\Gamma N\mathcal{V}. \end{equation} But, the other Riemannian symmetries may not hold. Indeed, we have \begin{prop} \label{antisym1-2} For any arguments $X,Y,Z,U\in \Gamma N\mathcal{V}$ one has \begin{equation}\label{arg1-2} R_D(X,Y,Z,U)+R_D(Y,X,Z,U)=(D_{p_T[Z,U]}\gamma)(X,Y)\end{equation} $$=C(S'p_T[Z,U],X,Y).$$ \end{prop} \noindent{\bf Proof.} Express the equality $$(ZU-UZ-[Z,U])(\gamma(X,Y))=0$$ for normal foliated arguments, and use the transversal metric character of the canonical connection $D$ and Proposition \ref{propos33}. Q.e.d.
\begin{prop}\label{symperechi} For any arguments $X,Y,Z,U\in \Gamma N\mathcal{V}$ one has \begin{equation}\label{arg1-2,3-4} R_D(X,Y,Z,U)-R_D(Z,U,X,Y)\end{equation} $$=\frac{1}{2}\{C(S'p_T[Z,U],X,Y) - C(S'p_T[X,Y],Z,U)\}.$$ \end{prop} \noindent{\bf Proof.} Same proof as for Proposition 1.1 of \cite{KN}, Chapter V. Q.e.d.
The other first and second Bianchi identities may also be expressed in a covariant form. From (\ref{covarcurb}) we get \begin{equation} \label{DRcovar} (D_FR_D)(A,B,C,E)=\gamma((D_FR_D)(C,E)B,A)\end{equation} $$+(D_F\gamma)(R_D(C,E)B,A),$$ where $(A,B,C,E,F\in \Gamma TM)$. Accordingly, (\ref{Bianchi22}) yields \begin{equation}\label{Bianchi21covar} (D_{SZ}R_D)(V,U,X,Y) +(D_XR_D)(V,U,Y,SZ)-(D_YR_D)(V,U,X,SZ) \end{equation} $$=(D_{SZ}\gamma)(R_D(X,Y)U,V) -(D_X\gamma)(p_N[SZ,D_YU],V) +(D_Y\gamma)(p_N[SZ,D_XU],V), $$ (\ref{Bianchi20}) yields \begin{equation}\label{Bianchi22covar} \sum_{Cycl(X,Y,Z)}(D_XR_D)(V,U,Y,Z)= \sum_{Cycl(X,Y,Z)}R_D(V,U,p_T[X,Y],Z)\end{equation} $$-\sum_{Cycl(X,Y,Z)}(D_X\gamma)(R_D(Y,Z)U,V), $$ etc., where $X,Y,Z,U,V\in \Gamma N\mathcal{V}$. \begin{example}\label{exemplu1} {\rm On the torus $ \mathbb{T}^{2n}$ with the metric of Example \ref{example32}, the usual flat connection is both the Levi-Civita connection and the canonical connection $D$, and it has zero curvature. On the manifold $M(1,p)\times( \mathbb{R}/\mathbb{Z})$ with the metric of Example \ref{example33}, the connection that parallelizes the orthonormal basis shown by the expression of the metric is not the Levi-Civita connection, since it has torsion, but, it follows easily that it has the characteristic properties of the canonical connection $D$. Accordingly, we are in the case of a locally Lagrange-Riemann manifold with a vanishing curvature $R_D$ and a non vanishing torsion $T_D$.} \end{example}
\begin{prop} \label{proposit38} The Ricci curvature tensor $\rho_D$ of the connection $D$ is given by the equalities \begin{equation}\label{Ricci1} \rho_D(SX,SY)= \sum_{i=1}^n<\vartheta^i,R_D(\frac{\partial}{\partial y^i},SX)SY>, \end{equation} \begin{equation} \label{Ricci2}\rho_D(SX,Y)= \sum_{i=1}^n<dx^i,p_N[D_{X_i}Y,SX]>, \end{equation} \begin{equation}\label{Ricci3} \begin{array}{rcl} \rho_D(X,Y)&=&tr[Z\mapsto R_D(Z,X)Y]
\\ &=&\sum_{i=1}^n<dx^i,R_D(X_i,X)Y>,\end{array}\end{equation} where $X,Y,Z\in\Gamma N\mathcal{V}$, and in (\ref{Ricci2}) $Y$ is projectable. \end{prop} \noindent{\bf Proof.} The definition of the Ricci tensor of a linear connection (e.g., \cite{KN}), and the use of the bases (\ref{bases}) and (\ref{cobases}) yield \begin{equation}\label{Ricci} \rho_D(X,Y)= \sum_{i=1}^n<dx^i,R_D(X_i,X)Y> + \sum_{i=1}^n<\theta^i,R_D(\frac{\partial}{\partial y^i},X)Y>. \end{equation} Then, the results follow from (\ref{secondc}) and (\ref{Bott2}). Q.e.d. \begin{rem} \label{curbscalara} {\rm In view of (\ref{Ricci3}), we may speak of $\kappa_D=tr\,\rho_D$ on $N\mathcal{V}$, and call it the {\it transversal scalar curvature}.}\end{rem}
In the case of a normalized, bundle-type, tangent manifold $(M,S,E,N\mathcal{V})$, with a compatible metric $\gamma$ ($E$ is the Euler vector field), the curvature has some more interesting features, which were studied previously in Finsler geometry \cite{Bao}. These features follow from \begin{lemma} \label{lemaDSZS'E} For any $Z\in\Gamma N\mathcal{V}$ one has \begin{equation}\label{eqlemei} D_{SZ}(S'E)=Z. \end{equation}\end{lemma} \noindent{\bf Proof.} $S'$ is the tensor defined at the beginning of this section, and with local bundle-type coordinates $(x^i,y^i)_{i=1}^n$ and bases (\ref{bases}), we have $$SZ=\xi^i(x^j,y^j)\frac{\partial}{\partial y^i}, \;S'E=y^iX_i.$$ Now, (\ref{eqlemei}) follows from (\ref{secondc}). Q.e.d.
Using Lemma \ref{lemaDSZS'E} one can prove \begin{prop} \label{valoricurbura}
The curvature operator $R_D(X,Y)|_{N\mathcal{V}}$
$(X,Y\in\Gamma N\mathcal{V})$ is determined by its action on $S'E$ and by $R_D(V,SW)|_{N\mathcal{V}}$ where $V,W\in\Gamma N\mathcal{V}$. \end{prop} \noindent{\bf Proof.} Denote \begin{equation} \label{defr} r(X,Y)=R_D(X,Y)S'E. \end{equation} The covariant derivative of this tensor contains a term, which, in view of (\ref{eqlemei}), is equal to $R_D(X,Y)Z$, and we get \begin{equation} \label{ecuatiar} R_D(X,Y)Z=D_{SZ}(r(X,Y)) -r(D_{SZ}X,Y) \end{equation} $$-r(X,D_{SZ}Y)-(D_{SZ}R_D)(X,Y)S'E.$$ Now, if the last term of (\ref{ecuatiar}) is expressed by means of the Bianchi identity (\ref{Bianchi22}) one gets an expression of
$R_D(X,Y)Z$ in terms of $r$ and $R_D(V,SW)|_{N\mathcal{V}}$ for various arguments $V,W$. Q.e.d.
Notice that, by (\ref{Bott2}), the computation of $R_D(V,SW)|_{N\mathcal{V}}$ on normal arguments requires only a first order covariant derivative.
From Proposition \ref{valoricurbura} we also see that the curvature values $R_D(U,Z,X,Y)$ $(X,Y,Z,U\in\Gamma N\mathcal{V})$ are determined by the values $R_D(U,S'E,X,Y)$ and $R_D(U,V,W,SK)$ for convenient normal arguments. Therefore, it should be interesting to study manifolds where $R_D(U,S'E,X,Y)$ has a simple expression. If we fix a direction $span\{U\}$ and a $2$-dimensional plane $\sigma=span\{X,Y\}$ $(U,X,Y\in\Gamma N\mathcal{V})$, the formula \begin{equation} \label{Ucurbsect} k_U(\sigma)=\frac{R_D(U,S'E,X,Y)}{\gamma(S'E,X)\gamma(U,Y) - \gamma(S'E,Y)\gamma(U,X)} \end{equation} defines an invariant, which we will call the $U$-{\it sectional curvature} of $\sigma$. $k_U(\sigma)$ is independent of $U$ iff \begin{equation} \label{Uindep} R_D(X,Y)S'E=k(\sigma) [\gamma(S'E,X)Y-\gamma(S'E,Y)X], \end{equation} where $k(\sigma)$ is a function of the point of $M$ and the plane $\sigma$ only. Furthermore, if $k(\sigma)=f(x)$, $x\in M$, i.e. $k(\sigma)$ is pointwise constant, (\ref{Uindep}) is a natural simple expression of the transversal curvature tensor.
On the other hand, we can generalize the notion of {\it flag curvature}, which is an important invariant in Finsler geometry \cite{Bao}. Namely, a {\it flag} $\phi$ at a point $x\in M$ is a $2$-dimensional plane $\phi\subseteq T_xM$ which contains the vector $E_x$. Such a flag is $\phi= span\{E_x,X_x\}$, where $X_x\in N_x\mathcal{V}$ is defined up to a scalar factor, and following \cite{Bao}, the flag curvature is defined by \begin{equation} \label{flagcurv} k(\phi)=k(X)=\frac{R_D(X,S'E,X,S'E)}{g(S'E,S'E)g(X,X)-g^2(S'E,X)}. \end{equation}
If $g$ is not positive definite, the flag curvature may take infinite values. \begin{prop} \label{propflag} The flag curvature $k$ is pointwise constant iff \begin{equation} \label{constantflagc} R_D(X,S'E,Y,S'E)=f[g(S'E,S'E)g(X,Y)-g(S'E,X)g(S'E,Y)],\end{equation} where $f\in C^\infty(M)$. If the $U$-sectional curvature is independent of $U$ and pointwise constant, the flag curvature is pointwise constant too. \end{prop} \noindent{\bf Proof.} For the first assertion, use $k(X+Y)=k(X)=k(Y)$. The second follows because, if $k(\sigma)=f(x)$, (\ref{Uindep}) implies \begin{equation} \label{Rtriplu} R_D(U,S'E,X,Y)=f(x)[\gamma(S'E,X)\gamma(Y,U)-\gamma(S'E,Y)\gamma(X,U)], \end{equation} which reduces to (\ref{constantflagc}) for $Y=S'E$. Q.e.d. \begin{rem} \label{obsFinsler} {\rm The curvature $R_D$ has more interesting properties in the case of a bundle-type, locally Lagrange manifold such that the metric tensor $g$ is homogeneous of degree zero with respect to the coordinates $y^i$. The invariant characterization of this situation is that the derivative tensor $C$ is symmetric, and such that \begin{equation} \label{eqobsFinsler} i(S'E)C=0. \end{equation} Indeed, in this case, formulas (\ref{arg1-2}), (\ref{arg1-2,3-4}), etc., yield simpler symmetry properties if one of the arguments is $S'E$. The Finsler metrics satisfy the homogeneity condition (\ref{eqobsFinsler}).} \end{rem} \begin{rem}\label{alteconex} {\rm On a locally Lagrange-Riemann manifold $(M,S,\gamma)$ there exist other geometrically interesting connections as well. One such connection is \begin{equation}\label{nablabar} \nabla'_{X}Y=p_{N}(\nabla _X(p_{N}Y)) + p_{T}(\nabla_{X}(p_{T}Y)).\end{equation} The connection $\nabla'$ preserves the vertical and horizontal distributions and the metric, but has a non zero torsion. 
Then, we have the connections $ ^C\hspace{-1mm}D,^C\nabla'$, which can be defined by using formulas (\ref{secondc}), (\ref{nablabar}) with the Levi-Civita connection $\nabla$ replaced by the {\it Chern connection} $^C\nabla$ i.e., the $\gamma$-metric, $J$-preserving connection that has a torsion with no component of $J$-type $(1,1)$ $(J=S'-S)$ \cite{KN}.}\end{rem}
We finish by recalling the well known fact \cite{Kern,M1,M2} that global Finsler and Lagrange structures of tangent bundles have an invariant normalization. This normalization may be defined as follows.
Let $ \mathcal{L}$ be the global Lagrangian function. Then the {\it energy function} \begin{equation} \label{energy}\mathcal{E}_\mathcal{L}=E\mathcal{L}-\mathcal{L} \end{equation} has a Hamiltonian vector field $X_\mathcal{E}$ defined by \begin{equation} \label{HamE} i(X_{{\mathcal E}})\omega_\mathcal{L}=-d\mathcal{E}_\mathcal{L}, \end{equation} where $\omega_\mathcal{L}$ is the Lagrangian symplectic form (\ref{formasimpl}); the vector field $X_\mathcal{E}$ turns out to be a second order vector field. Accordingly, $L_{X_\mathcal{E}}S$ is an almost product structure on $M$ (see Section 1), and $N_\mathcal{E}\mathcal{V}= im\,H$, with $H$ defined by (\ref{almprod}) is a canonical normal bundle of $ \mathcal{V}$.
A locally Lagrangian structure $\{\mathcal{L}_\alpha\}$ on a bundle-type tangent manifold $(M,S,E)$ defines a global function ({\it second order energy}) \begin{equation} \label{secondenerg} \mathcal{E}'=E^2\mathcal{L}_\alpha -E\mathcal{L}_\alpha, \end{equation} but, generally, it has no global Hamiltonian vector field, and, even if such a field exists, it may not be a second order vector field.
\\ \noindent{\it Acknowledgement}. Part of the work on this paper was done during a visit at the Erwin Schr\"odinger International Institute for Mathematical Physics, Vienna, Austria, October 1-10, 2002, in the framework of the program ``Aspects of foliation theory in geometry, topology and physics". The author thanks the organizers of the program, J. Glazebrook, F. Kamber and K. Richardson, the ESI and its director prof. P. Michor for having made that visit possible.
\hspace*{7.5cm}{\small \begin{tabular}{l} Department of Mathematics\\ University of Haifa, Israel\\ E-mail: vaisman@math.haifa.ac.il \end{tabular}}
\end{document} |
\begin{document}
\title{Entangled-photon Fourier optics}
\author{Ayman F. Abouraddy, Bahaa E. A. Saleh, Alexander V. Sergienko, and Malvin C. Teich} \address{Quantum Imaging Laboratory, Departments of Electrical $\&$ Computer Engineering and Physics, Boston University, Boston, MA $02215-2421$}
\maketitle \begin{abstract} Entangled photons, generated by spontaneous parametric down-conversion from a second-order nonlinear crystal, present a rich potential for imaging and image-processing applications. Since this source is an example of a three-wave mixing process, there is more flexibility in the choices of illumination and detection wavelengths and in the placement of object(s) to be imaged. Moreover, this source is entangled, a fact that allows for imaging configurations and capabilities that cannot be achieved using classical sources of light. In this paper we examine a number of imaging and image-processing configurations that can be realized using this source. The formalism that we utilize facilitates the determination of the dependence of imaging resolution on the physical parameters of the optical arrangement. \end{abstract}
\section{INTRODUCTION}
The process of optical parametric three-wave mixing in a second-order nonlinear medium \cite{JA,DKleinman,RK,SA,NB,FZ,AY,PB,BS1,DM} involves the coherent interaction between three optical fields with, generally, different wavelengths: pump, signal, and idler. Because of the phase-matching requirements \cite{FZ,AY,DM}, the wave-vectors are related and the spatial distributions of the fields are therefore highly coupled. This process may therefore be utilized in {\it distributed multi-wavelength imaging} or image processing, where objects are placed in the path of one or more of these fields of different wavelengths and the intensities or cross-correlations are measured. Two examples of phenomena based on three-wave mixing, optical parametric oscillation (OPO) and optical parametric amplification (OPA), have been studied extensively and many interesting phenomena of spatial correlation \cite{AG}, pattern formation \cite{AB1,LL}, and reduced-noise image amplification \cite{SC,IS} have been reported.
A third example is the process of spontaneous parametric down-conversion (SPDC) \cite{DK1}, a phenomenon that exhibits quantum entanglement \cite{ES}. The signal and idler waves, created when the nonlinear medium is illuminated by an intense laser beam (pump), are produced in the form of photon pairs in an entangled quantum state (biphotons). Spatial and spectral entanglement are a consequence of the multiple possibilities for satisfying conservation of energy and momentum for each photon pair. Interest in the SPDC process has spurred many studies of its spatial and spatio-temporal photon correlation properties \cite{DK2,AM,DK3,PR1,AJ1,DS,AJ2,AB2,CM,BM,AA1}, and some imaging applications based on the measurement of photon-pair coincidence have been proposed \cite{AB3,BS2} and tested \cite{TP1,TP2}.
In this paper we develop a general Fourier-optics theory of image formation based on the SPDC process. In Section 2 we explore new configurations for multi-wavelength distributed imaging and image-processing applications. We follow an approach introduced in a previous paper \cite{BS3}, in which we established a duality between partial quantum entanglement and classical partial coherence theory. We use the formalism developed in Ref.~\cite{BS3} and apply it to the image-formation process in distributed multi-wavelength imaging configurations made possible by the nature of this SPDC source. In Section 3 we study the imaging resolution of these configurations and the effect on it of the various physical parameters of the system. In the Appendix we provide a brief review of classical imaging theory in the framework of the optical bilinear transformation.
\section{CONFIGURATIONS FOR SPDC BIPHOTON IMAGING AND IMAGE PROCESSING}
The principal function of an optical imaging system is to transfer the spatial distribution of some physical property of an object (transmittance, reflectance, or absorbance), via an optical wave, to a remote location where it is measured with a photodetector \cite{BS1,MB}. An image-processing system transforms one image into another with enhanced features, or obtains a new image from more than one image, such as the correlation of two images \cite{JG,GR,FY}. We examine here various configurations for imaging and image processing based on biphoton beams generated by SPDC. As mentioned above, the existence of three optical beams (pump, signal, and idler) allows us to construct imaging configurations that are not achievable with other single-beam optical sources. We may place an object that is to be imaged in either of the three beams. We may alternatively place more than one object in these beams and obtain the correlation of their images.
All these configurations include two features. The first is that they are examples of distributed imaging \cite{AB3,BS2,AA2}. In analogy to distributed computing, where the computation resources are distributed over a network, distributed imaging allows us to reallocate the imaging components from the particular path connecting the source to the object to be imaged. The second is that they allow the possibility of multi-wavelength imaging: the object may be illuminated with one wavelength whereas detection takes place at another.
The use of other nonclassical sources of light in imaging has been shown to lead to noise reduction \cite{SC,IS,MK1,MK2}. In this paper we direct our attention to various {\it imaging configurations}. The quantum nature of the SPDC source offers the additional advantage of noise reduction, but this is immaterial to the task at hand.
In the process of SPDC, an intense laser beam (pump) illuminates a nonlinear crystal (NLC) with quadratic nonlinear susceptibility \cite{AY,DK1}. Some of the pump photons disintegrate into pairs of photons (known traditionally as signal and idler), which conserve the energy and momentum of the parent pump photon. Consider the situation depicted in Fig. \ref{overall layout}. The pump beam illuminates the NLC and the signal and idler beams are measured by the single-photon detectors D$_1$ and D$_2$, respectively. We assume throughout a planar source and a one-dimensional geometry in the transverse plane for the sake of simplicity but without loss of generality. Optical systems, containing objects to be imaged and any optical components, may be placed in any of the three available beams.
The signal and idler photons can be emitted from the NLC in a variety of configurations. The two photons may be emitted in two different and distinct directions, in which case each photon will pass through a different (and possibly remote) optical system; this configuration is denoted non-collinear. The two photons may be emitted in the same spatial wave packet, the collinear case, but have some distinguishing characteristic, such as frequency or polarization, whereupon they effectively pass through different optical systems if the components are dispersive or polarization dependent. The two photons are detected in the same output plane in this case. Finally, the two photons may be emitted in the same spatial wave packet and have no distinguishing characteristic, the collinear degenerate case, and thus they pass through the same optical system and are detected in the same output plane.
The coincidence rate of photon pairs at the two detectors, D$_1$ and D$_2$ located at positions $x_{1}$ and $x_{2}$, respectively, is proportional to the fourth-order correlation function of the fields, $G^{(2)}(x_{1},x_{2})$ \cite{BS3,RG}, the biphoton rate. The signal and idler beams traverse optical systems described by their impulse response functions $h_{s}(x_{1},x)$ and $h_{i}(x_{2},x)$, respectively. It has been shown that the biphoton rate is given by \cite{BS3} \begin{equation}\label{G2psi}
G^{(2)}(x_{1},x_{2})=|\psi(x_{1},x_{2})|^{2}, \end{equation} where the biphoton amplitude $\psi(x_{1},x_{2})$ is \begin{equation}\label{basicEq} \psi(x_{1},x_{2})=\int dxE_{p}(x)h_{s}(x_{1},x)h_{i}(x_{2},x); \end{equation} here $E_{p}(x)$ is the spatial distribution of the pump field at the entrance to the NLC. The result in Eq. (\ref{basicEq}) was derived assuming a thin NLC and the presence of narrowband spectral filters in the optical system. These two assumptions simplify the analysis considerably without overshadowing the physics of the imaging processes discussed. They will be relaxed in Section 3.
An interpretation of Eq. (\ref{basicEq}) that is useful in understanding the behavior of such a system was advocated by Klyshko \cite{DK2,DK3,DS,AB3} under the name ``advanced wave interpretation''. In this picture, the biphoton amplitude in Eq. (\ref{basicEq}) can be viewed as the impulse response function of an optical system represented by the cascade of three systems: propagation from D$_{1}$ at $x_{1}$ back through a system with impulse response function $h_{s}^{r}(x,x_{1})=h_{s}(x_{1},x)$, modulation by the pump field $E_{p}$, and subsequent transmission through a system with impulse response function $h_{i}(x_{2},x)$. An intuitive advantage can be gained by unfolding the system in this way as will become clear shortly.
Two special correlation functions deriving from $G^{(2)}(x_{1},x_{2})$ in Eq. (\ref{G2psi}) are of interest: the marginal coincidence rate $I^{(2)}(x_{2})$, and the conditional coincidence rate $I_{0}^{(2)}(x_{2})$, defined by \begin{equation}\label{I2x} I^{(2)}(x_{2})=\int dx_{1}G^{(2)}(x_{1},x_{2}), \end{equation} \begin{equation}\label{I20x} I_{0}^{(2)}(x_{2})=G^{(2)}(0,x_{2}). \end{equation} The marginal coincidence rate $I^{(2)}(x_{2})$ is proportional to the probability of detecting a photon at $x_{2}$ by D$_{2}$ when detector D$_{1}$ detects a photon at any location $-\infty<x_{1}<\infty$. The conditional coincidence rate $I_{0}^{(2)}(x_{2})$ is proportional to the probability of detecting a photon at $x_{2}$ by D$_{2}$ when D$_{1}$ detects a photon at $x_{1}=0$.
We proceed to examine the five configurations that are possible with this optical source and explore their imaging and image-processing potential.
\subsection{Object in the signal (or idler) beam}
The generalized biphoton optical system described by Eq. (\ref{basicEq}) permits the object to be placed in {\it either} the signal {\it or} the idler beams such that its transmittance (or reflectance) modifies either of the impulse response functions $h_{s}$ or $h_{i}$. Without loss of generality, we assume that the object is placed in the signal beam. However, the choice of either beam might be dictated by wavelength considerations.
Consider the situation depicted in Fig. \ref{object in the signal beam}. The reverse signal system $h_{s}^{r}(x,x_{1})=h_{s}(x_{1},x)$ is regarded as a cascade of two linear systems of impulse response functions $h_{1}$ and $h_{2}$ with the object $t$ sandwiched in between. The biphoton amplitude can thus be written as \begin{equation}\label{basicEqObjInSignal} \psi(x_{1},x_{2})=\int dx't(x')h_{1}(x_{1},x')h_{3}(x_{2},x'), \end{equation} where $h_{3}$ is the impulse response function of a system composed of a cascade of the reverse of system $h_{2}$, an aperture $E_{p}$, and the system $h_{i}$, and is given by \begin{equation}\label{h3} h_{3}(x_{2},x')=\int dxE_{p}(x)h_{i}(x_{2},x)h_{2}(x',x). \end{equation} Equation (\ref{basicEqObjInSignal}) states that the overall system is composed of an illumination system $h_{1}$ illuminating the object $t$, followed by an imaging system $h_{3}$, which is dependent on $h_{2}$, $h_{i}$, and $E_{p}$, in accordance with Eq. (\ref{h3}). The uniformity of the illumination system $h_{1}$ and the resolution of the system $h_{3}$ determine the quality of the overall imaging system.
In this configuration, then, the conditional coincidence rate, obtained using Eqs. (\ref{G2psi}), (\ref{I20x}), and (\ref{basicEqObjInSignal}), is \begin{equation}\label{I20ObjInSig}
I_{0}^{(2)}(x_{2})=|\int dx't(x')h_{1}(0,x')h_{3}(x_{2},x')|^{2}. \end{equation} This system is mathematically equivalent to a coherent optical system where the object is modulated by the illumination distribution $h_{1}(0,x')$ and transformed by a linear system of point spread function $h_{3}(x_{2},x')$, followed by a squarer, viz. the bilinear transformation of Eq. (\ref{A2}).
On the other hand, using Eqs. (\ref{G2psi}), (\ref{I2x}), and (\ref{basicEqObjInSignal}), the marginal coincidence rate, measured when D$_{1}$ collects photons from all points in its plane (i.e., acts as a bucket detector), is given by \begin{eqnarray}\label{I2ObjInSig}
I^{(2)}(x_{2})&&=\int dx_{1}|\int dx't(x')h_{1}(x_{1},x')h_{3}(x_{2},x')|^{2}\nonumber\\&&=\int\int dx'dx''t^{*}(x')t(x'')g(x',x'')h^{*}_{3}(x_{2},x')h_{3}(x_{2},x''). \end{eqnarray} The quadratic transformation of the object $t(x)$ in Eq. (\ref{I2ObjInSig}) is clearly the mathematical equivalent of the bilinear transformation in Eq. (\ref{A1}), representing a partially coherent imaging system. The function $g(x',x'')$ is given by \begin{equation}\label{gxxdash} g(x',x'')=\int dx_{1} h_{1}^{*}(x_{1},x')h_{1}(x_{1},x''), \end{equation} and plays the role of the coherence function of the field.
Two limiting forms of $g(x',x'')$ are of interest. The first is $g(x',x'')=\delta(x'-x'')$, which leads to \begin{equation}
I^{(2)}(x_{2})=\int dx'|t(x')|^{2}|h_{3}(x_{2},x')|^{2}, \end{equation} which is the equivalent of an incoherent imaging system [Eq. (\ref{A3})]. The other limit is $g(x',x'')=f^{*}(x')f(x'')$, in which case \begin{equation}
I^{(2)}(x_{2})=|\int dx't(x')f(x')h_{3}(x_{2},x')|^{2}, \end{equation} which is the equivalent of a coherent imaging system [Eq. (\ref{A2})]. We can achieve the first limit by using a $2-f$ (Fourier transform) system or a $4-f$ (imaging) system for $h_{1}$ followed by a bucket detector. Moving the bucket detector in the $2-f$ system away from the back focal plane or changing the area of the detector would lead to a gradual transition from coherent to incoherent imaging. This was performed experimentally in Ref.~\cite{PR1} where the change from coherent to incoherent imaging, achieved by changing the detector aperture size in one beam, was observed by monitoring the loss of the fringe visibility of a double-slit placed in the other beam.
We now examine a few examples based on this configuration that manifest its usefulness.
\subsubsection{Example: Fourier transform imaging}
Consider the system illustrated in Fig. \ref{object in the signal beam examples 1 and 2}. The signal and idler systems are unfolded for the sake of clarity. We assume for simplicity in this and in the following examples, unless indicated otherwise, that the pump and the NLC are of infinite transverse extent. The signal arm includes the object, $t$, and the system $h_{2}$ is nothing but free space propagation at the signal wavelength $\lambda_{s}$ for a distance $d_{s}$. The idler system comprises a lens of focal length $f$ at a distance $d_{i}$ from the NLC, and a distance $d_{2}$ from the detection plane, as shown in Fig. \ref{object in the signal beam example 3}. Free-space propagation for the distance $d_{s}$ at $\lambda_{s}$, and for the distance $d_{i}$ at $\lambda_{i}$, may be substituted by free-space propagation for an equivalent distance $d_{1}$ at wavelength $\lambda_{i}$, where \begin{equation}\label{d1} d_{1}=d_{i}+d_{s}\frac{\lambda_{s}}{\lambda_{i}}. \end{equation} If we take $d_{2}$ to be equal to the focal length of the lens $f$, and also choose $d_{i}$ and $d_{s}$ such that $d_{1}=f$ according to Eq. (\ref{d1}), then the system becomes a Fourier transform system with impulse response function \begin{equation}\label{h3ObjInSigExample1} h_{3}(x_{2},x')=\textmd{exp}(j2\pi[\frac{d_{s}}{\lambda_{s}}+\frac{d_{i}+f}{\lambda_{i}}])\textmd{exp}(-j\frac{2\pi}{\lambda_{i}f}x_{2}x'). \end{equation}
If we now take the illumination system $h_{1}$ to be uniform, so that $h_{1}(0,x')=1$, then the overall system is a Fourier transform system when the conditional coincidence rate is considered. Equations (\ref{I20ObjInSig}) and (\ref{h3ObjInSigExample1}) then yield \begin{equation}
I_{0}^{(2)}(x_{2})=|T(\frac{2\pi}{\lambda_{i}f}x_{2})|^{2}, \end{equation} where $T(q)$ is the Fourier transform of $t(x)$. The system simply generates the diffraction pattern of the object distribution.
\subsubsection{Example: Ideal single-lens imaging}
In the same configuration depicted in Fig. \ref{object in the signal beam examples 1 and 2}, we may choose the distance $d_{1}$, calculated according to Eq. (\ref{d1}), and the distance $d_{2}$ to satisfy \begin{equation}\label{IdealImagingEq} \frac{1}{d_{1}}+\frac{1}{d_{2}}=\frac{1}{f}, \end{equation} which is the geometrical-optics imaging equation of a thin lens of focal length $f$. In this case the impulse response function of the system $h_{3}$ becomes \begin{equation} h_{3}(x_{2},x')=\textmd{exp}(j2\pi[\frac{d_{s}}{\lambda_{s}}+\frac{d_{i}+d_{2}}{\lambda_{i}}])\textmd{exp}(j\frac{\pi x_{2}^{2}}{\lambda_{i}d_{2}}[1-\frac{1}{M}])\delta(x_{2}-Mx'), \end{equation} where $M=-\frac{d_{2}}{d_{1}}$ is the magnification of the imaging system. If the illumination system $h_{1}$ is uniform, then the conditional coincidence rate $I_{0}^{(2)}(x_{2})$ is proportional to the magnified object intensity transmittance,
$|t(\frac{x_{2}}{M})|^{2}$. The marginal coincidence rate $I^{(2)}(x_{2})$, however, is
$I^{(2)}(x_{2})=g(\frac{x_{2}}{M},\frac{x_{2}}{M})|t(\frac{x_{2}}{M})|^{2}$, where $g(x',x'')$ is given by Eq. (\ref{gxxdash}). If $g(x',x')$ is uniform over an area larger than that of the image, $I^{(2)}(x_{2})$ becomes proportional to the magnified object intensity transmittance as is the case for $I_{0}^{(2)}(x_{2})$.
Note that the lens may equivalently be put in the signal beam and the distances readjusted so as to satisfy a condition similar to Eq. (\ref{IdealImagingEq}). The system developed by Pittman {\it et al.} \cite{TP1} is an example of this case in which the object is placed directly in the plane of D$_{1}$ so that $h_{1}(x_{1},x')=\delta(x_{1}-x')$.
\subsubsection{Example: Lens in the pump}
We now study another example where we manipulate the pump beam and place the object in either the signal or idler paths. An example of this configuration is the system proposed by Belinsky and Klyshko \cite{AB3} and demonstrated experimentally by Pittman {\it et al.} \cite{TP2}.
The configuration is shown schematically in Fig. \ref{object in the signal beam example 3}. A plane-wave pump beam is focused by a lens of focal length $f$, located at a distance $d<f$ from the NLC. The pump wave front now has a radius of curvature $R=f-d$ at the NLC entrance, and consequently acts as a lens or spherical mirror in the advanced wave interpretation. The signal system comprises free space propagation for a distance $d_{1}$ at $\lambda_{s}$, followed by the object $t$ and an optical system $h_{1}$. The idler system is simply free space propagation for a distance $d_{2}$ at $\lambda_{i}$.
If the following relationship is satisfied by the various distances and wavelengths: \begin{equation}\label{IdealImagingEqLensInPump} \frac{1}{\lambda_{s}d_{1}}+\frac{1}{\lambda_{i}d_{2}}=\frac{1}{\lambda_{p}R}, \end{equation} then $h_{3}(x_{2},x')$, from Eq. (\ref{h3}), is \begin{equation} h_{3}(x_{2},x')=\textmd{exp}(j2\pi[\frac{d_{1}}{\lambda_{s}}+\frac{d_{2}}{\lambda_{i}}])\textmd{exp}(j\frac{\pi x_{2}^{2}}{\lambda_{i}d_{2}}[1-\frac{1}{M}])\delta(x_{2}-Mx'). \end{equation} This is the impulse response function of an imaging system of magnification $M=-\frac{d_{2}\lambda_{i}}{d_{1}\lambda_{s}}$, and Eq. (\ref{IdealImagingEqLensInPump}) is the corresponding imaging equation. Note the similarities between Eqs. (\ref{IdealImagingEq}) and (\ref{IdealImagingEqLensInPump}), despite the fact that the lens is in the signal beam for the former and in the pump beam for the latter.
In the degenerate case where the signal and idler frequencies are equal, $\lambda_{s}=\lambda_{i}=2\lambda_{p}$, this imaging equation [Eq. (\ref{IdealImagingEqLensInPump})] becomes \begin{equation}\label{IdealImagingEqLensInPumpDegenerate} \frac{1}{d_{1}}+\frac{1}{d_{2}}=\frac{2}{R}. \end{equation} This is the imaging equation of a spherical mirror of radius of curvature $R$, or is the geometrical-optics imaging equation of a thin lens of focal length $\frac{R}{2}$. Both $I_{0}^{(2)}(x_{2})$
and $I^{(2)}(x_{2})$ are proportional to the magnified object intensity transmittance $|t(\frac{x_{2}}{M})|^{2}$ if $h_{1}$ is uniform.
\subsection{Object in Both Signal and Idler Beams}
If the signal and idler systems are identical and the object $t$ is placed at the same location in each, as shown in Fig. \ref{object in both signal and idler beams}, then we may substitute $h_{s}(x_{1},x)=h_{i}(x_{1},x)=\int dx'h_{1}(x_{1},x')t(x')h_{2}(x',x)$ in Eq. (\ref{basicEq}) to obtain \begin{equation}\label{EpsiObjInBothSignalIdler} \psi(x_{1},x_{2})=\int\int dx'dx''t(x')t(x'')\psi_{c}(x',x'')h_{2}(x_{1},x')h_{2}(x_{2},x''), \end{equation} where \begin{equation}\label{PsiC} \psi_{c}(x',x'')=\int dxE_{p}(x)h_{1}(x',x)h_{1}(x'',x). \end{equation} Comparing Eq. (\ref{EpsiObjInBothSignalIdler}) with Eq. (\ref{A1}) shows that $\psi(x_{1},x_{2})$ is analogous to a partially coherent imaging system, where $\psi_{c}(x',x'')$ plays the role of the correlation function of the field. In this case, though, in accordance with Eq. (\ref{G2psi}) the biphoton rate $G^{(2)}(x_{1},x_{2})$ is a fourth-order nonlinear transformation of $t$.
This system has been used \cite{AA3} to test the complementarity of coherence and entanglement with the change of transverse size of the pump beam, where $t$ was taken to be a double-slit. From Eqs. (\ref{EpsiObjInBothSignalIdler}) and (\ref{PsiC}) it is clear that for a small source the biphoton amplitude factorizes into a function of $x_{1}$ and another function of $x_{2}$ (coherence), while it is not factorizable (i.e., entangled) for a large pump-beam size (entanglement) \cite{BS3}.
\subsection{Object in the Pump Beam}
In another imaging configuration, the object is placed in the pump beam as illustrated in Fig. \ref{object in pump beam}. Equations (\ref{G2psi}) and (\ref{basicEq}) give \begin{equation}\label{G2x1x2ObjInPump}
G^{(2)}(x_{1},x_{2})=|\int dx t(x)h_{s}(x_{1},x)h_{i}(x_{2},x)|^{2} \end{equation} provided that $E_{p}(x)$ is uniform over the object. Many possibilities for imaging based on Eq. (\ref{G2x1x2ObjInPump}) can be envisioned. For example, if both $h_{s}$ and $h_{i}$ are $2-f$ systems, the result is proportional to the squared magnitude of the Fourier transform of $t$. In another example, if $h_{s}(0,x)=1$, then \begin{equation}\label{I02ObjInPump}
I_{0}^{(2)}(x_{2})=|\int dx t(x)h_{i}(x_{2},x)|^{2}, \end{equation} and the behavior is that of a coherent imaging system. The object is illuminated at the pump wavelength, while the measurement is made at the much longer signal and idler wavelengths.
In a third example, in which the signal and idler systems are identical and the coincidence is measured at the same position, by use of a detector sensitive to the arrival of photon pairs (a two-photon absorber, for example), we obtain \begin{equation}\label{G2x1x1ObjInPump}
G^{(2)}(x_{1},x_{1})=|\int dx t(x)h_{s}^{2}(x_{1},x)|^{2}. \end{equation} Again, the mathematical structure is that of a coherent imaging system.
An interesting modification to this configuration would be to add a $2-f$ system between the object and the crystal. In this case, $E_{p}(x)=T(2\pi\frac{x}{\lambda_{p}f_{o}})$, where $f_{o}$ is the focal length of the lens before the crystal. If the object is not symmetric in $x$, then its Fourier transform is a complex function. Yet the phase distribution of the object's spatial spectrum is not lost, since the three-wave interaction process in the NLC is coherent. If, in addition, we take both the signal and idler configurations to be $2-f$ systems, the biphoton rate becomes \begin{equation}\label{G2x1x2ObjInPump2fBeforeNLC}
G^{(2)}(x_{1},x_{2})=|t([\frac{x_{1}}{\lambda_{s}f_{s}}+\frac{x_{2}}{\lambda_{i}f_{i}}]f_{o}\lambda_{p})|^{2}, \end{equation} where $f_{s}$ and $f_{i}$ are the focal lengths of the $2-f$ systems in the signal and idler beams, respectively. In this case, in accordance with Eq. (\ref{I20x}), $I_{0}^{(2)}$ provides a magnified image of the object $t$ with a magnification factor of $\frac{\lambda_{i}f_{i}}{\lambda_{p}f_{o}}$.
\subsection{Object is the Detector}
In yet another imaging modality, illustrated in Fig. \ref{object is the detector}, the object is a two-photon absorber; it is thus a detector with quantum efficiency proportional to its absorbance $t_{2}$. The biphoton rate, in this case $G^{(2)}(x_{1},x_{1})$, is registered by some response of the object, such as emitted photoelectrons or fluorescence \cite{MT1}. The signal/idler optical system may, for example, be a single-lens imaging system or a scanning system, as in scanning confocal microscopy \cite{TW}. From Eqs. (\ref{G2psi}) and (\ref{basicEq}) \begin{equation}\label{G2x1x1ObjIsDetector} G^{(2)}(x_{1},x_{1})=t_{2}(x_{1})S(x_{1}), \end{equation} where $S(x_{1})$ is an object illumination function. One choice for $S$ would be a very narrow function, which would serve to sample the object in the transverse plane. This could be achieved, for example, by taking a pump of large transverse width and $2-f$ signal and idler systems. The size of the pump and the aperture of the lens limit the transverse resolution.
There are other sensible choices for the illumination function $S$. These can be implemented through either the pump profile, or the system impulse response function, or both. The object $t_{2}$ would then be extracted by dividing the observed biphoton rate by $S$. The object can also be uniformly illuminated by using a large pump and a $4-f$ imaging system, in which case $S$ becomes almost constant over a large portion of the object. We have studied this system elsewhere and compared the longitudinal and transverse resolutions to those of other schemes of microscopy that utilize classical light\cite {MN}.
\subsection{Objects in Signal, Idler, and Pump Beams: Image Triple Correlation}
Because the biphoton optical system is based on three-wave mixing, it inherently depends on three image distributions and therefore offers a number of unique options for optical-image processing. For example, if $4-f$ systems with aperture functions $t_{s}$ and $t_{i}$ are placed in the signal and idler beams, respectively, and a third object is placed in the pump beam such that the field at the entrance to the NLC is $t_{p}$ then Eqs. (\ref{G2psi}) and (\ref{basicEq}) yield \begin{equation}\label{G2x1x2ObjInSigIdlerAndPump}
G^{(2)}(x_{1},x_{2})=|\int dx t_{p}(x)T_{s}(\frac{2\pi}{\lambda_{s}f_{s}}(x-x_{1}))T_{i}(\frac{2\pi}{\lambda_{i}f_{i}}(x-x_{2}))|^{2}, \end{equation} where $T_{s}(q)$ and $T_{i}(q)$ are the Fourier transforms of $t_{s}(x)$ and $t_{i}(x)$; and $f_{s}$ and $f_{i}$ are the focal lengths of the signal and idler $4-f$ systems, respectively. This equation represents the magnitude of the triple correlation of the three functions $t_{p}$, $T_{s}$, and $T_{i}$. Triple correlation is useful in a number of signal-processing applications. Of course, if one of these three functions is uniform, the operation becomes ordinary correlation.
One application for this configuration could be system identification and coded-aperture imaging. In this application, a linear, shift-invariant optical system is to be identified, i.e., its impulse response function measured. The system may be placed in one of the two down-converted beams (say the signal) while a set of $N$ known reference systems are placed, one at a time, in the other beam (the idler) as the coincidence rate is measured. The set of idler systems are also assumed to be linear, shift-invariant with impulse response functions \begin{equation}\label{LSIfunction} h_{i}(x_{2},x)=h_{n}(x-x_{2}), \quad n=1, 2, \dots, N. \end{equation} Such systems may be generated by the use of a bank of apertures (filters). Since the unknown system is shift-invariant, its impulse response function is $h_{s}(x_{1},x)=h_{s}(x_{1}-x)$, so that by virtue of Eqs. (\ref{G2psi}) and (\ref{basicEq}) the biphoton rate measured at $x_{1}=x_{2}=0$ is given by \begin{equation}\label{G2 00}
C_{n}=G^{(2)}(0,0)=|\int dx h_{s}(-x)h_{n}(x)|^{2}, \end{equation} assuming the pump distribution to be uniform.
If $\{h_{n}(x)\}$ form a complete set of orthonormal functions, then the measured coefficients $\{C_{n}\}$ are simply the squared magnitudes of the coefficients of an expansion of the unknown function $h_{s}(-x)$ in this basis. Under special conditions, the phases can be retrieved, and the function $h_{s}(x)$ completely reconstructed \cite{HS87}.
In the special case for which $h_{n}(x)=\delta(x+x_{n})$, so that the idler field is sampled at positions $x_{n}$, Eq. (\ref{G2 00}) yields \begin{equation}\label{}
C_{n}=|h_{s}(x_{n})|^{2}, \end{equation}
in which case the measured coincidence rates provide samples of the magnitude of the impulse response function. A scanning system can therefore be used to determine $|h_{s}(x)|$.
\section{RESOLUTION OF BIPHOTON IMAGING}
In all of the configurations studied in the previous section we assumed a thin NLC and a narrow biphoton spectral bandwidth. Under these assumptions the imaging resolution of all the configurations is determined by the apertures placed in the system (including those placed in the pump beam). When these apertures are not accounted for, we obtain results reminiscent of classical {\it geometric optics}, such as the imaging formulas in Eqs. (\ref{IdealImagingEq}) and (\ref{IdealImagingEqLensInPump}). These geometric-optics results are typical of the work that has been carried out to date in entangled-photon imaging \cite{TP1,TP2}.
One of the advantages of our formalism is to facilitate deriving the analog of {\it wave-optics} results for such systems when all the physical parameters of the optical arrangement are accounted for, using straightforward calculations similar to those of classical wave optics \cite{BS1,MB}. In this section we examine the effect of the various parameters of entangled-photon imaging systems on the imaging resolution.
We take the width of the image formed by a point object $t(x)=\delta(x)$ in the marginal coincidence rate [Eq. (\ref{I2x})] as {\it a measure of the resolution of the entangled-photon imaging system}. Another definition of resolution may be based on the conditional coincidence rate [Eq. (\ref{I20x})].
We begin by modifying the principal imaging equations [Eqs. (\ref{G2psi}) and (\ref{basicEq})] by taking into consideration the thickness of the NLC and the biphoton spectral bandwidth. We assume a monochromatic pump beam of angular frequency $\omega_{p}$ and transverse distribution $E_{p}(x)$ at the entrance to a NLC of thickness $\ell$. The coincidence rate $G^{(2)}(x_{1},t_{1};x_{2},t_{2})$, with the detection times of D$_{1}$ and D$_{2}$ now made explicit, is given by \begin{equation}\label{G2x1t1x2t2psi}
G^{(2)}(x_{1},t_{1};x_{2},t_{2})=|\psi(x_{1},t_{1};x_{2},t_{2})|^{2}. \end{equation} Here $\psi(x_{1},t_{1};x_{2},t_{2})$ may be written in terms of a biphoton spectral amplitude $\tilde{\psi}(x_{1},x_{2};\omega_{s})$ via \begin{equation}\label{epsix1t1x2t2epsitilde} \psi(x_{1},t_{1};x_{2},t_{2})=\textmd{exp}(-i\omega_{p}t_{1})\int_{\Omega} d\omega_{s}\textmd{exp}(-i\omega_{s}(t_{1}-t_{2}))\tilde{\psi}(x_{1},x_{2};\omega_{s}), \end{equation} where $\Omega$ is the biphoton spectral bandwidth and $\tilde{\psi}(x_{1},x_{2};\omega_{s})$ is given by \cite{BS3} \begin{equation}\label{basicEqepsitilde} \tilde{\psi}(x_{1},x_{2};\omega_{s})=\int\int dq_{s}dq_{i}\Lambda(q_{s},q_{i};\omega_{s})H_{s}(x_{1},q_{s};\omega_{s})H_{i}(x_{2},q_{i};\omega_{p}-\omega_{s}), \end{equation} and the dispersion of the optical systems has been made explicit in the signal and idler transfer functions $H_{s}$ and $H_{i}$, which are Fourier transforms of the impulse response functions $h_{s}$ and $h_{i}$ (with respect to the second argument), respectively. The quantity $\Lambda(q_{s},q_{i};\omega_{s})$ in Eq. 
(\ref{basicEqepsitilde}) is given by \begin{equation}\label{LambdaFunction} \Lambda(q_{s},q_{i};\omega_{s})=\tilde{E}_{p}(q_{s}+q_{i})\tilde{\xi}(q_{s},q_{i};\omega_{s}); \end{equation} here $q$ is proportional to the transverse component of the momentum vector (it is the spatial frequency in the transverse plane), $\tilde{E}_{p}(q)$ is the Fourier transform of $E_{p}(x)$, and $\tilde{\xi}(q_{s},q_{i};\omega_{s})$ is a phase-matching function given by \begin{equation}\label{ExitaTilde} \tilde{\xi}(q_{s},q_{i};\omega_{s})=\ell \textmd{sinc}(\frac{\ell}{2\pi}\Delta r)\textmd{exp}(-j\frac{\ell}{2}\Delta r); \end{equation} and $\Delta r=r_{p}(q_{s}+q_{i},\omega_{p})-r_{s}(q_{s},\omega_{s})-r_{i}(q_{i},\omega_{p}-\omega_{s})$; $r_{j}(q,\omega)=\sqrt{n_{j}^{2}\frac{\omega^{2}}{c^{2}}-q^{2}}$, $j=p, s, \textmd{and}\: i$, where $n_{j}$ is the NLC index of refraction for the polarization and frequency of the $j^{th}$ field.
In most cases the detectors may be considered to be slow (i.e., their response time is large with respect to the inverse of the bandwidth of the system, which is a reasonable assumption for available photodetectors), and thus they measure a coincidence rate that is averaged over a long time interval. The resulting time averaged coincidence rate is \cite{BS3} \begin{equation}\label{Cx1x2}
C(x_{1},x_{2})=\int_{\Omega}d\omega_{s}|\tilde{\psi}(x_{1},x_{2};\omega_{s})|^{2}, \end{equation} showing that the time averaged coincidence rate for a slow detector is an incoherent sum of the biphoton spectral amplitudes over the bandwidth of the system. The spectrum of the down-converted biphotons can be quite large and the dispersion of the optical components must be considered carefully just as dispersion must be in ultrafast pulsed optics.
We also define marginal and conditional time-averaged coincidence rates as \begin{equation}\label{Cx2} C(x_{2})=\int dx_{1}C(x_{1},x_{2}), \end{equation} \begin{equation}\label{C0x2} C_{0}(x_{2})=C(0,x_{2}), \end{equation} respectively. It is obvious that when only a narrow spectral bandwidth is considered, $C(x_{2})$ and $C_{0}(x_{2})$ coincide with $I^{(2)}(x_{2})$ and $I_{0}^{(2)}(x_{2})$, respectively.
We now proceed to study the resolution of a representative configuration considered in Section 2: {\it object in the signal (or idler) beam}. The biphoton spectral amplitude of this system, illustrated in Fig. \ref{object in the signal beam}, now taking into consideration the thickness of the crystal and spectral bandwidth of the system, is given by \begin{equation}\label{EpsiTildeObjInSig} \tilde{\psi}(x_{1},x_{2};\omega_{s})=\int dx't(x')h_{1}(x_{1},x';\omega_{s})h_{3}(x_{2},x';\omega_{s}), \end{equation} where \begin{equation}\label{h3EpsiTilde} h_{3}(x_{2},x';\omega_{s})=\int\int dq_{s}dq_{i}\Lambda(q_{s},q_{i};\omega_{s})H_{2}(x',q_{s};\omega_{s})H_{i}(x_{2},q_{i};\omega_{p}-\omega_{s}). \end{equation} We assume throughout that the object is thin and non-dispersive. To determine the resolution of this imaging configuration we take $t(x)=\delta(x)$, whereupon Eq.(\ref{EpsiTildeObjInSig}) becomes $\tilde{\psi}(x_{1},x_{2};\omega_{s})=h_{1}(x_{1},0;\omega_{s})h_{3}(x_{2},0;\omega_{s})$, and consequently \begin{equation}\label{Cx2BasicObjInSig}
C(x_{2})=\int_{\Omega}d\omega_{s}|h_{3}(x_{2},0;\omega_{s})|^{2}g(\omega_{s}), \end{equation} \begin{equation}\label{C0x2BasicObjInSig}
C_{0}(x_{2})=\int_{\Omega}d\omega_{s}|h_{3}(x_{2},0;\omega_{s})|^{2}g_{o}(\omega_{s}), \end{equation}
where $g(\omega)=\int dx|h_{1}(x,0;\omega)|^{2}$ and
$g_{o}(\omega)=|h_{1}(0,0;\omega)|^{2}$. Note that the system $h_{1}$ affects the imaging resolution only through introducing an effective spectral bandwidth that may be ignored if it is larger than that of $h_{3}$.
As a concrete example we consider the system examined in Section 2.A.2, which is the second example of {\it object in the signal beam} configuration, namely {\it ideal single-lens imaging}, illustrated in Fig. \ref{object in the signal beam examples 1 and 2}. We assume, at first, a plane wave pump, so that $h_{3}(x_{2},0;\omega_{s})$ simplifies to \begin{equation}\label{h30EpsiTildePlaneWavePump} h_{3}(x_{2},0;\omega_{s})=\int dq_{s}\tilde{\xi}(q_{s},-q_{s};\omega_{s})H_{2}(0,q_{s};\omega_{s})H_{i}(x_{2},-q_{s};\omega_{p}-\omega_{s}). \end{equation} In this example, the transfer functions of the systems $h_{2}$ and $h_{i}$ are given by \begin{equation}\label{H2} H_{2}(0,q_{s};\omega_{s})=\textmd{exp}(jk_{s}d_{s})\textmd{exp}(-j\frac{d_{s}q_{s}^{2}}{2k_{s}}), \end{equation} \begin{equation}\label{Hi} H_{i}(x_{2},-q_{s};\omega_{p}-\omega_{s})= \textmd{exp}(jk_{i}(d_{1}+d_{2})) \textmd{exp}(j\frac{k_{i}x_{2}^{2}}{2d_{2}}) \textmd{exp}(-j\frac{d_{i}q_{s}^{2}}{2k_{i}}) P_{g}(q_{s}+\frac{k_{i}x_{2}}{d_{2}}), \end{equation} where $P_{g}(q)$ is the Fourier transform of $p(x)\textmd{exp}(j\frac{k_{i}x^{2}}{2}[\frac{1}{d_{2}}-\frac{1}{f}])$ with respect to $x$, and $p(x)$ is the lens aperture. Substituting Eqs. (\ref{ExitaTilde}), (\ref{H2}) and (\ref{Hi}) into Eq. (\ref{h30EpsiTildePlaneWavePump}) we obtain $h_{3}(x_{2},0;\omega_{s})$ which we then use in Eqs. (\ref{Cx2BasicObjInSig}) and (\ref{C0x2BasicObjInSig}) to estimate the resolution.
There are two techniques to implement this system in an actual setup. In one technique the NLC is adjusted for non-collinear SPDC, and one beam (usually chosen by a pinhole) is directed into the system $h_{s}$ and the other into $h_{i}$. Another technique is to adjust the NLC for collinear SPDC and then separate the two photons comprising the biphoton. In type-II SPDC (where the signal and idler photons have orthogonal polarizations) one can use a polarizing beam splitter to separate the biphoton. On the other hand, in type-I SPDC (where the signal and idler photons have the same polarization) the use of a non-polarizing beam-splitter will separate the pair into the two output ports of the beam splitter in 50$\%$ of the trials, and send the pair together into one output port in the rest of the trials. In the latter case, the trials do not contribute to the coincidence measurements carried out by the detectors D$_{1}$ and D$_{2}$ together with the coincidence detection circuit, and thus may be ignored.
Assuming a thin NLC, narrow spectral bandwidth, a plane-wave pump, and degenerate collinear down-conversion (where the signal and idler photons are separated using the method outlined above), one obtains the familiar diffraction pattern of a diffraction-limited imaging system. For a rectangular lens aperture of width $D$ and focal length $f$ the result is
$C(x_{2})\propto|\textmd{sinc}(\frac{x_{2}}{2\lambda_{o}F_{\#}})|^{2}$, where $F_{\#}=\frac{f}{D}$ is the F-number of the lens and $\lambda_{o}=2\lambda_{p}$ is the wavelength of the degenerate down-converted photons. This is the best one can obtain; we demonstrate in the following that relaxing any of the restrictions indicated above will degrade the resolution.
Our calculations have been carried out using a $\beta$-barium borate (BBO) NLC that is illuminated with a pump of wavelength $\lambda_{p}=325$ nm (which corresponds to the ultraviolet line of a He-Cd laser), with a cut-angle of $36.44^{\circ}$ that corresponds to degenerate collinear type-I SPDC. Increasing the cut-angle beyond $36.44^{\circ}$ yields non-collinear degenerate SPDC while decreasing the cut-angle below this value yields collinear non-degenerate SPDC \cite{AA1}.
We first consider the effect of the finite thickness of the NLC. One effect is that the distance $d_{1}$, used in the imaging formula presented in Eq. (\ref{IdealImagingEq}), is modified to become \begin{equation}\label{modified d1} d_{1}=d_{i}+d_{s}\frac{\lambda_{s}}{\lambda_{i}}+\ell_{eq}, \end{equation} in contrast to that given in Eq. (\ref{d1}). The quantity $\ell_{eq}$ is an equivalent length for the NLC that is related to the physical length $\ell$ by \begin{equation}\label{lequivalent} \ell_{eq}=\frac{\ell}{2\lambda_{i}}(\frac{\lambda_{s}}{n_{s}}+\frac{\lambda_{i}}{n_{i}}). \end{equation} For the degenerate case ($\lambda_{s}=\lambda_{i}=\lambda_{o}$) this expression simplifies to $\ell_{eq}=\frac{\ell}{n_{o}}$, where $n_{o}$ is the index of refraction of the NLC at the degenerate wavelength. In other words, the thickness of the NLC must be accounted for in calculating the distances in the experimental arrangement in order to satisfy the imaging formula of Eq. (\ref{IdealImagingEq}).
Figure \ref{effect of NLC thickness} shows $C(x_{2})$ for several values of $\ell$, assuming degenerate collinear down-conversion and narrow spectral bandwidth. The distances in the configuration are chosen such that $d_{1}=d_{2}=2f$ (taking into account the effect of $\ell$ on $d_{1}$ according to Eq. (\ref{modified d1})), which corresponds to an imaging system of unity magnification. For $\ell=0.1$ mm one obtains the diffraction limited distribution corresponding to the thin NLC case. When $\ell$ increases the distribution widens, signifying a loss of imaging resolution, as is evident for the $\ell=1$-mm and $\ell=10$-mm curves. This result may be easily understood when one considers the fact that the NLC acts as a spatial filter through the phase-matching function $\tilde{\xi}(q_{s},q_{i};\omega_{s})$ defined in Eq. (\ref{ExitaTilde}). The collinear SPDC case corresponds to a low-pass spatial filter with a cut-off frequency that is inversely proportional to $\ell$ and hence the resolution degrades as the NLC thickness increases.
The spectral bandwidth of the system has a similar effect on the imaging resolution, which decreases with increased bandwidth. Figure \ref{effect of bandwidth} shows $C(x_{2})$ for several values of $\rho=\frac{\Omega}{\omega_{p}}$. These plots were obtained for a NLC of thickness $\ell=1$ mm, collinear SPDC, and a plane-wave pump. According to Eq. (\ref{modified d1}) $d_{1}$ is a function of wavelength (and so is $\ell_{eq}$ via Eq. (\ref{lequivalent})), so that only one pair of signal/idler wavelengths satisfy the imaging formula in Eq. (\ref{IdealImagingEq}). All biphotons with other signal/idler pair wavelengths are defocused, and hence their contribution to $C(x_{2})$ leads to a reduction in resolution. The plots in Fig. \ref{effect of bandwidth} were obtained assuming that Eq. (\ref{IdealImagingEq}) is satisfied by the degenerate signal/idler wavelengths, and that at these wavelengths $d_{1}=d_{2}=2f$.
Finally, the finite transverse width of the pump field also degrades the resolution. This can be understood by noting that smaller pump size reduces entanglement of the signal and idler photons \cite{BS3}. As a result, the quantum state of the light emitted by the NLC becomes separable and thus $C(x_{2})$ and $C_{0}(x_{2})$ simply become the intensity of the idler beam (which depends on $h_{i}$), but are independent of the signal beam \cite{AA2}. No information about the system $h_{s}$, which includes the object to be imaged in this case, may be extracted from the measurements carried out in the idler beam.
Figure \ref{effect of pump width} shows plots of $C(x_{2})$ for various values of the transverse width of the pump, denoted $B$. The calculations were performed taking $\ell=1$ mm, assuming collinear degenerate SPDC, and the presence of narrowband spectral filters in the system. Distances were chosen such that $d_{1}=d_{2}=2f$.
\section{CONCLUSION}
We have presented a Fourier-optics analysis of various imaging configurations using the unique features of spontaneous parametric down-conversion (SPDC) as a two-photon source. SPDC is a three-wave mixing process; the pump, signal, and idler are coupled through the phase-matching conditions. We investigated several imaging and image-processing configurations that utilize the quantum correlations among these three fields. Our formalism was also used to study the resolution of these entangled-photon imaging configurations.
\section*{APPENDIX: THE OPTICAL BILINEAR TRANSFORMATION}
\setcounter{equation}{0} \renewcommand{\theequation}
{\mbox{{A}{\arabic{equation}}}}
We present a brief overview of the theory of classical imaging in the framework of the bilinear optical transformation. The equations are formulated in such a way so as to facilitate convenient comparisons with the two-photon and biphoton cases presented in the text.
Because of the quadratic relation between the optical field and the optical intensity, imaging systems are typically described by a bilinear transformation \cite{BS4}. A general bilinear transformation is expressed as \begin{equation}\label{A1} g(x_{1})=\int\int dx'dx''f^{*}(x')f(x'')q(x_{1};x',x''), \end{equation} where $f(x)$ is the input function, $q(x_{1};x',x'')$ is the double impulse response function (DIR), and $g(x)$ is the output function. In general $f(x)$ is complex, but $g(x)$ is guaranteed to be real when the symmetry condition $q(x_{1};x',x'')=q^{*}(x_{1};x'',x')$ is satisfied. The DIR completely characterizes the bilinear system. This transformation represents, in general, the imaging system depicted in Fig. \ref{bilinear optics}. The input function $f(x)$ represents the transparency $t(x)$; the DIR is a combination of the second-order correlation function of the illumination $G^{(1)}(x',x'')$ and the impulse response function $h(x_{1},x)$ of the linear optical system; and the output $g(x)$ represents the intensity measured by the optical detector.
In the ideal case $q(x_{1};x',x'')=\delta(x_{1}-x')\delta(x_{1}-x'')$, whereupon
$g(x_{1})=|t(x_{1})|^{2}$, so that the system is a squarer with zero spread. When the DIR factorizes in the form $q(x_{1};x',x'')=h^{*}(x_{1},x')h(x_{1},x'')$, the output of the system is given by \begin{equation}\label{A2}
g(x_{1})=|\int dx'f(x')h(x_{1},x')|^{2}. \end{equation} Equation (\ref{A2}) is easily recognized as the output intensity of a coherent imaging system with impulse response function $h(x_{1},x')$ and an input complex field $f(x')$. When the DIR takes the form $q(x_{1};x',x'')=h^{*}(x_{1},x')h(x_{1},x'')\delta(x'-x'')$, we obtain \begin{equation}\label{A3}
g(x_{1})=\int dx'|f(x')|^{2}|h(x_{1},x')|^{2}, \end{equation}
which is the output of an incoherent system with point spread function $|h(x_{1},x')|^{2}$ and input intensity $|f(x')|^{2}$. In general, partially coherent imaging can be represented by a bilinear system with a DIR given by $q(x_{1};x',x'')=\gamma(x',x'')h^{*}(x_{1},x')h(x_{1},x'')$, where $\gamma(x',x'')$ represents the correlation function of the input light, and $h(x_{1},x')$ is the coherent impulse response function. When $\gamma(x',x'')=1$, we recover coherent imaging whereas when $\gamma(x',x'')=\delta(x'-x'')$, we recover incoherent imaging.
Entangled-photon imaging, like partially coherent imaging, is described by a bilinear system, with partial entanglement assuming the role of partial coherence \cite{BS3}.
\section*{ACKNOWLEDGMENTS}
This work was supported by the National Science Foundation; by the Center for Subsurface Sensing and Imaging Systems (CenSSIS), an NSF engineering research center; and by the David \& Lucile Packard Foundation.
B. E. A. Saleh's e-mail address is besaleh@bu.edu.
\begin{figure}
\caption{Biphoton imaging using photon pairs generated by spontaneous parametric down-conversion. NLC stands for nonlinear crystal; D$_{1}$ and D$_{2}$ are single-photon detectors at locations $x_{1}$ and $x_{2}$, respectively; $G^{(2)}(x_{1},x_{2})$ is the biphoton rate; $h_{p}(x,x')$, $h_{s}(x_{1},x)$, and $h_{i}(x_{2},x)$ are the impulse response functions of the optical systems placed in the paths of the pump, signal, and idler beams, respectively.}
\label{overall layout}
\end{figure}
\begin{figure}
\caption{{\it Object in the signal beam} configuration. $E_{p}$ is the pump field at the entrance to the NLC; $h_{1}(x_{1},x')$ and $h_{2}(x',x)$ are the impulse response functions of the optical systems placed in the signal beam; $h_{i}(x_{2},x)$ is the impulse response function of the optical system placed in the idler beam; $t(x')$ is the object to be imaged, placed in the signal beam.}
\label{object in the signal beam}
\end{figure}
\begin{figure}
\caption{{\it Object in the signal beam} configuration of examples 1 and 2 displayed in an unfolded picture. $E_{p}$, $h_{1}(x_{1},x')$, and $t(x')$ are the same as in Fig. \ref{object in the signal beam}; $f$ is the focal length of a lens placed in the idler beam. See text for details.}
\label{object in the signal beam examples 1 and 2}
\end{figure}
\begin{figure}
\caption{{\it Object in the signal beam} configuration of example 3 displayed in an unfolded mode. $h_{1}(x_{1},x')$, and $t(x')$ are the same as in Fig. \ref{object in the signal beam}; a lens is placed in the pump beam and is represented here by the dotted lens of focal length $f$. See text for details.}
\label{object in the signal beam example 3}
\end{figure}
\begin{figure}
\caption{{\it Object in both signal and idler beams} configuration. $h_{1}(x_{1},x')$, $h_{2}(x',x)$ are the impulse response functions of the optical systems placed in the path of both the signal and idler beams; and $t(x')$ is the object to be imaged.}
\label{object in both signal and idler beams}
\end{figure}
\begin{figure}
\caption{{\it Object in the pump beam} configuration. $h_{s}(x_{1},x)$, $h_{i}(x_{2},x)$ are as in Fig. \ref{overall layout}; and $t(x)$ is the object to be imaged placed in the pump beam.}
\label{object in pump beam}
\end{figure}
\begin{figure}
\caption{{\it Object is the detector} configuration. $h_{1}(x_{1},x)$, is the impulse response of the optical system placed in the signal and idler paths; and D is a two-photon detector at location $x_{1}$.}
\label{object is the detector}
\end{figure}
\begin{figure}
\caption{Configuration for triple correlation. $h_{2s}(x',x)$ and $h_{1s}(x_{1},x')$ are the impulse response functions of the optical systems placed in the signal beam; $h_{2i}(x'',x)$ and $h_{1i}(x_{2},x'')$ are the impulse response functions of the optical systems placed in the idler beam; $t_{p}(x)$, $t_{s}(x')$, and $t_{i}(x'')$ are the three objects to be correlated.}
\label{triple correlation}
\end{figure}
\begin{figure}
\caption{Effect of NLC thickness $\ell$ on the imaging resolution of {\it object in the signal beam} configuration. Plots of normalized time-averaged marginal coincidence rate $C(x_{2})$ versus detector D$_{2}$ location $x_{2}$ normalized with respect to $x_{c}=2\lambda_{o}F_{\#}$, for $\ell=0.1, 1, \textmd{and}\: 10$ mm; $\lambda_{o}=650$ nm, $F_{\#}=5$.}
\label{effect of NLC thickness}
\end{figure}
\begin{figure}
\caption{Effect of the spectral bandwidth on the imaging resolution of {\it object in the signal beam} configuration. Plots of normalized $C(x_{2})$ versus $x_{2}$ normalized with respect to $x_{c}=2\lambda_{o}F_{\#}$ are shown for several values of $\rho=\frac{\Omega}{\omega_{p}}$; $\ell=1$ mm, collinear SPDC, and a plane-wave pump are assumed.}
\label{effect of bandwidth}
\end{figure}
\begin{figure}
\caption{Effect of the transverse width of the pump $B$ on the imaging resolution of {\it object in the signal beam} configuration. Plots of normalized $C(x_{2})$ versus $x_{2}$ normalized with respect to $x_{c}=2\lambda_{o}F_{\#}$ are shown for $B=2, 1, 0.5, \textmd{and} \: 0.1$ mm; $\lambda_{o}=650$ nm, $F_{\#}=5$. }
\label{effect of pump width}
\end{figure}
\begin{figure}
\caption{Classical partially-coherent imaging. The quantity $G^{(1)}(x',x'')$ is the second-order correlation function of the optical field; $t$ is the object to be imaged; $h(x_{1},x)$ is the impulse response function of the imaging system; D is a detector placed at position $x_{1}$ that records the intensity $I(x_{1})$. }
\label{bilinear optics}
\end{figure}
\vskip6cm Figure 1, A. F. Abouraddy
\vskip6cm Figure 2, A. F. Abouraddy
\vskip6cm Figure 3, A. F. Abouraddy
\vskip6cm Figure 4, A. F. Abouraddy
\vskip6cm Figure 5, A. F. Abouraddy
\vskip6cm Figure 6, A. F. Abouraddy
\vskip6cm Figure 7, A. F. Abouraddy
\vskip6cm Figure 8, A. F. Abouraddy
\vskip6cm Figure 9, A. F. Abouraddy
\vskip6cm Figure 10, A. F. Abouraddy
\vskip6cm Figure 11, A. F. Abouraddy
\vskip6cm Figure 12, A. F. Abouraddy
\end{document}
\begin{document}
\title{Compact Optimization Learning for AC Optimal Power Flow}
\author{Seonho~Park, Wenbo~Chen, Terrence~W.K.~Mak, and Pascal~Van~Hentenryck \thanks{
The authors are affiliated with the School of Industrial and Systems Engineering, Georgia Institute of Technology, Atlanta, GA 30332, USA,
E-mail: \{seonho.park,wenbo.chen,wmak,pvh\}@gatech.edu } }
\maketitle
\begin{abstract} This paper reconsiders end-to-end learning approaches to the Optimal Power Flow (OPF). Existing methods, which learn the input/output mapping of the OPF, suffer from scalability issues due to the high dimensionality of the output space. This paper first shows that the space of optimal solutions can be significantly compressed using principal component analysis (PCA). It then proposes {\sc Compact Learning}{}, a new method that learns in a subspace of the principal components and translates the vectors into the original output space. This compression reduces the number of trainable parameters substantially, improving scalability and effectiveness. {\sc Compact Learning}{} is evaluated on a variety of test cases from the PGLib and a realistic French transmission system having renewable energy changes with up to 30,000 buses. The paper also shows that the output of {\sc Compact Learning}{} can be used to warm-start an exact AC solver to restore feasibility, while bringing significant speed-ups. \end{abstract}
\begin{IEEEkeywords} Optimal Power Flow, Principal Component Analysis, Generalized Hebbian Algorithm, Nonlinear Programming, End-to-end Learning, Deep Learning \end{IEEEkeywords}
\section{Introduction} \label{sec:intro}
Optimal Power Flow (OPF) is at the core of grid operations: in many markets, it should be solved every five minutes (ideally) to clear real-time markets at minimal cost, while ensuring that the load and generation are balanced and that the physical and engineering constraints are satisfied. Unfortunately, the AC-OPF problem is nonlinear and nonconvex: actual operations typically use linear relaxations (e.g., the so-called DC-model) to meet the real-time requirements of existing markets.
As the share of renewable energies significantly increases in the generation mix, it becomes increasingly important to solve AC-OPF problems, not their linearized versions. This is exemplified by the ARPA-E GO competitions \cite{safdarian2022grid} designed to stimulate progress on AC-OPF. In recent years, machine-learning (ML) based approaches to the OPF have received increasing attention. This is especially true for end-to-end approaches that aim at approximating the mapping between various input configurations and corresponding optimal solutions \cite{zamzam2020learning,fioretto2020predicting,donon2020deep,diehl2019warm,Owerko2020,nellikkath2022physics,donti2021dc3,park2022self}. The ML approach is motivated by the recognition that OPF problems are solved repeatedly every day, producing a wealth of historical data. In addition, the historical data can be augmented with additional AC-OPF instances, moving the computational burden offline instead of solving during real-time operations.
One of the challenges of AC-OPF is the high dimensionality of its solution, which implies that the ML models, typically deep neural networks (DNNs), have an excessive number of trainable parameters for realistic power grids. As a result, many ML approaches are only evaluated on test cases of limited sizes. For instance, the pioneering works in \cite{zamzam2020learning} and \cite{fioretto2020predicting} tested their approaches on systems with 118 and 300 buses at most, respectively. To the best of the authors' knowledge, the largest AC-OPF test case for evaluating end-to-end learning of AC-OPF is the French system with around 6,700 buses \cite{chatzos2021spatial}. Observe also that, since end-to-end learning is a regression task, learning highly dimensional OPF output may lead to inaccurate predictions and significant constraint violations.
\tb{ To remedy this limitation and learn OPF at scale, this paper
proposes a different approach. Instead of directly mapping the
inputs to the AC-OPF solutions, this paper proposes to learn a
mapping to a low-rank representation defined by a Principal
Component Analysis (PCA) before translating the vectors into the
original output space. The contributions of the paper can be
summarized as follows: } \begin{itemize} \item \tb{A data analysis on the optimal solutions given
various input configurations shows that the optimal solutions to the
AC-OPF problems can be substantially compressed through PCA with
negligible informational loss.}
\item \tb{Motivated by this empirical observation, the paper proposes
{\sc Compact Learning}{}, a new ML approach that learns into a subspace of
principal components before translating the compressed output into
the original output space. In fact, {\sc Compact Learning}{} jointly learns both the
selected principal components and the compact mapping function.}
\item \tb{{\sc Compact Learning}{} learns the AC-OPF mapping for very large power systems
with up to 30,000 buses. To the best of the authors' knowledge, this
is the largest AC-OPF problem to which an end-to-end learning scheme
has been applied. The results show that {\sc Compact Learning}{} is comparable in
accuracy to the best previous approach, but scales significantly
better. }
\item \tb{The paper also demonstrates that the {\sc Compact Learning}{} predictions can
warm-start power flow and AC-OPF solvers. When seeding a power flow
solver, {\sc Compact Learning}{} exhibits compelling performance compared with
plain approaches. The warm-start results, which use both
primal and dual predictions, show that {\sc Compact Learning}{} can produce significant
speed-ups for AC-OPF solvers, which can be accelerated by a factor of
$3.6$ to $15.3$\texttimes{} on the bulk industry size power systems. } \end{itemize}
\noindent The rest of this paper is organized as follows: Section~\ref{sec:related} presents prior related works. Section~\ref{sec:acopf_ml_based} revisits the AC-OPF formulation and describes the supervised learning task. Section~\ref{sec:prelim} analyzes the structure of optimal solutions. Section~\ref{sec:method} presents {\sc Compact Learning}{} in detail. Section~\ref{sec:exp} demonstrates its performance for various AC-OPF test cases. Finally, Section~\ref{sec:conclusions} covers the concluding remarks.
\section{Related Work} \label{sec:related}
The \emph{end-to-end learning} approach aims at training a model that directly estimates the optimal solution to an optimization problem given various input configurations. This approach has attracted significant attention in power system applications recently because it holds the promise of decreasing the computation time needed to solve recurring optimization problems with reasonably small variations of the input parameters. For example, a \emph{classification-then-regression} framework \cite{chen2022learning} was proposed to directly estimate the optimal solutions to the security-constrained economic dispatch problem. Because of the existence of the bound constraints, they recognized that the majority of the generators are at their maximum/minimum limits in optimal solutions. This study observed a similar pattern in AC-OPF solutions, but a more efficient way to design the input/output mapping is proposed. Similarly, ML-based mappings have been utilized to approximate the optimal commitments in various unit commitment problems \cite{xavier2021learning,ramesh2022feasibility,park2022confidence}. Especially for AC-OPF problems, various supervised learning (e.g., \cite{zamzam2020learning,fioretto2020predicting}) and self-supervised learning approaches (e.g., \cite{park2022self,donti2021dc3}) have been researched. They have used dedicated training schemes such as Lagrangian Duality \cite{fioretto2020predicting,chatzos2020high} or physics-informed neural network \cite{nellikkath2022physics}. Graph neural networks have been also considered in this context \cite{donon2020deep,diehl2019warm,Owerko2020} for leveraging the power system topology. However, such direct approaches cannot scale to industry size problems mainly because of the dimension of the output space which is of very large scale. 
To remedy this, \emph{spatial decomposition} approaches \cite{chatzos2021spatial,mak2022learning} have been proposed to decompose the network in regions and learn the mappings per region.
Besides the end-to-end learning approach, ML has also helped optimization solve problems faster. In \cite{deka2019learning}, the authors used an ML technique to identify an active set of constraints in DC-OPF. Also in \cite{cengil2022learning}, ML is used to identify a variable subset for accelerating an optimality-based bound tightening algorithm \cite{sundar2018optimization} for the AC-OPF.
By definition, since learning the AC-OPF optimal solution is a regression task, the inference from the ML model will not be always correct. A variety of techniques have been used to remedy this limitation, including the use of warm-starts and power flows as post-processing. A Newton-based method is used to correct the active power generations and voltage magnitudes at generator buses so that they satisfy the AC power flow problem \cite{venzke2020inexact,chatzos2021spatial}. In \cite{taheri2022restoring}, the authors corrected voltages at buses by minimizing the weighted least square of the inconsistency in the AC power flow using the Newton-Raphson method. In \cite{zamzam2020learning}, the active power injections and voltage phasors are output directly from the ML model, and the other variables are determined by solving a power flow. Also, in \cite{baker2019learning} and \cite{dong2020smart}, the use of the learning scheme was suggested to provide warm-start points for ACOPF solvers, but results were only presented on small test networks (up to the 300 bus system). In \cite{pan2022deepopf}, the use of DNN-based learning method for generating warm-start points was suggested. Their method was tested on a power system with 2,000 buses, but no speed-up was reported with the learning-based warm-start point.
\tb{ In previous data-driven AC-OPF approaches, feature reduction has
been considered for reducing the parameters to tune using techniques
such as PCA \cite{lei2020data} or sensitivity analysis
\cite{liu2021explicit}. This work, in contrast, shows that the
optimal solutions can be compressed to the low-rank representations
defined by PCA without having any significant informational loss. } Traditional power network reduction techniques, such as Kron and Ward reduction~\cite{ward49equivalent} techniques, have been widely used in the power system industry for more than 70 years. These techniques focused on crafting simpler equivalent circuits to be used by system operators, primarily for analysis. More complex reduction models~\cite{jang13line,caliskan12kron,nikolakakos18reduced,jiang20enhanced} have also been developed recently. While it is possible to use classical reduction techniques to reduce the power systems before learning, the resulting prediction model would only be able to predict quantities on the reduced networks with potential accuracy issues. The main focus of the paper is not on general network/grid reduction techniques. Instead, it focuses on devising a scalable learning approach by reducing the number of trainable parameters.
In summary, most previous works for learning AC-OPF optimization proxies have not considered industry-size power systems. {\em This paper shows that {\sc Compact Learning}{} applies to large-scale power networks (up to 30,000 buses in the experiments) and produces significant benefits in speeding-up AC-OPF solvers through warm-starts.}
\section{Preliminaries} \label{sec:acopf_ml_based}
This section formulates the AC-OPF problem and specifies the supervised learning studied in this paper.
\subsection{AC-OPF Formulation} \begin{model}[!t] {\scriptsize \caption{\tb{The AC-Optimal Power Flow (AC-OPF) Problem}} \label{model:acopf_simple} \begin{flalign} &\operatornamewithlimits{\text{\textbf{Minimize}}}_{p^g,q^g,v,\theta} \sum_{i\in\mathcal{G}}{c_i(p_i^g)}\label{eq:acopf_obj_simple}\\ &\text{\textbf{subject to:}}\nonumber\\ &\theta_r= 0\label{eq:acopf_cnst_refva}\\ \setcounter{equation}{3} &\underline{p}_i^g\leq p_i^g\leq\overline{p}_i^g &\forall i\in\mathcal{G}\label{eq:acopf_cnst_pgbound} \tag{\theequation a}\\ &\underline{q}_i^g\leq q_i^g\leq\overline{q}_i^g &\forall i\in\mathcal{G}\label{eq:acopf_cnst_qgbound} \tag{\theequation b}\\ &\underline{v}_i\leq v_i\leq\overline{v}_i &\forall i\in\mathcal{N}\label{eq:acopf_cnst_vm}\\ \setcounter{equation}{5}
&S_{lij}\!=\!\left(Y_l^*\!-\!j\frac{b_l^c}{2}\right)\frac{v_i^2}{|T_l|^2}-Y_l^*\frac{V_iV_j^*}{T_l} &\forall (lij)\!\in\!\mathcal{E}\label{eq:acopf_cnst_flow1}\tag{\theequation a}\\ &S_{lji}\!=\!\left(Y_l^*\!-\!j\frac{b_l^c}{2}\right)v_i^2-Y_l^*\frac{V_i^*V_j}{T_l^*} &\forall (lji)\!\in\!\mathcal{E}^R\label{eq:acopf_cnst_flow2}\tag{\theequation b}\\
&\sum_{k\in\mathcal{G}_i}S_k^g\!\!-\!\!S_i^d\!\!-\!\!Y_i^s|V_i|^2=\!\!\!\!\!\!\!\!\sum_{(lij)\in\mathcal{E}\cup\mathcal{E}^R}\!\!\!\!S_{lij} &\forall i\in\mathcal{N}\label{eq:acopf_cnst_balance} \\
&|S_{lij}|\leq \bar{s}_l &\forall(lij)\!\in\!\mathcal{E}\!\cup\!\mathcal{E}^R\label{eq:acopf_cnst_thermal_limit}\\ &\underline{\Delta\theta}_l\leq \theta_i-\theta_j\leq \overline{\Delta\theta}_l&\forall (lij)\!\in\!\mathcal{E}\label{eq:acopf_cnst_angle_limit} \end{flalign} } \end{model}
\tb{ Model~\ref{model:acopf_simple} presents the AC-OPF formulation \cite{babaeinejadsarookolaee2019power}. The power network can be represented as a graph $(\mathcal{N},\mathcal{E})$ where $\mathcal{N}$ denotes the set of bus indices containing generators and load units, and $\mathcal{E}$ is the set of transmission line indices between two buses. $(lij)\in\mathcal{E}$ where $l$ is a branch index connected from node $i$ to node $j$. The set $\mathcal{E}^R$ captures the reversed orientation of $\mathcal{E}$, i.e., $(lji)\in\mathcal{E}^R,\:\forall (lij)\in\mathcal{E}$. }
\tb{A generator output $S^g=p^g+jq^g$ is a complex number, where the
real part $p^g$ is an active power generation (injection) and the
imaginary part $q^g$ is a reactive power generation. An AC voltage
$V=v\angle\theta$ is represented by a voltage magnitude $v$ and a
voltage angle $\theta$. The objective function
~\eqref{eq:acopf_obj_simple} minimizes the sum of quadratic cost
functions $c_i(\cdot)$ with respect to the active power generations
$p_i^g,i\in\mathcal{G}$. At the reference bus $r$, the voltage angle is set
to zero as defined in constraint~\eqref{eq:acopf_cnst_refva}.
Constraints~\eqref{eq:acopf_cnst_pgbound},
\eqref{eq:acopf_cnst_qgbound}, and \eqref{eq:acopf_cnst_vm} capture
the bounds on variables $p^g$, $q^g$, and $v$, respectively.
Constraints~\eqref{eq:acopf_cnst_flow1}, \eqref{eq:acopf_cnst_flow2}
represent the complex power flow for each transmission line, which
is governed by \emph{Ohm's law}. Here, $Y_l$, $b_l^c$, and $T_l$ are
the series admittance, line charging susceptance, and transformer
parameter at each branch $l$, respectively.
Constraint~\eqref{eq:acopf_cnst_balance} ensures that, at each bus,
the active and reactive power balances are satisfied. Here, $Y^s$ is
the bus shunt admittance and $\mathcal{G}_i$ represents the set of generator
indices attached to the bus $i$.
Constraints~\eqref{eq:acopf_cnst_thermal_limit} capture the thermal
limits and ensure that the apparent power does not exceed its limit
$\bar{s}_l$ for every transmission line. }
\tb{ For the sake of simplicity, in what follows, the input configuration parameters and the optimal solution to the AC-OPF are denoted by $x$ and $y^*$, respectively. }
\subsection{Supervised Learning} The end-to-end learning approach in this paper consists in using supervised learning to find a mapping from an input $x$ to an optimal solution $y^*$ to AC-OPF. \tb{This work assumes that the set of
commitment decisions and generator bids are predetermined. This
setting is common in prior end-to-end learning studies for
AC-OPF (e.g.,
\cite{zamzam2020learning,chatzos2020high,chatzos2021spatial}). Existing
supervised learning schemes exploit the instance data
$\{x_i,y^*_i\}$. However, it often suffers from the high
dimensionality of the output when dealing with power networks of
industrial sizes. Table~\ref{tab:case_spec} reports the
specifications of the nine power networks from PGLib
\cite{babaeinejadsarookolaee2019power} and a realistic
version of the French system (denoted by \texttt{France\_2018}) used
in the experiments. The French system\footnote{\tb{Refer to
\cite{chatzos2022data} for more details on this test case.}}
uses the realistic grid topology of the French transmission system
captured in 2018 with annual time series data for the renewable
generation capacity and load demand.}
\tb{ For the PGLib test cases, each instance has different active and
reactive load demands $p^d$ and $q^d$, i.e., $x:=\{p^d,q^d\}$. For
\texttt{France\_2018}, the generation capacity of the renewable
generators is also varying in addition to the load demands. As
such, for the PGLib cases, the dimension of $x$ is
$\dim\left(x\right)=2|\mathcal{L}|$, and for \texttt{France\_2018}, the
input of the mapping is increased to
$x:=\{p^d,q^d,\{\overline{p}_i^{g} \}_{i \in \mathcal{G}^r}\}$, where
$\overline{p}$ represents the upper bounds of the active generation,
and $\mathcal{G}^r$ is the set of renewable generator indices. Note that
among 1890 generators in this system, there are 1609 renewable
non-dispatchable generators including hydro, solar, and wind
generators. }
{\em From Table~\ref{tab:case_spec}, observe that the dimension of the
output $y$ is much higher than that of $x$: this implies that the
DNN for the OPF will necessitate an excessive number of trainable
parameters.} It is the goal of this paper to propose a scalable approach that mitigates this curse of dimensionality.
\tb{Note that one can recover the whole optimal solution estimates
from the active generations and voltage magnitudes by solving a
power flow problem as in \cite{zamzam2020learning}. This could be
combined with the methods proposed herein to provide further
reduction in the output space at the cost of a more costly training
or inference procedure. However, considering the full output space,
i.e., $y:=\{p^g,q^g,v,\theta\}$, makes it possible to use Lagrangian
Duality \cite{fioretto2020predicting,chatzos2020high} or Primal-Dual
Learning \cite{park2022self} frameworks to improve the learning
procedure further.}
\section{Low Rank Representation of the AC-OPF Solution} \label{sec:prelim}
\begin{table}[!t] \centering \small \setlength{\tabcolsep}{2pt}
\begin{tabular}{@{}l|cccccc@{}}
\toprule Test case & $|\mathcal{N}|$ & $|\mathcal{G}|$ & $|\mathcal{L}|$ & $|\mathcal{E}|$ &$\dim\!\left(x\right)$ &$\dim\!\left(y\right)$\\ \midrule \texttt{300\_ieee} & 300 & 69 & 201 & 411 & 402 & 738 \\ \texttt{793\_goc} & 793 & 97 & 507 & 913 & 1014 & 1780 \\ \texttt{1354\_pegase} & 1354 & 260 & 673 & 1991 & 1346 & 3228 \\ \texttt{3022\_goc} & 3022 & 327 & 1574 & 4135 & 3148 & 6698 \\ \texttt{4917\_goc} & 4917 & 567 & 2619 & 6726 & 5238 & 10968 \\ \texttt{6515\_rte} & 6515 & 684 & 3673 & 9037 & 7346 & 14398 \\ \texttt{9241\_pegase} & 9241 & 1445 & 4895 & 16049 & 9790 & 21372 \\ \texttt{13659\_pegase} & 13659 & 4092 & 5544 & 20467 & 11088 & 35502 \\ \texttt{30000\_goc} & 30000 & 3526 & 10648& 35393 & 21296 & 67052 \\ \texttt{\tb{France\_2018}} & 6708 & 1890 & 6262 & 8965 & 14133 & 17196 \\ \bottomrule \end{tabular} \caption{\tb{Specifications of the AC-OPF Test Cases.}} \label{tab:case_spec} \end{table}
This section presents data analysis to motivate {\sc Compact Learning}{}. Again, Table~\ref{tab:case_spec} describes the test cases. They range from 300 to 30,000 buses and from 69 to 3526 generators. The table also specifies the input and output dimensions of the learning problem: the output dimension is large in sharp contrast to many classification problems in computer vision for instance. The experiments in this section are based on 20,000 instances for each test case: their optimal solutions were obtained via {\sc Ipopt} \cite{wachter2006implementation}. \tb{To generate the instances for
each test case in PGLib, the active loads were sampled from a
truncated multivariate Gaussian distribution as} \begin{equation}\label{eq:perturb_load}
p^d \sim \mathcal{T}\mathcal{N}\left( p^d_0,\Sigma, (1-\mu)p^d_0, \mu p^d_0 \right), \end{equation} \tb{ where $p^d_0$ denotes the baseline active loads and $\Sigma$ is the covariance matrix. The element of the covariance matrix $\Sigma_{ij}$ is defined using the correlation coefficient $\rho$ as $\Sigma_{ij} = \rho \sigma_i \sigma_j$ where $\sigma_i$ and $\sigma_j$ are the standard deviations of $p^d_i$ and $p^d_j$, respectively. Also, $\rho=1$ when $i=j$ and $\rho=0.5$ otherwise. $\mu$ is set to $0.5$, meaning that the active load demands were set to be perturbed by $\pm50\%$. The reactive loads $q^d$ were sampled from a uniform distribution ranging from $0.8$ to $1.0$ of the baseline values. } This perturbation method follows the protocols used in \cite{zamzam2020learning,donti2021dc3,park2022self}. \tb{For \texttt{France\_2018}, the experiments use the historical 2020 load demand and renewable generations data at the 30-minute granularity, which is publicly available at \cite{eco2mix}. These historical time series data are disaggregated spatially and interpolated to have 5-minute granularity following the protocol introduced in \cite{chatzos2022data}.} A PCA was performed on the 20,000 optimal solutions for each test case, which led to a number of interesting findings.
\begin{figure}
\caption{\tb{Explained Variance Ratios of PCA For the Various Principal Component Ratios for AC-OPF Instances (solid lines) and Natural Image Data (dashed lines).}}
\label{fig:pca}
\end{figure}
\begin{figure}
\caption{Histograms of Active Generations (left), Voltage Magnitudes (middle), and Principal Components (right) of the 20,000 Optimal Solutions in \texttt{1354\_pegase}. Three largest components on average are illustrated. The x-axis is normalized to $[0,1]$ using the minimum and maximum values.}
\label{fig:hist}
\end{figure}
\paragraph{(Almost) Lossless Compression} A key observation of the analysis is that PCA achieves an almost lossless compression with a few principal components. This is highlighted in Figure~\ref{fig:pca} where the x-axis represents the principal component ratio (i.e., the ratio of the number of the principal components in use to the dimension of the optimal solution) and the y-axis represents the explained variance ratio (i.e., the ratio of the cumulative sum of eigenvalues of the principal components in descending order to the sum of all eigenvalues). The explained variance ratio is a proxy for how much the information is preserved within the chosen low-rank representation. For instance, the figure shows that $1\%$ of principal components preserves $99.92\%$ of the information in the AC-OPF optimal solutions for \texttt{13659\_pegase}. The detailed values are shown in Table~\ref{tab:pca_ratio}, which highlights that the compression is almost lossless with a 10\% principal component ratio. This contrasts with data instances in computer vision, as exemplified by the MNIST \cite{mnist}, CIFAR10 \cite{cifar10}, and Omniglot \cite{omniglot} datasets. This result is encouraging: it suggests that optimal solutions could be recovered with negligible losses when learning takes place in a low-rank space of a few principal components, potentially reducing the size of the mapping function substantially.
\paragraph{Larger Test Cases Need Fewer Principal Components} \tb{Figure~\ref{fig:pca} and Table~\ref{tab:pca_ratio} also highlight a desirable trend: larger test cases need a lower ratio of principal components to obtain the same level of explained variance ratio.} For instance, the explained variance ratios of \texttt{13659\_pegase} and \texttt{30000\_goc} with
a principal component ratio of $1\%$ are $99.92\%$ and $99.99\%$ respectively. In contrast, \texttt{300\_ieee}, the smallest test case, has $93.92\%$ of explained variance ratio. This observation shows that reducing the dimensionality through PCA is more effective for the bigger test cases and will be used in deciding the learning architecture for different test cases.
\begin{table}[!t] \centering \small \begin{tabular}{@{}lcccc@{}} \toprule
&\multicolumn{4}{c}{Principal Component Ratio} \\
\cmidrule(lr){2-5} Test case & 1\% & 5\% & 10\% & 20\% \\ \midrule \texttt{300\_ieee} & 93.92 & 99.56 & 99.97 & 99.99\\ \texttt{793\_goc} & 98.05 & 99.98 & 100.00 & 100.00\\ \texttt{1354\_pegase} & 99.04 & 99.98 & 100.00 & 100.00\\ \texttt{3022\_goc} & 98.64 & 99.99 & 100.00 & 100.00\\ \texttt{4917\_goc} & 99.03 & 99.99 & 100.00 & 100.00\\ \texttt{6515\_rte} & 99.74 & 99.99 & 100.00 & 100.00\\ \texttt{9241\_pegase} & 99.76 & 100.00 & 100.00 & 100.00\\ \texttt{13659\_pegase} & 99.92 & 100.00 & 100.00 & 100.00\\ \texttt{30000\_goc} & 99.99 & 100.00 & 100.00 & 100.00\\ \texttt{\tb{France\_2018}} & 99.75 & 99.99 & 100.00 & 100.00\\\midrule MNIST & 44.45 & 79.12 & 89.17 & 95.27\\ CIFAR10 & 62.09 & 82.34 & 88.37 & 93.29\\ Omniglot & 16.55 & 44.26 & 59.06 & 73.31\\ \bottomrule \end{tabular} \caption{\tb{Explained Variance Ratios (\%) on Various Principal Component Ratios from 1\% to 20\%.}} \label{tab:pca_ratio} \end{table}
\paragraph{Smoother Distributions on the Principal Components} Figure~\ref{fig:hist} provides some intuition for why learning in the space of the principal components is appealing. The figure shows the distributions of three active powers (left) and voltage magnitudes (middle) in the optimal solutions for the \texttt{1354\_pegase} test case. The values are plotted in log-scaled, highlighting the skewed nature of the active powers and voltage magnitudes: indeed, most values lie on their extreme limits. This has been observed before, leading to the use of the \emph{classification-then-regression} approach \cite{chen2022learning}. However, fortunately, Figure~\ref{fig:hist} (right) shows that the distribution on the principal components is well-posed: it is more convenient to learn the regression to the low-rank space with a few principal components rather than to the original optimal solution space directly. As a result, the regression learning in the space of principal components should be easier than in the original space.
\section{Compact Optimization Learning} \label{sec:method}
\begin{figure}
\caption{Schematic View of the Plain Approach (left) and {\sc Compact Learning}{} (right).}
\label{fig:schematic}
\end{figure}
The findings in Section~\ref{sec:prelim} suggest a {\sc Compact Learning}{} model whose outputs are in the subspace defined by the principal components. This enables to decrease the number of trainable parameters in the DNN-based mapping function, reducing the memory footprint significantly.
\subsection{Compact Learning} The idea underlying {\sc Compact Learning}{} is to {\em jointly learn the principal components and the mapping between the original inputs and the outputs in the subspace of the principal components.} Figure~\ref{fig:schematic} contrasts the overall architecture of the {\sc Compact Learning}{} with the conventional plain learning approach. The plain approach learns a mapping $y=M_\phi(x)$ from an input configuration $x$ to an optimal solution $y$, where $\phi$ are the associated trainable parameters. Since $y$ is high-dimensional, it is desirable to have a large number of parameters to obtain accurate solution estimates. {\sc Compact Learning}{} in contrast learns a mapping $z=R_\phi(x)$ from an input configuration $x$ into an output $z$ in the subspace of principal components before recovering an optimal solution prediction as $y = W z$. The output dimension of $R_{\phi}$, $\dim\left(z\right)$, in {\sc Compact Learning}{} should be substantially smaller than the dimension of $y$, making it possible to reduce the number of trainable parameters. In the upcoming sections, the way of learning $R$ and $W$ simultaneously is detailed.
\subsection{Learning the Principal Components} \label{ssec:principal_components}
Let $p$ and $d$ be the dimensions of $z$ and $y$, respectively, i.e., $p=\dim\left(z\right)$ and $d=\dim\left(y\right)$. The goal of {\sc Compact Learning}{} is to have $p$ substantially smaller than $d$, i.e., $p\ll d$. Matrix $W\in\mathbb{R}^{d\times p}$ is unitary, i.e., $W^\top W=I$, and its columns are composed of the orthonormal principal components of the output space associated with $p$ largest eigenvalues. It is obviously possible to obtain $W$ through PCA, but this computation takes a substantial amount memory when there are many instances.
Instead, {\sc Compact Learning}{} uses the Generalized Hebbian Algorithm (GHA), also called Sanger's rule \cite{sanger1989optimal}, to learn $W$ in a stochastic manner. GHA is specified in Algorithm~\ref{alg:GHA}. Using the mini-batch of optimal solutions $\{y^{*(i)}\}$, it first updates the element-wise running mean $\mu$ and variance $\sigma^2$ of optimal solution using the momentum parameter $\beta$. The momentum parameter $\beta$ is set to zero for the first iteration, and from the second iteration it is set to a value ranged $(0.0,1.0)$. The optimal solution $y^{*(i)}$ is normalized using the running mean and variance as shown in line~\ref{algline:GHA:normalize} in Algorithm~\ref{alg:GHA}. GHA uses Gram-Schmidt process to find out the $\Delta W$ of the $p$ leading principal components, where $\mathcal{LT}[\cdot]$ represents the lower triangular matrix. Once $\Delta W$ is determined, $W$ is updated by adding $\gamma\Delta W$, where $\gamma$ is the learning rate that decreases as the epoch number $e$ increases, i.e., \begin{equation*}
\gamma = \max\{\gamma_{\text{min}}, \gamma_{\text{init}}/(0.01e)\}. \end{equation*} In the update rule, $\gamma_{\text{init}}$ and $\gamma_{\text{min}}$ are hyperparameters representing the initial and minimum learning rate, respectively.
\tb{ One can simply use deterministic PCA to define $W$ instead of
using GHA. However this requires significant computing time before
starting the training, especially when dealing with a large number
of data instances of high dimensionality. In contrast, GHA
amortizes the computational effort over the training procedure
leading to a better handling of the largest test cases. }
\subsection{Training the Compact Learning Model} \label{ssec:training}
\begin{algorithm}[!t] \caption{Generalized Hebbian Algorithm,\\ \texttt{GHA}$(\{y^{(i)}\}_{i \in [B]},\!W,\!\mu,\!\sigma)$ } \label{alg:GHA} \textbf{Parameters}:\\ \-\hspace{0.1in}$\beta$: a momentum parameter\\ \-\hspace{0.1in}$\gamma$: a learning rate for updating $W$ \\ \-\hspace{0.1in}$\epsilon$: a small positive constant for numerical stability \begin{algorithmic}[1] \State $m \gets \frac{1}{B}\sum_{i=1}^B y^{(i)}$ \State $s^2 \gets \frac{1}{B}\sum_{i=1}^B (y^{(i)}-m)^2$ \State $\mu \gets \beta\mu + (1-\beta) m$ \Comment{update running mean} \State $\sigma^2 \gets \beta\sigma^2 + (1-\beta) s^2$ \Comment{update running variance} \State $\hat{y}^{(i)} \gets (y^{(i)}-\mu)/\sqrt{\sigma^2+\epsilon}$, $\;\forall i \in [B]$ \Comment{normalize $y$}\label{algline:GHA:normalize} \State $\Delta W \!=\! \frac{1}{B}\!\sum_{i=1}^B \!\left(\hat{y}^{(i)}\hat{y}^{(i)\top} W \!-\! W \mathcal{LT}[W^\top \hat{y}^{(i)} \hat{y}^{(i)\top} W]\right)$ \State $W \gets W + \gamma\Delta W$ \Comment{update $W$} \State \textbf{return} $W$, $\mu$, $\sigma$. \end{algorithmic} \end{algorithm}
\begin{algorithm}[!t] \caption{{\sc Compact Learning}{}} \label{alg:compact_learning} \begin{algorithmic}[1] \For{$k$=1\dots} \State Sample $\{(x^{(i)},y^{*(i)})\}_{i \in [B]}$ from $\mathcal{D}$ \State $W,\mu,\sigma \gets$ \texttt{GHA}$(\{y^{*(i)}\}_{i \in [B]},W,\mu,\sigma)$ \label{algline:compact:GHA} \State $z^{(i)} = R_{\phi}(x^{(i)})$, $\;\forall i \in [B]$ \State $y^{(i)} = \sqrt{\sigma^2\!+\!\epsilon} \,W z^{(i)}\!+\!\mu$, $\;\forall i \in [B]$ \label{algline:compact:denorm} \State Update $\phi$ with $\mathcal{L}=\frac{1}{B}\sum_{i=1}^B\norm{{y^{(i)}}-{y^{*(i)}}}_1$ \EndFor{} \end{algorithmic} \end{algorithm}
The training process of {\sc Compact Learning}{} is summarized in Algorithm~\ref{alg:compact_learning} {\em which jointly learns the principal components $W$ and the mapping $R$.} Each iteration samples a mini-batch of size $B$ and applies the GHA Algorithm~\ref{alg:GHA} (line \ref{algline:compact:GHA} in Algorithm~\ref{alg:compact_learning}). The mapping $R$ then produces $z^{(i)}$ for each input $x^{(i)}$ and the output $y^{(i)}$ is obtained through the mapping $W z^{(i)}$ and a denormalization step (line \ref{algline:compact:denorm} in Algorithm~\ref{alg:compact_learning}). Finally, the parameters $\phi$ associated with $R$ are updated using backpropagation from the loss function $\mathcal{L}$, which captures the mean absolute error between the prediction $y^{(i)}$ and the optimal solution $y^{*(i)}$ (ground truth).
\subsection{Restoring Feasibility} \label{alg:postprocessing}
The predictions from {\sc Compact Learning}{} may violate the physical and engineering constraints of the AC-OPF. Some applications are required to address these infeasibilities and this study considers two post-processing methods for this purpose: (1) solving the power flow problem; and (2) warm-starting the exact AC-OPF solver.
\subsubsection{Power Flow}
The power flow problem, seeded with a prediction from the {\sc Compact Learning}{} model, restores the feasibility of the physical constraints. Formally, the power flow problem can be formulated as: \begin{equation} \label{eq:pf_formulation} \begin{aligned} &&&\text{\textbf{find}} && p^g, q^g, v, \theta,\\ &&&\text{\textbf{subject to }} && \text{Eq.~\eqref{eq:acopf_cnst_flow1}, \eqref{eq:acopf_cnst_flow2}},\\ &&& && \text{Eq.~\eqref{eq:acopf_cnst_balance}}. \end{aligned} \end{equation} In the power flow problem, like in \cite{chatzos2021spatial}, the active power injections and voltage magnitudes at the PV buses are fixed to the predictions. The power flow problem can then be solved by the Newton method to satisfy the physical constraints, i.e., {\em Ohm's law} (Eq.~\eqref{eq:acopf_cnst_flow1} and \eqref{eq:acopf_cnst_flow2}) and \emph{Kirchhoff’s Current Law} (Eq.~\eqref{eq:acopf_cnst_balance}). Finding a solution to the PF problem typically takes significantly less time than solving the AC-OPF. However, the solution may violate some of the engineering constraints of the AC-OPF.
\subsubsection{Warm-Starting the AC-OPF solver}
It is possible to remove all infeasibilities by warm-starting an AC-OPF solver with the {\sc Compact Learning}{} predictions. This study uses the primal-dual interior point algorithm {\sc Ipopt} as a solver, which is a standard tool for solving AC-OPF problems \cite{babaeinejadsarookolaee2019power,gopinath2022benchmarking}. Moreover, warm-starts for the primal-dual interior point algorithm seem to benefit significantly from dual initial points. Hence, the {\sc Compact Learning}{} model was generalized to predict dual optimal values for all constraints in addition to the primal optimal solutions. {\sc Ipopt} is then warm-started with both primal and dual predictions to obtain an optimal solution.
\section{Computational Experiments} \label{sec:exp}
\subsection{The Experiment Setting} \label{ssec:exp_setting}
\tb{The performance of {\sc Compact Learning}{} is demonstrated using nine test cases
from PGLIB v21.07 and the realistic version of the French power
system (denoted as \texttt{France\_2018}) as described in
Table~\ref{tab:case_spec}. A total of 52,000 instances were
generated. 50,000 instances are used for training and the remaining
2,000 instances are tested for reporting the performance results.
For the PGLib cases, these instances were obtained by perturbing the
load demands as Eq~\eqref{eq:perturb_load} in
Section~\ref{sec:prelim}. } \tb{ For \texttt{France\_2018}, 100,000
instances for training are generated by perturbing the instances in
September. Specifically, the upper bounds of active generation of
the wind and hydro renewable generators are perturbed by replacing
them with samples from
$\mathcal{N}(\overline{p}^g_i,0.2(\overline{p}^g_i-\underline{p}^g_i))$.
Also, the upper bounds of solar generators and load demands are
perturbed by a multiplicative factor sampled from a multivariate Gaussian
$\mathcal{N}(\mathbf{1}, \Sigma)$ where $\Sigma$ is based on the
correlation coefficient of $0.8$. The perturbed upper bounds of
active generation are ensured to be greater than or equal to the
corresponding lower bounds. Also, 2000 realistic test instances are
extracted from the instances in October for reporting the
performance. As such, for this test case, the distribution of test
instances is not necessarily the same as that of the training
instances. Note that accurate renewable forecasting would improve
the quality of the training instances, but this experiment setting
for \texttt{France\_2018} should provide realistic and difficult
circumstances where the model may be deployed for real operations. } The instances were solved using Pyomo \cite{hart2017pyomo} and {\sc
Ipopt} v3.12 \cite{wachter2006implementation} with the HSL ma27 linear solver.
The performance of {\sc Compact Learning}{} is compared with the plain approach that directly outputs the optimal solution. As in \cite{chatzos2021spatial}, four fully-connected layers followed by ReLU activations are used for the mapping functions for both {\sc Compact Learning}{} and plain learning approaches. For the plain approach, two distinct models are evaluated; the first model, which is named {\sc
Plain-Large}, has $d$ hidden nodes for each fully-connected layer (where $d$ is the output dimension). The second baseline, which is named {\sc Plain-Small}, has the $p$ hidden nodes for each layer (where $p$ is the number of principal components considered in {\sc Compact Learning}{}). The {\sc Compact Learning}{} model has the same number of weight parameters as {\sc
Plain-Small}, but the last layer of the {\sc Compact Learning}{} model is learned through GHA. Indeed, {\sc Plain-Small} has an encoder-decoder structure as the number of the hidden nodes is smaller than that of the output. \tb{ The ratio of $p$ to $d$, which is also called
principal component ratio, is set to $5\%$ for six smaller test
cases (up to \texttt{6515\_rte}) and to $1\%$ for four bigger cases.
A mini-batch of $64$ instances is used, and the maximum epoch is set
to $1,000$. } The models are trained using the Adam optimizer \cite{kingma2014adam} with a learning rate of $1\mathrm{e}{\textrm{-}4}$, which is decreased at $900$ epochs by $0.1$. The overall implementation used PyTorch and the models were trained on a machine with a NVIDIA Tesla V100 GPU and Intel Xeon 2.7GHz. For GHA (Algorithm~\ref{alg:GHA}), the momentum parameter $\beta$ is set to $0.9999$ from the second iteration. The initial and minimum learning rate ($\gamma_{\text{init}}$ and $\gamma_{\text{min}}$) are set to $1\mathrm{e}{\textrm{-}4}$, and $1\mathrm{e}{\textrm{-}8}$, respectively. The parameter $\epsilon$ to prevent ill-conditioning is set to $1\mathrm{e}{\textrm{-}8}$.
\subsection{Learning Performance} \label{ssec:exp_result_performance}
Table~\ref{tab:performance} reports the accuracy of the models for predicting optimal solutions. Five distinct models with randomly initialized trainable parameters per method were trained: the results report the average results and the standard deviations (in parentheses). The table shows the averaged value of optimality gaps and maximum constraint violations on the 2,000 test instances. The optimality gap is calculated as
$100\times|\frac{f(y)-f(y^*)}{f(y^*)}|$ where $f(y)$ is the objective function~\eqref{eq:acopf_obj_simple}. It also reports the maximum constraint violations (in per unit), \tb{which is computed as} \begin{equation}
\max\Bigl\{\max_{i}\max\{g_i(y),0\},\; \max_i |h_i(y)|\Bigr\}, \end{equation} \tb{ where $g_i(y)$ and $h_i(y)$ are, respectively, the inequality and
equality constraints enumerated in Model~\ref{model:acopf_simple}. } The first two sets of columns represent the performance of the plain approaches. Note that, because of the high dimensionality of the output and the limited GPU memory, {\sc Plain-Large} is only applicable to the four smaller test cases (up to \texttt{3022\_goc}): this is the limitation that motivated this study. When comparing the two plain models, {\sc Plain-Large} performs better than {\sc
Plain-Small} as {\sc Plain-Small} trades off the accuracy for scalability. {\sc Compact Learning}{} almost always performs better than the two plain approaches. In particular, it produces predictions with significantly fewer violations (sometimes by an order of magnitude), while also delivering smaller optimality gaps on the larger test cases.
Table~\ref{tab:params} shows the number of trainable parameters of the {\sc Compact Learning}{} model and the plain approaches. The architectures of {\sc Compact Learning}{} and {\sc Plain-Small} are exactly the same, except for the last layer: hence the number of trainable parameters of {\sc Compact Learning}{} is smaller than those for {\sc Plain-Small} by the dimension of $W$. Table~\ref{tab:params} clearly shows that {\sc Compact Learning}{} has the smallest number of trainable parameters. Overall, Table~\ref{tab:performance} and Table~\ref{tab:params} indicate that {\sc Compact Learning}{} provides an accurate and scalable approach to predict AC-OPF solutions.
\begin{table*}[!t] \centering \small
\begin{tabular}{@{}l|cccccc@{}} \toprule
& \multicolumn{2}{c}{\sc{Plain-Small}} & \multicolumn{2}{c}{\sc{Plain-Large}} & \multicolumn{2}{c}{{\sc Compact Learning}{} (proposed)} \\
\cmidrule(lr){2-3}\cmidrule(lr){4-5}\cmidrule(lr){6-7} Test case & Opt. Gap(\%) & Viol. & Opt. Gap(\%) & Viol. & Opt. Gap(\%) & Viol. \\ \midrule \texttt{300\_ieee} & 0.9886(0.0287) & 3.7282(0.0758) & \bb{0.1804}(0.0087) & 0.9256(0.0117) & 0.1859(0.0079) & \bb{0.5509}(0.0025) \\ \texttt{793\_goc} & 0.0579(0.0067) & 3.1791(0.0985) & \bb{0.0182}(0.0013) & 0.5825(0.0046) & 0.0305(0.0046) & \bb{0.2768}(0.0033) \\ \texttt{1354\_pegase} & 0.1494(0.0142) & 6.6858(0.1248) & \bb{0.0711}(0.0094) & 3.0360(0.1221) & 0.0942(0.0089) & \bb{0.5044}(0.0081) \\ \texttt{3022\_goc} & 0.0745(0.0105) & 3.2394(0.0846) & 0.0588(0.0094) & 1.5837(0.1432) & \bb{0.0485}(0.0076) & \bb{0.3906}(0.0185) \\ \texttt{4917\_goc} & 0.0852(0.0118) & 2.6590(0.1006) & - & - & \bb{0.0527}(0.0087) & \bb{0.5456}(0.0276) \\ \texttt{6515\_rte} & 0.7232(0.0364) & 5.8648(0.2185) & - & - & \bb{0.3065}(0.0177) & \bb{0.7519}(0.0290) \\ \texttt{9241\_pegase} & 0.1521(0.0138) & 6.7379(0.1980) & - & - & \bb{0.1344}(0.0097) & \bb{1.1791}(0.0725) \\ \texttt{13659\_pegase} & 0.0910(0.0057) & 4.8141(0.0685) & - & - & \bb{0.0745}(0.0046) & \bb{1.2915}(0.0497)\\ \texttt{30000\_goc} & 0.2296(0.0224) & 4.7172(0.0678) & - & - & \bb{0.1091}(0.0112) & \bb{0.0770}(0.0188)\\ \texttt{\tb{France\_2018}} & 1.4872(0.1352) & 47.4012(3.4524) & - & - & \bb{1.2869}(0.0845) & \bb{2.1041}(0.2218)\\ \bottomrule \end{tabular} \caption{\tb{Performance Results of {\sc Compact Learning}{} (Proposed) and Conventional Plain Learning
Approaches (Baselines). Std. dev. in parenthesis is evaluated across five
independent runs. \emph{Viol.}: the mean value of the maximum
constraint violations (in per unit) on the test instances. The best
optimality gap (\emph{Opt. Gap}) and maximum violation values are in
bold.}} \label{tab:performance} \end{table*}
\begin{table}[!b] \centering \small
\begin{tabular}{@{}l|ccc@{}} \toprule Test case & \sc{Plain-Small} & \sc{Plain-Large} & {\sc Compact} \\ \midrule \texttt{300} & 0.045 & 2.479 & 0.019 \\ \texttt{793} & 0.274 & 14.487 & 0.122 \\ \texttt{1354} & 0.818 & 46.041 & 0.321 \\ \texttt{3022} & 3.631 & 200.572 & 1.499 \\ \texttt{4917} & 9.795 & - & 4.074 \\ \texttt{6515} & 17.202 & - & 7.353 \\ \texttt{9241} & 6.796 & - & 2.268 \\ \texttt{13659} & 16.954 & - & 4.442 \\ \texttt{30000} & 60.610 & - & 16.067 \\ \tb{\texttt{France}}& 51.612 & - & 30.510 \\ \bottomrule \end{tabular} \caption{The Number of Trainable Parameters (in Millions) in the Models Trained by {\sc Compact Learning}{} and Plain Approaches.} \label{tab:params} \end{table}
\subsection{Post-processing: Power Flow} \label{ssec:exp_result_pf} One model among five trained models was randomly chosen for testing post-processing approaches and the results were evaluated on the same 2,000 test instances. Table~\ref{tab:pf_result} reports the constraint violations after applying the power flow model seeded with the predictions from the {\sc Compact Learning}{} and {\sc Plain-Small} models. The table also reports the time to solve the power flow problem. The results show that {\sc Compact Learning}{} produces power flow solutions with the smallest constraint violations, sometimes by an order of magnitude. The results of {\sc Compact Learning}{} are particularly impressive because the majority of the constraints are satisfied after applying the power flow. Note also that the power flow is fast enough to be used during real-time operations, opening interesting avenues for the use of learning and optimization in practice.
\subsection{Post-processing: Warm-start} \label{ssec:exp_result_warmstart}
Table \ref{tab:warmstart} and Figure \ref{fig:warmstart} report the results for warm-starts. The proposed warm-starting approach, WS:{\sc Compact}(P+D), is compared with the following warm-starting strategies: \begin{itemize}
\item Flat Start: $p^g$, $q^g$, $v$ are started with their minimum values, and initial $\theta$ is set to zero. This is a default setting without warm-start.
\item WS:DC-OPF(P): Motivated from \cite{venzke2020inexact}, the primal solution of the DC-OPF is used as a warm-starting point for solving AC-OPF.
\item WS:AC-OPF(P): The primal solution of the AC-OPF is used as a warm-starting point for solving AC-OPF again.
\item {WS:\sc Plain-Small(Large)(P)}: The primal predictions from the plain approaches are used as warm-starting points.
\item {WS:\sc Plain-Small(Large)(P+D)}: The primal and dual predictions from the plain approaches are used as warm-starting points.
\item WS:{\sc Compact}(P): The primal predictions from {\sc Compact Learning}{} are used as warm-starting points.
\item WS:{\sc Compact}(P+D): The proposed warm-starting approach. The primal and dual predictions from {\sc Compact Learning}{} are used as warm-starting points. \end{itemize}
\begin{table*}[!t] \centering \small
\begin{tabular}{@{}l|ccccc|ccccc@{}} \toprule
& \multicolumn{5}{c}{\sc{Plain-Small}} & \multicolumn{5}{c}{{\sc Compact Learning}{}} \\
\cmidrule(lr){2-6}\cmidrule(lr){7-11}
& \multicolumn{2}{c}{Eq.(\ref{eq:acopf_cnst_pgbound}-\ref{eq:acopf_cnst_vm})} & \multicolumn{2}{c}{Eq.~\eqref{eq:acopf_cnst_thermal_limit}}
& Time
& \multicolumn{2}{c}{Eq.(\ref{eq:acopf_cnst_pgbound}-\ref{eq:acopf_cnst_vm})} & \multicolumn{2}{c}{Eq.~\eqref{eq:acopf_cnst_thermal_limit}}
& Time \\ Test case & Viol.(p.u.) & Sat(\%)
& Viol.(MVA) & Sat(\%)
& sec.
& Viol.(p.u.) & Sat(\%)
& Viol.(MVA) & Sat(\%)
& sec.
\\ \midrule
\texttt{4917\_goc} & 0.1148 & 99.77 & 13.5821 & 99.47 & 1.07 & {\bf 0.0739} & 99.88 & {\bf 2.3134}& 99.50 & 1.15 \\ \texttt{6515\_rte} & 0.2220 & 99.80 & 6.0533 & 99.92 & 1.54 & {\bf 0.0841} & 99.92 & {\bf 1.5261}& 99.95 & 1.58 \\ \texttt{9241\_pegase} & 0.1338 &99.47 & 10.3893 & 99.98 & 3.63 & {\bf 0.0893} &99.82& {\bf 2.4869}& 99.97 & 3.58 \\ \texttt{13659\_pegase} & 0.2192 & 99.85 & 14.5279 & 99.99 & 7.11 & {\bf 0.0852} & 99.97 & {\bf 3.1203}& 100.00 & 7.01 \\ \texttt{30000\_goc} & 0.0249 & 99.97 & 4.5279 & 100.00 & 10.27 & {\bf 0.0192}& 100.00 & {\bf 1.1958} & 100.00 & 10.11 \\ \texttt{France\_2018} & 0.4073 & 99.98 & 11.5033 & 99.02 & 1.86 & {\bf 0.3788} & 99.98 & {\bf 3.2053} & 99.08 & 1.93 \\ \bottomrule \end{tabular} \caption{\tb{Averaged Maximum Violations and Ratio of Satisfied constraints (\%) after Applying the Power Flow to AC-OPF Problems with $>$4,000 Buses.}} \label{tab:pf_result} \end{table*}
\begin{table*}[!t] \centering \small \setlength{\tabcolsep}{2.5pt}
\begin{tabular}{lcc|rrrrrrr} \toprule
& Primal & Dual & \multicolumn{1}{c}{\texttt{4917}} & \multicolumn{1}{c}{\texttt{6515}} & \multicolumn{1}{c}{\texttt{9241}} & \multicolumn{1}{c}{\texttt{13659}} & \multicolumn{1}{c}{\texttt{30000}} & \multicolumn{1}{c}{\texttt{France}} \\ \midrule Flat Start & & & \multicolumn{1}{c}{8.90s} & \multicolumn{1}{c}{69.90s} & \multicolumn{1}{c}{44.67s} & \multicolumn{1}{c}{42.79s} & \multicolumn{1}{c}{163.48s} & \multicolumn{1}{c}{26.78s} \\ \midrule WS:AC-OPF(P) & \multirow{5}{*}{\ding{51}{}} & \multirow{5}{*}{\ding{55}{}} & 6.64s(1.34\texttimes) & 10.10s(7.06\texttimes) & 20.27s(2.22\texttimes) & 28.45s(1.53\texttimes) & 116.33s(1.43\texttimes) & 9.66s(2.62\texttimes)\\ WS:DC-OPF(P) & & & 10.68s(0.83\texttimes) & 75.55s(0.90\texttimes) & 53.59s(0.83\texttimes) & 51.35s(0.83\texttimes) & 196.17s(0.83\texttimes) & 30.11s(0.85\texttimes)\\ WS:\sc{Plain-Small(P)} & & & 6.69s(1.33\texttimes) & 10.06s(7.09\texttimes) & 20.24s(2.22\texttimes) & 28.24s(1.54\texttimes) & 116.23s(1.43\texttimes) & 10.04s(2.54\texttimes)\\ WS:{\sc Compact}(P) & & & 6.63s(1.34\texttimes) & 10.05s(7.10\texttimes) & 20.38s(2.21\texttimes) & 28.41s(1.53\texttimes) & 116.65s(1.43\texttimes) & 9.83s(2.61\texttimes)\\ \midrule WS:\sc{Plain-Small(P+D)}& \multirow{3}{*}{\ding{51}{}} & \multirow{3}{*}{\ding{51}{}} & 3.16s(2.89\texttimes) & 5.12s(14.79\texttimes) & 17.31s(3.17\texttimes) & 14.31s(3.05\texttimes) & 16.54s(10.98\texttimes) & 7.98s(3.65\texttimes)\\ WS:{\sc Compact}(P+D) & & & 2.49s(\bf{3.62}\texttimes) & 4.86s(\bf{15.26}\texttimes) & 15.59s(\bf{3.58}\texttimes) & 10.32s(\bf{4.23}\texttimes) & 15.66s(\bf{11.43}\texttimes) & 5.90s(\bf{4.84}\texttimes)\\ \bottomrule \end{tabular} \caption{\tb{Averaged Elapsed Times (s) to Solve the AC-OPF Problems with $>$4,000 Buses and Averaged Speed-up Factor to Flat Start. The Best Values are in Bold.}} \label{tab:warmstart} \end{table*}
\begin{figure*}
\caption{\tb{The Number of AC-OPF Instances Solved Within Various Elapsed Time Limits by Warm-Start (WS) and Flat Start.}}
\label{fig:warmstart}
\end{figure*}
\noindent Obviously, WS:DC-OPF(P) and WS:AC-OPF(P) need to solve the first problem to obtain the warm-starting points. For those, the time taken to solve the first problem is excluded in the reported elapsed time performance. Note that WS:AC-OPF(P) gives a virtual upper bound of the speed-up for primal-only warm-start for the primal-dual interior point algorithm. Also note that except for DC-OPF, the experiment does not include other convex relaxations of AC-OPF (e.g., the quadratic convex relaxation \cite{coffrin2015qc} and the semidefinite programming relaxations \cite{bai2008semidefinite}), since solving those relaxed problems takes significant time.
To predict dual solutions, an additional mapping function consisting of four fully-connected layers is trained for {\sc Compact Learning}{} and the plain approaches. The sizes of these networks are the same as those for the primal solutions. When using both the primal and dual warm-starting, the initial barrier parameter of {\sc Ipopt} is set to $1\mathrm{e}{\textrm{-}6}$ because the initial warm-starting point is closer to the optimal than the flat start. The convergence tolerance is set to $1\mathrm{e}{\textrm{-}4}$ for all cases.
Table \ref{tab:warmstart} reports the elapsed times and the corresponding speed-up ratio for the warm-start approaches. The first key observation is that it is critical to use both primal and dual warm-starts: only using the primal predictions is not effective in reducing the computation times of {\sc Ipopt}. This is not too surprising given the implementation of interior-point methods. Primal-dual warm-starts however produce significant benefits. WS:{\sc Compact}(P+D) produces the best results for all test cases. In particular, it yields a speed-up of $15.26$\texttimes{} for \texttt{6515\_rte}. \tb{ Also, even for the realistic French system
(\texttt{France\_2018}), WS:{\sc Compact}(P+D) gives a speed-up of
$4.84$\texttimes{}.} This is significant given the realism of this test case and highlights the potential of the combination of {\sc Compact Learning}{} and optimization to deploy AC-OPF in real operations. Observe also that WS:{\sc Compact}(P+D) strongly dominates the other approaches, including its WS:{\sc Plain-Small(P+D)} counterpart. This was anticipated by its prediction errors reported earlier. Figure \ref{fig:warmstart} depicts these results visually: it plots the number of AC-OPF instances solved over time by the various warm-starting methods. The plot clearly demonstrates the benefits of {\sc Compact Learning}{} for predicting both primal and dual solutions.
\section{Conclusions} \label{sec:conclusions}
This paper has proposed {\sc Compact Learning}{}, a novel approach to predict optimal solutions to industry size OPF problems. {\sc Compact Learning}{} was motivated by the lack of scalability of existing ML methods for this task. This difficulty stems from the dimension of the output space, which is large-scale in industry size AC-OPF problems. To address this issue, {\sc Compact Learning}{} applies the PCA on the output space and learns in the subspace of a few leading principal components. It then combines this learning step with the GHA to learn the principal components, which is then used to transform the predictions back into the output space. Experimental results on industry size OPF problems show that {\sc Compact Learning}{} is more accurate than existing approaches both in terms of optimality gaps and constraint violations, sometimes by an order of magnitude.
The paper also shows that the predictions can be used to accelerate the AC-OPF. In particular, the results show that the power flow problems seeded by the {\sc Compact Learning}{} predictions have significantly fewer violations of the engineering constraints (while satisfying the physical constraints) for systems with up to 30,000 buses. Moreover, and even more interestingly, {\sc Compact Learning}{} can be used to warm-start an OPF solver with optimal predictions for both the primal and dual variables. The results show that {\sc Compact Learning}{} can produce significant speed-ups.
{\em Together these results indicate that {\sc Compact Learning}{} is the first method to
produce high-quality predictions for the industry size OPFs that
translate to significant practical benefits.} There are also many opportunities for future research. {\sc Compact Learning}{} is general-purpose and can be applied to other problems with large output space, which is typically the case in optimization. Nonlinear compression through autoencoder structure can be also considered for general-purpose optimization learning. \tb{ On OPF problems, the performance of {\sc Compact Learning}{}
can be improved by adopting concepts from Lagrangian Duality,
physics-informed networks, and different types of backbone
architectures. The dual solution learning is particularly
challenging in the experiments given its high dimensionality, and it
would be interesting to study how it could be improved and
simplified. Also, {\sc Compact Learning}{} can be extended to learning solutions to
more challenging problems such as Optimal Transmission Switching and
Security-Constrained OPF in power system applications. }
\section*{Acknowledgments} \tb{The authors thank Minas Chatzos for the discussions regarding the
implementation of the power flow problem. We also would like to
express our gratitude to the anonymous reviewers whose insightful
comments have greatly improved the quality of this paper. This
research is partly supported by NSF Awards 2007095 and 2112533.}
\end{document} |
\begin{document}
\title{Efficient high-fidelity quantum computation using matter qubits
and linear optics}
\author{Sean D.\ Barrett} \email{sean.barrett@hp.com}
\author{Pieter Kok} \email{pieter.kok@hp.com}
\affiliation{Hewlett Packard Laboratories, Filton Road, Stoke Gifford, Bristol BS34 8QZ, UK}
\date{\today}
\begin{abstract}
We propose a practical, scalable, and efficient scheme for
quantum computation using spatially separated matter qubits and
single photon interference effects. The qubit systems can be
NV-centers in diamond, Pauli-blockade quantum dots with an excess
electron or trapped ions with optical transitions, which are each
placed in a cavity and subsequently entangled using a
double-heralded single-photon detection scheme. The fidelity of
the resulting entanglement is extremely robust against the most
important errors such as detector loss, spontaneous emission, and
mismatch of cavity parameters. We demonstrate how this entangling
operation can be used to efficiently generate \emph{cluster
states} of many qubits, which, together with single qubit
operations and readout, can be used to implement universal
quantum computation. Existing experimental parameters indicate
that high fidelity clusters can be generated with a moderate
constant overhead. \end{abstract}
\pacs{32.80.-t, 78.70.-g}
\maketitle
Quantum computation (QC) offers a potentially exponential computational speed-up over classical computers, and many physical implementations have been proposed. Particularly promising proposals are those in which unitary operations and readout in matter qubits are implemented via laser-driven optical transitions. Examples are the original ion-trap proposal \cite{cirac95}, NV-centers in diamond \cite{JelezkoPRL2004}, and schemes utilizing the Pauli-blockade effect in quantum dots with a single excess electron \cite{Pazy2003,NazirSpin2004}. Single qubit operations and readout, using a combination of optical and RF control fields, have already been demonstrated in ion trap and NV-diamond systems \cite{TeleportationInnsbruck,TeleportationNIST,JelezkoPRL2004}, while a number of promising techniques for optically addressing quantum dot spin qubits have been proposed \cite{Pazy2003,NazirSpin2004}. In all these cases, the ratio of the single qubit operation time to the intrinsic decoherence times suggests that very high fidelity operations are possible.
However, there are substantial difficulties in \emph{scaling} these implementations to the large numbers of qubits required for useful QC. Multi-qubit gates are facilitated by a direct interaction between qubits. Thus adding a new qubit to a quantum register, together with the associated control fields, necessarily modifies the Hamiltonian of the system. This can mean that, as more qubits are added, logic gate implementations become progressively more complex, and furthermore, new decoherence channels can be introduced. Furthermore, the need to optically address individual qubits (e.g. in NV diamond or quantum dot systems) can lead to seemingly contradictory system requirements: the qubits need to be sufficiently well separated to be resolved by the optical field, but must be close enough such that two-qubit logic can be implemented via the inter-qubit interaction.
\begin{figure}\label{fig1}
\end{figure}
A potentially promising solution to these scaling challenges is to perform \emph{distributed} quantum computing, in which the matter qubits are spatially separated. In this case, there is no direct interaction between the qubits. Instead, Entangling Operations (EOs) between qubits are implemented via single photon interference effects. A number of schemes to entangle pairs of distant qubits in this way have been proposed \cite{cabrillo99,bose99,Feng2003,Duan2003,Browne2003,Simon2003}. Recently, it has been shown that unitary logic gates can be performed in this manner \cite{Protsenko,Zou}. However, the latter schemes are either inherently non-deterministic \cite{Zou} or are sensitive to photon loss or photodetector inefficiency \cite{Protsenko} and it is not clear whether they can be used for scalable QC. Other schemes using single photon interference effects together with local {\em two-qubit} unitary operations have also been proposed \cite{Duan2004,Taylor}.
In this Letter, we propose a \emph{fully scalable} scheme for distributed QC using individual matter qubits assuming only single qubit operations. Our scheme is robust to photon loss and other sources of errors, and uses optical transitions of the qubit system, together with linear optics and photodetection to entangle pairs of spatially separated matter qubits in a non-deterministic manner. A key observation is that even such a non-deterministic EO is sufficient for scalable QC: our EO can be used to efficiently generate \emph{cluster states} of many qubits, which, together with single qubit operations and measurements, are capable of universal QC \cite{rausschendorf01}. In the context of linear optics QC \cite{knill01}, it has recently been shown \cite{yoran03,nielsen04,browne04} that the cluster state model can be used to significantly reduce the resource overheads required for scalable QC.
We consider matter systems comprised of two long-lived, low lying states $|\text{\mbox{$\uparrow$}}\rangle$ and $|\text{\mbox{$\downarrow$}}\rangle$, and one excited state
$|e\rangle$, in an $L$-configuration (see Fig.~\ref{fig1}). The system is constructed in such a way that an optical $\pi$-pulse will induce the transformation $|\text{\mbox{$\downarrow$}}\rangle \rightarrow |e\rangle$ and
$|\text{\mbox{$\uparrow$}}\rangle \rightarrow |\text{\mbox{$\uparrow$}}\rangle$. The transition $|\text{\mbox{$\uparrow$}}\rangle
\leftrightarrow |e\rangle$ is forbidden, e.g., by a selection rule. The states $|\text{\mbox{$\uparrow$}}\rangle$ and $|\text{\mbox{$\downarrow$}}\rangle$ represent the logical qubit states $|0\rangle$ and $|1\rangle$ respectively. We assume that high fidelity single qubit operations and measurements can be performed on these logical qubits. Physical systems that have a suitable level structure include NV-centers in diamond \cite{JelezkoPRL2004}, quantum dots with a single excess electron
\cite{Pazy2003,NazirSpin2004}, and various trapped ion and atomic systems. Each such system is embedded in a separate optical cavity, such that only the $|\text{\mbox{$\downarrow$}}\rangle \leftrightarrow |e\rangle$ transition is coupled to the cavity mode. One end of each cavity is leaky, with the leakage rate of the $i^{\mathrm{th}}$ cavity given by $2\kappa_i$. The light escaping from the cavities is mixed on a 50:50 beam splitter, the output modes of which are monitored by two vacuum-discriminating detectors, $D_+$ and $D_-$, with efficiency $\eta$.
The scheme proceeds as follows. Firstly, both qubits are prepared in the state $|+\rangle = \left(|\text{\mbox{$\uparrow$}}\rangle + |\text{\mbox{$\downarrow$}}\rangle\right)/\sqrt{2}$
using local unitaries. We then implement the following sequence of operations: {\em i}) Apply an optical $\pi$-pulse to each qubit, coherently pumping the population in the $|\text{\mbox{$\downarrow$}}\rangle$ state into the $|e\rangle$ state; {\em ii}) Wait for up to a time $t_{\textrm{wait}}$ for a photo-detection event in either $D_+$ or $D_-$; {\em iii}) Wait for a further time $t_{\textrm{relax}}$ for any remaining excitation in the qubit-cavity systems to relax;
{\em iv}) Apply an $X$ operation to both qubits, coherently flipping the spins as $|\text{\mbox{$\uparrow$}}\rangle \to |\text{\mbox{$\downarrow$}}\rangle$ and $|\text{\mbox{$\downarrow$}}\rangle \to
|\text{\mbox{$\uparrow$}}\rangle$; {\em v}) Repeat steps {\em i}--{\em iii}.
Appropriate values for $t_{\textrm{wait}}$ and $t_{\textrm{relax}}$ are determined by the system parameters discussed below. If zero or two photo-detection events are observed on either round of the procedure, the scheme has failed, and the qubits must be newly prepared before re-attempting the entangling procedure. On the other hand, if one (and only one) photo-detection event is observed on each round of the protocol, the scheme has succeeded, and a maximally entangled state is prepared \emph{with unit fidelity} (given ideal systems). We call this technique {\em double heralding}, and it turns out to be exceedingly robust against most common experimental errors.
We analyzed the scheme in detail using the quantum trajectories formalism \cite{carmichael}. For clarity, we first consider the ideal case, in which the detectors have unit efficiency
($\eta=1$), and spontaneous emission of photons from the transition $|e\rangle \rightarrow |\text{\mbox{$\downarrow$}}\rangle$ into modes other than the cavity mode is neglected. During time periods where no detector clicks are observed, the conditional state of the system, in the interaction picture, evolves smoothly according to the effective Hamiltonian ($\hbar=1$) \begin{equation}
H_{\textrm{eff}} = \sum_{i=A,B} \frac{g_i}{2} \left( | \text{\mbox{$\downarrow$}}
\rangle_i{}_i\langle e | \hat{c}^\dag_i + \textrm{H. c.} \right) - i \sum_{i=A,B} \kappa_i \hat{c}^\dag_i \hat{c}_i \,. \end{equation} Here, $g_i$ denotes the Jaynes-Cummings coupling between the
$|e\rangle_i \leftrightarrow |\text{\mbox{$\downarrow$}}\rangle_i$ transition and the mode of the $i^{\mathrm{th}}$ cavity, and $\hat{c}_i$ is the corresponding annihilation operator. For the purpose of illustrating the ideal case, we assume that systems $A$ and $B$ are identical, such that $g_A = g_B=g$ and $\kappa_A = \kappa_B = \kappa$, and that $\kappa \ge g$.
When a single click is observed in detector $D_\pm$, the state of the whole system discontinuously evolves as $|\psi(t)\rangle
\rightarrow \hat{c}_\pm |\psi(t)\rangle$, where $\hat{c}_\pm = (\hat{c}_A \pm \hat{c}_B)/\sqrt{2}$ denotes the corresponding jump operators. Thus, after steps {\em i}--{\em ii} of the entangling protocol, conditioned on observing a detector click at time $t_1 \le t_{\textrm{wait}}$, the unnormalized state of the whole system is \begin{eqnarray}
|\tilde{\psi}(t_1)\rangle
&=& \alpha(t_1) |\Psi^\pm\rangle +
\alpha(t_1)\beta(t_1)\frac{|\text{\mbox{$\downarrow$}},0;e,0\rangle \pm
|e,0;\text{\mbox{$\downarrow$}},0\rangle}{\sqrt{2}} \cr &&+
2\alpha^2(t_1)\frac{|\text{\mbox{$\downarrow$}},0;\text{\mbox{$\downarrow$}},1\rangle \pm
|\text{\mbox{$\downarrow$}},1;\text{\mbox{$\downarrow$}},0\rangle}{\sqrt{2}} \,. \label{StateAfterClick1} \end{eqnarray}
Here, $|q_A,p_A;q_B,p_B\rangle$ is the state of the whole system, with $q_{A(B)}$ and $p_{A(B)}$ denoting the states of matter system $A$ $(B)$ and cavity mode $A$ $(B)$ respectively,
$|\Psi^\pm\rangle = (|\text{\mbox{$\downarrow$}},0;\text{\mbox{$\uparrow$}},0\rangle \pm
|\text{\mbox{$\uparrow$}},0;\text{\mbox{$\downarrow$}},0\rangle)/\sqrt{2}$ are maximally entangled states, $\alpha(t) = -ig /(4 \sqrt{\kappa^2 - g^2}) \left(e^{-\Gamma_{\textrm{slow}} t/2} -e^{-\Gamma_{\textrm{fast}} t/2} \right)$ and $\beta(t) = \frac{1}{2}(1 + \kappa/ \sqrt{\kappa^2-g^2}) e^{-\Gamma_{\textrm{slow}} t/2} + \frac{1}{2}(1- \kappa/\sqrt{\kappa^2-g^2}) e^{-\Gamma_{\textrm{fast}} t/2}$, where $\Gamma_{\textrm{fast}} = \kappa+\sqrt{\kappa^2-g^2}$ and $\Gamma_{\textrm{slow}} = \kappa-\sqrt{\kappa^2-g^2}$. In order to obtain a detector click with significant probability, $t_{\textrm{wait}}$ should be chosen to be a few times $\Gamma_{\textrm{slow}}^{-1}$.
Equation (\ref{StateAfterClick1}) implies that it may be possible to observe a \emph{second} detector click on the \emph{first} round of the protocol. However, realistic photo-detectors typically cannot resolve two photons arriving in quick succession \cite{kok00}. Within the quantum trajectories description, this can be simulated by assuming that no information is available from either detector after the first click. After a time $t_{\textrm{relax}} \gg \Gamma_{\textrm{slow}}^{-1}$, the system decoheres to the state \begin{equation}
\rho = \frac{1}{N}|\Psi^\pm\rangle\langle\Psi^\pm| +
\left(1-\frac{1}{N} \right)| \text{\mbox{$\downarrow$}} \text{\mbox{$\downarrow$}}\rangle\langle
\text{\mbox{$\downarrow$}}\text{\mbox{$\downarrow$}} | \,, \label{StateAfterFirstRelaxation} \end{equation}
where $N = 1 + |\beta(t_1)|^2 + |\alpha(t_1)|^2$. The undesirable second term in
Eq.~(\ref{StateAfterFirstRelaxation}) is removed by applying steps {\em iv} and {\em v} of the entangling procedure. If a photo-detection occurs on the second round, the final state of the system is a pure, maximally entangled state. If the two clicks are observed in the same (different) detector(s), the final state is $|\Psi^+\rangle$ ($|\Psi^-\rangle$). Each of the four possible successful outcomes occurs with probability $1/8$, leading to a total success probability of $p=\frac{1}{2}$.
We also analyzed the scheme in the non-ideal case, allowing for less than perfect detector efficiency $\eta<1$, and finite spontaneous emission into free space ($\gamma_1 = \gamma_2 = \gamma > 0$). These imperfections do not reduce the fidelity of the final state, but do reduce the success probability (see Fig.~\ref{p_succ}a). Note $p$ has a quadratic dependence on $\eta$, while $p$ decreases rapidly for $\gamma \gtrsim \Gamma_{\textrm{slow}}$.
The dominating experimental imperfections that do reduce the fidelity can be classified into three groups: (1) decoherence of the matter qubits; (2) dark counts in the detectors; and (3) imperfect mode matching of the photons incident on the beam splitter. Firstly, the effect of spin decoherence depends on the way the cluster states are generated, and can be estimated by comparing the spin decoherence time $t_{\textrm{d}}$ with the ``clock time'' $t_{\textrm{c}} \sim 10 \Gamma^{-1}_{\textrm{slow}}$ at which the EO can be repeated. If the preparation of cluster states is performed in parallel, the typical time overhead is $m$ clock cycles (see below). Thus the average age of a qubit the moment it is added to the cluster is $(m/2)t_{\textrm{c}}$, and $m \lesssim 8$ for reasonable detector efficiencies. Assuming a reasonably strong cavity qubit coupling, $g=100 \gamma$, and critically damped cavities ($g\approx\kappa$), the size of errors due to spin decoherence is given by $\varepsilon \sim (m/2)t_{\textrm{c}}/t_{\textrm{d}} \sim 0.4 \gamma^{-1} / t_{\textrm{d}}$. For instance, for the NV-diamond system ($\gamma^{-1}=25$ ns \cite{beveratos2001} and $t_{\textrm{d}} = 32$ $\mu$s \cite{Kennedy2002}), we have $\varepsilon \sim 3 \times 10^{-4}$.
Secondly, detector dark counts on either round of the EO can lead to a spurious `success' of the EO, which can reduce the fidelity of the entanglement. For existing APD detectors, dark count rates are typically $\Gamma_{\textrm{dc}} < 500$ s$^{-1}$ \cite{PerkinElmerDataSheet}. Dark counts can be made negligible by observing the detector output only for the window $t_{\textrm{wait}} \sim 3 \Gamma^{-1}_{\textrm{slow}}$ ($\sim 1$ ns for NV-diamond). The probability of a spurious count is therefore $p_{\textrm{dc}}=\Gamma_{\textrm{dc}}t_{\textrm{wait}} \sim 10^{-7}$. Thus dark counts should have a negligible effect on the cluster fidelity.
Finally, imperfect mode matching of the photons emitted by the matter qubit-cavity systems reduces the fidelity, because the photons carry information regarding their origin. Non identical central frequencies, different polarizations, and spatio-temporal mode shapes of the photons can all reduce the fidelity. The frequency of the photons emitted from cavity $i$ depends on the frequencies of both the
$|\text{\mbox{$\downarrow$}}\rangle_i \leftrightarrow |e\rangle_i$ transition ($\omega_{\text{\mbox{$\downarrow$}} e, i}$) and the cavity mode ($\omega_{\textrm{cav}, i}$). The $\omega_{\text{\mbox{$\downarrow$}} e, i}$'s can be tuned independently, e.g. by using local electric and magnetic fields to induce Stark and Zeeman shifts. The $\omega_{\textrm{cav}, i}$'s can also be accurately and independently tuned, e.g. by using strain-tunable silica microcavities \cite{vonKlitzing2001}, or piezoelectrically tuned fibre optic microcavities \cite{JasonSmithPrivateCommunication}. The polarization of the emitted photons can be accurately matched using linear optical elements \cite{BachorTextbook}. The spatio-temporal mode shapes of the emitted photons depend on the $g_i$'s and $\kappa_i$'s of the respective cavities. These parameters depend on the structure of the cavities, and hence are more difficult to calibrate once the cavities have been fabricated. However, we calculated that the EO is rather robust to mismatches in the $g_i$'s and $\kappa_i$'s (see Fig \ref{p_succ}b): mismatches of a few percent lead to a reduction in fidelity of less than $10^{-3}$. Deterministic sources of indistinguishable photons, which have similar requirements to those needed for our scheme, are currently being developed by a number of groups \cite{Pelton2002,Stace2003,meier2004,beveratos2001,McKeever2004}.
\begin{figure}\label{p_succ}
\end{figure}
The next step towards scalable quantum computers is linking qubits together into cluster states, using the EO described above. A cluster state of qubits $\left\{q_1,q_2,..q_N \right\}$ can be represented graphically by a collection of qubit nodes connected by edges connecting neighboring qubits, as depicted in Fig.~\ref{fig2}a.
A linear cluster of $N$ qubits (a {\em chain}) may be represented in the form $|C\rangle_{1...N}=(|\text{\mbox{$\uparrow$}}\rangle_1 + |\text{\mbox{$\downarrow$}} \rangle_1 Z_2
)(|\text{\mbox{$\uparrow$}}\rangle_2 + |\text{\mbox{$\downarrow$}} \rangle_2 Z_3 )...(|\text{\mbox{$\uparrow$}}\rangle_N + |\text{\mbox{$\downarrow$}} \rangle_N )$, where $Z_i$ represents the Pauli phase-flip operation acting on qubit $i$. Such linear clusters can be grown using our EO, as we now describe. Given a cluster $|C\rangle_{2...N}$, qubit 1 can be added to the end of the cluster by first preparing qubit 1 in the state $|+\rangle_1 \equiv |\text{\mbox{$\downarrow$}}\rangle_1 + |\text{\mbox{$\uparrow$}}\rangle_1$ and then applying the EO to qubits 1 and 2. If the EO is successful, the resulting state is of the form $(|\text{\mbox{$\uparrow$}}\rangle_1 |\text{\mbox{$\downarrow$}}\rangle_2 \pm |\text{\mbox{$\downarrow$}} \rangle_1|\text{\mbox{$\uparrow$}}
\rangle_2 Z_3)|C\rangle_{3...N}$, depending on whether both clicks were observed in the same detector. This can be transformed into a cluster state by applying the local operations $H_1 X_2$ or $X_1 H_1 X_2$, conditional on the outcome of the EO (here $H_i$ is the Hadamard operation, and $X_i$ the Pauli operator implementing a bit flip). If the EO fails, the state of qubit 2 is, in general, unknown. However, measuring qubit 2 in the computational basis removes qubit 2 from the cluster, but projects qubits $\{3 \ldots N\}$ back into a pure cluster state. Therefore, failure of the EO causes the original cluster to shrink by 1 qubit.
\begin{figure}
\caption{Cluster states with arbitrary single-qubit measurements can
perform a universal quantum computation. a) The qubits (circles) are
entangled with their nearest horizontal neighbor via
EOs (depicted by lines between the
qubits), and gates between computational qubits are incorporated by
the vertical lines. b) Linear clusters can be joined together by applying
the EO between qubits $A_1$ and $B_1$, and subsequently performing single
qubit operations and measurements.}
\label{fig2}
\end{figure}
Repeatedly applying the procedure described above allows long chains to be grown. However, the theoretical upper limit on the success probability of our protocol is $p=\frac{1}{2}$, and when the protocol fails, the chain shrinks by 1 qubit. Therefore, with only the procedure described above one cannot create large clusters efficiently. If recycling of the clusters after a failure \cite{browne04} is not performed, the average number of EOs required to create a cluster of $m$ qubits is $N_{\mathrm{EO}} = \sum_{i=1}^{m-1} p^{-i} = p^{1-m}(1-p^{m-1})/(1-p)$.
A way around this problem is a `divide and conquer' approach, in which short chains are grown inefficiently, and then joined to a longer cluster using the EO together with local operations. The EO can be used to join two clusters, as shown in Fig.~\ref{fig2}b. The initial state of two chains, $A$ and $B$ of length $N$ and $m$
respectively, may be written $(|C\rangle_{\{A\}} |\text{\mbox{$\uparrow$}}\rangle_{A_1} +
Z_{A_2}|C\rangle_{\{A\}} |\text{\mbox{$\downarrow$}}\rangle_{A_1})(|\text{\mbox{$\uparrow$}}\rangle_{B_1} |C\rangle_{\{B\}}
+ Z_{B_2} |\text{\mbox{$\downarrow$}}\rangle_{B_1} |C\rangle_{\{B\}})$, where $|C\rangle_{\{A\}}$
($|C\rangle_{\{B\}}$) represents a linear cluster state of qubits $\{A_2 \ldots A_N\}$ ($\{B_2 \ldots B_m\}$). These chains can be joined together by first performing the local operation $X_{A_1}$, then applying the EO between qubits $A_1$ and $B_1$, and measuring
$B_1$ in the basis $|\pm\rangle_{B_1} \equiv |\text{\mbox{$\downarrow$}}\rangle_{B_1} \pm
|\text{\mbox{$\uparrow$}}\rangle_{B_1}$. If the EO was successful, the remaining qubits are left in the state $\pm |C\rangle_{\{A\}} |\text{\mbox{$\uparrow$}}\rangle_{A_1}|C\rangle_{\{B\}}
\pm Z_{A_2}|C\rangle_{\{A\}} |\text{\mbox{$\downarrow$}}\rangle_{A_1} Z_{B_2}|C\rangle_{\{B\}}$, where the first sign depends on the outcome of the $B_1$ measurement, and the second sign depends on the outcome of the EO. Applying a local operation to qubit $A_1$ yields a cluster state, of length $N+m-1$, of qubits $\{A_N, \ldots A_1, B_2, \ldots B_m\}$. If the EO fails, qubit $A_1$ must be measured in the computational basis, and the original cluster state shrinks by 1 qubit. Thus the average length of the new cluster is $L = p(N+m-1) + (1-p) (N-1)$. In order that the cluster grows on average, we require $L>N$, which implies that the length of the short chains should satisfy $m>1/p$.
Chains of fixed length $m$ can be grown independently using the EO, either by sequentially adding single qubits to the end of a cluster, or by joining sub-chains together. Growing these $m$-chains adds a constant overhead cost to the cluster generation process. For example, growing a 4-chain (without recycling) requires on average $p^{-3} + p^{-2} + p^{-1}$ applications of the EO, and each attempt to join such a chain adds on average $4p-1$ qubits to the large cluster, leading to a total cost of $C_4 = (p^{-3} + p^{-2} + p^{-1} + 1)/(4p-1)$ EOs per qubit added to the large cluster. A 5-chain can be grown by joining two 3-chains together. Joining such 5-chains to a longer cluster leads to a total cost of $C_5 = (2p^{-3} + 2p^{-2} + p^{-1} + 1)/(5p-1)$ EOs per qubit. To minimize these costs, the collection and detection efficiencies should be maximized. For example, for $p\approx 0.24$ (or $\eta = 70\%$ with $\gamma=0$), we require $m=5$, and we find $C_5 = 775$. A modest improvement in detector efficiency dramatically reduces the overhead cost: for $\eta=85\%$ and $\gamma=0$, we find $C_4 = 73.4$.
Note that there may be more efficient schemes for growing linear clusters using our EO (e.g. employing recycling of small clusters \cite{browne04}) which yield lower overhead costs.
In order to build linear chains into two-dimensional cluster states capable of simulating arbitrary logic networks, cross links between linear chains must be constructed \cite{nielsen04}. Such a link can be created by first using the EO to create an $I$ shaped cluster (see Fig \ref{fig2}(a)) offline, for some fixed cost. Provided the arms of this $I$-cluster are sufficiently long, the EO can be used to join the $I$-cluster to a pair of linear clusters with a high probability, and therefore create a cross link between the clusters. This leads to a constant overhead cost per cross link added to the cluster. Note that other methods for creating two-dimensional clusters, e.g. using microclusters \cite{nielsen04} or redundant encoding \cite{browne04}, have also been proposed.
Our proposal has a number of very desirable features with respect to practical implementations. Firstly, our scheme requires only a simple level structure and single-qubit operations. Secondly, photon loss does not reduce the fidelity of the entangled state of the qubits, but merely adds to the constant overhead cost. Thirdly, owing to the simplicity of the optical networks used in this scheme, mode matching should be relatively straightforward. Fourthly, the scheme is inherently \emph{distributed}: individual qubit-cavity systems can be placed in distant labs, and connected by optical fibers. This means that our scheme lends itself naturally to distributed applications, such as quantum repeaters \cite{Briegel98} and quantum cryptography \cite{ekert91}. Finally, many of the techniques described here have been demonstrated experimentally, and the system requirements needed to create high-fidelity cluster states do not seem prohibitively restrictive.
While preparing this manuscript, we became aware of an alternative scheme that may also be used for generating cluster states of matter qubits \cite{Lim2004}. We thank Tim Spiller and Bill Munro for valuable discussions and careful reading of the manuscript. The authors are supported by the E.U. Nanomagiq and Ramboq projects.
\end{document} |
\begin{document}
\title{The chromatic polynomial for cycle graphs} \author[J. Lee]{Jonghyeon Lee} \address[Jonghyeon Lee]{ Department of Mathematics \\Inha University \\Incheon 22212, Korea} \email{orie73@naver.com}
\author[H. Shin]{Heesung Shin$^\dagger$} \address[Heesung Shin]{ Department of Mathematics \\Inha University \\Incheon 22212, Korea} \email{shin@inha.ac.kr} \date{\today} \thanks{$\dagger$ Corresponding author. This work was supported by the National Research Foundation of Korea(NRF) grant funded by the Korea government(MSIP) (No. 2017R1C1B2008269).}
\begin{abstract} Let $P(G,\lambda)$ denote the number of proper vertex colorings of $G$ with $\lambda$ colors. The chromatic polynomial $P(C_n,\lambda)$ for the cycle graph $C_n$ is well-known as $$P(C_n,\lambda) = (\lambda-1)^n+(-1)^n(\lambda-1)$$ for all positive integers $n\ge 1$. Also its inductive proof is widely well-known by the \emph{deletion-contraction recurrence}. In this paper, we give this inductive proof again and three other proofs of this formula of the chromatic polynomial for the cycle graph $C_n$. \end{abstract}
\maketitle
\section{Introduction} \label{sec:intro} The number of proper colorings of a graph with finitely many colors was introduced only for planar graphs by George David Birkhoff \cite{Bir12} in 1912, in an attempt to prove the four color theorem, where the formula for this number was later called the chromatic polynomial. In 1932, Hassler Whitney \cite{Whi32} generalized Birkhoff's formula from the planar graphs to general graphs. In 1968, Ronald Cedric Read \cite{Rea68} introduced the concept of chromatically equivalent graphs and asked which polynomials are the chromatic polynomials of some graph, a question that remains open.
\subsection*{Chromatic polynomial} For a graph $G$, a \emph{coloring} means almost always a \emph{(proper) vertex coloring}, which is a labeling of vertices of $G$ with colors such that no two adjacent vertices have the same colors. Let $P(G,\lambda)$ denote the number of (proper) vertex colorings of $G$ with $\lambda$ colors and $\chi(G)$ the least number $\lambda$ satisfying $P(G, \lambda)>0$, where $P(G, \lambda)$ and $\chi(G)$ are called a \emph{chromatic polynomial} and \emph{chromatic number} of $G$, respectively.
In fact, it is clear that the number of $\lambda$-colorings is a polynomial in $\lambda$ from a deletion-contraction recurrence. \begin{prop}[Deletion-contraction recurrence] For a given a graph $G$ and an edge $e$ in $G$, we have \begin{align} P(G,\lambda) = P(G-e,\lambda) - P(G/e,\lambda), \label{eq:rec} \end{align} where $G-e$ is a graph obtained by deletion the edge $e$ and $G/e$ is a graph obtained by contraction the edge $e$. \end{prop}
\begin{eg} The chromatic polynomials of graphs in Figure~\ref{fig:rec} are \begin{align*} P(G,\lambda) &=\lambda(\lambda-1)^2(\lambda-2),\\ P(G-e,\lambda)&=\lambda^2(\lambda-1)(\lambda-2), \text{ and}\\ P(G/e,\lambda)&=\lambda(\lambda-1)(\lambda-2). \end{align*} It is confirmed that \eqref{eq:rec} is true for the graph $G$ and the edge $e$ in Figure~\ref{fig:rec}. \end{eg}
\begin{figure}
\caption{$G$ , $G-e$ and $G/e$}
\label{fig:rec}
\end{figure}
\subsection*{Cycle graph} A \emph{cycle graph $C_n$} is a graph that consists of a single cycle of length $n$, which could be drawn as an $n$-polygonal graph in a plane. The chromatic polynomial for the cycle graph $C_n$ is well-known as follows. \begin{thm} \label{thm:main} For a positive integer $n\ge1$, the chromatic polynomial for the cycle graph $C_n$ is \begin{align} P(C_n,\lambda) = (\lambda-1)^n+(-1)^n(\lambda-1). \label{eq:main} \end{align} \end{thm}
\begin{eg} For an integer $n \le 3$, it is easily checked that the chromatic polynomials of $C_n$ are from \eqref{eq:main} as follows. \begin{align*} P(C_1, \lambda) &= (\lambda-1)+(-1)(\lambda-1) = 0 ,\\ P(C_2, \lambda) &= (\lambda-1)^2+(-1)^2(\lambda-1) = \lambda(\lambda-1),\\ P(C_3, \lambda) &= (\lambda-1)^3+(-1)^3(\lambda-1) = \lambda(\lambda-1)(\lambda-2). \end{align*}
As shown in Figure~\ref{fig:cycle}, the cycle graph $C_1$ is a graph with one vertex and one loop and $C_1$ cannot be colored, which means $P(C_1, \lambda) = 0$. The cycle graph $C_2$ is a graph with two vertices joined by two edges, and $C_2$ can have colorings by assigning the two vertices different colors, which means $P(C_2, \lambda) = \lambda(\lambda-1)$. The cycle graph $C_3$ is drawn by a triangle and $C_3$ can have colorings by assigning all three vertices different colors, which means $P(C_3, \lambda) = \lambda(\lambda-1)(\lambda-2)$.
\begin{figure}
\caption{$C_n$ $(1 \leq n \leq 5)$}
\label{fig:cycle}
\end{figure}
\end{eg}
\section{Four proofs of Theorem~\ref{thm:main}} In this section, we show the formula~\eqref{eq:main} in four different ways.
\subsection{Inductive proof} This inductive proof is widely well-known. A \emph{path graph $P_n$} is a connected graph in which $n-1$ edges connect $n$ vertices of vertex degree at most $2$, which could be drawn on a single straight line. The chromatic polynomial for path graph $P_n$ is easily obtained by coloring all vertices $v_1, \dots, v_n$ where $v_i$ and $v_{i+1}$ have different colors for $i=1, \dots, n-1$. \begin{lem} \label{lem:path} For a positive integer $n\ge1$, the chromatic polynomial for path graph $P_n$ is \begin{align} P(P_n,\lambda)=\lambda(\lambda-1)^{n-1}. \label{eq:path} \end{align} \end{lem}
We use an induction on the number $n$ of vertices by the deletion-contraction recurrence and the above lemma for path graph: It is already shown that \eqref{eq:main} is true for $n\le3$ by the example in Section~\ref{sec:intro}. Assume that \eqref{eq:main} is true for a positive integer $n$. Using \eqref{eq:rec} and \eqref{eq:path}, we have \begin{align*} P(C_{n+1},\lambda) &=P(C_{n+1}-e,\lambda)-P(C_{n+1}/e,\lambda) \tag*{by \eqref{eq:rec}} \\ &=P(P_{n+1},\lambda)-P(C_n,\lambda) \\ &=\lambda(\lambda-1)^n-\left( (\lambda-1)^n+(-1)^n(\lambda-1) \right) \tag*{by \eqref{eq:path}}\\ &=(\lambda-1)^{n+1}+(-1)^{n+1}(\lambda-1). \end{align*}
\begin{figure}
\caption{$C_{n+1}$ , $P_{n+1}$ and $C_n$}
\end{figure}
Thus, \eqref{eq:main} is true for all positive integers $n\ge1$.
\subsection{Proof by inclusion-exclusion principle} The \emph{inclusion-exclusion principle} is a technique of counting the size of the union of finite sets. \begin{prop}[Inclusion-exclusion principle] Let $A_1, A_2, \dots, A_n$ be subsets of a finite set $U$. Then the number of elements outside their union is as follows: \begin{align*} \abs{\bigcap_{i=1}^n {\overline{A_i}}} &=\sum_{I \subseteq [n]} (-1)^{\abs{I}} \abs{\bigcap_{i\in I} A_i}\\ &=\abs{U} - \sum_{i=1}^{n} \abs{A_i} + \sum_{i<j} \abs{A_i \cap A_j} - \dots +(-1)^n \abs{A_1 \cap \cdots \cap A_n} \end{align*} where $\overline{A}$ is the complement of $A$ in $U$. \end{prop}
Considering every condition that assigns different colors to two adjacent vertices, for each edge $e$, we define a finite set of arbitrary (including improper) colorings that assign the same color to the two vertices joined by the edge $e$.
Let $A_i$ be a set of colorings such that two vertices $v_i$ and $v_{i+1}$ are of same color, where $v_{n+1}$ is regarded as $v_1$. Applying the inclusion-exclusion principle, we can write the following \begin{align*} P(C_n,\lambda) &=\vert U \vert - \sum_{i=1}^{n} \abs{A_i} + \sum_{i<j} \abs{A_i \cap A_j} + \cdots + (-1)^n \abs{A_1 \cap \dots \cap A_n} \\ &= \lambda^n - \binom{n}{1} \lambda^{n-1} + \binom{n}{2} \lambda^{n-2} + \cdots + (-1)^{n-1} \binom{n}{n-1} \lambda + (-1)^n \lambda \\ &= (\lambda-1)^n - (-1)^n +(-1)^n \lambda \\ &= (\lambda-1)^n+(-1)^n(\lambda-1). \end{align*} Thus, \eqref{eq:main} is true for all positive integers $n\ge1$.
\subsection{Algebraic proof} Let us consider the case of $n=5$ and $\lambda=4$, that is, to assign the vertices of $C_5$ with four colors: red, blue, yellow, and green. Also let us consider a complete graph $K_4$ with vertex names red, blue, yellow, and green, see Figure~\ref{fig:one-one}.
\begin{figure}
\caption{A cycle graph $C_5$ and a graph $K_4$ with names of colors}
\label{fig:one-one}
\end{figure}
When red-blue-red-yellow-green is assigned in order from the vertex $v_1$ to the vertex $v_5$ in $C_5$, it corresponds to a closed walk of length $5$ in $K_4$ which begins and ends at red, that is, it is red-blue-red-yellow-green-red in $K_4$. By generalizing it, we have a correspondence between $\lambda$-colorings of $C_n$ and closed walks of length $n$ in $K_\lambda$. By this correspondence, it is enough to count the number of closed walks of length $n$ in $K_\lambda$, instead of the number of $\lambda$-colorings of $C_n$.
For a graph $G$ with vertex set $\set{v_1, \dots, v_n}$, the \emph{adjacency matrix} of $G$ is an $n \times n$ square matrix $A$ such that its element $A_{ij}$ is one when there is an edge between two vertices $v_i$ and $v_j$, and zero when there is no edge between $v_i$ and $v_j$. \begin{figure}
\caption{A graph $G$ and its adjacency matrix $A$}
\end{figure}
The following fact related to the adjacency matrix is well-known. \begin{prop} \label{prop:adj} Let $A$ be the adjacency matrix of the graph $G$ on $n$ vertices $v_1, \dots, v_n$. Then, for a positive integer $\ell$, the $(i,j)$th entry of the matrix $A^\ell$ is the number of walks of length $\ell$ beginning at $v_i$ and ending at $v_j$. \end{prop}
By Proposition~\ref{prop:adj}, we can calculate the number of closed walks of length $n$ in the complete graph $K_\lambda$: Let $A$ be the adjacency matrix of $K_\lambda$. Then $A$ is a $\lambda \times \lambda$ matrix as follows \begin{align*} A = \left( a_{ij} \right) = \begin{pmatrix}
0 & 1 & \cdots & 1 & 1 \\
1 & 0 & \cdots & 1 & 1 \\
\vdots & \vdots & \ddots & \vdots & \vdots \\
1 & 1 & \cdots & 0 & 1 \\
1 & 1 & \cdots & 1 & 0 \end{pmatrix}, \end{align*} where $a_{ij}=0$ if $i=j$, and otherwise $a_{ij}=1$. So the number of closed walks of length $n$ in $K_\lambda$ is enumerated by $\operatorname{tr}(A^n)$, which equals the sum of all eigenvalues of $A^n$. Also let all eigenvalues of the matrix $A$ be denoted by $u_1, \dots, u_\lambda$, then all eigenvalues of the matrix $A^n$ are $u_1^n, \dots, u_\lambda^n$. \begin{align*} A = \begin{pmatrix}
0 & 1 & \cdots & 1 & 1 \\
1 & 0 & \cdots & 1 & 1 \\
\vdots & \vdots & \ddots & \vdots & \vdots \\
1 & 1 & \cdots & 0 & 1 \\
1 & 1 & \cdots & 1 & 0 \end{pmatrix} \sim \begin{pmatrix}
\lambda-1 & 0 & \cdots & 0 & 0 \\
0 & -1 & \cdots & 0 & 0 \\
\vdots & \vdots & \ddots & \vdots & \vdots \\
0 & 0 & \cdots & -1 & 0 \\
0 & 0 & \cdots & 0 & -1 \end{pmatrix}. \end{align*} Since the matrix $A$ has $\lambda$ eigenvalues $u_1 = \lambda-1$ and $u_2 =\dots=u_\lambda = -1$, we have \begin{align*} \operatorname{tr}(A^n) =\sum_{i=1}^{\lambda}u_i^n = (\lambda-1)^n + \underbrace{(-1)^n + \dots + (-1)^n}_{\text{$\lambda-1$ times}}. \end{align*} Thus, \eqref{eq:main} is true for all positive integers $n\ge1$.
\subsection{Bijective proof} Let $X_n$ denote the set of $\lambda$-colorings of $C_n$ and $[\lambda-1]^n$ be the set of $n$-tuples of positive integers less than $\lambda$, where $[\lambda-1]$ means $\set{1, \dots, \lambda-1}$. We consider a mapping $\varphi$ from $\lambda$-colorings of $C_n$ in $X_n$ to $n$-tuples in $[\lambda-1]^n$.
\subsubsection*{A mapping $\varphi$ from $X_n$ to $[\lambda-1]^n$} The mapping $\varphi:X_n \to [\lambda-1]^n$ is defined as follows: Let $\omega$ be a $\lambda$-coloring of $C_n$ in $X_n$, we write $\omega=(\omega_1, \dots, \omega_n)$ where $\omega_i$ is the color of $v_i$ in $C_n$ and it is obvious that $\omega_i \neq \omega_{i+1}$ for $1 \le i \le n$, where $\omega_{n+1}$ is regarded as $\omega_1$. An entry $\omega_i$ is called a \emph{cyclic descent} of $\omega$ if $\omega_i > \omega_{i+1}$ for $1\leq i\leq n$. Then we define $\varphi(\omega) = \sigma = (\sigma_1, \dots, \sigma_n)$ with \begin{align*} \sigma_i = \begin{cases} \omega_i - 1, & \mbox{\text{if $\omega_i$ is a cyclic descent}}\\ \omega_i, & \mbox{\text{otherwise}}. \end{cases} \end{align*} Given a $\lambda$-coloring $\omega$, if $\omega_i = \lambda$ then $\omega_{i+1} < \lambda$, so $\omega_i=\lambda$ should be a cyclic descent. Thus we have $\sigma_i<\lambda$ for all $1\le i \le n$ and $\varphi(\omega)$ belongs to $[\lambda-1]^n$.
For example, in a case of $n=9$ and $\lambda=4$, $\omega=(1,2,1,3,2,3,1,4,2) \in X_9$ is given as an example of $4$-colorings of $C_9$. Here $\omega_2=2$, $\omega_4=3$, $\omega_6 = 3$, $\omega_8=4$, and $\omega_9=2$ are cyclic descents of $\omega$. So we have $$\varphi(\omega)= \sigma = (1,1,1,2,2,2,1,3,1) \in [3]^9.$$
\subsubsection*{A mapping $\psi$ as the inverse of $\varphi$} Let $Z_n$ be the set of $n$-tuples $\sigma = (\sigma_1, \sigma_2, \dots, \sigma_n)$ in $[\lambda-1]^n$ with $$\sigma_1 = \sigma_2 = \dots = \sigma_n$$ and it is obvious that the size of $Z_n$ is $\lambda-1$.
We would like to describe a mapping $\psi: \left( [\lambda-1]^n\setminus Z_n \right) \to X_n$ in order to satisfy $\varphi\circ\psi$ is the identity on $[\lambda-1]^n\setminus Z_n$ as follows: Given a $\sigma \in [\lambda-1]^n\setminus Z_n$, we define $\overline{\sigma}=(\overline{\sigma}_1, \dots, \overline{\sigma}_n)$ with \begin{align*} \overline{\sigma}_i = \begin{cases} \sigma_i + 1, & \mbox{\text{if $\sigma_i$ is a cyclic descent}}\\ \sigma_i, & \mbox{\text{otherwise}}. \end{cases} \end{align*} Since $\overline{\sigma}$ may have consecutive same entries, we define $\psi(\sigma) = \omega = (\omega_1, \dots, \omega_n)$ from $\overline\sigma$ with $\omega_i = \overline\sigma_i + 1$ for any entry $\overline\sigma_i$ of $\overline\sigma$ with a finite positive even integer $\ell$ satisfying \begin{align*} \overline\sigma_{i} = \overline\sigma_{i+1} = \dots = \overline\sigma_{i+\ell-1} \neq \overline\sigma_{i+\ell}, \end{align*} where $\overline\sigma_{n+k}$ is regarded as $\overline{\sigma}_k$ for $1\le k \le n$, and $\omega_i = \overline\sigma_i$, otherwise. Thus $\omega$ has no consecutive same entries and $1 \le \omega_i \le \lambda$ for all $1\le i \le n$, so $\psi(\sigma)=\omega$ belongs to $X_n$. Moreover, it is obvious that $\sigma_i \le \omega_i \le \sigma_i+1$ for all $1\le i \le n$ and if $\omega_i = \sigma_i + 1$ for some $1 \le i \le n$ then $\omega_i$ is a cyclic descent in $\omega$. Hence $\varphi(\omega) = \sigma$ and $\sigma \in [\lambda-1]^n\setminus Z_n$ if and only if $\psi(\sigma)=\omega$.
In a previous example, $\sigma = (1,1,1,2,2,2,1,3,1)$ is denoted as an example of $9$-tuples in $[3]^9$. Here $\sigma_6 = 2$, $\sigma_8=3$ are cyclic descents of $\sigma$ and we obtain $\overline\sigma=(1,1,1,2,2,3,1,4,1)$. And then there exist only three entries $\overline\sigma_2$, $\overline\sigma_4$, and $\overline\sigma_9$ in $\overline\sigma$ satisfying the following \begin{align*} k=2:& \quad \overline\sigma_2 = \overline\sigma_3 \neq \overline\sigma_4 \quad (\ell=2),\\ k=4:& \quad \overline\sigma_4 = \overline\sigma_5 \neq \overline\sigma_6 \quad (\ell=2), \text{ and }\\ k=9:& \quad \overline\sigma_9 = \overline\sigma_1 = \overline\sigma_2 = \overline\sigma_3 \neq \overline\sigma_4 \quad (\ell=4), \end{align*} so we get $\omega_2 = \overline\sigma_2 + 1 = 2$, $\omega_4= \overline\sigma_4 + 1 =3$, $\omega_9= \overline\sigma_9 + 1 =2$, and $$\psi(\sigma)= \omega = (1,2,1,3,2,3,1,4,2) \in X_9.$$
Let $Y_n$ be the set of $\lambda$-colorings $\omega$ in $X_n$ with $\varphi(\omega) \in Z_n$. Since the two mappings $\varphi$ and $\psi$ are bijections between $X_n \setminus Y_n$ and $[\lambda-1]^n \setminus Z_n$, the size of the set $X_n \setminus Y_n$ is the same as the size of $[\lambda-1]^n \setminus Z_n$, which is equal to $(\lambda-1)^n - (\lambda-1).$
When $n$ is even, for any $1\le i \le \lambda-1$, there exist only two $n$-tuples in $X_n$ \begin{align*} \omega &= (i+1, i, i+1, i, \dots, i+1, i) \quad \text{ and } \quad \omega = (i, i+1, i, i+1, \dots, i, i+1) \end{align*} satisfying $\varphi(\omega) = (i, i, \dots, i) \in Z_n$. If $n$ is even, the size of $Y_n$ is equal to $2(\lambda-1)$ and we obtain \begin{align} P(C_n,\lambda) &= \abs{X_n} = \abs{X_n \setminus Y_n} + \abs{Y_n} \tag*{} \\
&= \left[ (\lambda-1)^n - (\lambda-1) \right] + 2(\lambda-1). \label{eq:even} \end{align}
When $n$ is odd, there are no $n$-tuples $\omega$ in $X_n$ satisfying $\varphi(\omega) \in Z_n$ and the set $Y_n$ is empty. If $n$ is odd, we obtain \begin{align} P(C_n,\lambda) &= \abs{X_n} = \abs{X_n \setminus Y_n} + \abs{Y_n} \tag*{} \\
&= \left[ (\lambda-1)^n - (\lambda-1) \right] + 0. \label{eq:odd} \end{align} Therefore, \eqref{eq:main} yields from \eqref{eq:even} and \eqref{eq:odd} for all positive integers $n\ge1$.
\end{document} |
\begin{document}
\title[increasing stability]{Increasing stability for the inverse source scattering problem with multi-frequencies}
\author{Peijun Li} \address{Department of Mathematics, Purdue University, West Lafayette, Indiana 47907, USA.} \email{lipeijun@math.purdue.edu}
\author{Ganghua Yuan} \address{KLAS, School of Mathematics and Statistics, Northeast Normal University, Changchun, Jilin, 130024, China} \email{yuangh925@nenu.edu.cn} \thanks{MSC: 35R30, 78A46.} \thanks{ The research of PL was supported in part by the NSF grant DMS-1151308. The research of GY was supported in part by NSFC grants 10801030, 11271065, 11571064, the Ying Dong Fok Education Foundation under grant 141001, and the Fundamental Research Funds for the Central Universities under grant 2412015BJ011.}
\keywords{stability, inverse source problem, Helmholtz equation, Partial differential equation}
\begin{abstract} Consider the scattering of the two- or three-dimensional Helmholtz equation where the source of the electric current density is assumed to be compactly supported in a ball. This paper concerns the stability analysis of the inverse source scattering problem which is to reconstruct the source function. Our results show that increasing stability can be obtained for the inverse problem by using only the Dirichlet boundary data with multi-frequencies. \end{abstract}
\maketitle
\section{Introduction and problem statement}
In this paper, we consider the following Helmholtz equation: \begin{equation}\label{sol} \Delta u(x)+ \kappa^2 u(x)=f(x), \quad x\in\mathbb{R}^{d}, \end{equation}
where $d=2$ or $3$, the wavenumber $\kappa>0$ is a constant, $u$ is the radiated wave field, and $f$ is the source of the electric current density which is assumed to have a compact support. Denote by $B_{\rho}=\{x\in\mathbb{R}^d: |x|<\rho\}$ the ball with radius $\rho>0$ and center at the origin. Let $R>0$ be a constant which is large enough such that $B_R$ contains the support of $f$. Let $\partial B_R$ be the boundary of $B_R$. The following Sommerfeld radiation condition is required to ensure the uniqueness of the wave field $u$: \begin{equation}\label{rc}
\lim_{r\to\infty}r^{\frac{d-1}{2}}(\partial_r u-{\rm i}\kappa u)=0,\quad r=|x|, \end{equation}
uniformly in all directions $\hat{x}=x/|x|$.
For a given function $u$ on $\partial B_R$ in two dimensions, it has the Fourier series expansion \[
u(R, \theta)=\sum_{n\in\mathbb{Z}}\hat{u}_n(R)e^{{\rm i}n\theta},\quad \hat{u}_n(R)=\frac{1}{2\pi}\int_0^{2\pi}u(R, \theta)e^{-{\rm i}n\theta}{\rm d}\theta. \] We may introduce the Dirichlet-to-Neumann (DtN) operator $\mathscr{B}: H^{1/2}(\partial B_R)\to H^{-1/2}(\partial B_R)$ given by \[
(\mathscr{B}u)(R, \theta)=\kappa\sum_{n\in\mathbb{Z}} \frac{H_n^{(1)'}(\kappa R)}{H_n^{(1)}(\kappa R)}\hat{u}_n(R) e^{{\rm i}n\theta}. \] For a given function $u$ on $\partial B_R$ in three dimensions, it has the Fourier series expansion: \[
u(R, \theta, \varphi)=\sum_{n=0}^\infty\sum_{m=-n}^n \hat{u}_n^m(R) Y_n^m(\theta, \varphi),\quad \hat{u}_n^m(R)=\int_{\partial B_R} u(R, \theta, \varphi)\bar{Y}_n^m(\theta, \varphi){\rm d}\gamma. \] We may similarly introduce the DtN operator ${\mathscr B}: H^{1/2}(\partial B_R)\to H^{-1/2}(\partial B_R)$ as follows: \[
(\mathscr{B}u)(R, \theta, \varphi)=\kappa\sum_{n=0}^\infty\sum_{m=-n}^n \frac{h_n^{(1)'}(\kappa R)}{h_n^{(1)}(\kappa R)}\hat{u}_n^m(R)Y_n^m(\theta, \varphi). \] Here $H_n^{(1)}$ is the Hankel function of the first kind of order $n$, $h_n^{(1)}$ is the spherical Hankel function of the first kind of order $n$, $Y_n^m$ is the spherical harmonics of order $n$, and the bar denotes the complex conjugate. Using the DtN operator, we can reformulate the Sommerfeld radiation condition into a transparent boundary condition \[ \partial_{\nu}u={\mathscr B}u\quad\text{on} ~ \partial B_R, \] where $\nu$ is the unit outer normal on $\partial B_R$. Hence one can also obtain the Neumann data on $\partial B_R$ once the Dirichlet data is available on $\partial B_R$. Now we are in a position to discuss our inverse source problem:
{\bf IP.} {\em Let $f$ be a complex function with a compact support contained in
$B_R$. The inverse problem is to determine $f$ by using the boundary observation data $u(x,\kappa)|_{\partial B_R}$ with an interval of frequencies $\kappa\in (0,K)$ where $K>1$ is a positive constant.}
The inverse source problem has significant applications in medical and biomedical imaging \cite{I-89}, and various tomography problems \cite{Ar, StUh}. In this paper, we study the stability of the above inverse problem. As is known, the inverse source problem does not have a unique solution at a single frequency \cite{DS-IEEE82, HKP-IP05}. Our goal is to establish increasing stability of the inverse problems with multi-frequencies. We refer to
\cite{BLT-JDE10, CIL} for increasing stability analysis of the inverse source scattering problem. In \cite{CIL}, the authors discussed increasing stability of the inverse source problem for the three-dimensional Helmholtz equation in a general domain $\Omega$ by using the Huygens principle. The observation data are both $u(x,\kappa)|_{\partial\Omega}, 0<\kappa<K$ and $\nabla u(x,\kappa)|_{\partial\Omega}, 0<\kappa<K$. In \cite{BLT-JDE10}, the authors studied the stability of the two- and three-dimensional Helmholtz equations via Green's functions. But the stabilities in \cite{BLT-JDE10} are different from the stability in this paper where only the Dirichlet data is required. Related results can be found in \cite{I-CM07, I-D11} on increasing stability of determining potentials and in the continuation for the Helmholtz equation. We refer to \cite{El, BaLiTr} for a uniqueness result and numerical study for the inverse source scattering problem. A survey can be found in \cite{BLLT-IP15} for some general inverse scattering problems with multi-frequencies.
\section{Main result}
Let $0<r<R$, define a complex-valued functional space: \[
\mathcal{C}_M =\{f\in H^{n+1}(B_R): \|f\|_{H^{n+1}(B_R)}\leq M, ~{\rm supp}f\subset B_r\subset B_R, ~f: B_R\to\mathbb{C}\}, \] where $M>1$ and $0<r<R$ are constants. For any $v\in H^{1/2}(\partial B_R)$, we set \[
\|v(x,\kappa)\|_{\partial B_R}=\int_{\partial B_R}\left( |\mathscr{B} v(x,
\kappa)|^2 +\kappa^2 |v(x, \kappa)|^2 \right){\rm d}\gamma. \]
Now we show the main stability result of the inverse problem.
\begin{theo}\label{mr1} Let $f_j\in \mathcal{C}_M, j=1, 2$, and let $u_j$ be the solution of the scattering problem \eqref{sol}--\eqref{rc} corresponding to $f_j$. Then there exists a positive constant $C$ independent of $n, K, M, \kappa$ such that \begin{align}
\label{cfe} \| f_1-f_2\|^2_{L^2(B_R)}\leq C \left(\epsilon^2+\frac{M^2}{\left(\frac{K^{\frac{2}
{3}}|\ln\epsilon|^{\frac{1}{4}}}{(R+1)(6n-6d+3)^3}\right)^{2n-2d+1}} \right), \end{align} where $K>1$, $n\ge d$ and \begin{align}
\label{e1} \epsilon&=\left(\int_0^K \kappa^{d-1} \|(u_1-u_2)(x,\kappa)\|_{\partial B_R}{\rm d}\kappa\right)^{\frac{1}{2}}. \end{align} \end{theo}
\begin{rema} There are two parts in the stability estimates \eqref{cfe}: the first part is the data discrepancy and the second part comes from the high frequency tail of the function. It is clear to see that the stability increases as $K$ increases, i.e., the problem is more stable as more frequencies data are used. We can also see that when
$n<\left[\frac{K^{\frac{2}{9}}|\ln\epsilon|^{\frac{1}{12}}}{(R+1)^{\frac{1}{3}} }+d-\frac{1}{2}\right]$, the stability increases as $n$ increases, i.e., the problem is more stable as the functions have suitably higher regularity. \end{rema}
Next we prove Theorem \ref{mr1} in the following section.
\section{Proof of Theorem \ref{mr1}}
First we present several useful lemmas.
\begin{lemm}
Let $f_j\in L^2(B_R)$ and ${\rm supp}f_j\subset B_R$, $j =1, 2$. Then
\begin{align*}
\|f_1-f_2\|^2_{L^2(B_R)}
&\le C\int_0^{\infty}\kappa^{d-1}\int_{\partial B_R}\left|\partial_{\nu}u(x,\kappa)+\kappa u(x,\kappa)\right|^2{\rm d}\gamma {\rm d}\kappa.
\end{align*} \end{lemm}
\begin{proof}
Let $\xi\in\mathbb{R}^d$ with $|\xi|=\kappa$. Multiplying $e^{-{\rm i}\xi x}$ on both sides of \eqref{sol} and integrating over $B_R$, we obtain \[
\int_{B_R}e^{-{\rm i}\xi x}f(x){\rm d}x=\int_{\partial B_R}e^{-{\rm i}\xi x}(\partial_{\nu}u(x,\kappa)+{\rm i}\xi\nu u(x,\kappa)){\rm d}\gamma, \quad
|\xi|=\kappa\in (0,\infty). \] Since $\mbox{supp}f\subset B_R$, we have \[
\int_{\mathbb{R}^d}e^{-{\rm i}\xi x}f(x){\rm d}x=\int_{\partial B_R}e^{-{\rm i}\xi x}(\partial_{\nu}u(x,\kappa)+{\rm i}\xi\nu u(x,\kappa)){\rm d}\gamma, \quad
|\xi|=\kappa\in (0,\infty), \] which gives \[
\left|\int_{\mathbb{R}^d}e^{-{\rm i}\xi x}f(x){\rm d}x\right|^2\le\left|\int_{\partial B_R}(\partial_{\nu}u(x,\kappa)+\kappa u(x,\kappa)){\rm d}\gamma\right|^2, \quad |\xi|=\kappa\in (0,\infty). \] Hence, \begin{align*}
\left(\int_{\mathbb{R}^d}\left|\int_{\mathbb{R}^d}e^{-{\rm i}\xi x}f(x){\rm d}x\right|^2 {\rm d}\xi\right)^{\frac{1}{2}}&\cr
\le&\left(\int_{\mathbb{R}^d}\left|\int_{\partial B_R}(\partial_{\nu}u(x,\kappa)+\kappa u(x,\kappa)){\rm d}\gamma\right|^2 {\rm d}\xi\right)^{\frac{1}{2}}, \quad
|\xi|=\kappa\in (0,\infty). \end{align*} When $d=2$, we obtain by using the polar coordinates that \begin{align*}
&\left(\int_{\mathbb{R}^2}\left|\int_{\mathbb{R}^2}e^{-{\rm i}\xi x}f(x){\rm d}x\right|^2{\rm d}\xi\right)^{\frac{1}{2}}\cr
&\le\left(\int_0^{2\pi}{\rm d}\theta\int_0^{\infty}\kappa\left|\int_{\partial B_R}(\partial_{\nu}u(x,\kappa)+\kappa u(x,\kappa)){\rm d}\gamma\right|^2{\rm d}\kappa\right)^{\frac{1}{2 }}\cr
&\le \left(2\pi\int_0^{\infty}\kappa\left|\int_{\partial B_R}(\partial_{\nu}u(x,\kappa)+\kappa u(x,\kappa)){\rm d}\gamma\right|^2{\rm d}\kappa\right)^{\frac{1}{2}}\cr
&\le\left(2\pi^2R^2\int_0^{\infty}\kappa\int_{\partial B_R}\left|\partial_{\nu}u(x,\kappa)+\kappa u(x,\kappa)\right|^2{\rm d}\gamma {\rm d}\kappa\right)^{\frac{1}{2}}, \end{align*} It follows from the Plancherel theorem that \begin{align*}
\|f_1-f_2\|^2_{L^2(B_R)}&=\|f_1-f_2\|^2_{L^2(\mathbb{R}^2)}\\
& =\frac{1}{(2\pi)^2}\int_{\mathbb{R}^2}|\hat{f}
_1(\xi)-\hat{f}_2(\xi)|^2{\rm d}\xi\\
&\le C\int_0^{\infty}\kappa\int_{\partial B_R}|\partial_{\nu}u(x,\kappa)+\kappa u(x,\kappa)|^2{\rm d}\gamma {\rm d}\kappa. \end{align*}
When $d=3$, we obtain by using the polar coordinates that \begin{align*}
&\left(\int_{\mathbb{R}^3}\left|\int_{\mathbb{R}^3}e^{-{\rm i}\xi x}f(x){\rm d}x\right|^2{\rm d}\xi\right)^{\frac{1}{2}}\cr
&\le\left|\int_0^{2\pi}{\rm d}\theta\int_0^{\pi}\sin\varphi
{\rm d}\varphi\int_0^{\infty}\kappa^2\left|\int_{\partial B_R}(\partial_{\nu}u(x,\kappa)+\kappa u(x,\kappa)){\rm d}\gamma\right|^2{\rm d}\kappa\right|^{\frac{1}{2}}\cr
&\le \left(2\pi^2\int_0^{\infty}\kappa^2\left|\int_{\partial B_R}(\partial_{\nu}u(x,\kappa)+\kappa u(x,\kappa)){\rm d}\gamma\right|^2{\rm d}\kappa\right)^{\frac{1}{2}}\cr
&\le\left(\frac{8}{3}\pi^3R^3\int_0^{\infty}\kappa^2\int_{\partial B_R}\left|\partial_{\nu}u(x,\kappa)+\kappa u(x,\kappa)\right|^2{\rm d}\gamma {\rm d}\kappa\right)^{\frac{1}{2}}. \end{align*} It follows from the Plancherel theorem that \begin{align*}
\|f_1-f_2\|^2_{L^2(B_R)}&=\|f_1-f_2\|^2_{L^2(\mathbb{R}^3)}\\
& =\frac{1}{(2\pi)^3}\int_{\mathbb{R}^3}|\hat{f}
_1(\xi)-\hat{f}_2(\xi)|^2{\rm d}\xi\\
&\le C\int_0^{\infty}\kappa^2\int_{\partial B_R}\left|\partial_{\nu}u(x,\kappa)+\kappa u(x,\kappa)\right|^2{\rm d}\gamma {\rm d}\kappa, \end{align*} which completes the proof. \end{proof}
For $d=2$, let \begin{align}\label{3.1}
I_1(s)=&\int_0^s \kappa^{3}\int_{\partial B_R}\left(\int_{B_R}-\frac{{\rm i}}{4}H_0^{(1)}(\kappa|x-y|)(f_1(y)-f_2(y)){\rm d}y\right)\cr
&\left(\int_{B_R}\frac{{\rm i}}{4}\bar{H}_0^{(1)}(\kappa|x-y|)(\bar{f}_1(y)-\bar{f}_2(y)){\rm d}y\right){\rm d}\gamma(x){\rm d}\kappa, \\
\label{3.2}I_2(s)=&\int_0^s \kappa\int_{\partial B_R}\left(-\int_{B_R}\frac{{\rm i}}{4}\partial_{\nu}H_0^{(1)}(\kappa|x-y|)(f_1(y)-f_2(y)){\rm d}y\right)\cr
&\left(\int_{B_R}\frac{{\rm i}}{4}\partial_{\nu}\bar{H}_0^{(1)}(\kappa|x-y|)(\bar{f} _1(y)-\bar{f}_2(y)){\rm d}y\right){\rm d}\gamma(x){\rm d}\kappa. \end{align} For $d=3$, let \begin{align}\label{3.3}
I_1(s)=&\int_0^s \kappa^{4}\int_{\partial B_R}\left(\int_{B_R}\frac{e^{{\rm i}\kappa|x-y|}}{4\pi |x-y|}(f_1(y)-f_2(y)){\rm d}y\right)\cr
&\left(\int_{B_R}\frac{e^{-{\rm i}\kappa|x-y|}}{4\pi
|x-y|}(\bar{f}_1(y)-\bar{f}_2(y)){\rm d}y\right){\rm d}\gamma(x){\rm d}\kappa,\\ \label{3.4}I_2(s)=&\int_0^s
\kappa^{3}\int_{\partial B_R}\left(\int_{B_R}\partial_{\nu}\frac{e^{{\rm i}\kappa|x-y|}}{4\pi |x-y|}(f_1(y)-f_2(y)){\rm d}y\right)\cr
&\left(\int_{B_R}\partial_{\nu}\frac{e^{-{\rm i}\kappa|x-y|}}{4\pi
|x-y|}(\bar{f}_1(y)-\bar{f}_2(y)){\rm d}y\right){\rm d}\gamma(x){\rm d}\kappa. \end{align} Denote $$ S=\{z=x+{\rm i}y\in\mathbb{C}: -\frac{\pi}{4}<{\rm arg} z<\frac{\pi}{4}\}. $$ The integrands in \eqref{3.1}--\eqref{3.4} are analytic functions of $\kappa$ in $S$. The integrals with respect to $\kappa$ can be taken over any path joining points $0$ and $s$ in $S$. Thus $I_1(s)$ and $I_2(s)$ are
analytic functions of $s=s_1+{\rm i}s_2\in S, s_1, s_2\in\mathbb{R}$.
\begin{lemm}
Let $f_j\in L^2(B_R), {\rm supp}f_j\subset B_R, j =1, 2$. We have for any $s=s_1+{\rm i}s_2\in S$ that \begin{enumerate}
\item for $d=2$,
\begin{align}
\label{3.5} |I_1(s)|&\leq 16\pi^3R^3|s|^{5}e^{4R|s_2|}\|f_1(x)-f_2(x)\|_{L^2(B_R)}^2,\\
\label{3.6} |I_2(s)|&\leq 16\pi^3R^3|s|^{3}e^{4R|s_2|}\|f_1(x)-f_2(x)\|_{H^1(B_R)}^2,
\end{align}
\item for $d=3$, \begin{align}
\label{3.7} |I_1(s)|&\leq 16\pi^3(|s|^{3}R^3+|s|^{4}R^4)e^{4R|s_2|}\|f_1(x)-f_2(x)\|_{L^2(B_R)}^2,\\
\label{3.8} |I_2(s)|&\leq 16\pi^3(|s|^{2}R^3+|s|^{3}R^4)e^{4R|s_2|}\|f_1(x)-f_2(x)\|_{H^1(B_R)}^2.
\end{align}
\end{enumerate} \end{lemm}
\begin{proof} We first prove (\ref{3.7}). Let $\kappa=st, t\in(0, 1)$. A simple calculation yields
\begin{align*}
I_1(s)=&\int_0^1 s^{5}t^{4}\int_{\partial B_R}\left(\int_{B_R}\frac{e^{{\rm i}st|x-y|}}{4\pi |x-y|}(f_1(y)-f_2(y)){\rm d}y\right)\cr
&\left(\int_{B_R}\frac{e^{-{\rm i}st|x-y|}}{4\pi
|x-y|}(\bar{f}_1(y)-\bar{f}_2(y)){\rm d}y\right){\rm d}\gamma(x){\rm d}t. \end{align*}
Noting that $|e^{{\rm i}st |x-y|}|\leq e^{2R|s_2|}$ for all $x\in \partial B_R, y\in B_R$, we have
\begin{align*}
|I_1(s)|&\le\int_0^1|s|^{5}t^{4}\int_{\partial B_R}\left|\int_{B_R}\frac{e^{2|s_2|R}}{|x-y|
}|f_1(y)-f_2(y)|{\rm d}y\right|^2{\rm d}\gamma(x){\rm d}t\cr
&\le\int_0^1|s|^{5}t^{4}\int_{\partial B_R}\left|\int_{B_R}
|f_1(y)-f_2(y)|^2 {\rm d}y\right|\int_{B_R}\frac{e^{4R|s_2|}}{|x-y|^2}{\rm d}y {\rm d}\gamma(x){\rm d}t,\cr \end{align*} where we have used the Schwarz inequality for the integral with respect to $y$
in the last inequality. Using the polar coordinates $\rho=|x-y|$ with respect to $y$ yields \begin{align*}
|I_1(s)|\le\int_0^1|s|^{5}\left(\int_{B_R}|f_1(y)-f_2(y)|^2{\rm d}y\right)\int_{
\partial B_R} \left(2\pi^2\int_0^{2R}e^{4|s_2|R}{\rm d}\rho \right){\rm d}\gamma(x){\rm d}t,\cr \end{align*} which implies (\ref{3.7}).
Next we prove (\ref{3.8}). Let $\kappa=st, t\in(0, 1)$. A simple calculation yields
\begin{align*}
I_2(s)=&\int_0^1 s^{3}t^{2}\int_{\partial B_R}\left(\int_{B_R}\partial_{\nu}\frac{e^{{\rm i}st|x-y|}}{4\pi |x-y|}(f_1(y)-f_2(y)){\rm d}y\right)\cr
&\left(\int_{B_R}\partial_{\nu}\frac{e^{-{\rm i}st|x-y|}}{4\pi
|x-y|}(\bar{f}_1(y)-\bar{f}_2(y)){\rm d}y\right){\rm d}\gamma(x){\rm d}t, \end{align*} which gives
\begin{align*}
|I_2(s)|\le\int_0^1|s|^{3}t^{2}\int_{\partial B_R}\left|\int_{B_R}\nabla_x\left(\frac{e^{{\rm i}st|x-y|}}{|x-y|}\right)\cdot\nu(f_1(y)-f_2(y)){\rm d}y\right|^2{\rm d}\gamma(x){\rm d}t. \end{align*}
Noting $\nabla_x\left(\frac{e^{{\rm i}st|x-y|}}{|x-y|}\right)=-\nabla_y\left(\frac{e^{{\rm i}st|x-y|}}{|x-y|}\right)$ and ${\rm supp}f_j\subset B_R, j=1,2$, we have \begin{align*}
|I_2(s)|\le\int_0^1|s|^{3}t^{2}\int_{\partial B_R}\left|\int_{B_R}\frac{e^{{\rm i}st|x-y|}}{|x-y|}\nabla_y\left(f_1(y)-f_2(y)\right)\cdot\nu
{\rm d}y\right|^2{\rm d}\gamma(x){\rm d}t. \end{align*} Following a similar argument for proving (\ref{3.7}), we can prove (\ref{3.8}).
Now we show the proofs of (\ref{3.5}) and (\ref{3.6}). First we prove (\ref{3.5}). By (\ref{3.1}) we have \begin{align*}
I_1(s)=\int_0^1 s^{4}t^{3}\int_{\partial B_R}\left|\int_{B_R}\frac{{\rm i}}{4}H_0^{(1)}(st|x-y|)(f_1(y)-f_2(y)){\rm d}y\right|^2{\rm d}\gamma(x){\rm d}t. \end{align*} The Hankel function can also be expressed by the following integral when ${\rm Re}z>0$ (see e.g.,\cite{Wa}, Chapter VI): \begin{align*} H_0^{(1)}(z)=\frac{1}{{\rm i}\pi}\int_{1+\infty{\rm i}}^{1}e^{{\rm i}z\tau}(\tau^2-1)^{-1/2}{\rm d}\tau. \end{align*} Consequently, \begin{align*}
|H_0^{(1)}(z)|&= \left| \frac{1}{\pi}\int^0_{+\infty}e^{{\rm i}({\rm Re }z+{\rm i}{\rm Im }z)(1+t{\rm i})}((1+t{\rm i})^2-1)^{-1/2}{\rm d}t\right|\cr
&\le \left|\frac{1}{\pi}e^{{\rm i}{\rm Re }z -{\rm Im
}z}\int^0_{+\infty}e^{-t{\rm Re }z-{\rm i}t{\rm Im }z}(2t{\rm i}-t^2)^{-1/2}{\rm d}t\right|\cr
&\le \frac{1}{\pi}e^{|{\rm Im }z|}\int_0^{+\infty}\frac{e^{-t{\rm Re
}z}}{\left|t^{1/2}(2{\rm i}-t)^{1/2}\right|}{\rm d}t\cr
&\le \frac{1}{\pi}e^{|{\rm Im }z|}\int_0^{+\infty}\frac{e^{-t{\rm Re }z}}{t^{1/2}(t^2+4)^{1/4}}{\rm d}t\cr
&\le \frac{1}{\pi}e^{|{\rm Im }z|}\int_0^{+\infty}\frac{e^{-t{\rm Re }z}}{t^{1/2}2^{1/2}}{\rm d}t\cr
&=\frac{1}{\pi}e^{|{\rm Im }z|}\left(\int_0^{1}\frac{e^{-t{\rm Re }z}}{t^{1/2}2^{1/2}}{\rm d}t+\int_1^{+\infty}\frac{e^{-t{\rm Re }z}}{t^{1/2}2^{1/2}}{\rm d}t\right)\cr
&\le\frac{1}{\pi}e^{|{\rm Im }z|}\left(\int_0^{1}\frac{1}{t^{1/2}}{\rm d}t+\int_1^{+\infty}e^{-t{\rm Re }z}{\rm d}t\right)\cr
&\le\frac{1}{\pi}e^{|{\rm Im }z|}\left(2+\frac{1}{{\rm Re }z}\right). \end{align*} Similarly, we can obtain \begin{align*}
|\overline{H}_0^{(1)}(z)|\le \frac{1}{\pi}e^{|{\rm Im }z|}\left(2+\frac{1}{{\rm Re }z}\right). \end{align*} Hence we have \begin{align*}
|I_1(s)|\le\int_0^1|s|^{4}t^{3}\int_{\partial B_R}\left|\int_{B_R}|f_1(y)-f_2(y)|^2{\rm d}y\right|\int_{B_R}
e^{4R|s_2|}\left(2+\frac{1}{|x-y|s_1t}\right){\rm d}y{\rm d}\gamma(x){\rm d}t. \end{align*}
Using the polar coordinates $\rho=|x-y|$ with respect to $y$ yields \begin{align*}
|I_1(s)|\le\int_0^1|s|^{4}t^{3}\left|\int_{B_R}|f_1(y)-f_2(y)|^2{
\rm d}y\right|\int_ { \partial B_R}
\left(2\pi^2\int_{0}^{2R}e^{4R|s_2|}\left(2\rho+\frac{1}{s_1t}\right){\rm d}\rho\right){\rm d}\gamma(x){\rm d}t, \end{align*} which completes the proof of (\ref{3.5}).
Noting that $\partial_{\nu}H_0^{(1)}(\kappa|x-y|)=\nabla_x H_0^{(1)}(\kappa|x-y|)\cdot\nu$ and $\nabla_x H_0^{(1)}(\kappa|x-y|)=-\nabla_y H_0^{(1)}(\kappa|x-y|)$, we can prove (\ref{3.6}) in a similar way. \end{proof}
\begin{lemm} Let $f_j\in H^n(B_R), n\ge d, {\rm supp}f_j\subset B_r\subset B_R, j =1, 2$. Then there exists a constant $C$ independent of $n$ such that for any $s\ge 1$ \begin{align}\label{3.9}
\int_s^{+\infty} \int_{\partial B_R}\kappa^{d-1} \bigl(|\partial_{\nu}u(x,\kappa)|^2
+\kappa^2|u(x, \kappa)|^2 \bigr){\rm d}\gamma{\rm d}\kappa&\leq C s^{-(2n-2d+1)}\| f_1-f_2\|^2_{H^{n+1}(B_R)}. \end{align} \end{lemm}
\begin{proof} It is easy to see that \begin{align*}
&\int_s^{+\infty}\int_{\partial B_R} \kappa^{d-1}\bigl(|\partial_{\nu}u(x,\kappa)|^2
+\kappa^2|u(x, \kappa)|^2 \bigr){\rm d}\gamma{\rm d}\kappa\cr
&= \int_s^{+\infty} \int_{\partial B_R} \kappa^{d+1}|u(x, \kappa)|^2 {\rm d}\gamma{\rm d}\kappa + \int_s^{+\infty} \int_{\partial B_R} \kappa^{d-1}
|\partial_{\nu}u(x,\kappa)|^2{\rm d}\gamma{\rm d}\kappa\cr &\triangleq L_1+L_2. \end{align*} Next, we will estimate $L_1$ and $L_2$. When $d=3$, we have \begin{align*}
L_1&=\int_s^{+\infty} \int_{\partial B_R} \kappa^{4}|u(x, \kappa)|^2 {\rm d}\gamma{\rm d}\kappa\cr &=\int_s^{+\infty} \int_{\partial B_R}
\kappa^{4}\left|\int_{\mathbb{R}^3}\frac{e^{{\rm i}\kappa|x-y|}}{4\pi|x-y|}(f_1-f_2)(y){\rm d}y\right|^2 {\rm d}\gamma{\rm d}\kappa. \end{align*}
Using the polar coordinates $\rho=|y-x|$ originated at $x$ with respect to $y$, we have \begin{align*}
L_1=\int_s^{+\infty} \int_{\partial B_R} \kappa^{4}\left|\int_0^{2\pi}{\rm d}\theta\int_0^{\pi}\sin\varphi{\rm d}\varphi\int_0^{+\infty}\frac{e^{{\rm i}\kappa\rho}}{4\pi}(f_1-f_2)\rho {\rm d}\rho\right|^2 {\rm d}\gamma{\rm d}\kappa. \end{align*} Using integration by parts and noting ${\rm supp}f_j\subset B_r\subset B_R$, we obtain \begin{align*}
L_1=\int_s^{+\infty} \int_{\partial B_R} \kappa^{4}\left|\int_0^{2\pi}{\rm d}\theta\int_0^{\pi}\sin\varphi{\rm d}\varphi\int_{R-r}^{2R}\frac{e^{{\rm i}\kappa\rho}}{4\pi({\rm i}\kappa)^n}\frac{\partial^n[(f_1-f_2)\rho]}{\partial\rho^n} {\rm d}\rho\right|^2 {\rm d}\gamma{\rm d}\kappa. \end{align*} Consequently, \begin{align*}
L_1\le &\int_s^{+\infty} \int_{\partial B_R} \kappa^{4}\left|\int_0^{2\pi}{\rm d}\theta\int_0^{\pi}\sin\varphi{\rm d}\varphi\int_{R-r}^{2R}\frac{1}{4\pi \kappa^n}\right.\cr
&\left(\left|\sum\limits_{|\alpha|=n}\partial_y^{\alpha}
(f_1-f_2)\right|\rho\right.\left.\left.+n\left|\sum\limits_{|\alpha|=n-1}\partial_y^{
\alpha}(f_1-f_2)\right|\right){\rm d}\rho\right|^2 {\rm d}\gamma{\rm d}\kappa\cr
=&\int_s^{+\infty} \int_{\partial B_R} \kappa^{4}\left|\int_0^{2\pi}{\rm d}\theta\int_0^{\pi}\sin\varphi{\rm d}\varphi\int_{R-r}^{2R}\frac{1}{4\pi \kappa^n}\right.\cr
&\left(\left|\sum\limits_{|\alpha|=n}\partial_y^{\alpha}(f_1-f_2)\right|\frac{1}{\rho}
\right.\left.\left.+\left|\sum\limits_{|\alpha|=n-1}\partial_y^{\alpha}
(f_1-f_2)\right|\frac{n}{\rho^2}\right)\rho^2{\rm d}\rho\right|^2 {\rm d}\gamma{\rm d}\kappa\cr
\le& \int_s^{+\infty} \int_{\partial B_R} \kappa^{4}\left|\int_0^{2\pi}{\rm d}\theta\int_0^{\pi}\sin\varphi{\rm d}\varphi\int_{R-r}^{2R}\frac{1}{4\pi \kappa^n}\right.\cr
&\left(\left|\sum\limits_{|\alpha|=n}\partial_y^{\alpha}(f_1-f_2)\right|\frac{1}{R-r}
\right.\left.\left.+\left|\sum\limits_{|\alpha|=n-1}\partial_y^{\alpha}
(f_1-f_2)\right|\frac{n}{(R-r)^2}\right)\rho^2{\rm d}\rho\right|^2 {\rm d}\gamma{\rm d}\kappa\cr
=&\int_s^{+\infty} \int_{\partial B_R} \kappa^{4}\left|\int_0^{2\pi}{\rm d}\theta\int_0^{\pi}\sin\varphi{\rm d}\varphi\int_{0}^{+\infty}\frac{1}{4\pi \kappa^n}\right.\cr
&\left(\left|\sum\limits_{|\alpha|=n}\partial_y^{\alpha}(f_1-f_2)\right|\frac{1}{R-r}
\right.\left.\left.+\left|\sum\limits_{|\alpha|=n-1}\partial_y^{\alpha}
(f_1-f_2)\right|\frac{n}{(R-r)^2}\right)\rho^2{\rm d}\rho\right|^2 {\rm d}\gamma{\rm d}\kappa. \end{align*} Changing back to the Cartesian coordinates with respect to $y$, we have \begin{align} L_1 \le& \int_s^{+\infty} \int_{\partial B_R}
\kappa^{4}\left|\int_{\mathbb{R}^3}\frac{1}{4\pi \kappa^n}\right.\cr
&\left(\left|\sum\limits_{|\alpha|=n}\partial_y^{\alpha}(f_1-f_2)\right|\frac{1}{R-r}
\right.\left.\left.+\left|\sum\limits_{|\alpha|=n-1}\partial_y^{\alpha}
(f_1-f_2)\right|\frac{n}{(R-r)^2}\right)dy\right|^2 {\rm d}\gamma{\rm d}\kappa\cr
\le& C n\|f_1-f_2\|_{H^n(B_R)}^2\int_s^{+\infty}\kappa^{4-2n}{\rm d}\kappa\cr
=&C\frac{n}{2n-5}\|f_1-f_2\|_{H^n(B_R)}^2\frac{1}{s^{2n-5}}\cr
\label{3.10}\le &3C\|f_1-f_2\|_{H^n(B_R)}^2\frac{1}{s^{2n-5}},\quad n\ge 3. \end{align} Next we estimate $L_2$ for $d=3$, \begin{align*}
L_2=&\int_s^{+\infty} \int_{\partial B_R} \kappa^{2} |\partial_{\nu}u(x,\kappa)|^2{\rm d}\gamma{\rm d}\kappa\cr =&\int_s^{+\infty} \int_{\partial B_R}
\kappa^{2}\left|\int_{\mathbb{R}^3}\left(\nabla_y\frac{e^{{\rm i}\kappa|x-y|}}{4\pi|x-y|}\cdot\nu\right)(f_1-f_2)(y){\rm d}y\right|^2 {\rm d}\gamma{\rm d}\kappa. \end{align*}
Noting that $\nabla_y\frac{e^{{\rm i}\kappa|x-y|}}{4\pi|x-y|}=-\nabla_x\frac{e^{{\rm i}\kappa|x-y|}}{4\pi|x-y|}$ and ${\rm supp}f_j\subset B_R$, we have \begin{align*}
L_2=&\int_s^{+\infty} \int_{\partial B_R} \kappa^{2} |\partial_{\nu}u(x,\kappa)|^2{\rm d}\gamma{\rm d}\kappa\cr =&\int_s^{+\infty} \int_{\partial B_R}
\kappa^{2}\left|\int_{\mathbb{R}^3}\left(\nabla_y\frac{e^{{\rm i}\kappa|x-y|}}{4\pi|x-y|}\cdot\nu\right)(f_1-f_2)(y){\rm d}y\right|^2 {\rm d}\gamma{\rm d}\kappa\cr =&\int_s^{+\infty} \int_{\partial B_R}
\kappa^{2}\left|\int_{\mathbb{R}^3}\frac{e^{{\rm i}\kappa|x-y|}}{4\pi|x-y|}\left(\nabla_y(f_1-f_2)(y)\cdot\nu\right){\rm d}y\right|^2 {\rm d}\gamma{\rm d}\kappa. \end{align*} Following a similar argument as that for the proof of (\ref{3.10}), we can obtain \begin{align}\label{3.11}
L_2\le C n\|f_1-f_2\|_{H^{n+1}(B_R)}^2\int_s^{+\infty}\kappa^{2-2n}{\rm d}\kappa=C\frac{n}{2n-3} \|f_1-f_2\|^2_{ H^{n+1}(B_R)}\frac{1}{s^{2n-3}},\quad n\ge 2. \end{align} Combining (\ref{3.10})--(\ref{3.11}) and noting $s>1$, we obtain (\ref{3.9}) for $d=3$.
When $d=2$, we have \begin{align*}
L_1=&\int_s^{+\infty} \int_{\partial B_R} \kappa^{3}|u(x, \kappa)|^2 {\rm d}\gamma{\rm d}\kappa\cr
=&\int_s^{+\infty} \int_{\partial B_R} \kappa^{3}\left|\int_{\mathbb{R}^2}\frac{{\rm i}}{4}H_0^{(1)}(\kappa|x-y|)(f_1-f_2)(y){\rm d}y\right|^2 {\rm d}\gamma{\rm d}\kappa. \end{align*} The Hankel function can also be expressed by the following integral when $t>0$ (e.g., \cite{Wa}, Chapter VI): \begin{align*} H_0^{(1)}(t)=\frac{2}{{\rm i}\pi}\int_1^{+\infty}e^{{\rm i}ts}(s^2-1)^{-1/2}{\rm d}s. \end{align*}
Using the polar coordinates $\rho=|y-x|$ originated at $x$ with respect to $y$, we have \begin{align*}
L_1=\int_s^{+\infty} \int_{\partial B_R} \kappa^{3}\left|\int_0^{2\pi}{\rm d}\theta\int_0^{+\infty}\frac{1}{4}H_0^{(1)}(\kappa\rho)(f_1-f_2)\rho{\rm d}\rho\right|^2 {\rm d}\gamma{\rm d}\kappa. \end{align*} Let \begin{align}\label{3.12} H_n(t)=\frac{2}{{\rm i}\pi}\int_1^{+\infty}\frac{e^{{\rm i}ts}}{({\rm i}s)^n(s^2-1)^{1/2}}{\rm d}s, \quad n=1,2,\cdots. \end{align} It is clear that \[
H_0(t)=H_0^{(1)}(t)\quad\text{and}\quad \frac{{\rm d} H_n(t)}{{\rm d}t}=H_{n-1}(t), ~t>0, ~n\in\mathbb{N}. \] Using integration by parts and noting ${\rm supp}f_j\subset B_r\subset B_R$, we obtain \begin{align*} L_1
=&\int_s^{+\infty} \int_{\partial B_R} \kappa^{3}\left|\int_0^{2\pi}{\rm d}\theta\int_{R-r}^{2R}\frac{H_1(\kappa\rho)}{4\kappa^2}\frac{\partial
(f_1-f_2)\rho}{\partial\rho}{\rm d}\rho\right|^2 {\rm d}\gamma{\rm d}\kappa\cr
=&\int_s^{+\infty} \int_{\partial B_R} \kappa^{3}\left|\int_0^{2\pi}{\rm d}\theta\int_{R-r}^{2R}\frac{H_n(\kappa\rho) }{4\kappa^{n+1}}\frac{\partial^n
(f_1-f_2)\rho}{\partial \rho^n}{\rm d}\rho\right|^2 {\rm d}\gamma{\rm d}\kappa. \end{align*} Consequently, we have \begin{align*}
L_1\le&\int_s^{+\infty} \int_{\partial B_R} \kappa^{3}\left|\int_0^{2\pi}{\rm d}\theta\int_{R-r}^{2R}\left|\frac{H_n(\kappa\rho)
}{4\kappa^{n+1}}\right|\left|\frac{\partial^n (f_1-f_2)\rho}{\partial \rho^n}\right|{\rm d}\rho\right|^2 {\rm d}\gamma{\rm d}\kappa\cr
\le&\int_s^{+\infty} \int_{\partial B_R} \kappa^{3}\left|\int_0^{2\pi}{\rm d}\theta\int_{R-r}^{2R}\left|\frac{H_n(\kappa\rho) }{4\kappa^{n+1}}\right| \right.\cr
&\left(\left|\sum\limits_{|\alpha|=n}\partial_y^{\alpha}
(f_1-f_2)\right|\right.\left.\left.+\left|\sum\limits_{|\alpha|=n-1}\partial_y^{\alpha
}(f_1-f_2)\right|\frac{n}{\rho}\right)\rho{\rm d}\rho\right|^2 {\rm d}\gamma{\rm d}\kappa\cr
\le&\int_s^{+\infty} \int_{\partial B_R} \kappa^{3}\left|\int_0^{2\pi}{\rm d}\theta\int_{R-r}^{2R}\left|\frac{H_n(\kappa\rho) }{4\kappa^{n+1}}\right| \right.\cr
&\left(\left|\sum\limits_{|\alpha|=n}\partial_y^{\alpha}
(f_1-f_2)\right|\right.\left.\left.+\left|\sum\limits_{|\alpha|=n-1}\partial_y^{\alpha
}(f_1-f_2)\right|\frac{n}{R-r}\right)\rho{\rm d}\rho\right|^2 {\rm d}\gamma{\rm d}\kappa. \end{align*} Noting (\ref{3.12}), we see that there exists a constant $C>0$ such that
$|H_n(\kappa\rho)|\le C$ for $n\ge 1$. Hence, \begin{align*}
L_1\le&\int_s^{+\infty} \int_{\partial B_R} \kappa^{3}\left|\int_0^{2\pi}{\rm d}\theta\int_{R-r}^{2R}\frac{C }{4\kappa^{n+1}} \right.\cr
&\left(\left|\sum\limits_{|\alpha|=n}\partial_y^{\alpha}(f_1-f_2)\right|\right.\left.\left.
+\left|\sum\limits_{|\alpha|=n-1}\partial_y^{\alpha}(f_1-f_2)\right|\frac{n}{R-r}
\right)\rho{\rm d}\rho\right|^2 {\rm d}\gamma{\rm d}\kappa. \end{align*} Changing back to the Cartesian coordinates with respect to $y$, we have \begin{align} L_1\le&\int_s^{+\infty} \int_{\partial B_R}
\kappa^{3}\left|\int_{B_R}\frac{C}{4\kappa^{n+1}} \right.\cr
&\left(\left|\sum\limits_{|\alpha|=n}\partial_y^{\alpha}(f_1-f_2)\right|\right.\left.\left.
+\left|\sum\limits_{|\alpha|=n-1}\partial_y^{\alpha}(f_1-f_2)\right|\frac{n}{R-r}
\right){\rm d}x\right|^2 {\rm d}\gamma{\rm d}\kappa\cr
\label{3.13}\le& C n\|f_1-f_2\|_{H^n(B_R)}^2\int_s^{+\infty}\kappa^{1-2n}{\rm d}\kappa=C\frac{n}{2n-2} \|f_1-f_2\|^2_{H^n(B_R)}\frac{1}{s^{2n-2}}. \end{align} Next we estimate $L_2$ for $d=2$. A simple calculation yields \begin{align*}
L_2=&\int_s^{+\infty} \int_{\partial B_R} \kappa^{2} |\partial_{\nu}u(x,\kappa)|^2{\rm d}\gamma{\rm d}\kappa\cr =&\int_s^{+\infty} \int_{\partial B_R}
\kappa^{2}\left|\int_{\mathbb{R}^2}\left(\frac{\rm i}{4}\nabla_y H_0^{(1)}(\kappa|x-y|)\cdot\nu\right)(f_1-f_2)(y){\rm d}y\right|^2 {\rm d}\gamma{\rm d}\kappa. \end{align*}
Noting that $\nabla_y H_0^{(1)}(\kappa|x-y|)=-\nabla_x H_0^{(1)}(\kappa|x-y|)$ and ${\rm supp}f_j\subset B_r\subset B_R$, we have \begin{align*}
L_2=&\int_s^{+\infty} \int_{\partial B_R} \kappa^{2} |\partial_{\nu}u(x,\kappa)|^2{\rm d}\gamma{\rm d}\kappa\cr =&\int_s^{+\infty} \int_{\partial B_R}
\kappa^{2}\left|\int_{\mathbb{R}^2}\left(\frac{\rm i}{4}\nabla_y H_0^{(1)}(\kappa|x-y|)\cdot\nu\right)(f_1-f_2)(y){\rm d}y\right|^2 {\rm d}\gamma{\rm d}\kappa\cr =&\int_s^{+\infty} \int_{\partial B_R}
\kappa^{2}\left|\int_{\mathbb{R}^2}\frac{\rm i}{4}H_0^{(1)}(\kappa|x-y|)\left(\nabla_y(f_1-f_2)(y)\cdot\nu\right){\rm d}y\right|^2 {\rm d}\gamma{\rm d}\kappa. \end{align*} Following a similar argument as the proof of (\ref{3.13}), we can obtain \begin{align} L_2
\le& C n\|f_1-f_2\|_{H^{n+1}(B_R)}^2\int_s^{+\infty}\kappa^{-2n}{\rm d}\kappa\cr
\label{3.14}=&C\frac{n}{2n-1}\|f_1-f_2\|_{H^{n+1}(B_R)}^2\frac{1}{s^{2n-1}}. \end{align} Combining (\ref{3.13}) and (\ref{3.14}) completes the proof of (\ref{3.9}) for $d=2$.
\end{proof}
The following lemma is proved in \cite{CIL}.
\begin{lemm} Let $J(z)$ be analytic in $S=\{z=x+{\rm i}y\in\mathbb{C}: -\frac{\pi}{4}<{\rm arg} z<\frac{\pi}{4}\}$ and continuous in $\bar{S}$ satisfying \[
\begin{cases}
|J(z)|\leq\epsilon, & z\in (0, ~ L],\\
|J(z)|\leq V, & z\in S,\\
|J(0)|=0.
\end{cases} \] Then there exists a function $\mu(z)$ satisfying \[
\begin{cases}
\mu(z)\geq\frac{1}{2}, & z\in(L, ~ 2^{\frac{1}{4}}L),\\
\mu(z)\geq \frac{1}{\pi}((\frac{z}{L})^4-1)^{-\frac{1}{2}}, & z\in (2^{\frac{1}{4}}L, ~ \infty)
\end{cases} \] such that \[
|J(z)|\leq V\epsilon^{\mu(z)}, \quad\forall\, z\in (L, ~ \infty). \] \end{lemm}
\begin{lemm}
Let $f_j\in\mathcal{C}_M, j=1,2$. Then there exists a function $\mu(z)$ satisfying \begin{equation}\label{mu}
\begin{cases}
\mu(s)\geq\frac{1}{2}, \quad & s\in(K, ~ 2^{\frac{1}{4}}K),\\
\mu(s)\geq \frac{1}{\pi}((\frac{s}{K})^4-1)^{-\frac{1}{2}}, \quad & s\in (2^{\frac{1}{4}}K, ~\infty),
\end{cases} \end{equation} such that \[
|I_1(s)+I_2(s)|\leq CM^2 e^{(4R+1)s}\epsilon^{2\mu(s)},\quad\forall s\in (K, ~\infty), \] for $d=2,3$. \end{lemm}
\begin{proof}
It follows from Lemma 3.2 that \[
|[I_1(s)+I_2(s)]e^{-(4R+1)s}|\leq CM^2,\quad\forall s\in S. \] Recalling \eqref{e1}, \eqref{3.1}-\eqref{3.4}, we have \[
|[I_1(s)+I_2(s)]e^{-(4R+1)s}|\leq\epsilon^2,\quad s\in [0, ~K]. \] A direct application of Lemma 3.5 shows that there exists a function $\mu(s)$ satisfying \eqref{mu} such that \[
|[I_1(s)+I_2(s)]e^{-(4R+1)s}|\leq CM^2\epsilon^{2\mu},\quad\forall s\in (K, ~\infty), \] which completes the proof. \end{proof}
Now we show the proof of Theorem 2.1.
\begin{proof}
We can assume that $\epsilon<e^{-1}$, otherwise the estimate is obvious. Let \[ s=\begin{cases}
\frac{1}{((4R+3)\pi)^{\frac{1}{3}}}K^{\frac{2}{3}}|\ln\epsilon|^{\frac{1}{4}}, & 2^{\frac{1}{4}}
((4R+3)\pi)^{\frac{1}{3}}K^{\frac{1}{3}}<|\ln\epsilon|^{\frac{1}{4}},\\
K, &|\ln\epsilon|\leq 2^{\frac{1}{4}}((4R+3)\pi)^{\frac{1}{3}}K^{\frac{1}{3}}.
\end{cases} \] If
$2^{\frac{1}{4}}((4R+3)\pi)^{\frac{1}{3}}K^{\frac{1}{3}}<|\ln\epsilon|^{\frac{1}{4}}$, then we have \begin{align*}
|I_1(s)+I_2(s)|&\leq CM^2 e^{(4R+3)s}
e^{-\frac{2|\ln\epsilon|}{\pi}((\frac{s}{K})^4-1)^{-\frac{1}{2}}}\cr
&\leq CM^2 e^{\frac{(4R+3)}{((4R+3)\pi)^{\frac{1}{3}}}K^{\frac{2}{3}}|\ln\epsilon|^{\frac{
1}{4}}-\frac{2|\ln\epsilon|}{\pi} (\frac{K}{s})^2}\\ &=CM^2 e^{-2\left(\frac{(4R+3)^2}{\pi}\right)^{\frac{1}{3}}K^{\frac{2}{3}}
|\ln\epsilon|^{\frac{1}{2}}\left(1-\frac{1}{2}
|\ln\epsilon|^{-\frac{1}{4}}\right)}. \end{align*}
Noting that $\frac{1}{2} |\ln\epsilon|^{-\frac{1}{4}}<\frac{1}{2}$, $\left(\frac{(4R+3)^2}{\pi}\right)^{\frac{1}{3}}>1$ we have \begin{align*}
|I_1(s)+I_2(s)|&
\leq CM^2 e^{-K^{\frac{2}{3}}|\ln\epsilon|^{\frac{1}{2}}}. \end{align*} Using the elementary inequality \[
e^{-x}\leq \frac{(6n-6d+3)!}{x^{3(2n-2d+1)}}, \quad x>0, \] we get \begin{align}\label{3.16}
|I_1(s)+I_2(s)|\leq\frac{CM^2}{\left(\frac{K^2|\ln\epsilon|^{\frac{3}{2}}}{ (6n-6d+3)^3}\right)^{2n-2d+1}}. \end{align}
If $|\ln\epsilon|\leq 2^{\frac{1}{4}}((4R+3)\pi)^{\frac{1}{3}}K^{\frac{1}{3}}$, then $s=K$. We have from \eqref{e1}, \eqref{3.1}-\eqref{3.4} that \[
|I_1(s)+I_2(s)|\leq \epsilon^2. \]
Here we have noted that for $s>0$, $I_1(s)+I_2(s)=\int_0^s \int_{\partial B_R}\kappa^{d-1} \bigl(|\partial_{\nu}u(x,\kappa)|^2
+\kappa^2|u(x, \kappa)|^2 \bigr){\rm d}\gamma{\rm d}\kappa$. Hence we obtain from Lemma 3.3 and \eqref{3.16} that \begin{align*}
&\int_0^\infty \int_{\partial B_R}\kappa^{d-1} \bigl(|\partial_{\nu}u(x,\kappa)|^2
+\kappa^2|u(x, \kappa)|^2 \bigr){\rm d}\gamma{\rm d}\kappa\\
&\leq I_1(s)+I_2(s)+\int_s^\infty \int_{\partial B_R}\kappa^{d-1} \bigl(|\partial_{\nu}u(x,\kappa)|^2
+\kappa^2|u(x, \kappa)|^2 \bigr){\rm d}\gamma{\rm d}\kappa\\ &\leq
\epsilon^2+\frac{CM^2}{\left(\frac{K^2|\ln\epsilon|^{\frac{3}{2}}}{(6n-6d+3)^3} \right)^{2n-2d+1}}+\frac{C
\|f_1-f_2\|^2_{H^{n+1}(B_R)}}{\left(2^{-\frac{1}{4}}((4R+3)\pi)^{-\frac{1}{3}}K^{
\frac{2}{3}} |\ln\epsilon|^{\frac{1}{4}}\right)^{2n-2d+1}}. \end{align*} By Lemma 3.1, we have \[
\|f_1-f_2\|^2_{L^2(B_R)}\leq C \left(\epsilon^2
+\frac{M^2}{\left(\frac{K^2|\ln\epsilon|^{\frac{3}{2}}}{(6n-6d+3)^3}\right)^{ 2n-2d+1}}+\frac{M^2}{\left(\frac{K^{\frac{2}
{3}}|\ln\epsilon|^{\frac{1}{4}}}{(R+1)(6n-6d+3)^3}\right)^{2n-2d+1}}\right). \]
Since $K^{\frac{2}{3}}|\ln\epsilon|^{\frac{1}{4}}\leq K^2
|\ln\epsilon|^{\frac{3}{2}}$ when $K>1$ and $|\ln\epsilon|>1$, we obtain the stability estimate. \end{proof}
\end{document} |
\begin{document}
\title{Entanglement-enhanced quantum metrology in a noisy environment} \author{Kunkun Wang} \author{Xiaoping Wang} \author{Xiang Zhan} \author{Zhihao Bian} \affiliation{Department of Physics, Southeast University, Nanjing 211189, China} \affiliation{Beijing Computational Science Research Center, Beijing 100084, China} \author{Jian Li} \affiliation{Department of Physics, Southeast University, Nanjing 211189, China} \author{Barry C. Sanders} \affiliation{Synergetic Innovation Center in Quantum Information and Quantum Physics, University of Science and Technology of China, CAS, Hefei 230026, China} \affiliation{Hefei National Laboratory for Physical Sciences at Microscale, University of Science and Technology of China, CAS, Hefei 230026, China} \affiliation{Institute for Quantum Science and Technology, University of Calgary, Alberta T2N 1N4, Canada} \affiliation{Program in Quantum Information Science, Canadian Institute for Advanced Research, Toronto, Ontario M5G 1M1, Canada} \author{Peng Xue} \email{gnep.eux@gmail.com} \affiliation{Department of Physics, Southeast University, Nanjing 211189, China} \affiliation{Beijing Computational Science Research Center, Beijing 100084, China} \affiliation{State Key Laboratory of Precision Spectroscopy, East China Normal University, Shanghai 200062, China}
\begin{abstract} Quantum metrology overcomes standard precision limits and plays a central role in science and technology. Practically it is vulnerable to imperfections such as decoherence. Here, we demonstrate quantum metrology for noisy channels such that entanglement with ancillary qubits enhances the quantum Fisher information for phase estimation but not otherwise. Our photonic experiment covers a range of noise for various types of channels, including for two randomly alternating channels such that assisted entanglement fails for each noisy channel individually. We have simulated noisy channels by implementing space-multiplexed dual interferometers with quantum photonic inputs. We have demonstrated the advantage of entanglement-assisted protocols in phase estimation experiment run with either single-probe or multi-probe approach. These results establish that entanglement with ancill\ae\ is a valuable approach for delivering quantum-enhanced metrology. Our new approach to entanglement-assisted quantum metrology via a simple linear-optical interferometric network with easy-to-prepare photonic inputs provides a path towards practical quantum metrology. \end{abstract}
\maketitle
\noindent
\textit{Introduction:-}Quantum metrology~\cite{VSL04,WGA07,VSL11,RJM12,SMA14,PL17,DAB+17} exploits nonclassicality to surpass classical limits to interferometric parameter estimation~\cite{SS08,BEJ13,MRC16}. Quantum metrological enhancement is achieved by employing quantum probes for detecting physical properties with resolution beyond the reach of classical approaches~\cite{LCF16,LMM17,EIA14,WMF14}. Without noise, entangling the measurement system with ancillary quantum degrees of freedom provides no advantage to scaling of measurement precision with number of particles~\cite{AJJ00,VSL06}. Contrariwise, in the presence of noise, which deleteriously affects measurement precision, entangling with ancill\ae\ is suggested to deliver higher precision than not using entanglement with ancill\ae~\cite{RL14,HMM16,HC17,YF17}.
We demonstrate experimentally that entangling probes with ancill\ae\ significantly enhances the performance of noisy quantum metrology as quantified by the quantum Fisher information (QFI) for parameter estimation (Fig.~\ref{circuit}). Through entanglement with ancill\ae, the probe state is less sensitive to noise. Information from probes is limited by the Holevo bound~\cite{H82} whereas enlarging the Hilbert space by entangling with ancill\ae\ allows more information to be accessed by measurements that exploit the larger dimension of Hilbert space. The QFI is obtained by tracing over the auxiliary space, which maximizes over all mixed states. That might make the QFI larger than that without ancill\ae~\cite{suppl}. The enlargement enhances the precision only for certain noisy channels, for which the input states entangled between the space of probes and ancill\ae\ are optimal~\cite{RJM12,JR13,AH08,F17}.
Based on these theoretical proposals, we experimentally investigate whether entangled ancill\ae\ can deliver enhanced metrological precision in the presence of noise~\cite{RJM13,BRL11} realized as simulated decohering quantum channels~\cite{KRR12,YJO12,Barry17}, and herein establish that indeed entangling with ancill\ae\ is advantageous for efficiently inferring the unknown parameter measuring for a wide range of noise values. We develop space-multiplexed noisy channels via a dual interferometric network~\cite{KRR12} and inject hyperentangled photonic states entangled in their polarizations and spatial modes~\cite{JTP10,ELL10}.
\begin{figure}
\caption{Concept of the comparison between the parallel scheme of quantum metrology with and without assisted entanglement. (a) Parallel scheme. Probes go through maps $\Lambda_\phi$ in parallel. (b) Parallel scheme with assisted entanglement. Introducing noiseless ancill\ae\ sharing entanglement with probes, and implementing joint measurements after the evolution give estimation with an enhanced precision.}
\label{circuit}
\end{figure}
\begin{figure*}
\caption{Experimental scheme. (a) Setup for entanglement-assisted single-probe approach. Heralded single photons are used to prepare polarization-spatial hyperentangled states for entanglement-assisted quantum metrology approach. Space-multiplexed noisy channels are realized by the dual interferometric network setup, in which spatial coherence is reduced, and the optical path delay enables the arrival time of the photons passing through different optical paths on the BD (for the amplitude-damping channel) or NBS (for the depolarizing channel) to be different. Random phases are added between photons in different optical paths before recombining them on the BD or NBS. Quantum process tomography is performed via wave plates (WPs), BD and PBS, and enables reconstruction of the process matrices for the channels. (b) Setup for entanglement-assisted two-probe approach. Polarization-entangled photon pairs are used to prepare the four-qubit hyperentangled state. Projective measurements are realized via BDs, WPs, NBSs and a PBS. Coincidences between paired photons are detected by APDs.}
\label{setup}
\end{figure*}
\textit{Theory:-}First, we use a single-probe scheme as an example. Entanglement-assisted parameter estimation comprises three stages: \emph{preparation} in which a probe (a photonic qubit in our case) shares entanglement with an ancilla; \emph{parametrization} where the probe evolves in a channel and the parameter to be estimated is encoded in the probe whereas the ancilla does not participate; and \emph{measurement} in which a joint measurement is performed on both the probe and ancilla to yield a precise estimate of the parameter. We focus on a two-level probe detecting a phase shift modelled by the unitary map \begin{equation}
\mathcal{U}_\phi(\rho)
=U_\phi\rho U_\phi^\dagger,\;
U_\phi=\ket{0}\bra{0}+\text{e}^{\text{i}\phi}\ket{1}\bra{1} \end{equation} for $\rho$ the initial state. The noise map $\mathcal{E}$ acts after $\mathcal{U}_\phi$: $\phi$ is encoded into the probe state $\rho_\phi=\Lambda_\phi\rho$ for $\Lambda_\phi=\mathcal{E}\circ\mathcal{U}_\phi$.
We use QFI~\cite{JMD16} \begin{equation}
J(\rho(\phi))=\text{Tr}\left(\rho(\phi) A^2\right),\;
\frac{\partial\rho(\phi)}{\partial\phi}=\frac{A\rho(\phi)+\rho(\phi) A}{2}, \label{eq:QFI2} \end{equation} to quantify the metrological precision, with~$A$ the symmetric logarithmic-derivative operator. QFI is an appropriate measure as it serves as an asymptotic measure of the amount of information inherent in how much the system parameters can be acquired by measurement. The quantum Cram\'{e}r-Rao bound~\cite{SC94} is a lower bound for the precision~$\Delta \phi$ of the estimate of~$\phi$: $\Delta \phi\geq1/\sqrt{\nu J(\rho(\phi))}$ for~$\nu$ the number of repetitions of the phase-estimate procedure. The best bound is found by maximizing the QFI, which depends on both~$\rho$ and~$\phi$.
For a single-probe instance, noise diminishes the measurement precision evident through reducing the output-state QFI after passing through~$\mathcal{E}$. Entangling with an ancilla enhances precision for noisy channels and the state transformation is $(\Lambda_\phi\otimes\mathds{1}) \widetilde{\rho}$ with the ancilla unchanged. Here, $\widetilde{\rho}$ denotes the probe+ancilla state whereas~$\rho$ denotes the single-probe state.
We consider three decoherence processes encountered in quantum-enhanced metrology: amplitude-damping (spontaneous emission and photon scattering inside the interferometer), general-Pauli (most general lossless channel) and depolarizing (most symmetric Pauli channel assuming uncorrelated noise) channels~\cite{JR13}, which are typically utilized when accounting for decoherence in optical interferometry~\cite{Barry17}.
We start with the amplitude-damping channel~\cite{HMM16} \begin{equation}
\sum_{\imath=0}^1
A_\imath\rho A^\dagger_\imath,\
A_0=\begin{pmatrix} 1&0\\0&\sqrt{1-\eta}\end{pmatrix},\
A_1=\begin{pmatrix} 0&\sqrt{\eta}\\0&0\end{pmatrix} \label{eq:ADC} \end{equation} for $\eta$ the probability of decay $\ket{1}\mapsto\ket{0}$. For a single-probe input state, the optimized QFI is $1-\eta$ and the optimal state is $\ket+:=(\ket{0}+\ket{1})/\sqrt{2}$. For the entanglement-assisted approach, the QFI is $2(1-\eta)/(2-\eta)$ for an entangled state of the probe and ancilla $\ket{\Phi}:=(\ket{00}+\ket{11})/\sqrt{2}$ and is always greater than that of the case without assisted entanglement for arbitrary $\eta\in(0,1)$~\cite{HMM16}.
For $\bm{\Xi}=(\mathds{1},X,Y,Z)$ the Pauli matrices, the general-Pauli channel is the map \begin{equation}
\mathcal{E}_\text{GPC}(\rho)=\sum_{i=0}^3p_i\Xi_i\rho\Xi_i,\
\sum_i p_i=1,\
0\leq p_i\leq1, \label{eq:PC} \end{equation} and the depolarizing channel $p_1=p_2=p_3=p/4$ is a special case. For a single-qubit probe, $\ket+$ is the optimal state, and the optimal QFI is $(1-p)^2$~\cite{HMM16}. If the joint-probe ancilla state is $\ket{\Phi}$, the QFI is $2(1-p)^2/(2-p)$. For arbitrary $p\in(0,1)$, the QFI is always greater than that of the case without assisted entanglement~\cite{HMM16}.
The depolarizing channel can be regarded as a time-sharing combination of a noiseless channel and a noisy channel in which the state will evolve to a maximally mixed state~\cite{RMC+04,SE11,CRV+11}. For either of the two channels, the entanglement-assisted approach does not provide any advantage. However, somewhat surprisingly, assisted entanglement improves QFI for the depolarizing channel. We can test for the general-Pauli channel (the depolarizing channel is a special case) which can be implemented in a time-sharing way~\cite{RMC+04,SE11,CRV+11,OSP+13,OCM+15}. Each Pauli operator is applied over a specific activation time, respectively, and the total decoherence process lasted over an activation cycle, achieving a time-sharing general-Pauli channel. To explain the advantages of entanglement-assisted quantum metrology, we rather implement a new type of general-Pauli channel, namely a space-multiplexed Pauli channel.
\begin{figure*}
\caption{Experimental results of the reconstructed noisy channels. For the entanglement-assisted approach, the reconstructed process matrices for the amplitude-damping channel with $\eta=0.5$ (a) and the depolarizing channel with $p=0.4$ (d) compared with their theoretical predictions (b) and (e). The fidelities $F$ of the reconstructed process matrices for the amplitude-damping and depolarizing channels as a function of the noise parameters are shown in (c) and (f), respectively. The red bars indicate the fidelities for the entanglement-assisted approach and the grey ones indicate those for the optimized single-probe approach. Error bars indicate the statistical uncertainty, obtained from Monte-Carlo simulations assuming Poissonian photon-counting statistics.}
\label{channel}
\end{figure*}
Our method can be extended to a more complicated case --- an $N$-probe approach. In the absence of noise, an $N$-probe approach with an optimal $N$-qubit input state (e.g., a N00N state) achieves the Heisenberg limit scaling, which provides improvement over classical limits. However, the advantages are destroyed by noise. Our entanglement-assisted approach in which $N$ probes are entangled with noiseless ancill\ae\ protects against noise and the effect caused by noise can be eliminated by assisted entanglement. Even in the presence of noise, the entanglement-assisted approach beats the shot-noise limit and even maintains the Heisenberg limit scaling for some special noisy channel.
We use a two-probe approach as an example. A two-qubit N00N state (one of the Bell states) $\ket{\Phi^+}=(\ket{00}+\ket{11})/\sqrt{2}$ with both qubits being probes is optimal only in the noiseless case. The phase $\phi$ to be estimated is obtained via the unitary map applied in parallel \begin{equation} \mathcal{U}^2_\phi\left(\varrho\right)=U_\phi\otimes U_\phi\varrho U_\phi^\dagger\otimes U_\phi^\dagger \end{equation} with $\varrho=\ket{\Phi^+}\bra{\Phi^+}$. Through a collective noisy channel in parallel, the probe state becomes $\varrho_\phi=\Lambda_\phi^{\otimes2}\varrho$.
A four-qubit entangled state $\tilde{\varrho}=(\ket{0000}+\ket{1111})(\bra{0000}+\bra{1111})/2$ of two probes and two ancill\ae\ beats the optimal state of two probes $\varrho$ in the presence of noise. Taking the collective damping channel as an example, its QFI is \begin{equation} \frac{8(\eta-1)^2\{2(\eta-1)^2\cos8\phi+(\eta-2)\eta\left[(\eta-2)\eta+2\right]+2\}}{\left[(\eta-2)\eta+2\right]^3} \end{equation} and is larger than that of $\varrho$, even though this particular four-qubit entangled state is not necessarily optimal.
{\it Realization of noisy channels:-}The experimental setup in Fig.~\ref{setup} involves the three stages of state preparation, parametrization and measurement. In the preparation stage, we prepare single photons in polarization-spatial hyperentangled states for the entanglement-assisted single-probe approach~\cite{JTP10,ELL10}. In contrast, for the entanglement-assisted two-probe approach, polarization-entangled photon pairs are used to prepare the four-qubit hyperentangled state~\cite{suppl}.
The probe state is transformed according to the noisy channel, whereas the ancilla qubit is not evolving. The noise is introduced in a controlled way only on the probe. The efficiency of the optimal estimation is shown to outperform quantum process tomography (QPT).
We now present the experimental implementation of a single-qubit amplitude-damping channel. As the noisy channel is only applied to the probe state, i.e., the polarization degree of freedom of the photons, the longitudinal spatial modes of the photons ($\ket{U}$ and $\ket{D}$) are not affected. The photons on either of the modes encounter the same noisy channel. In the polarization basis, the amplitude-damping map is realized by the dual interferometer setup implemented by splitting the two polarization components and putting independent polarization controls inside a beam displacer (BD) interferometer~\cite{YJO12}.
First a BD whose optical axis is perpendicular to that of the one which is used for preparing hyperentangled states in the state preparation stage splits the two polarization components by directly transmitting the vertically polarized photons and shifting the horizontally polarized photons by a lateral displacement. A half-wave plate (HWP) at $45^\circ$ rotates $\ket{H}$ to $\ket{V}$ and another HWP (H$_\text{A}$) at $\theta_\text{A}$ with $\cos2\theta_\text{A}=-\sqrt{1-\eta}$ applies a rotation $\begin{pmatrix} -\sqrt{1-\eta}&\sqrt{\eta}\\ \sqrt{\eta} &\sqrt{1-\eta}\end{pmatrix}$ on the polarization of photons. The following BD splits and combines the photons according to their polarizations, and the HWPs with certain setting angles are used to rotate the polarization of the photons.
A quartz crystal (QC) with thickness of $28.77$mm~\cite{KGX17} is inserted to reduce the spatial coherence of the photons with different polarizations. The sandwich-type HWP-BD-HWP setup works as a 50:50 beamsplitter recombining the photons. Accordingly, with probability $1/2$, the state emerging from the output port is the desired output state.
\begin{figure}
\caption{Experimental values of QFI. QFI vs~$\eta$ ($p$) for (a)~amplitude-damping and (b) depolarizing channels. Dashed curves show theoretical predictions of QFI for the entanglement-assisted approach, whereas solid curves are for the optimized single-probe approach. Data points are experimental results. Error bars are calculated from photon-counting statistics. }
\label{QFI}
\end{figure}
\begin{figure*}
\caption{Experimental values of the error $\sqrt{\nu}\delta\phi$. The error as a function of the channel noise for single-probe approach in (a) amplitude-damping and (b) depolarizing channels. (c) The result for two-probe approach in the amplitude-damping channel. Dashed curves show theoretical predictions of the error for the entanglement-assisted approach, whereas solid curves are for approaches without assisted entanglement. The grey shadow denotes the shot-noise limit. Data points are experimental results. Error bars are calculated with the method ``Bootstrap''. Interferometric visibilities of the setups in (a), (b) and (c) are $0.9969\pm0.0006$, $0.9928\pm0.0008$ and $0.9699\pm0.0055$, respectively. }
\label{error}
\end{figure*}
Furthermore, we can also create a single-qubit space-multiplexed general-Pauli channel~(\ref{eq:PC}) with five BDs and twelve HWPs. Six HWPs (H$_l$ at $\theta_l$, $l=1,\dots,6$) control the ratio of photons in different lateral spatial modes, and three of them at $45^\circ$ (in front of the fifth BD) flip the polarizations and then change the spatial modes of the corresponding photons. Therefore, after the fifth BD, the photons are distributed into four lateral spatial modes according to the parameters $p_i$. For a given desired channel the setting angles $\theta_l$ of the HWPs (H$_l$) are chosen to satisfy the relations \begin{align*} \sqrt{p_0}&=\cos2\theta_1\sin2\theta_3=\cos2\theta_2\cos2\theta_4\sin2\theta_6,\\ \sqrt{p_1}&=\sin2\theta_1=-\cos2\theta_2\cos2\theta_4\cos2\theta_6,\\ \sqrt{p_2}&=\cos2\theta_1\cos2\theta_3\cos2\theta_5=\sin2\theta_2,\\ \sqrt{p_3}&=\cos2\theta_1\cos2\theta_3\sin2\theta_5=-\cos2\theta_2\sin2\theta_4. \end{align*} Then the last three HWPs at $0^\circ$ and $45^\circ$, respectively, are inserted into different spatial modes and act as Pauli operators $\bm{\Xi}$ on the probe qubit.
Two nonpolarizing beamsplitters (NBSs) recombine the photons in the four lateral spatial modes. To reduce the spatial coherence of the photons, the optical distance~$\varsigma$ between the photons in the different lateral spatial modes should satisfy $L_\text{coh}<\varsigma<\text{c}\Delta t=0.9$m. In our experiment, $\max\varsigma\approx0.6$m. Hence, we realize the space-multiplexed general-Pauli channel.
To compare the approaches with and without assisted entanglement, we realize noisy channels on the probe qubit, which does not share entanglement with an ancilla. In our experiment, in both the state preparation and process tomography stages, the BDs and some WPs are removed from the setup in Fig.~\ref{setup} as no ancillary spatial mode is needed. In the parametrization stage, the photons are not distributed into different longitudinal spatial modes.
{\it Experimental results of QFI:-}We present our experimental results for noisy channels and compare the QFI for the single-probe approach with and without assisted entanglement. Our experimental process matrices $\chi_\text{exp}$ are reconstructed, and we use the process fidelity~\cite{XCX08,JAF14} \begin{equation} F=\frac{\text{Tr}(\chi^\dagger_\text{th}\chi_\text{exp})}{\sqrt{\text{Tr}(\chi^\dagger_\text{exp}\chi_\text{exp})\text{Tr}(\chi^\dagger_\text{th}\chi_\text{th})}} \label{eq:fidelity} \end{equation} to characterize the experimental realization of the noisy channels~\cite{suppl}. Figure~\ref{channel} shows the experimentally reconstructed $\chi_\text{exp}$ for the amplitude-damping channel with $\eta=0.5$ and the depolarizing channel with $p=0.4$. Our results exhibit $F\approx1$. Without assisted entanglement, all the fidelities of the amplitude-damping channel with various parameters are greater than $0.9949\pm 0.0007$ and those of the depolarizing channel are greater than $0.9700\pm0.0041$. Whereas with entanglement sharing between the probe and ancilla, all the fidelities of the amplitude-damping channel are greater than $0.9647\pm 0.0003$ and those of the depolarizing channel are greater than $0.9593\pm 0.0016$.
To calculate the QFI, we use the diagonal form of the output state $\rho_\text{exp}(\phi)=\sum_i\lambda_i\ket{\psi_i}\bra{\psi_i}+\rho_\text{noise}$, where $\lambda_i$ and $\ket{\psi_i}$ are the eigenvalues and eigenstates, $\rho_\text{noise}$ is the irrelevant part of the density matrix and is independent of $\phi$~\cite{JMD16}. With this formula, we calculate the matrix elements of $A$ in the basis $\{\ket{\psi_i}\}$.
We use the amplitude-damping and depolarizing channels as examples, as they are typical decoherence models in optical interferometry. For the amplitude-damping channel, the optimized QFI of the output state is $\frac{\left[2\rho_\text{exp}^{12}(\phi)\right]^2}{\rho_\text{exp}^{11}(\phi)+\rho_\text{exp}^{22}(\phi)}$, and $\frac{\left[2\widetilde{\rho}_\text{exp}^{14}(\phi)\right]^2}{\widetilde{\rho}_\text{exp}^{11}(\phi)+\widetilde{\rho}_\text{exp}^{44}(\phi)}$ for a single-probe input state and for the entanglement-assisted approach, respectively, with $\rho_\text{exp}^{ij}$ a matrix element of $\rho_\text{exp}$. For the depolarizing channel, without assisted entanglement, the optimized QFI for a single probe is $\frac{\left[2\rho_\text{exp}^{12}(\phi)\right]^2}{\left[\rho_\text{exp}^{11}(\phi)+\rho_\text{exp}^{22}(\phi)\right]}$. With assisted entanglement, the QFI of the output state of the probe+ancilla system is then $\frac{\left[2\widetilde{\rho}_\text{exp}^{14}(\phi)\right]^2}{\left[\widetilde{\rho}_\text{exp}^{11}(\phi)+\widetilde{\rho}_\text{exp}^{44}(\phi)\right]}+\frac{\left[2\widetilde{\rho}_\text{exp}^{23}(\phi)\right]^2}{\left[\widetilde{\rho}_\text{exp}^{22}(\phi)+\widetilde{\rho}_\text{exp}^{33}(\phi)\right]}$.
As we reconstruct all noisy-channel information via QPT~\cite{NC04,ABJ+03}, the output state for each case is reconstructed. By setting $\phi=0$, we calculate experimental QFI values of the output states. In Fig.~\ref{QFI}, experimental values of the QFI for the amplitude-damping and depolarizing channels either with or without the assisted entanglement are shown. Our experimental results agree well with theoretical calculations.
Evidently, for a single probe, in the presence of amplitude-damping noise and depolarizing noise, an entanglement-assisted scheme improves the QFI compared to the unentangled case for all ranges of noise regimes. To illustrate this, we also realize the general-Pauli channel with $p_0=p_2=0.5$ and $p_1=p_3=0$. The experimental value for QFI for the entanglement-assisted approach is $0.984\pm0.045$, which agrees with the theoretical prediction $1$, whereas the optimized QFI for a single probe is $0$. This represents the case of orthogonal noise when the ancilla approach recovers almost the full information on the phase even in the presence of noise.
{\it Phase estimation:-}For the single-probe approach, the phase $\phi$ to be estimated is imprinted via a unitary map implemented by an additional HWP inserted in the interferometer, which causes an optical path difference between photons with different polarizations. The optimal measurement strategy around $\phi\sim 0$ consists in projecting in the polarization-spatial hyperentangled states $(\ket{HU}\pm \text{i}\ket{VD})/\sqrt{2}$. Since no information on $\phi$ is carried on the other bases, for convenience, we choose $\ket{HD}$ and $\ket{VU}$. The projective measurements are realized via a BD, a quarter-wave plate (QWP) at $0^\circ$, HWPs at $45^\circ$ and $22.5^\circ$ respectively, and a polarizing beamsplitter (PBS). Coincidences between the outputs and the trigger are detected by single photon avalanche photodiodes (APDs)~\cite{suppl}.
For the amplitude-damping channel, the outcome probabilities of the projective measurements are $P\left[(\ket{HU}\pm \text{i}\ket{VD})/\sqrt{2}\right]=\left[2-\eta\pm2v\sqrt{1-\eta}\sin\phi\right]/4$, $P(\ket{HD})=\eta/2$ and $P(\ket{VU})=0$, where $v$ is the visibility of the interferometer. The optimal measurement is identified by optimising the highest QFI $2v^2(1-\eta)/(2-\eta)$, which proves that the measurement achieves the quantum Cram\'{e}r-Rao bound for the input state. Whereas, for depolarizing channel, the outcome probabilities are $P\left[(\ket{HU}\pm \text{i}\ket{VD})/\sqrt{2}\right]=\left[2-p\pm2v(1-p)\sin\phi\right]/4$, $P(\ket{HD})=p/4$ and $P(\ket{VU})=p/4$ and the corresponding QFI is $2v^2(1-p)^2/(2-p)$, which is always above the single-probe QFI.
For the two-probe approach, we use the amplitude damping channel as an example. The input state is prepared in a two-photon N00N state $(\ket{HH}+\ket{VV})/\sqrt{2}$. Each probe is affected by an individual amplitude damping channel with the noise parameter $\eta$. With ancillary degree of freedom---spatial modes of two photons, the entanglement-assisted state becomes $(\ket{HUHU}+\ket{VDVD})/\sqrt{2}$. The optimal measurement strategy around $\phi\sim 0$ consists in projecting in the polarization-spatial hyperentangled states $(\ket{HUHU}\pm\text{i}\ket{VDVD})/\sqrt{2}$. No information on $\phi$ is carried on the other $14$ bases. The outcome probabilities of the projective measurements are $P\left[(\ket{HUHU}\pm\text{i}\ket{VDVD})/\sqrt{2}\right]=\left[2-2\eta+\eta^2\mp2v(1-\eta)\sin2\phi\right]/4$, $P(\ket{HDHD})=\eta^2/2$, $P(\ket{HDVD})=\eta(1-\eta)/2$, $P(\ket{VDHD})=\eta(1-\eta)/2$, and zero for the other projective measurements. The optimal measurement is identified by optimising the highest QFI $8v^2(1-\eta)^2/\left[1+(1-\eta)^2\right]$, which is always above the two-probe approach without assisted entanglement $4v^2(1-\eta)^2/\left[1-\eta+\eta^2\right]$.
To realize the entanglement-assisted single-probe approach, for each of the various noise parameters, data is accumulated for a collection time of $10$~s, corresponding to a coincidence count rate of about $20,000$ events per acquisition. For the entanglement-assisted two-probe approach, the coincidence count rate is about $2,000$ events per acquisition. In total, $100$ values of the phase $\phi$ are collected. The standard deviation of the sample $\delta\phi$ is expected to converge to the ultimate limit established by the quantum Cram\'{e}r-Rao bound in the limit of a large number of repetitions. We use the standard deviation of the sample multiplied by $\sqrt{\nu}$ (here, $\nu$ is the average number of the events) to indicate the error $\sqrt{\nu}\delta\phi$.
Figure~\ref{error} shows the experimental results of the error $\sqrt{\nu}\delta\phi$ as a function of the noise parameters for different approaches in different noisy channels. For the single-probe approach, due to experimental imperfections such as imperfect interferometric visibility of the setup, it is difficult to observe the advantages of the entanglement-assisted approach at low noise. With the noise parameter increasing, the advantages are more obvious. For the two-probe case, the approach of a two-qubit N00N state beats the shot-noise limit both in the noiseless case and at low noise level. The advantage over the classical metrology is affected by noise. Assisted entanglement protects against the noise, especially at high noise level.
{\it Discussion:-}We experimentally realized entanglement-assisted quantum metrology and demonstrated its efficacy through the QFI for single-qubit amplitude-damping, depolarizing and general-Pauli noisy channels. Compared to the approach without assisted entanglement, we observe an enhancement over the noisy cases. Our achievement relies on replacing time-sharing noisy channels by space-multiplexed noisy channels using a practical, linear-optical interferometric network. Our demonstration serves as a foundation for future experimental simulations employing networks of multi-qubit channel simulations.
We use polarization-spatial hyperentangled states encoded in photons, which are easier to create and control. Our new approach to entanglement-assisted quantum metrology via a simple linear-optical interferometric network with easy-to-prepare photonic inputs provides a path towards practical quantum metrology.
{\it Note:-}After completing this work, we learned of related work by the group of Marco Barbieri~\cite{SGM+17}.
{\bf Acknowledgments} We thank Lorenzo Maccone for helpful discussions and appreciate elucidating correspondence with Carlton M. Caves regarding how and why assisted entanglement is an advantage. We acknowledge support by NSFC (Nos.~11474049, 11674056 and GG2340000241), NSFJS (No. BK20160024), the Scientific Research Foundation of the Graduate School of Southeast University and the Open Fund from State Key Laboratory of Precision Spectroscopy of East China Normal University. BCS acknowledges financial support from the 1000-Talent Plan.
\begin{thebibliography}{10} \expandafter\ifx\csname url\endcsname\relax
\def\url#1{\texttt{#1}}\fi \expandafter\ifx\csname urlprefix\endcsname\relax\def\urlprefix{URL }\fi \providecommand{\bibinfo}[2]{#2} \providecommand{\eprint}[2][]{\url{#2}}
\bibitem{VSL04} \bibinfo{author}{Giovannetti, V.}, \bibinfo{author}{Lloyd, S.} and
\bibinfo{author}{Maccone, L.} \newblock \bibinfo{title}{Quantum-enhanced measurements: Beating the standard
quantum limit}. \newblock \emph{\bibinfo{journal}{Science}} \textbf{\bibinfo{volume}{306}},
\bibinfo{pages}{1330} (\bibinfo{year}{2004}).
\bibitem{WGA07} \bibinfo{author}{van Dam, W.}, \bibinfo{author}{D'Ariano, G.~M.},
\bibinfo{author}{Ekert, A.}, \bibinfo{author}{Macchiavello, C.} and
\bibinfo{author}{Mosca, M.} \newblock \bibinfo{title}{Optimal quantum circuits for general phase
estimation}. \newblock \emph{\bibinfo{journal}{Phys. Rev. Lett.}}
\textbf{\bibinfo{volume}{98}}, \bibinfo{pages}{090501}
(\bibinfo{year}{2007}).
\bibitem{VSL11} \bibinfo{author}{Giovannetti, V.}, \bibinfo{author}{Lloyd, S.} and
\bibinfo{author}{Maccone, L.} \newblock \bibinfo{title}{Advances in quantum metrology}. \newblock \emph{\bibinfo{journal}{Nat. Photon.}} \textbf{\bibinfo{volume}{5}},
\bibinfo{pages}{222} (\bibinfo{year}{2011}).
\bibitem{RJM12} \bibinfo{author}{Demkowicz-Dobrza\'{n}ski, R.},
\bibinfo{author}{Ko{\l}ody\'{n}ski, J.} and \bibinfo{author}{Gu\c{t}\v{a}, M.} \newblock \bibinfo{title}{The elusive \protect{H}eisenberg limit in
quantum-enhanced metrology.} \newblock \emph{\bibinfo{journal}{Nat. Commun.}} \textbf{\bibinfo{volume}{3}},
\bibinfo{pages}{1063} (\bibinfo{year}{2012}).
\bibitem{SMA14} \bibinfo{author}{Alipour, S.}, \bibinfo{author}{Mehboudi, M.} and
\bibinfo{author}{Rezakhani, A.~T.} \newblock \bibinfo{title}{Quantum metrology in open systems: Dissipative
\protect{C}ram\'{e}r-\protect{R}ao bound.} \newblock \emph{\bibinfo{journal}{Phys. Rev. Lett.}}
\textbf{\bibinfo{volume}{112}}, \bibinfo{pages}{120405}
(\bibinfo{year}{2014}).
\bibitem{PL17} \bibinfo{author}{Braun, D.} \emph{et~al.} \newblock \bibinfo{title}{Quantum enhanced measurements without entanglement}. \newblock Preprint at https://arxiv.org/abs/1701.05152 (2017).
\bibitem{DAB+17} \bibinfo{author}{Pirandola, S.} and \bibinfo{author}{Lupo, C.} \newblock \bibinfo{title}{Ultimate precision of adaptive noise estimation}. \newblock \emph{\bibinfo{journal}{Phys. Rev. Lett.}}
\textbf{\bibinfo{volume}{118}}, \bibinfo{pages}{100502}
(\bibinfo{year}{2017}).
\bibitem{SS08} \bibinfo{author}{Roy, S.~M.} and \bibinfo{author}{Braunstein, S.~L.} \newblock \bibinfo{title}{Exponentially enhanced quantum metrology}. \newblock \emph{\bibinfo{journal}{Phys. Rev. Lett.}}
\textbf{\bibinfo{volume}{100}}, \bibinfo{pages}{220501}
(\bibinfo{year}{2008}).
\bibitem{BEJ13} \bibinfo{author}{Gendra, B.}, \bibinfo{author}{Ronco-Bonvehi, E.},
\bibinfo{author}{Calsamiglia, J.}, \bibinfo{author}{Mu{\~{n}}oz-Tapia, R.} and
\bibinfo{author}{Bagan, E.} \newblock \bibinfo{title}{Quantum metrology assisted by abstention}. \newblock \emph{\bibinfo{journal}{Phys. Rev. Lett.}}
\textbf{\bibinfo{volume}{110}}, \bibinfo{pages}{100501}
(\bibinfo{year}{2013}).
\bibitem{MRC16} \bibinfo{author}{Oszmaniec, M.} \emph{et~al.} \newblock \bibinfo{title}{Random bosonic states for robust quantum metrology}. \newblock \emph{\bibinfo{journal}{Phys. Rev. X}} \textbf{\bibinfo{volume}{6}},
\bibinfo{pages}{041044} (\bibinfo{year}{2016}).
\bibitem{LCF16} \bibinfo{author}{Chen, G.} \emph{et~al.} \newblock \bibinfo{title}{Scalable \protect{H}eisenberg-limited metrology using
mixed states}. \newblock Preprint at https://arxiv.org/abs/1612.07427 (2016).
\bibitem{LMM17} \bibinfo{author}{Seveso, L.}, \bibinfo{author}{Rossi, M. A.~C.} and
\bibinfo{author}{Paris, M. G.~A.} \newblock \bibinfo{title}{Quantum metrology beyond the quantum
\protect{C}ram\'{e}r-\protect{R}ao theorem.} \newblock \emph{\bibinfo{journal}{Phys. Rev. A}} \textbf{\bibinfo{volume}{95}},
\bibinfo{pages}{012111} (\bibinfo{year}{2017}).
\bibitem{EIA14} \bibinfo{author}{Kessler, E.~M.}, \bibinfo{author}{Lovchinsky, I.},
\bibinfo{author}{Sushkov, A.~O.} and \bibinfo{author}{Lukin, M.~D.} \newblock \bibinfo{title}{Quantum error correction for metrology}. \newblock \emph{\bibinfo{journal}{Phys. Rev. Lett.}}
\textbf{\bibinfo{volume}{112}}, \bibinfo{pages}{150802}
(\bibinfo{year}{2014}).
\bibitem{WMF14} \bibinfo{author}{D\"{u}r, W.}, \bibinfo{author}{Skotiniotis, M.},
\bibinfo{author}{Fr\"{o}wis, F.} and \bibinfo{author}{Kraus, B.} \newblock \bibinfo{title}{Improved quantum metrology using quantum error
correction}. \newblock \emph{\bibinfo{journal}{Phys. Rev. Lett.}}
\textbf{\bibinfo{volume}{112}}, \bibinfo{pages}{080801}
(\bibinfo{year}{2014}).
\bibitem{AJJ00} \bibinfo{author}{Childs, A.~M.}, \bibinfo{author}{Preskill, J.} and
\bibinfo{author}{Renes, J.} \newblock \bibinfo{title}{Quantum information and precision measurement}. \newblock \emph{\bibinfo{journal}{J. Mod. Opt.}} \textbf{\bibinfo{volume}{47}},
\bibinfo{pages}{155} (\bibinfo{year}{2000}).
\bibitem{VSL06} \bibinfo{author}{Giovannetti, V.}, \bibinfo{author}{Lloyd, S.} and
\bibinfo{author}{Maccone, L.} \newblock \bibinfo{title}{Quantum metrology}. \newblock \emph{\bibinfo{journal}{Phys. Rev. Lett.}}
\textbf{\bibinfo{volume}{96}}, \bibinfo{pages}{010401}
(\bibinfo{year}{2006}).
\bibitem{RL14} \bibinfo{author}{Demkowicz-Dobrza\'{n}ski, R.} and \bibinfo{author}{Maccone, L.} \newblock \bibinfo{title}{Using entanglement against noise in quantum
metrology}. \newblock \emph{\bibinfo{journal}{Phys. Rev. Lett.}}
\textbf{\bibinfo{volume}{113}}, \bibinfo{pages}{250801}
(\bibinfo{year}{2014}).
\bibitem{HMM16} \bibinfo{author}{Huang, Z.}, \bibinfo{author}{Macchiavello, C.} and
\bibinfo{author}{Maccone, L.} \newblock \bibinfo{title}{Usefulness of entanglement-assisted quantum
metrology}. \newblock \emph{\bibinfo{journal}{Phys. Rev. A}} \textbf{\bibinfo{volume}{94}},
\bibinfo{pages}{012101} (\bibinfo{year}{2016}).
\bibitem{HC17} \bibinfo{author}{Yuan, H.} and \bibinfo{author}{Fung, C.-H.~F.} \newblock \bibinfo{title}{Quantum parameter estimation with general dynamics}. \newblock \emph{\bibinfo{journal}{npj Quantum Inf.}}
\textbf{\bibinfo{volume}{3}}, \bibinfo{pages}{14} (\bibinfo{year}{2017}).
\bibitem{YF17} \bibinfo{author}{Yuan, H.} and \bibinfo{author}{Fung, C.-H.~F.} \newblock \bibinfo{title}{Quantum metrology matrix}. \newblock \emph{\bibinfo{journal}{Phys. Rev. A}} \textbf{\bibinfo{volume}{96}},
\bibinfo{pages}{012310} (\bibinfo{year}{2017}).
\bibitem{H82} \bibinfo{author}{Holevo, A.~S.} \newblock \emph{\bibinfo{title}{{Probabilistic and Statistical Aspects of
Quantum Theory; 2nd ed.}}} \newblock Publications of the Scuola Normale Superiore Monographs
(\bibinfo{publisher}{Springer}, \bibinfo{address}{Dordrecht},
\bibinfo{year}{2011}).
\bibitem{suppl} \bibinfo{note}{See supplemental materials for details}.
\bibitem{JR13} \bibinfo{author}{Ko{\l}ody\'{n}ski, J.} and
\bibinfo{author}{Demkowicz-Dobrza\'{n}ski, R.} \newblock \bibinfo{title}{Efficient tools for quantum metrology with
uncorrelated noise}. \newblock \emph{\bibinfo{journal}{New J. Phys.}} \textbf{\bibinfo{volume}{15}},
\bibinfo{pages}{073043} (\bibinfo{year}{2013}).
\bibitem{AH08} \bibinfo{author}{Fujiwara, A.} and \bibinfo{author}{Imai, H.} \newblock \bibinfo{title}{A fibre bundle over manifolds of quantum channels and
its application to quantum statistics}. \newblock \emph{\bibinfo{journal}{J. Phys. A: Math. Theor.}}
\textbf{\bibinfo{volume}{41}}, \bibinfo{pages}{255304}
(\bibinfo{year}{2008}).
\bibitem{F17} \bibinfo{author}{Chapeau-Blondeau, F.} \newblock \bibinfo{title}{Entanglement-assisted quantum parameter estimation
from a noisy qubit pair: A \protect{F}isher information analysis}. \newblock \emph{\bibinfo{journal}{Phys. Lett. A}}
\textbf{\bibinfo{volume}{381}}, \bibinfo{pages}{1369--1378}
(\bibinfo{year}{2017}).
\bibitem{RJM13} \bibinfo{author}{Chaves, R.}, \bibinfo{author}{Brask, J.~B.},
\bibinfo{author}{Markiewicz, M.}, \bibinfo{author}{Ko{\l}ody\'{n}ski, J.} and
\bibinfo{author}{Ac\'{i}n, A.} \newblock \bibinfo{title}{Noisy metrology beyond the standard quantum limit}. \newblock \emph{\bibinfo{journal}{Phys. Rev. Lett.}}
\textbf{\bibinfo{volume}{111}}, \bibinfo{pages}{120401}
(\bibinfo{year}{2013}).
\bibitem{BRL11} \bibinfo{author}{Escher, B.~M.}, \bibinfo{author}{de~Matos~Filho, R.~L.} and
\bibinfo{author}{Davidovich, L.} \newblock \bibinfo{title}{General framework for estimating the ultimate
precision limit in noisy quantum-enhanced metrology}. \newblock \emph{\bibinfo{journal}{Nat. Phys.}} \textbf{\bibinfo{volume}{7}},
\bibinfo{pages}{406} (\bibinfo{year}{2011}).
\bibitem{KRR12} \bibinfo{author}{Fisher, K. A.~G.}, \bibinfo{author}{Prevedel, R.},
\bibinfo{author}{Kaltenbaek, R.} and \bibinfo{author}{Resch, K.~J.} \newblock \bibinfo{title}{Optimal linear optical implementation of a
single-qubit damping channel}. \newblock \emph{\bibinfo{journal}{New J. Phys.}} \textbf{\bibinfo{volume}{14}},
\bibinfo{pages}{033016} (\bibinfo{year}{2012}).
\bibitem{YJO12} \bibinfo{author}{Kim, Y.-S.}, \bibinfo{author}{Lee, J.-C.},
\bibinfo{author}{Kwon, O.} and \bibinfo{author}{Kim, Y.-H.} \newblock \bibinfo{title}{Protecting entanglement from decoherence using weak
measurement and quantum measurement reversal}. \newblock \emph{\bibinfo{journal}{Nat. Phys.}} \textbf{\bibinfo{volume}{8}},
\bibinfo{pages}{117} (\bibinfo{year}{2012}).
\bibitem{Barry17} \bibinfo{author}{Lu, H.} \emph{et~al.} \newblock \bibinfo{title}{Experimental quantum channel simulation}. \newblock \emph{\bibinfo{journal}{Phys. Rev. A}} \textbf{\bibinfo{volume}{95}},
\bibinfo{pages}{042310} (\bibinfo{year}{2017}).
\bibitem{JTP10} \bibinfo{author}{Barreiro, J.~T.}, \bibinfo{author}{Wei, T.-C.} and
\bibinfo{author}{Kwiat, P.~G.} \newblock \bibinfo{title}{Remote preparation of single-photon ``hybrid''
entangled and vector-polarization states}. \newblock \emph{\bibinfo{journal}{Phys. Rev. Lett.}}
\textbf{\bibinfo{volume}{105}}, \bibinfo{pages}{030407}
(\bibinfo{year}{2010}).
\bibitem{ELL10} \bibinfo{author}{Nagali, E.}, \bibinfo{author}{Sansoni, L.},
\bibinfo{author}{Marrucci, L.}, \bibinfo{author}{Santamato, E.} and
\bibinfo{author}{Sciarrino, F.} \newblock \bibinfo{title}{Experimental generation and characterization of
single-photon hybrid ququarts based on polarization and orbital angular
momentum encoding}. \newblock \emph{\bibinfo{journal}{Phys. Rev. A}} \textbf{\bibinfo{volume}{81}},
\bibinfo{pages}{052317} (\bibinfo{year}{2010}).
\bibitem{JMD16} \bibinfo{author}{Zhang, J.} \emph{et~al.} \newblock \bibinfo{title}{Experimental preparation of high \protect{N00N}
states for phonons}. \newblock Preprint at https://arxiv.org/abs/1611.08700 (2016).
\bibitem{SC94} \bibinfo{author}{Braunstein, S.~L.} and \bibinfo{author}{Caves, C.~M.} \newblock \bibinfo{title}{Statistical distance and the geometry of quantum
states}. \newblock \emph{\bibinfo{journal}{Phys. Rev. Lett.}}
\textbf{\bibinfo{volume}{72}}, \bibinfo{pages}{3439} (\bibinfo{year}{1994}).
\bibitem{RMC+04} \bibinfo{author}{Ricci, M.} \emph{et~al.} \newblock \bibinfo{title}{Experimental purification of single qubits}. \newblock \emph{\bibinfo{journal}{Phys. Rev. Lett.}}
\textbf{\bibinfo{volume}{93}}, \bibinfo{pages}{170501}
(\bibinfo{year}{2004}).
\bibitem{SE11} \bibinfo{author}{Shaham, A.} and \bibinfo{author}{Eisenberg, H.~S.} \newblock \bibinfo{title}{Realizing controllable depolarization in photonic
quantum-information channels}. \newblock \emph{\bibinfo{journal}{Phys. Rev. A}} \textbf{\bibinfo{volume}{83}},
\bibinfo{pages}{022303} (\bibinfo{year}{2011}).
\bibitem{CRV+11} \bibinfo{author}{Chiuri, A.} \emph{et~al.} \newblock \bibinfo{title}{Experimental realization of optimal noise estimation
for a general \protect{P}auli channel}. \newblock \emph{\bibinfo{journal}{Phys. Rev. Lett.}}
\textbf{\bibinfo{volume}{107}}, \bibinfo{pages}{253602}
(\bibinfo{year}{2011}).
\bibitem{OSP+13} \bibinfo{author}{Orieux, A.} \emph{et~al.} \newblock \bibinfo{title}{Experimental detection of quantum channels}. \newblock \emph{\bibinfo{journal}{Phys. Rev. Lett.}}
\textbf{\bibinfo{volume}{111}}, \bibinfo{pages}{220501}
(\bibinfo{year}{2013}).
\bibitem{OCM+15} \bibinfo{author}{Orieux, A.} \emph{et~al.} \newblock \bibinfo{title}{Experimental generation of robust entanglement from
classical correlations via local dissipation}. \newblock \emph{\bibinfo{journal}{Phys. Rev. Lett.}}
\textbf{\bibinfo{volume}{115}}, \bibinfo{pages}{160503}
(\bibinfo{year}{2015}).
\bibitem{KGX17} \bibinfo{author}{Wang, K.} \emph{et~al.} \newblock \bibinfo{title}{Optimal experimental demonstration of error-tolerant
quantum witnesses}. \newblock \emph{\bibinfo{journal}{Phys. Rev. A}} \textbf{\bibinfo{volume}{95}},
\bibinfo{pages}{032122} (\bibinfo{year}{2017}).
\bibitem{XCX08} \bibinfo{author}{Wang, X.}, \bibinfo{author}{Yu, C.-S.} and \bibinfo{author}{Yi,
X.~X.} \newblock \bibinfo{title}{An alternative quantum fidelity for mixed states of
qudits}. \newblock \emph{\bibinfo{journal}{Phys. Lett. A}}
\textbf{\bibinfo{volume}{373}}, \bibinfo{pages}{58} (\bibinfo{year}{2008}).
\bibitem{JAF14} \bibinfo{author}{Zhang, J.}, \bibinfo{author}{Souza, A.~M.},
\bibinfo{author}{Brandao, F.~D.} and \bibinfo{author}{Suter, D.} \newblock \bibinfo{title}{Protected quantum computing: Interleaving gate
operations with dynamical decoupling sequences}. \newblock \emph{\bibinfo{journal}{Phys. Rev. Lett.}}
\textbf{\bibinfo{volume}{112}}, \bibinfo{pages}{050502}
(\bibinfo{year}{2014}).
\bibitem{NC04} \bibinfo{author}{Nielsen, M.} and \bibinfo{author}{Isaac, C.} \newblock \emph{\bibinfo{title}{Quantum Computation and Quantum Information
(Cambridge Series on Information and the Natural Sciences)}}
(\bibinfo{publisher}{Cambridge University Press},
\bibinfo{address}{Cambridge}, \bibinfo{year}{2004}).
\bibitem{ABJ+03} \bibinfo{author}{Altepeter, J.~B.} \emph{et~al.} \newblock \bibinfo{title}{Ancilla-assisted quantum process tomography}. \newblock \emph{\bibinfo{journal}{Phys. Rev. Lett.}}
\textbf{\bibinfo{volume}{90}}, \bibinfo{pages}{193601}
(\bibinfo{year}{2003}).
\bibitem{SGM+17} \bibinfo{author}{Sbroscia, M.} \emph{et~al.} \newblock \bibinfo{title}{Experimental ancilla-assisted phase-estimation in a
noisy channel}. \newblock Preprint at https://arxiv.org/abs/1707.08792 (2017).
\end{thebibliography}
\begin{widetext}
\renewcommand{\thesection}{\Alph{section}} \renewcommand{\thefigure}{S\arabic{figure}} \renewcommand{\thetable}{S\Roman{table}} \setcounter{figure}{0} \renewcommand{\theequation}{S\arabic{equation}} \setcounter{equation}{0}
\section{Supplemental Material for ``Entanglement-enhanced quantum metrology in a noisy environment''}
In this Supplemental Material, we discuss extended-channel quantum Fisher information, optimal probe states under the dynamics with depolarization, intuitive understanding why assisted entanglement helps against noise, as well as some experimental details.
\section{Extended-channel quantum Fisher information} The action of a quantum channel $\Lambda_\phi=\mathcal{E}\circ\mathcal{U}_\phi$ can always be expressed as its operator-sum representation, $ \Lambda_\phi\rho
=\sum_{i}K_i\left(\phi\right)\rho K_{i}^\dagger\left(\phi\right) $ with Kraus operators $K_i(\phi)$ satisfying the completeness relation $\sum_{i}K_{i}^\dagger\left(\phi\right)K_{i}\left(\phi\right)=\mathds{1}$. Evidently, this representation is not unique; different sets of linearly independent Kraus operators can be related by unitary transformations~\cite{RJM12} \begin{equation} \widetilde{K}_i\left(\phi\right)=\sum_j u_{ij}\left(\phi\right)K_{j}\left(\phi\right), \end{equation} where $u_{ij}$ is the element of a unitary matrix $u\left(\phi\right)$ possibly depending on $\phi$.
The single-channel quantum Fisher information is equal to the smallest quantum Fisher information of its purifications $\Lambda_\phi\rho = \text{Tr}_\text{E}\left(\ketbrad{\Psi_{\phi}}\right)$ with $\ket{\Psi_\phi}$ the state of input+environment and the subscript E for tracing out environment~\cite{AH08} \begin{equation}
J \left(\Lambda_\phi\rho\right)=\min \limits_{\ket{\Psi_{\phi}}} J\left(\ket{\Psi_{\phi}}\right) \end{equation} by minimizing over the state of input+environment $\ket{\Psi_\phi}$.
For a pure input state (not an unreasonable constraint as the optimal input state is always pure~\cite{F17}), different purifications correspond to different Kraus representations of the channel. Moreover, it is enough to parameterize equivalent Kraus representations in Eq.~(S1) with a Hermitian matrix $h$, which is the generator of infinitesimal rotations; i.e., $u\left(\phi\right)=\text{e}^{-\text{i}h\left(\phi-\phi_0\right)}$, in the vicinity of the real value $\phi_0$. This formulation simplifies the optimization problem Eq.~(S2) by revising it as a minimization problem over $h$. Therefore, we obtain the maximal quantum Fisher information after performing the input optimization as~\cite{JR13} \begin{equation}
\max \limits_{\rho}J \left(\Lambda_\phi\rho\right)= 4\max \limits_{\rho}\min \limits_{h}\text{Tr}\left(\rho\sum_i\dot{\widetilde{K}}_i^{\dagger}\left(\phi\right)\dot{\widetilde{K}}_i\left(\phi\right)\right) \end{equation} with $\dot{\widetilde{K}}_i\left(\phi\right)=\partial_{\phi}\widetilde{K}_i\left(\phi\right)$.
By considering an ancillary system with extended input states involving probe and ancilla, we acquire full information available about $\phi$ imprinted by the map $\Lambda_\phi$ on the extended output state. Then quantum Fisher information of the extended-channel is calculated in a similar way. The map becomes $\widetilde{\rho}\left(\phi\right)=\Lambda_\phi\otimes\mathds{1}\widetilde{\rho}$, where $\widetilde{\rho}$ denotes the initial pure state of the probe+ancilla system. The quantum Fisher information is \begin{equation}
\max \limits_{\widetilde{\rho}}J \left(\Lambda_\phi\otimes \mathds{1}\widetilde{\rho}\right)= 4\max \limits_{\rho_\text{A}}\min \limits_{h}\text{Tr}\left(\rho_\text{A}\sum_i\dot{\widetilde{K}}_i^{\dagger}\left(\phi\right)\dot{\widetilde{K}}_i\left(\phi\right)\right), \end{equation} where $\rho_\text{A}=\text{Tr}_\text{A}\left(\widetilde{\rho}\right)$ is obtained by tracing over the auxiliary space, which leads to the maximization over all mixed states $\rho_\text{A}$. Equation (S4) is exactly Eq.~(S3) with the pure input state replaced by a general mixed one. By maximizing over all mixed states, the extended-channel quantum Fisher information can be larger than the unextended one. If and only if the optimal $\rho_\text{A}$ is a pure state, assisted entanglement does not help.
\section{Optimal probe states under the dynamics with depolarization}
The depolarizing channel is described by Kraus operators \begin{equation} K_0=\sqrt{1-\frac{3p}{4}}\Xi_0,K_{1,2,3}=\sqrt{\frac{p}{4}}\Xi_{1,2,3}, \end{equation} where $\Xi=(\mathds{1},X,Y,Z)$ are the Pauli matrices. Using the method of semi-definite programming~\cite{RJM12}, we find the optimal generator \begin{align}
h=\frac{1}{2}\left(\begin{array}{cccc}
0 & 0 & 0 & \xi\\
0 & 0 & -\text{i} & 0\\
0 & \text{i} & 0 & 0\\
\xi & 0 & 0 & 0
\end{array}\right), \xi=\frac{\sqrt{\left(4-3p\right)}}{2-p}. \end{align}
For the single-probe approach, the optimal input state is $\rho=\ket{+}\bra{+}$, where $\ket{\pm}=(\ket{0}\pm\ket{1})/\sqrt{2}$. Substituting the optimal state and generator into Eq.~(S3), we obtain the maximal quantum Fisher information of the single probe \begin{equation} \max \limits_{\rho}J \left(\Lambda_\phi\rho\right)=\left(1-p\right)^2. \end{equation}
For the entanglement-assisted approach, the optimal reduced state is the maximally mixed state $\rho_\text{A}=\left(\ketbra{0}{0}+\ketbra{1}{1}\right)/2$. The optimal entangled input state in this case is any pure state $\widetilde{\rho}$ with the reduced state equal to $\rho_\text{A}$. The simplest choice of the optimal input state is the maximally entangled state $\widetilde{\rho}=\left(\ket{00}+\ket{11}\right)\left(\bra{00}+\bra{11}\right)/2$~\cite{HMM16}, and the corresponding maximal quantum Fisher information is \begin{equation} \max \limits_{\widetilde{\rho}}J \left(\Lambda_\phi\otimes\mathds{1}\widetilde{\rho}\right)=\frac{2\left(1-p\right)^2}{\left(2-p\right)}, \end{equation} which is always greater than that of the single-probe approach for arbitrary $p\in (0,1)$.
\section{Intuitive understanding why assisted entanglement helps against noise}
The intuitive understanding of how and why the ancilla qubit helps is crucial to making progress on entanglement-assisted metrology. Here, we provide it for the case of a depolarizing channel.
Figure S1(a) shows the single-probe approach. A Hadamard operator creates the state of the probe qubit $\ket{+}$. With $U_{\phi}=\text{e}^{-\text{i}Z\phi/2}$, the depolarizing channel is \begin{equation} \mathcal{E}\odot = \left(1-\frac{3p}{4}\right)\mathds{1}\odot \mathds{1} + \frac{p}{4}\left(Z\odot Z+X\odot X+Y\odot Y\right), \end{equation} where $\odot$ is a placeholder for the operator which the quantum operation acts on, and the measurement is in the $Y$ basis. Figure S1(b) shows the entanglement-assisted protocol. The Hadamard and controlled-NOT operators together create the entangled state $\ket{\Phi^+}=(\ket{00}+\ket{11})/\sqrt{2}$, and the final measurement is a controlled-NOT followed by $Y\otimes Z$, i.e., $Y$ on the probe qubit and $Z$ on the ancilla qubit.
Here we use the convention that tensor products are written in the order lower-upper. Figure S1(c) shows the second form of the circuit in Fig.~S1(b), in which the first controlled-NOT is moved through the rotation $U_{\phi}$, then moved through the depolarizing channel, combining the second controlled-NOT and then converting the channel to a two-qubit quantum operation \begin{equation} \mathcal{F} \odot= \left(1-\frac{3p}{4}\right)\mathds{1}\otimes \mathds{1}\odot \mathds{1}\otimes \mathds{1} + \frac{p}{4}\left(Z\otimes \mathds{1}\odot Z\otimes \mathds{1}+X\otimes X\odot X\otimes X+Y\otimes X\odot Y\otimes X\right). \end{equation} The final measurement is then of $Y\otimes Z$.
The effect of the single-qubit circuit on the state $\ket{+}$ is \begin{equation} \mathcal{E}\circ \mathcal{U}_{\phi} \left(\ketbra{+}{+}\right) = \left(1-p\right)U_{\phi}\ketbra{+}{+}U_{\phi}^{\dagger}+\frac{p}{2}\mathds{1}; \end{equation} i.e., the rotation is applied with probability $1-p$, and the qubit is mapped to the maximally mixed state with probability $p$.
The effect of the ancilla-assisted circuit on the state $\ket{+}\ket{0}$ is \begin{align} \mathcal{F}\circ \mathcal{U}_{\phi}\otimes \mathds{1} \left(\ket{+}\ket{0}\bra{0}\bra{+}\right) &=\left[\left(1-p\right)U_{\phi}\ketbra{+}{+}U_{\phi}^{\dagger}+\frac{p}{4}\mathds{1}\right]\otimes\ketbra{0}{0}+\frac{p}{4}\mathds{1}\otimes\ketbra{1}{1}\\ &=\left(1-\frac{p}{2}\right)\left[\left(1-q\right)U_{\phi}\ketbra{+}{+}U_{\phi}^{\dagger}+\frac{q}{2}\mathds{1}\right]\otimes\ketbra{0}{0}+\frac{p}{4}\mathds{1}\otimes\ketbra{1}{1}, \end{align} where \begin{equation} q=\frac{p/2}{1-p/2} \qquad \Longleftrightarrow \qquad 1-q=\frac{1-p}{1-p/2}. \end{equation}
\begin{figure}
\caption{(a) Circuit for the single-probe approach. $\mathcal{E}$ is a depolarizing channel. (b) Circuit for the entanglement-assisted protocol. (c) A different form of the circuit for the entanglement-assisted protocol for intuitive understanding why assisted entanglement helps against noise. $\mathcal{F}$ is a specific two-qubit operation. The lower wire in (b) and (c) is for the probe qubit and the upper wire in (b) and (c) is for the ancilla. The double wire on the right corresponds to a bit from a classical measurement. }
\end{figure}
Evidently, the form of $\mathcal{F}$ shows that $X$ and $Y$ errors map the main qubit to the maximally mixed state, wiping out the information about $\phi$. This happens just as for the single-qubit circuit, except that a record of when an $X$ or $Y$ error occurs is stored in the ancilla qubit. By monitoring the ancilla qubit, one can discard the random data that results from $X$ or $Y$ errors.
The upshot is that, with probability $1-p/2$, the entanglement-assisted quantum circuit works just like the single-qubit circuit. Compared to the single-qubit circuit, the entanglement-assisted quantum circuit achieves a successful rotation with probability $(1-p/2)(1-q)$, and with probability $(1-p/2)q/2$, maps to the maximally mixed state and with a record stored in the outcome $0$ of the ancilla qubit. As the single-qubit circuit achieves an estimator variance $1/(1-p)^2$, the entanglement-assisted circuit achieves an estimator variance \begin{equation} \frac{1}{1-p/2}\frac{1}{(1-q)^2} = \frac{1-p/2}{(1-p)^2}, \end{equation} which is smaller than $1/(1-p)^2$. That means assisted entanglement helps to achieve a smaller estimator variance compared to the single-probe approach. The term $1-p/2$ in the denominator of the first expression comes from the reduction in the number of trials when one discards the trials that give outcome $1$ on the ancilla qubit.
\section{State preparation}
We prepare single photons in polarization-spatial hyperentangled states for entanglement-assisted single-probe approach~\cite{JTP10,ELL10}. The source consists of a $\beta$-barium-borate (BBO) nonlinear crystal pumped by a CW diode laser, and polarization-degenerate photon pairs at $801.6$nm are generated by a type-I spontaneous parametric down-conversion (SPDC) process. The photon pairs have a coherence length of $L_\text{coh}=214.2\mu$m and spectral bandwidth $\Delta\lambda=3$nm.
Upon detection of a trigger photon, the signal photon is heralded in the measurement setup. This trigger-signal photon pair is registered by a coincidence count at two single-photon APDs with a $\Delta t=3$ns time window. Total coincidence counts are about $20,000$ over a collection time of $10$s. The probe is encoded in the horizontal $\ket{H}$ and vertical $\ket{V}$ polarizations of the heralded single photons.
After passing through a PBS followed by a HWP and a QWP, the single photons are prepared in an arbitrary single-qubit state. The longitudinal spatial modes $\ket{U}$ and $\ket{D}$ represent the basis states of the ancilla. A birefringent calcite BD, whose optical axis is cut so that horizontally polarized light is directly transmitted and vertical light undergoes a longitudinal displacement into a neighboring mode, acts as an effective controlled-NOT gate on the polarizations and the spatial modes and prepares the initial state into a polarization-spatial hyperentangled state $\alpha\ket{HU}+\beta\ket{VD}$ ($|\alpha|^2+|\beta|^2=1$ and $\alpha,\beta\neq 0$).
In contrast, for the entanglement-assisted two-probe approach, polarization-entangled photon pairs are used to prepare the four-qubit hyperentangled state. Similarly, entangled photons in $(\ket{HH}+\ket{VV})/\sqrt{2}$ are also generated via type-I SPDC. Two $\beta$-BBO crystals and a subsequent tilted HWP (H$_\text{C}$) placed right after two joint $\alpha$-BBO crystals are used to compensate the walk-off between photons with horizontal and vertical polarizations. Each photon passes through a BD and then a four-qubit polarization-spatial hyperentangled state $(\ket{HUHU}+\ket{VDVD})/\sqrt{2}$ is generated. Total coincidence counts are about $2,000$ over a collection time of $10$s.
\section{Accuracy of the noisy channel simulation}
To verify accuracy of the noisy channel simulation, we reconstruct the process matrices of the channels via two-qubit QPT~\cite{NC04,ABJ+03}. The action of a generic channel operating on a probe qubit is \begin{equation}
\mathcal{E}(\widetilde{\rho})=\sum_{n,m,n',m'=0}^3 \chi_{nmn'm'}(\Xi_n\otimes\Xi_m)\widetilde{\rho}(\Xi_{n'}\otimes\Xi_{m'}), \end{equation} where~$\chi_{nmn'm'}$ completely characterizes the process. To determine $\mathcal{E}$ we first choose some fixed states $\{\widetilde{\rho}\}$, which form a basis for the set of operators acting on the state space of the probe+ancilla system. Each state is then subject to the process $\mathcal{E}\otimes\mathds{1}$, and quantum state tomography is used to determine the output state $(\mathcal{E}\otimes\mathds{1}) \widetilde{\rho}$.
A total of sixteen initial states $\widetilde{\rho}_l$, $l=1,\dots,16$, and sixteen measurements on a two-qubit state of the probe+ancilla system are needed. These states are generated by PBS, BD and WPs. The HWP (H$_{\text{S}1}$), and QWP (Q$_{\text{S}1}$) are used to control the ratio and relative phase between the photons in the upper and lower modes, respectively, whereas H$_{\text{S}2}$ is used to control the ratio between the photons with different polarizations and Q$_{\text{S}2}$ is for the relative phase. Measurements are performed in the bases \begin{align}
\left\{\ket{H},\ket{V},\frac{\ket{H}-\text{i}\ket{V}}{\sqrt{2}},
\frac{\ket{H}+\ket{V}}{\sqrt{2}}\right\}\nonumber\\
\otimes\left\{\ket{U},\ket{D},\frac{\ket{U}-\text{i}\ket{D}}{\sqrt{2}},
\frac{\ket{U}+\ket{D}}{\sqrt{2}}\right\}. \end{align}
After reconstructing the process matrices, we use process fidelity in Eq.~(\ref{eq:fidelity}) to characterize the experimental realization of the noisy channels.
\section{Projective measurements for realizing phase estimation}
For entanglement-assisted single-probe approach, the optimal measurement strategy around $\phi\sim0$ is projecting the output state into four basis states: \begin{equation*} \left\{\frac{1}{\sqrt{2}}(\ket{HU}\pm \text{i}\ket{VD}),\ket{HD},\ket{VU}\right\}, \end{equation*} respectively. The projective measurements can be realized via a BD, a QWP, several HWPs and a PBS. A sandwich-type setup, i.e., HWP(at $45^\circ$)-BD-HWP(at $45^\circ$), separates the photons in the states $\ket{VU}$ and $\ket{HD}$ into the uppermost and lowest modes, and combines the photons in the states $\ket{HU}$ and $\ket{VD}$ into the middle mode. In the middle mode, a QWP at $0^\circ$ followed by a HWP at $22.5^\circ$ applies a rotation on the polarization states, i.e., \begin{equation*}
\frac{1}{\sqrt{2}}\left(\ket{H}-\text{i}\ket{V}\right)\longrightarrow\ket{H}, \text{ } \frac{1}{\sqrt{2}}\left(\ket{H}+\text{i}\ket{V}\right)\longrightarrow\ket{V}. \end{equation*} Finally the PBS projects the photons in the middle mode into two basis states $(\ket{HU}\pm \text{i}\ket{VD})/\sqrt{2}$. Coincidences between the outputs and the trigger are detected by APDs. The outcome probabilities of projecting the state in the basis $\left\{(\ket{HU}\pm \text{i}\ket{VD})/\sqrt{2},\ket{HD},\ket{VU}\right\}$ depend on the coincidences between two of APDs (D$_0$, D$_\text{R}$), (D$_0$, D$_\text{L}$), (D$_0$, D$_\text{H}$) and (D$_0$, D$_\text{V}$), respectively.
For entanglement-assisted two-probe approach, the optimal measurement strategy around $\phi\sim0$ is projecting the output state into sixteen basis states: \begin{align*} \Big\{&\frac{1}{\sqrt{2}}\big(\ket{HUHU}\pm\text{i}\ket{VDVD}\big),\ket{HUHD},\ket{HUVU},\ket{HUVD},\ket{HDHU},\ket{HDHD},\ket{HDVU},\\ &\ket{HDVD},\ket{VUHU},\ket{VUHD},\ket{VUVU},\ket{VUVD},\ket{VDHU},\ket{VDHD},\ket{VDVU}\Big\}, \end{align*} respectively. Similar to the entanglement-assisted single-probe approach, the projective measurements here are realized via BDs, WPs, NBSs, and a PBS. We use a multi-channel coincidence counting system that records all possible combinations of two-photon detection events occurring coincidentally across $12$ APDs (D$_1$,\dots,D$_{12}$). The outcome probabilities of projecting the state in the bases depends on the combinations of coincidences between pair of APDs (D$_1$,\dots,D$_{12}$). The corresponding relation is shown in Table S1.
\begin{table}[tb]
\small \centering \caption{The corresponding relations between the projective measurements and the combinations of coincidences between pairs of APDs. Here, `/' denotes `or', and `,' between `()' means `and'. For example, $\left(\text{D}_5 / \text{D}_6,\text{D}_9 / \text{D}_{10}\right),\left(\text{D}_7 / \text{D}_8,\text{D}_{11} / \text{D}_{12}\right)$ means that the outcome probability of projecting the state in the bases $\left(\ket{HUHU}+\text{i}\ket{VDVD}\right)/\sqrt{2}$ depends on the coincidences between pairs of APDs such as (D$_5$,D$_9$), (D$_5$,D$_{10}$), (D$_6$,D$_9$), (D$_6$,D$_{10}$), (D$_7$,D$_{11}$), (D$_7$,D$_{12}$), (D$_8$,D$_{11}$), and (D$_8$,D$_{12}$). The superscript `$*$' denotes that the probability of projective measurement depends on the doubled coincidences. That is because in some cases two photons happen to be in the same port of the NBS with half of the probability, which cannot be recorded in the experiment. Thus we need to double the coincidences for the remaining cases to represent the correct outcome probability of projective measurement.}
\begin{tabular}{c||c|c|c|c}
\hline\hline
Basis state & $\left(\ket{HUHU}+\text{i}\ket{VDVD}\right)/\sqrt{2}$ & $\ket{HUHD}$ & $\ket{HUVU}$ & $\ket{HUVD}$ \\
\hline
Coincidences & $\left(\text{D}_5 / \text{D}_6,\text{D}_9 / \text{D}_{10}\right),\left(\text{D}_7 / \text{D}_8,\text{D}_{11} / \text{D}_{12}\right)$ & $\left(\text{D}_9 / \text{D}_{10} / \text{D}_{11} / \text{D}_{12},\text{D}_4\right)$ & $\left(\text{D}_9 / \text{D}_{10} / \text{D}_{11} / \text{D}_{12},\text{D}_3\right)$ & $(\text{D}_9,\text{D}_{10})^*,(\text{D}_{11},\text{D}_{12})^*$ \\
\hline\hline
Basis state & $\ket{HDHU}$ & $\ket{HDHD}$ & $\ket{HDVU}$ & $\ket{HDVD}$ \\
\hline
Coincidences & $\left(\text{D}_2,\text{D}_5 / \text{D}_6 / \text{D}_7 / \text{D}_8\right)$ & $\left(\text{D}_2,\text{D}_4\right)$ & $\left(\text{D}_2,\text{D}_3\right)$ & $\left(\text{D}_2,\text{D}_9 / \text{D}_{10} / \text{D}_{11} / \text{D}_{12}\right)$ \\
\hline\hline
Basis state & $\ket{VUHU}$ & $\ket{VUHD}$ & $\ket{VUVU}$ & $\ket{VUVD}$ \\
\hline
Coincidences & $\left(\text{D}_1,\text{D}_5 / \text{D}_6 / \text{D}_7 / \text{D}_8\right)$ & $\left(\text{D}_1,\text{D}_4\right)$ & $\left(\text{D}_1,\text{D}_3\right)$ & $\left(\text{D}_1,\text{D}_9 / \text{D}_{10} / \text{D}_{11} / \text{D}_{12}\right)$ \\
\hline\hline
Basis state & $\left(\ket{HUHU}-\text{i}\ket{VDVD}\right)/\sqrt{2}$ & $\ket{VDHD}$ & $\ket{VDVU}$ & $\ket{VDHU}$ \\
\hline
Coincidences & $\left(\text{D}_5 / \text{D}_6,\text{D}_{11} / \text{D}_{12}\right),\left(\text{D}_7 / \text{D}_8,\text{D}_9 / \text{D}_{10}\right)$ & $\left(\text{D}_5 / \text{D}_6 / \text{D}_7 / \text{D}_8,\text{D}_4\right)$ & $\left(\text{D}_5 / \text{D}_6 / \text{D}_7 / \text{D}_8,\text{D}_3\right)$ & $(\text{D}_5,\text{D}_6)^*,(\text{D}_7,\text{D}_8)^*$ \\
\hline\hline \end{tabular} \end{table}
\end{widetext}
\end{document}
\begin{document}
\title{Some Results on incidence coloring, star arboricity and domination number} \begin{abstract} Two inequalities bridging the three isolated graph invariants, incidence chromatic number, star arboricity and domination number, were established. Consequently, we deduced an upper bound and a lower bound of the incidence chromatic number for all graphs. Using these bounds, we further reduced the upper bound of the incidence chromatic number of planar graphs and showed that cubic graphs with orders not divisible by four are not 4-incidence colorable. The incidence chromatic numbers of Cartesian product, join and union of graphs were also determined.
\end{abstract}
\section{Introduction}\label{sec-int}
An incidence coloring separates the whole graph into disjoint independent incidence sets. Since incidence coloring was introduced by Brualdi and Massey \cite{BR93}, most of the research was concentrated on determining the minimum number of independent incidence sets that can cover the graph, also known as the incidence chromatic number. The upper bound of the incidence chromatic number of planar graphs \citep{DO04}, cubic graphs \citep{MA05} and a lot of other classes of graphs were determined \citep{DO04, DO05, SH02, SUN08, WA02}. However, for general graphs, the best possible upper bound is an asymptotic one \citep{GU97}. Therefore, to find an alternative upper bound and lower bound of the incidence chromatic number for all graphs is the main objective of this paper.
In Section~\ref{sec-star}, we will establish a global upper bound for the incidence chromatic number in terms of chromatic index and star arboricity. This result reduces the upper bound of the incidence chromatic number of the planar graphs. Also, a global lower bound which involves the domination number will be introduced in Section~\ref{sec-dom}. Finally, the incidence chromatic number of graphs constructed from smaller graphs will be determined in Section~\ref{sec-product}.
All graphs in this paper are connected. Let $V(G)$ and $E(G)$ (or $V$ and $E$) be the vertex-set and edge-set of a graph $G$, respectively. Let the set of all neighbors of a vertex $u$ be
$N_G(u)$(or simply $N(u)$). Moreover, the degree $d_G(u)$(or simply $d(u)$) of $u$ is equal to $|N_G(u)|$ and the maximum degree of $G$ is denoted by $\Delta(G)$ (or simply $\Delta$). All notations not defined in this paper can be found in the books \cite{BO76} and \cite{WE01}.
Let $D(G)$ be a digraph induced from $G$ by splitting each edge $e(u,v) \in E(G)$ into two opposite arcs $uv$ and $vu$. According to \cite{SH02}, incidence coloring of $G$ is equivalent to the coloring of arcs of $D(G)$. Two distinct arcs $uv$ and $xy$ are {\it adjacent} provided one of the following holds:
\\ (1) $u=x$; \\ (2) $v=x$ or $y=u$.
Let $A(G)$ be the set of all arcs of $D(G)$. An {\it incidence coloring} of $G$ is a mapping $\sigma : A(G) \to C$, where $C$ is a {\it color-set}, such that adjacent arcs of $D(G)$ are assigned distinct colors. The {\it incidence chromatic number}, denoted by $\chi_i$, is the minimum cardinality of $C$ for which $\sigma : A(G) \to C$ is an {\it incidence coloring}. An {\it independent set} of arcs is a subset of $A(G)$ which consists of non-adjacent arcs.
\section{Incidence chromatic number and Star arboricity}\label{sec-star}
A {\it star forest} is a forest whose connected components are stars. The {\it star arboricity} of a graph $G$ (introduced by Akiyama and Kano \cite{AK85}), denoted by $st(G)$, is the smallest number of star forests whose union covers all edges of $G$.
We now establish a connection among the chromatic index, the star arboricity and the incidence chromatic number of a graph. This relation, together with the results by Hakimi et al. \cite{HA96}, provides a new upper bound of the incidence chromatic number of planar graphs, $k$-degenerate graphs and bipartite graphs.
\begin{thm}\label{thm-upper} Let $G$ be a graph. Then $\chi_i(G) \leq \chi'(G) + st(G)$, where $\chi'(G)$ is the chromatic index of $G$. \end{thm}
\noindent {\bf Proof: } We color all the arcs going into the center of a star by the same color. Thus, half of the arcs of a star forest can be colored by one color. Since $st(G)$ is the smallest number of star forests whose union covers all edges of $G$, half of the arcs of $G$ can be colored by $st(G)$ colors. The uncolored arcs now form a digraph which is an orientation of $G$. We color these arcs according to the edge coloring of $G$ and this is a proper incidence coloring because edge coloring is more restrictive. Hence $\chi'(G) + st(G)$ colors are sufficient to color all the incidences of $G$. \hspace*{\fill}$\Box$
We now obtain the following new upper bounds of the incidence chromatic numbers of planar graphs, a class of $k$-degenerate graphs and a class of bipartite graphs.
\begin{cor}\label{cor-plane} Let $G$ be a planar graph. Then $\chi_i(G) \leq \Delta + 5$ for $\Delta \neq 6$ and $\chi_i(G) \leq 12$ for $\Delta = 6$. \end{cor} \noindent {\bf Proof: } The bound is true for $\Delta \leq
5$, since Brualdi and Massey \cite{BR93} proved that $\chi_i(G) \leq 2\Delta$. Let $G$ be a planar graph with $\Delta
\geq 7$, we have $\chi'(G) = \Delta$ \citep{SA01,VI68}. Also, Hakimi et al. \cite{HA96} proved that $st(G) \leq 5$. Therefore, $\chi_i(G) \leq \Delta + 5$ by Theorem~\ref{thm-upper}. \hspace*{\fill}$\Box$
While we reduce the upper bound of the incidence chromatic number of planar graphs from $\Delta + 7$ \citep{DO04} to $\Delta +5$, Hosseini Dolama and Sopena \cite{DO05} reduced the bound to $\Delta + 2$ under the additional assumptions that $\Delta \geq 5$ and girth $g \geq 6$.
A {\it $k$-degenerate graph} $G$ is a graph with vertices ordered $v_1, v_2, \dots, v_n$ such that each $v_i$ has degree at most $k$ in the graph $G[v_1,v_2,\dots,v_i]$. A {\it restricted $k$-degenerate graph} is a $k$-degenerate graph in which the graph induced by $N(v_i) \cap \{v_1,v_2,\dots,v_{i-1}\}$ is complete for every $i$. It has been proved by Hosseini Dolama et al.~\cite{DO04} that $\chi_i(G) \leq \Delta + 2k -1$, where $G$ is a $k$-degenerate graph. We lower the bound for restricted $k$-degenerate graphs as follows.
\begin{cor}\label{cor-kdeg} Let $G$ be a restricted $k$-degenerate graph. Then $\chi_i(G) \leq \Delta + k +2$. \end{cor} \noindent {\bf Proof: } By Vizing's theorem, we have $\chi'(G) \leq \Delta +1$. Also, the star arboricity of a restricted $k$-degenerate graph $G$ is less than or equal to $k+ 1$ \citep{HA96}. Hence we have $\chi_i(G) \leq \Delta + k +2$ by Theorem~\ref{thm-upper}.
\begin{cor}\label{cor-bip} Let $B$ be a bipartite graph with at most one cycle. Then $\chi_i(B) \leq \Delta + 2$. \end{cor} \noindent {\bf Proof: } Hakimi et al. \cite{HA96} proved that $st(B) \leq 2$ where $B$ is a bipartite graph with at most one cycle. Also, it is well known that $\chi'(B) = \Delta$. These results together with Theorem~\ref{thm-upper} proved the corollary. \hspace*{\fill}$\Box$
\section{Incidence chromatic number and Domination number}\label{sec-dom} A {\it dominating set} $S \subseteq V(G)$ of a graph $G$ is a set where every vertex not in $S$ has a neighbor in $S$. The {\it domination number}, denoted by $\gamma(G)$, is the minimum cardinality of a dominating set in $G$.
A {\it maximal star forest} is a star forest with maximum number of edges. Let $G=(V,E)$ be a graph, the number of edges of a maximal star forest of $G$ is equal to $|V| - \gamma(G)$ \citep{FE02}. We now use the domination number to form a lower bound of the incidence chromatic number of a graph. The following proposition reformulates the ideas in \cite{AL89} and \cite{MA05}.
\begin{prop}\label{prop-dom} Let $G = (V,E)$ be a graph. Then $\displaystyle \chi_i(G) \geq
\frac{2|E|}{|V| - \gamma(G)}$.\end{prop} \noindent {\bf Proof: } Each edge of $G$ is divided into two arcs in opposite directions. The total number of arcs of $D(G)$ is therefore equal to $2|E|$. According to the definition of the adjacency of arcs, an independent set of arcs is a star forest. Thus, a maximal independent set of arcs is a maximal star forest. As a result, the number of color class required is at least $\displaystyle \frac{2|E|}{|V| - \gamma(G)}$. \hspace*{\fill}$\Box$
\begin{cor}\label{cor-dom} Let $G = (V,E)$ be an $r$-regular graph. Then $\displaystyle \chi_i(G) \geq \frac{r}{1 - \frac{\gamma(G)}{|V|}}$.
\end{cor} \noindent {\bf Proof: } By Handshaking lemma, we have $\displaystyle 2|E| = \sum_{v\in V}
d(v) = r|V|$, the result follows from Proposition~\ref{prop-dom}. \hspace*{\fill}$\Box$
Corollary~\ref{cor-dom} provides an alternative method to show that a cycle $C_n$, where $n$ is not divisible by 3, is not 3-incidence colorable. As $C_n$ is a 2-regular graph with $\displaystyle
\gamma(C_n) > \frac{|V(C_n)|}{3}$, we have $$\displaystyle \chi_i(C_n) \geq
\frac{2}{1-\frac{\gamma(C_n)}{|V(C_n)|}} > \frac{2}{1-\frac{1}{3}} = 3.$$
\begin{cor}\label{cor-nec} Let $G = (V,E)$ be an $r$-regular graph. Two necessary conditions for $\displaystyle \chi_i(G) = r +1$ (also for $\chi(G^2) = r +1$ \cite{SUN08}) are:\begin{enumerate} \item The number of vertices of $G$ is divisible by $r+1$. \item If $r$ is odd, then the chromatic index of $G$ is equal to $r$. \end{enumerate} \end{cor} \noindent {\bf Proof: } We prove 1 only, 2 was proved in \cite{SUN06}. By Corollary~\ref{cor-dom}, if $G$ is an $r$-regular graph and $\displaystyle
\chi_i(G) = r+1$, then $\displaystyle r+1 = \chi_i(G) \geq \frac{r|V|}{|V|-
\gamma(G)} \Rightarrow \frac{|V|}{r+1} \geq \gamma(G)$. Since the global lower bound of domination number is $\displaystyle \left\lceil \frac{|V|}{\Delta + 1}\right\rceil$, we conclude that the number of vertices of $G$ must be divisible by $r +1$. \hspace*{\fill}$\Box$
\section{Graphs Constructed from Smaller Graphs}\label{sec-product}
In this section, we determine the upper bound of the incidence chromatic number of union of graphs, Cartesian product of graphs and join of graphs, respectively. Also, these bounds can be attained by some classes of graphs \citep{SUN07}. Let the set of colors assigned to the arcs going into $u$ be $C^+_G(u)$. Similarly, $C^-_G(u)$ represents the set of colors assigned to the arcs going out from $u$.
We start by proving the following theorem about union of graphs.
\begin{thm}\label{thm-union} For all graphs $G_1$ and $G_2$, we have $\chi_i(G_1 \cup G_2) \leq \chi_i(G_1)+ \chi_i(G_2)$. \end{thm} \noindent {\bf Proof: } If some edge $e \in E(G_1) \cap E(G_2)$, then we delete it from either one of the edge sets. This process will not affect $I(G_1 \cup G_2)$, hence, we assume $E(G_1) \cap E(G_2) = \varnothing$. Let $\sigma$ be a $\chi_i(G_1)$-incidence coloring of $G_1$ and $\lambda$ be a $\chi_i(G_2)$-incidence coloring of $G_2$ using disjoint color sets. Then $\phi$ is a proper $(\chi_i(G_1) + \chi_i(G_2))$-incidence coloring of $G_1 \cup G_2$ with $\phi(uv) = \sigma(uv)$ when $e(uv) \in E(G_1)$ and $\phi(uv) = \lambda(uv)$ when $e(uv) \in E(G_2)$. \hspace*{\fill}$\Box$
The following example revealed that the upper bound given in Theorem~\ref{thm-union} is sharp.
\begin{ex} {\rm Let $n$ be an even integer and not divisible by 3. Let $G_1$ be a graph with $V(G_1) = \{u_1, u_2, \dots, u_n\}$ and
$E(G_1) = \{u_{2i-1}u_{2i}|1\leq i \leq \frac{n}{2}\}$. Furthermore, let $G_2$ be another graph with $V(G_2) = V(G_1)$ and
$E(G_2) = \{u_{2i}u_{2i+1}| 1\leq i \leq \frac{n}{2}\}$. Then, it is obvious that $\chi_i(G_1) = \chi_i(G_2) = 2$ and $G_1 \cup G_2 = C_n$ where $n$ is not divisible by 3. Therefore, $\chi_i(C_n) = 4 =\chi_i(G_1) + \chi_i(G_2)$.} \hspace*{\fill}$\Box$ \end{ex}
Next, we prove the theorem about the Cartesian product of graphs. The definition of the Cartesian product should be recalled beforehand.
\begin{thm}\label{thm-carte} For all graphs $G_1$ and $G_2$, we have $\chi_i(G_1 \Box G_2) \leq \chi_i(G_1)+ \chi_i(G_2)$. \end{thm}
\noindent {\bf Proof: } Let $|V(G_1)| =m$ and $|V(G_2)| =n$. $G_1 \Box G_2$ is a graph with $mn$ vertices and two types of edges: from conditions (1) and (2) respectively. The edges of type (1) form a graph consisting of $n$ disjoint copies of $G_1$, hence its incidence chromatic number is equal to $\chi_i(G_1)$. Likewise, the edges of type (2) form a graph with incidence chromatic number $\chi_i(G_2)$. Consequently, the graph $G_1 \Box G_2$ is equal to the union of the graphs from (1) and (2). By Theorem~\ref{thm-union}, we have $\chi_i(G_1 \Box G_2) \leq \chi_i(G_1)+ \chi_i(G_2)$. \hspace*{\fill}$\Box$
We demonstrate the upper bound given in Theorem~\ref{thm-carte} is sharp by the following example.
\begin{ex} {\rm Let $G_1 = G_2 = C_3$, then $G_1 \Box G_2$ is a 4-regular graph. If it is 5-incidence colorable, then its square has chromatic number equal to 5 \citep{SUN08}. However, all vertices in $G_1 \Box G_2$ are at distance at most 2 from one another. Therefore, $G_1 \Box G_2$ is not 5-incidence colorable and the bound derived in Theorem~\ref{thm-carte} is attained.
} \hspace*{\fill}$\Box$ \end{ex}
Finally, we consider the incidence chromatic number of the join of graphs.
\begin{thm}\label{thm-join} For all graphs $G_1$ and $G_2$ with $|V(G_1)| = m$, $|V(G_2)| = n$ and $m \geq n \geq 2$, we have $\chi_i(G_1 \vee G_2) \leq \min\{m + n, \max\{\chi_i(G_1), \chi_i(G_2)\} + m + 2\}$. \end{thm} \noindent {\bf Proof: } On the one hand, we have $\chi_i(G_1 \vee G_2) \leq m + n$. On the other hand, the disjoint graphs $G_1$ and $G_2$ can be colored by $\max\{\chi_i(G_1),\chi_i(G_2)\}$ colors, and all other arcs in between can be colored by $m+2$ new colors. Therefore, $\max\{\chi_i(G_1), \chi_i(G_2)\} + m + 2$ is also an upper bound for $\chi_i(G_1 \vee G_2)$. \hspace*{\fill}$\Box$
Similar to the previous practices, we utilize the following example to show that the upper bound in Theorem~\ref{thm-join} is sharp.
\begin{ex} {\rm Let $G_1 = K_m$ and $G_2 = K_n$. Then the upper bound $m+n$ is obtained since $G_1 \vee G_2 \cong K_{m+n}$. On the other hand, let $G_1$ be the null graph of order $m$ and $G_2$ be the null graph of order $n$. Then the other upper bound $\max\{\chi_i(G_1), \chi_i(G_2)\} + m + 2$ is attained because $G_1 \vee G_2 \cong K_{m,n}$.} \hspace*{\fill}$\Box$ \end{ex}
\end{document} |
\begin{document}
\mainmatter
\title{On recent advances in 2D Constrained Delaunay triangulation algorithms}
\titlerunning{Recent advances in 2D CDT algorithms}
\author{Pranav Kant Gaur \and S. K. Bose }
\authorrunning{P.K. Gaur and S.K. Bose}
\institute{Computer Division, Bhabha Atomic Research Centre\\ Mumbai, India \\ \mailsa \\ \mailsb }
\toctitle{Recent advances in 2D CDT algorithms} \tocauthor{Pranav Kant Gaur} \maketitle
\begin{abstract} In this article, recent works on 2D Constrained Delaunay triangulation(CDT) algorithms have been reported. Since the review of CDT algorithms presented by de Floriani(Issues on Machine Vision, Springer Vienna, pg. 95--104, 1989), different algorithms for construction and applications of CDT have appeared in literature each concerned with different aspects of computation and different suitabilities. Therefore, objective of this work is to report an update over that review article considering contemporary prominent algorithms and generalizations for the problem of two dimensional Constrained Delaunay triangulation. \keywords{triangulation, constrained Delaunay, algorithms, 2D} \end{abstract}
\section{Introduction}
Digital modelling and simulation of a natural phenomenon often requires discrete representation of the physical objects involved. Representation should be as close to the original object as possible and at the same time it must allow for a reasonably accurate simulation of the problem of interest. Delaunay triangulation satisfies the latter requirement\cite{shewchuk2002good}; however, being a convex hull algorithm it does not necessarily preserve the object boundaries. Constrained Delaunay triangulation on the other hand relaxes the empty-circle criterion thereby making it possible to fulfil the former requirement. Consequently, not every element of the resulting triangulation is Delaunay but boundary constraints are preserved. As \cite{shewchuk2002constrained} points out, CDTs provide an advantage of object boundary preservation at the cost of strict compliance of each element of the mesh with the Delaunay property; however, in general, it also results in a smaller number of additional points (called \textit{Steiner points}) added to satisfy both the Delaunay property for each element and preservation of the object boundary (a variant called \textit{Conforming constrained Delaunay triangulation}). \par
Constrained Delaunay triangulation finds application in Path planning\cite{kallmann2005path}, Terrain modelling\cite{silveira2009optimization}, Geographic information systems\cite{qi2013computing}, PCB design\cite{halama}, finite difference analysis\cite{mctaggart2004finite}, data-visualization\cite{Yang2009375} etc. CDT finds its application even in the field of parallel mesh generation as \cite{chew1997parallel} claims that using CDT as mesh generation approach results in reduction of communication cost and elimination of synchronization overheads as compared to other approaches used for mesh generation. \cite{devillers2003minimal} uses CDT to reconstruct a triangulation given its minimal set of edges.
\subsection{Motivation}
There has been a brief review of constrained Delaunay triangulation algorithms by \cite{Floriani1989} however, since then many algorithms have been reported with each focusing on different aspects of computation like parallel computation, IO efficiency and generalizations like non-Euclidean distance metrics, higher order Delaunay criterion etc. There have also been additions of new algorithms to the categories defined by \cite{Floriani1989} for classification of CDT algorithms. Therefore, objective of this work is to report updates over \cite{Floriani1989}, keeping as much of the taxonomic structure proposed by the original paper as possible and enhancing it wherever it is required.
\subsection{Paper outline}
The concept of Constrained Delaunay triangulation and its properties are introduced in Section 2. Prominent algorithms appearing after the work by \cite{Floriani1989} have been discussed in section 3. Section 4 discusses some interesting generalizations of the CDT problem to higher dimensions, different space metric and a generalization of Constrained Delaunay criteria. Section 5 concludes the paper.
\section{Basic definition and properties of CDT} Let \textit{X} be a planar straight line graph(PSLG), then CDT of \textit{X} is the union of all constrained Delaunay simplices, which in two dimensions, contains all segments and points of \textit{X}. A simplex is \textit{constrained Delaunay} if there is a corresponding circumcircle which encloses no point of \textit{X} that is \textit{visible} from the \textit{inside} of the simplex\cite{lee1986generalized}. A point is \textit{visible} from inside a simplex if there is no segment of \textit{X} which intersects a segment drawn between that point and a point in the simplex\cite{Shewchuk:1998:CGE:276884.276893}. As it can be noted, \textit{visibility} criterion relaxes the original \textit{empty-circumcircle} constraint imposed on Delaunay simplices to permit points on or inside the circumcircle of a simplex if they are guarded by a constraint segment against all points of that simplex. Figure \ref{cdtExplain} explains the constrained Delaunay criterion, where in Fig. \ref{cdtExplainA} highlights the case when an outside point(E) is allowed to be at the circumference since it is not visible to any of the constituent points(A, B and C) and Fig. \ref{cdtExplainB} shows the case when an external point(E) is \textit{visible} and hence the triangle ABC is \textit{not} constrained Delaunay.\par \begin{figure}
\caption{Satisfies CDT criterion}
\label{cdtExplainA}
\caption{Does not satisfy CDT criterion}
\label{cdtExplainB}
\caption{Constrained Delaunay criterion}
\label{cdtExplain}
\end{figure}
\subsection{Properties of CDT in 2D} Constrained Delaunay triangulation in two dimensions exhibits identical properties as that by \textit{unconstrained} Delaunay triangulation. For example, like Delaunay triangulation, CDT maximizes the minimum angle of every triangle in the mesh\cite{lee1986generalized}, it minimizes the maximum enclosing circle radius\cite{shewchuk2008general} among all possible constrained triangulations of a given PSLG. Delaunay triangulation and voronoi diagram are duals of each other, similarly, CDTs are duals of constrained(or Bounded) Voronoi diagrams\cite{Klein:1993:LRA:160985.161008}, \cite{Joe1993}, \cite{wang1995finding}.
\section{Proposed taxonomy of state-of-art in CDT algorithms} Constrained Delaunay triangulation algorithms proposed in the literature can be categorised mainly in two classes based on how they process the input PSLG. They either process it all at once to create the CDT, called the \textit{static} algorithms or they process the input points and constraint segments one at a time thereby imparting incremental nature to the solution, which we categorize as \textit{dynamic} algorithms.
\subsection{Static CDT algorithms} Initial work in the direction of development of algorithms for CDT was for simple polygons as inputs. A linear time randomized divide and conquer approach was proposed by \cite{Klein:1993:LRA:160985.161008} for computing \textit{constrained} Voronoi diagram of a simple polygon by merging Voronoi diagrams of sub-regions of the input, thereby using the duality it can be used for computing the corresponding CDT. Specifically, \cite{Klein:1993:LRA:160985.161008} proposed partitioning input polygon into a set of simpler polygons which are called \textit{pseudonormal histograms}(or PNHs). Solution to the original CDT problem is obtained after merging CDTs of these individual PNHs. Decomposition of a polygon into PNHs and merging CDTs of PNHs to make the solution, both stages have linear time complexity however the algorithm for computing CDT of individual PNH was proposed using a random algorithm with linear time expected complexity. \cite{chin1998finding} later improved over this work by proposing a linear-time \textit{deterministic} algorithm for computing CDT of individual PNHs.
\cite{lee1986generalized} proposed an $O(n \log n)$ average-case time complexity divide and conquer algorithm, where $n$ represents the number of points, for computing CDT (referred to as \textit{Generalized} Delaunay triangulation) of a given simple polygon by merging GDTs of its decomposition into a set of simpler polygons. \par
\subsubsection{Algorithms for general polygons}
\cite{lee1986generalized} also proposed an $O(|V|^2)$ time CDT algorithm for the case of general graphs (i.e., holes possible), where $|V|$ represents the number of points. Their algorithm is based on identifying the Delaunay edges incident on every point. It starts by first computing a set of points visible from each point and connecting them to form a structure called \textit{visibility graph}, followed by scanning of all edges incident on each point and removing all non-Delaunay edges unless it is a constraint edge. The resultant structure forms the CDT of the given set of points and edges.
\cite{Chew:1987:CDT:41958.41981} proposed an $O(n \log n)$ time static divide and conquer algorithm for computing CDT; it takes the complete PSLG as input at once and partitions the space into rectangular strips, computes CDT of individual strips and then combines neighboring strips to compute the CDT of the given PSLG.
\cite{agarwal2005efficient} proposed an IO-efficient CDT algorithm which they claimed experimentally to be able to process 10GBs of LIDAR data on 128MB RAM and within 7.5 hours. Their approach was to initially generate recursive subdivisions of the input point set (called \textit{gradation}), use an already existing internal memory algorithm to compute CDT of the leaf of that recursive subdivision and use this result to compute CDT of supersets progressively (i.e., following the gradation). However, their algorithm is limited to the cases where the number of constraint segments of the PSLG is in the order of the size of main memory. Specifically, for cases where $|S| \geq c_{o}M$, they claim the total number of IO operations to be $O(\frac{N}{B}\log_{\frac{M}{B}} {\frac{N}{B}})$. They also pose an open question on the existence of a randomized incremental CDT algorithm with $O(n \log_2 n)$ complexity which was later addressed in \cite{shewchuk2015fast}.
\subsection{Dynamic CDT algorithms} Dynamic CDT algorithms incrementally process input PSLG by inserting constraints one at time in the resultant mesh. Specifically, it inserts input points followed by insertion of constrained edges one at a time as opposed to the static algorithms which process the input all at once. Dynamic algorithms provide the practical flexibility of adding constraints on-demand. Dynamic CDT algorithms proposed thus far in the literature primarily differ in two stages of the algorithm, namely, the strategy for insertion of an input point and that for insertion of a constraint edge in the current triangulation.
\subsubsection{Point insertion strategy} \cite{de1988constrained} presents a dynamic algorithm for realizing multi-resolution surface representation using CDT\cite{de1992line} as its basis. It assumes triangulated PSLG as its input. Triangulation of higher resolution surface $T_{i+1}$ is obtained from that of lower resolution, $T_i$ by addition of points and constraint segments. For each new point, it identifies the set of triangles in $T_i$ which will have this point in their circumcircles, called the \textit{influence region} of that point and computes a polygon from outer boundaries of these triangles called \textit{influence polygon}. It then joins new point with all points of influence polygon, thereby re-meshing the interior of influence polygon with addition of this new point. Since only linear number of triangles are affected by insertion of a point, worst-case time complexity of point insertion algorithm is $O(n)$. \cite{Lu:1991:DCD:902954} presents an exactly similar point insertion approach but with different terminology.
\cite{ANGLADA1997215} proposes a point insertion algorithm derived from generalization of the approach proposed in \cite{Sloan:1987:FAC:22847.22852},\cite{lawson1977software}. It locates the triangle(say \textit{t}) which encloses the new point, \textit{p}. It then partitions \textit{t} into three triangles \textit{$t_1$},\textit{$t_2$}, \textit{$t_3$}. For each triangle, it determines if its neighbor triangle(sharing non-constraint edge) which does not share \textit{p} does not have \textit{p} in its circumcircle. If this neighbor triangle violates this condition then it is removed from the mesh and its neighbors are explored in-turn. This process continues until non of the triangles contain \textit{p} in its interior. Worst case complexity of this point insertion procedure is $O(n)$.
\cite{domiter2004constrained} proposes an approach based on \cite{vzalik2003incremental}. It performs point insertion in three stages, \textit{initialization}, \textit{triangulation} and \textit{finalization}. During initialization, an artificial triangle (also called \textit{super-triangle}) is constructed; it encloses all points of the input PSLG. This supertriangle is split by insertion of the first input point into three sub-triangles. Then during the triangulation stage, it uniformly subdivides the input region using a two-level uniform planar subdivision data structure (referred to as 2LUPS). This data structure partitions the input region in terms of cells, with uniform subdivision called the level one subdivision and adaptive subdivision inside a cell to adapt to non-uniform point density called the level two subdivision. Point insertion proceeds after this subdivision, in which, the point is first inserted into a cell of 2LUPS and then a closest point is searched within that cell. If the triangle incident on this found point also contains the point to be inserted then this triangle is divided; else the next closest point is searched and similar checks are repeated. Each sub-triangle is checked for the empty circle criterion. The last step in this algorithm is removing all triangles which share a vertex of the super-triangle. This algorithm works with $O(n^{1.1})$ average case complexity and reaches $O(n^{2})$ complexity in the worst case, where $n$ is the number of inserted points.
\cite{kallmann2004fully} first locates the input point; if it is present on an edge then the edge is split to contain this point, and if this point is found on a face then the face is split. So, point insertion primarily involves dealing with the case of inserting a point on an edge or inserting a point on a face. Since insertion of a new point may turn an edge non-Delaunay, edge flipping is used to restore the Delaunay property of all non-constrained segments. Insertion of one point may require $O(n)$ edge flips; however, for Delaunay triangulations with random input the expected number of edge flips is constant\cite{guibas1992randomized}.
\subsubsection{Constraint edge insertion strategy}
\cite{de1988constrained}, \cite{de1992line} employ a simple generalization of their algorithm for point insertion to the segment insertion problem. For each constraint segment they first identify the list of triangles that intersect this segment, which is called the \textit{influence region} of that segment, like that for points. From this list, the outer boundary of these triangles is identified, called the \textit{influence polygon}, using an $O(n)$ worst case time algorithm. The new segment \textit{t} to be inserted is a diagonal of this polygon. Therefore, we have two cavities separated by this constrained segment. This polygon is then triangulated (non-Delaunay) using an $O(|Q|^2)$ worst case time algorithm to fill this region, where $|Q|$ is the size of the influence polygon. After this re-triangulation, all edges inside this polygon are \textit{optimized} by enforcing the empty-circle criterion.
\cite{sloan1993fast} proposed a similar approach based on incremental insertion of edges which first computes Delaunay triangulation of the input point set using the approach described in \cite{Sloan:1987:FAC:22847.22852} followed by enforcing constraint segments into it and then \textit{optimizing} the triangulation by ensuring that all non-constraint edges in the resultant triangulation(i.e., the CDT) are Delaunay. It loops over every constraint edge, and for each edge, it finds all intersecting edges in the initial Delaunay triangulation. It then removes all intersecting edges and restores the Delaunay triangulation for non-constraint edges using \textit{triangle swapping}(implemented using edge-flipping). It then removes all superfluous triangles which are either present beyond input boundary or if they share a point with the supertriangle\cite{Sloan:1987:FAC:22847.22852} computed during Delaunay triangulation. They experimentally observe that the proposed algorithms roughly takes CPU time proportional to the number of points(i.e., $O(n)$) in PSLG.
The Triangle package employs an algorithm proposed by \cite{shewchuk1996triangle} for computing CDT dynamically. They achieve $O(n \log n)$ average-case time complexity, regardless of the distribution of points. It is similar in overall layout to \cite{sloan1993fast} in that it starts with an initial Delaunay triangulation of the input point set, followed by recovery of constraint edges in the final mesh by deleting the triangles each edge overlaps and \textit{re-triangulating} each side of the region thus formed.
\cite{ANGLADA1997215} proposed an improvement over \cite{de1992line} on the constraint edge insertion strategy. It follows the typical outline of incrementally inserting a constraint edge, identifying and removing intersecting triangles thereby forming two cavities and then re-triangulating the cavities separately. However, the difference from \cite{de1992line} lies in the way they re-triangulate the cavities. They use a recursive algorithm for triangulating the upper($P_{u}$) and lower($P_{l}$) polygons (or cavities). In a polygon (say $P_{u}$), this algorithm identifies a point $c$ (say, $c = s_l$) with respect to the constraint edge (say \textit{ab}) such that $\Delta abc$ satisfies the empty circumcircle property. $\Delta abc$ divides $P_{u}$ into two subregions $P_E = \{a, s_{1}, s_{2}, ...s_{l}\}$ and $P_D = \{s_l, ..., s_n, b\}$. Then this algorithm is recursively applied on these sub-regions with respect to edges $ac$ and $bc$. This approach ensures preservation of $ab$ in the final mesh after both $P_u$ and $P_l$ are triangulated. Their algorithm has $O(n^2)$ worst-case time complexity. The approaches proposed by \cite{shewchuk1996triangle} and \cite{ANGLADA1997215} have become established as a general framework for constraint segment insertion approaches; \cite{kallmann2004fully}, \cite{domiter2004constrained} and many other works have utilized these algorithms.
\cite{shewchuk2015fast} uses a similar segment insertion algorithm framework as in \cite{ANGLADA1997215}, but proposes a new randomized cavity re-triangulation algorithm, with which the resultant segment insertion algorithm has time complexity linear in the number of edges crossed by the constraint segment. \cite{shewchuk2015fast} claims that their algorithm can deal with non-convex cavities, dangling segments inside a cavity and cavities with self-intersections, which was not possible with the CDT algorithm proposed by \cite{Chew:1987:CDT:41958.41981}. They derived an $O(n \log n + n \log^{2} k)$ tight bound on average case time complexity using the results from \cite{agarwal2005efficient}.
\subsection{Parallel CDT algorithms for PSLG}
Parallelization often requires domain decomposition, and in context of computing CDT of input PSLG, we need to partition the space of points in PSLG. In that direction, \cite{chernikov2008algorithm} proposes a quality Delaunay mesh generation algorithm which uses constrained segments to separate sub-domains. Assuming such a domain decomposition, their algorithm utilizes the fact that if each sub-domain has Delaunay conforming mesh then resultant global mesh will also be constrained Delaunay. However, ensuring Delaunay property in each sub-domain may require subdivision of some of its elements, therefore this algorithm may result in subdivision of constrained segments itself in which case a SPLIT message is signaled to the adjacent sub-domain(i.e., a thread or processor) to maintain consistency of the state of constrained segments across sub-domains. Their comparative evaluation on a single node with the algorithm proposed by \cite{shewchuk1996triangle} suggests comparable running times.
Recently, GPUs have found many applications beyond graphics computing and some of their variants have now come to be referred to as General purpose GPUs (or GPGPUs). \cite{qi2013computing} proposes a CDT algorithm using the parallelization advantages available in GPUs. They first construct a triangulation of the points in the input PSLG, then constraint edges are inserted using \textit{edge-flipping} which results in a constrained triangulation which is then transformed into the corresponding Constrained Delaunay triangulation, again using the edge-flipping approach. They use NVIDIA CUDA-enabled GPUs to achieve a claimed speedup of more than 10x over the CDT implementation in the Triangle package. However, performance of their approach degrades when the input dataset is skewed.
\section{Generalizations} In context of the usual distance metric for CDT problem, \cite{vigo2000computing} have proposed a generalization to the isotropic nature of triangles generated in conventional CDT algorithms, they propose \textit{directional} CDT, in which if each input point has an associated deformed ellipse which represents curvature of the surface, triangles in the resulting mesh with their shapes adapted to that curvature information can be generated. Such modelling problems arise in mesh generation over parametric surfaces. Their work required generalizing CDT problem from conventional Euclidean distance metric to the elliptical space. Therefore, effectively the circumcircle in the conventional problem becomes a circumellipse in this generalization. Similarly, in this transformed space Delaunay criterion becomes the empty circumellipse criterion.
Addressing the possibility of CDT in higher dimensions, \cite{Shewchuk:1998:CGE:276884.276893} proposes a sufficient condition for the existence of CDT in dimensions higher than two. CDTs have not been generalized beyond two dimensions because of the existence of many singular non-triangulable structures, for example the Sch\"{o}nhardt polyhedron\cite{schonhardt1928zerlegung}. However, if we can transform our input PSLG to another topologically equivalent input following Shewchuk's theorem, a CDT exists. It states that a \textit{d-dimensional} input PSLG \textit{X} has a CDT if each of its \textit{k-dimensional} constraining facets is a union of k-dimensional \textit{strongly} Delaunay simplices($k \leq d-2$), where a simplex is strongly Delaunay if there exists a circumsphere for its points which does not contain any other point inside or on its surface (hence a \textit{stronger} version of the Delaunay criterion).
\cite{gudmundsson2002higher} relaxes the empty circle criterion to include \textit{at most $k$} points inside the circle, in which case the triangle is called a k-order triangle. Such a generalization is useful in optimizing criteria other than the characteristic criteria of Delaunay triangulation, for example minimizing the number of local minima (or maxima), which are useful for dealing with the artificial dam problem and interrupted ridge lines in elevation models respectively. \cite{gudmundsson2005constrained} further generalizes this concept from Higher order Delaunay triangulations (HODs) to constrained Higher order Delaunay triangulations. In the same direction, \cite{silveira2009towards} proposes definitions of higher order CDT in an attempt to combine the concept of higher order Delaunay triangulation and CDT. These definitions deal with the way of defining the \textit{order} of a triangle. Specifically, it defines different cases for how the number of points inside the circumcircle of a triangle should be counted, the choice of which depends on the application.
\iffalse \section{Summary}
Here is the summary: \textbf{COMPLEXITY FOR ALL ABOVE DESCRIBED ALGORITHMS.}
\begin{table}[h!]
\centering
\begin{tabular}{||c|c|c|c|c|c||}
\hline
Algorithm class & Input assumptions & Merits & Demerits & Time complexity & Space complexity \\
\hline
\hline
Fast CDT & & & & & \\
Dynamic CDT & & & & & \\
Incremental CDT & & & & & \\
Sweep CDT & & & & & \\
Gift-wrapping & & & & & \\
GPU based CDT & & & & & \\
Multi-CPU based CDT & & & & & \\
IO efficient CDT & & & & & \\
Directional CDT & & & & & \\
\hline
\hline
\end{tabular}
\caption{Summary of CDT algorithms}
\label{tab:my_label}
\end{table} \fi \section{Conclusion} We have observed that in the class of static CDT algorithms, initial work has been in the direction of CDT algorithms for simple polygons and some works have extended the problem domain to general graph(i.e., allowing possibility of holes). Divide and conquer strategy has been a dominant design approach for this class of problems.
Since the review article \cite{Floriani1989}, dynamic CDT algorithms have seen higher growth, which seems to be due to their higher practicability and flexibility as compared to static algorithms which require the complete input all at once. However, since that period the average-case complexity has still stayed around $O(n \log n)$. Almost all reviewed dynamic CDT algorithms use an identical segment insertion algorithm framework but they differ in their strategies for triangulation of the polygons (or cavities) created after removal of triangles intersecting the constraint edge.
Algorithms with focus over IO-efficiency since then have been proposed which are able to handle PSLG's of the size of 10GBs. In present time, such algorithms become very crucial when the data generated by even personal computing devices is of this order.
Parallel algorithms have been proposed for multi-CPU and recently popular GPGPUs which have facilitated orders of magnitude speedup over equivalent sequential algorithms.
Further, almost throughout this period, the work by Dr. Shewchuk has formally established properties of constrained Delaunay triangulation \cite{shewchuk2008general}, defined an existence criterion for higher dimensions\cite{Shewchuk:1998:CGE:276884.276893} and facilitated a robust 2D CDT code in Triangle\cite{shewchuk1996triangle}. In addition, a recent work of his group on the CDT problem \cite{shewchuk2015fast} has attempted answering questions posed by \cite{agarwal2005efficient} regarding the existence of a randomized incremental CDT algorithm with $O(n \log n)$ time complexity.
Efforts have also been made to generalize the concept of constrained Delaunay triangulation beyond two dimensions. In order to deal with parametric surface meshes, the setting of the CDT problem has been extended beyond the conventional Euclidean distance metric to deal with an elliptical distance metric. Work by \cite{gudmundsson2005constrained} explored generalization of the empty circumsphere property of Delaunay elements itself as they have dealt with the case of allowing multiple points inside the circumsphere and proposed various definitions of Higher order Constrained Delaunay triangulation.
\end{document} |
\begin{document}
\title[Ramified Heegner points]{On Heegner Points for primes of additive reduction ramifying in the base field}
\author{Daniel Kohen} \address{Departamento de Matem\'atica, Facultad de Ciencias Exactas y Naturales, Universidad de Buenos Aires and IMAS, CONICET, Argentina} \email{dkohen@dm.uba.ar} \thanks{DK was partially supported by a CONICET doctoral fellowship}
\author{Ariel Pacetti} \address{Departamento de Matem\'atica, Facultad de Ciencias Exactas y Naturales, Universidad de Buenos Aires and IMAS, CONICET, Argentina} \email{apacetti@dm.uba.ar} \thanks{AP was partially supported by CONICET PIP 2010-2012 11220090100801, ANPCyT PICT-2013-0294 and UBACyT 2014-2017-20020130100143BA}
\address{University of Warwick, Coventry, UK} \email{m.masdeu@warwick.ac.uk} \thanks{MM was supported by EU H2020-MSCA-IF-655662}
\dedicatory{with an Appendix by Marc Masdeu}
\keywords{Heegner points} \subjclass[2010]{Primary: 11G05, Secondary: 11G40} \begin{abstract}
Let $E$ be a rational elliptic curve, and $K$ be an imaginary
quadratic field. In this article we give a method to construct
Heegner points when $E$ has a prime bigger than $3$ of additive
reduction ramifying in the field $K$. The ideas apply to more
general contexts, like constructing Darmon points attached to real
quadratic fields which is presented in the appendix. \end{abstract} \maketitle
\section*{Introduction}
Heegner points play a crucial role in our current understanding of the Birch and Swinnerton-Dyer conjecture, and are the only instances where non-torsion points can be constructed in a systematic way for elliptic curves over totally real fields (assuming some still unproven modularity hypotheses). Although Heegner points were heavily studied for many years, most applications work under the so-called ``Heegner hypothesis'' which gives a sufficient condition for an explicit construction to hold. In general, if $E$ is an elliptic curve over a number field $F$ and $K/F$ is any quadratic extension, the following should be true.
\noindent{\bf Conjecture:} If $\sign(E,K)=-1$, then there is a non-trivial Heegner system attached to $(E,K)$.
This is stated as Conjecture 3.16 in \cite{Dar04}. When $F=\QQ$, $E$ is an elliptic curve of square-free conductor $N$ and $K$ is an imaginary quadratic field whose discriminant is prime to $N$, the conjecture is proven in Darmon's book (\cite{Dar04}) using both the modular curve $X_0(N)$ and other Shimura curves. The hypotheses on $N$ and $K$ were relaxed by Zhang in \cite{ZH}, who proved the conjecture under the assumption that if a prime $p$ ramifies in $K$ then $p^2 \nmid N$.
When the curve is not semistable at some prime $p$ the situation is quite more delicate. An interesting phenomenon is that in this situation, the local root number at $p$ has no relation with the factorization of $p$ in $K$. Still the problem has a positive answer in full generality, due to the recent results of \cite{yuan2013gross}, where instead of working with the classical group $\Gamma_0(N)$, they deal with more general arithmetic groups. The purpose of this article is to give ``explicit'' constructions of Heegner points for pairs $(E,K)$ as above. Here by explicit we mean that we can compute numerically the theoretical points in the corresponding ring class field, which restricts us to working only with unramified quaternion algebras (since the modular parametrization is hard to compute for Shimura curves). For computational simplicity we will also restrict the base field to the field of rational numbers.
Let $\chi:K^\times \backslash K^\times_{\AA}\to \CC^\times$ be a finite order anticyclotomic Hecke character, and $\eta$ be the character corresponding to the quadratic extension $K/\QQ$. In order to construct a Heegner point attached to $\chi$ in a matrix algebra, for each prime number $p$ the following condition must hold \[ \epsilon(\pi_p,\chi_p)=\chi_p(-1) \eta_p(-1), \] where $\pi$ is the automorphic representation attached to $E$, and $\epsilon(\pi_p,\chi_p)$ is the local root number of $L(s,\pi,\chi)$ (see \cite[Section 1.3.2]{yuan2013gross}). If we impose the extra condition $\gcd(\cond(\chi),N\cond(\eta))=1$, then at primes dividing the conductor of $E/K$ the equation becomes \[ \varepsilon_p(E/K)=\eta_p(-1), \] where $\varepsilon_p(E/K)$ is the local root number at $p$ of the base change of $E$ to $K$ (it is equal to $\varepsilon_p(E) \varepsilon_p(E\otimes \eta)$). This root number is easy to compute if $p \neq 2,3$ (see \cite{Pacetti}): \begin{itemize} \item If $p$ is unramified in $K$, then $\eta_p(-1)=1$ and \[ \varepsilon_p(E/K)=\begin{cases} 1 & \text{ if } v_p(N)=0,\\ \kro{p}{\Disc(K)} & \text{ if } v_p(N)=1,\\ 1 & \text{ if } v_p(N)=2, \end{cases} \] where $v_p(N)$ denotes the valuation of $N$ at $p$. \item If $p$ is ramified in $K$ then $\eta_p(-1)=\kro{-1}{p}$ and \[ \varepsilon_p(E/K)= \kro{-1}{p} \cdot \begin{cases} 1 & \text{ if } v_p(N)=0 ,\\ \varepsilon_p(E) & \text{ if } v_p(N)=1 ,\\ \varepsilon_p(E_p) & \text{ if } v_p(N_{E_p})=1,\\ 1 & \text{ if }E \text{ is P.S.} ,\\ -1 & \text{ if } E \text{ is S.C.},\\ \end{cases} \] where $E_p$ denotes the quadratic twist of $E$ by the unique quadratic extension of $\QQ$ unramified outside $p$; $E$ is P.S. if the attached automorphic representation is a ramified principal series (which is equivalent to the condition that $E$ acquires good reduction over an abelian extension of $\QQ_p$) and $E$ is S.C. 
if the attached automorphic representation is supercuspidal at $p$ (which is equivalent to the condition that $E$ acquires good reduction over a non-abelian extension). \end{itemize}
Let $E/\QQ$ be an elliptic curve. We call it Steinberg at a prime $p$ if $E$ has multiplicative reduction at $p$ (and denote it by St.). In Table~\ref{table:signs} we summarize the above equations for $p \neq 2,3$, where the sign corresponds to the product $\varepsilon_p(E/K) \eta_p(-1)$. \begin{table}[h]
\begin{tabular}{l|r|r|r} & $p$ is inert & $p$ splits & $p$ ramifies\\ \hline St & \cellcolor[gray]{0.9} $-1$ & \cellcolor[gray]{0.9} $1$ & \cellcolor[gray]{0.9}$\varepsilon_p(E)$\\ \hline St $\otimes \chi_p$ &\cellcolor[gray]{0.7} $1$ & \cellcolor[gray]{0.9}$1$ & $\varepsilon_p(E_p)$\\ \hline P.S. &\cellcolor[gray]{0.7} $1$ &\cellcolor[gray]{0.9} $1$ & $1$\\ \hline Sc. &\cellcolor[gray]{0.7} $1$ &\cellcolor[gray]{0.9} $1$ & $-1$ \end{tabular} \caption{Signs Table} \label{table:signs} \end{table}
Our goal is to give an explicit construction in all cases where the local sign of Table~\ref{table:signs} equals $+1$. The cells colored in light grey correspond to the classical construction, and the ones colored with dark grey are considered in the article \cite{Kohen}. In the present article we will consider the following cases: \begin{itemize} \item $E$ has additive but potentially multiplicative reduction, and $\varepsilon_{p}(E_{p})=+1$. \item $E$ has additive but potentially good reduction over an abelian extension. \end{itemize}
\begin{rem*}
The situation for $p=2$ and $p=3$ is more delicate, although most
cases can be solved with the same ideas. For the rest of this
article we assume $p>3$. \end{rem*} The strategy is to build an abelian variety related to $E/K$ (in general of dimension greater than $1$) and use a classical Heegner construction on such variety so that we can transfer the Heegner points back to our original elliptic curve. To clarify the exposition, we start assuming that there is only one prime $p$ ramifying in $K$ where our curve has additive reduction, and every other prime $q$ dividing $N$ is split in $K$. The geometric object we consider is the following:
\begin{itemize} \item If $E$ has potentially multiplicative reduction, we consider the
elliptic curve $E_p$ of conductor $N/p$ which is the quadratic twist
of $E$ by the unique quadratic character ramified only at $p$. \item If $E$ has potentially good reduction over an abelian extension, then we consider an
abelian surface of conductor $N/p$, which is attached to a pair
$(g,\bar{g})$, where $g$ is the newform of level $N/p$ corresponding
to a twist of the weight $2$ modular form $E_f$ attached to $E$. \end{itemize} In both cases the classical Heegner hypothesis is satisfied
(eventually for dimension greater than one), and the resulting abelian varieties are
isogenous to our curve or to a product of the curve with itself over some field extension. This isogeny is the key to relate the classical construction to the new cases considered. Each case has a different construction/proof (so they will be treated separately), but both follow the same idea. In all cases considered we will construct points on $(E(H_{c}) \otimes \CC)^{\chi}$. These points will be non-torsion if and only if $L'(E/K, \chi ,1) \neq 0$ as expected by the results of Gross-Zagier \cite{GZ} and Zhang \cite{ZH}.
Our construction is interesting on its own, and can be used to move from a delicate situation to a not so bad one (reducing the conductor of the curve at the cost of adding a character in some cases). So, although we focus on classical modular curves, the methods of this article can be easily applied to a wide variety of contexts, for example more general Shimura curves.
In recent years, following a breakthrough idea of Darmon there has been a lot of work in the direction of defining and computing $p$-adic Darmon points, which are points defined over certain ring class fields of real quadratic extensions using $p$-adic methods. For references to this circle of ideas the reader can consult \cite{Dar04}, \cite{darmon2001integration}, \cite{bertolini2009rationality}, \cite{MR2289868}.
These constructions are mostly conjectural (but see \cite{bertolini2009rationality}), and there has been a lot of effort to explicitly compute $p$-adic approximations to these points in order to
gather numerical evidence supporting these conjectures. The interested reader might consult \cite{darmon2006efficient}, \cite{MR2510743}, \cite{guitart2015elementary},
\cite{MR3384519}, \cite{MR3418066}.
In order to illustrate the decoupling of our techniques from the algebraic origin of the points, in an appendix by Marc Masdeu it is shown how these can be applied to the computation of $p$-adic Darmon points.
The article is organized as follows: in the first section we treat the case of a curve having potentially multiplicative reduction, and prove the main result in such case. In the second section we prove our main result in the case that we have potentially good reduction over an abelian extension. In the third section, we explain how to extend the result to general conductors and in the fourth section we finish the article with some explicit examples in the modular curves setting, including Cartan non-split curves, as in \cite{Kohen}. Lastly, we include the aforementioned appendix.
\noindent {\bf Acknowledgments:} We would like to thank Henri Darmon for many comments and suggestions regarding the present article and Marc Masdeu for his great help and contributions to this project. We would also like to thank the Special Semester ``Computational aspects of the Langlands program'' held at ICERM for providing a great atmosphere for working on this subject. Finally, we would like to thank the referee for the useful remarks.
\section{The potentially multiplicative case} \label{sec:potmult} Let $E/\QQ$ be an elliptic curve of conductor $p^2 \cdot m$ where $p$ is an odd prime and $\gcd(p,m)=1$. Suppose that $E$ has potentially multiplicative reduction at the prime $p$. Let $K$ be any imaginary quadratic field satisfying the Heegner hypothesis at all the primes dividing $m$ and such that $p$ is ramified in $K$. Let $p^{\ast}=\kro{-1}{p}p$ and let $E_p$ be the quadratic twist of $E$ by $\QQ(\sqrt{p^\ast})$. We have an isomorphism $\phi:E_p \to E$ defined over $\QQ(\sqrt{p^\ast})$. The elliptic curve $E_p$ has conductor $p \cdot m$ and $\sign(E,K)=\sign(E_p,K)\varepsilon_p(E_p)$.
Recall that to have explicit constructions, we need to work with a matrix algebra so we impose the condition $\varepsilon_p(E_p)=1$ (see Table \ref{table:signs}). Then, $\sign(E,K)=\sign(E_p,K)=-1$ and the pair $(E_{p},K)$ satisfies the Heegner condition. Therefore, we can find Heegner points on $E_{p}$ and map them to $E$ via $\phi$. More precisely, let $c$ be a positive integer relatively prime to $N \cdot \Disc(K)$ and let $H_{c}$ be the ring class field associated to the order of conductor $c$ in the ring of integers of $K$. Let $\chi : \Gal(H_{c}/K) \rightarrow \CC^{\times}$ be any character and let $\chi_{p}$ be the quadratic character associated to $\QQ(\sqrt{p^\ast})$ via class field theory. Take a Heegner point $P_{c} \in E_p(H_c)$ and consider the point
\[P^{\chi\chi_{p}}_{c}= \sum_{\sigma \in \Gal(H_{c}/K)} \bar{\chi}\bar{\chi_{p}}(\sigma) P_{c}^{\sigma} \in (E_p(H_{c})\otimes \CC)^{\chi \chi_{p}}.\]
\begin{thm} The point $\phi(P^{\chi\chi_{p}}_{c})$ belongs to $(E(H_{c}) \otimes \CC)^{\chi}$ and it is non-torsion if and only if $L'(E/K,\chi,1) \neq 0$. \end{thm} \begin{proof} The key point is that since $p\mid \Disc(K)$, $\QQ(\sqrt{p^\ast}) \subset H_c$ (by genus theory). For $\sigma \in \Gal(\bar{\QQ}/\QQ)$, we have $\phi^{\sigma}= \chi_{p}(\sigma)\phi$, hence, \[ \phi(P^{\chi \chi_{p}}_{c})=\sum_\sigma \bar{\chi}(\sigma) \phi(P_{c})^{\sigma} \in (E(H_{c}) \otimes \CC)^{\chi}. \] Finally note that by the Theorems of Gross-Zagier \cite{GZ} and Zhang \cite{ZH} the point $P^{\chi\chi_{p}}_{c}$ is non-torsion if and only if $L'(E_{p} /K,\chi \chi_{p},1)= L'(E/K,\chi,1) \neq 0$. Since $\phi$ is an isomorphism the result follows.
\end{proof}
\section{The potentially good case (over an abelian extension)} Let $E/\QQ$ be an elliptic curve of conductor $p^2 \cdot m$ where $p$ is an odd prime and $\gcd(p,m)=1$. For simplicity assume that $E$ does not have complex multiplication. We recall some generalities on elliptic curves with additive but potentially good reduction over an abelian extension. Although such results can be stated and explained using the theory of elliptic curves, we believe that a representation theoretical approach is more general and clear. Let $f_E$ denote the weight $2$ newform corresponding to $E$.
Let $W(\QQ_p)$ be the Weil group of $\QQ_p$, and $\omega_1$ be the unramified quasi-character giving the action of $W(\QQ_p)$ on the roots of unity. Using the normalization given by Carayol (\cite{Carayol}), at the prime $p$ the Weil-Deligne representation corresponds to a principal series representation on the automorphic side and to a representation \[ \rho_p(f)=\psi \oplus \psi^{-1} \omega_1^{-1}, \] on the Galois side for some quasi-character $\psi:W(\QQ_p)^{ab} \to \CC^\times$. Note that since the trace lies in $\QQ$, $\psi$ satisfies a quadratic relation, hence its image lies in a quadratic field contained in a cyclotomic extension (since $\psi$ has finite order). This gives the following possibilities for the order of inertia of $\psi$: $1$, $2$, $3$, $4$ or $6$. \begin{itemize} \item Clearly $\psi$ cannot have order $1$ (since otherwise the representation is unramified at $p$). \item If $\psi$ has order $2$, $\psi$ must be the (unique) quadratic character ramified at $p$. Then $E$ is the twist of an unramified principal series, i.e., $E_p$ has good reduction at $p$. \item If $\psi$ has order $3$, $4$ or $6$, there exists a newform $g \in S_2(\Gamma_0(p \cdot m),\varepsilon)$, where $\varepsilon = \psi^{-2}$, such that $f_E = g \otimes \psi$. In particular $\varepsilon$ has
always order $2$ or $3$. \end{itemize} In the last case, the form has inner twists, since the Fourier coefficients satisfy that $\overline{a_p} = a_p \varepsilon^{-1}(p)$ (see for example \cite[Proposition $3.2$]{RibetMF1V}).
\begin{rem}
The newform $g$ can be taken to be the same for $E$ and
$E_p$. \label{rem:order3} \end{rem}
\subsection{The case $\psi$ has order $2$.} This case is very similar to the one treated in the previous section. The curve $E_p$ has good reduction at $p$, and is isomorphic via $\phi$ to $E$. It is quite easy to see that under these conditions $\sign(E,K)=\sign(E_p,K)=-1$. Exactly as before we can construct Heegner points on $E_p$ and transfer them to $E$.
\subsection{The case $\psi$ has order $3$, $4$ or $6$.}\label{subsection:346} Let $d$ be the order of $\psi$. Let ${g \in S_2(\Gamma_0(p \cdot m),\varepsilon)}$ as before. Suppose its $q$-expansion at the infinity cusp is given by $g=\sum a_{n}q^{n}$. Following \cite{Ribet}, we define the coefficient field $K_{g}:=\QQ(\left\{a_n \right\})$.
\begin{rem}
$K_g$ is an imaginary quadratic field
generated by the values of $\psi$. It is equal to $\QQ(i)$ if $d=4$ and
to $\QQ(\sqrt{-3})$ if $d=3$ or $d=6$. \end{rem}
There is an abelian variety $A_{g}$ defined over $\QQ$ attached to $g$ via the Eichler-Shimura construction, with an action of $K_g$ on it, i.e. there is an embedding $\theta:K_g \hookrightarrow (\End_\QQ(A_g)\otimes \QQ)$. The variety $A_g$ can be defined as the quotient $J_{1}(p \cdot m)/ I_{g} J_{1}(p \cdot m)$ where $I_{g}$ is the annihilator of $g$ under the Hecke algebra acting on the Jacobian. Moreover, the L-series of $A_g$ satisfies the relation \[ L(A_g/\QQ,s)=L(g,s)L(\overline{g},s). \] The variety $A_{g}$ has dimension $[K_{g}:\QQ]=2$ and is $\QQ$-simple. However, it is not absolutely simple. The variety $A_{g}$ is isogenous over $\overline{\QQ}$ to the square of an elliptic curve (called a building block for $A_{g}$, see \cite{GL} for the general theory).
Under our hypotheses we have an explicit description. Let $L= \overline{\QQ}^{\ker(\varepsilon)}$ (which is the splitting field of $A_g$). It is a cubic extension if $d=3,6$ (and in particular $p \equiv 1 \pmod{3}$) and the quadratic extension $\QQ(\sqrt{p})$ if $d=4$ (which implies $p \equiv 1 \pmod 4$). Let $M$ be the extension $\overline{\QQ}^{\ker(\psi)}$.
\begin{prop} \begin{itemize} \item There exists an elliptic curve $\tilde{E}/L$ and an isogeny,
defined over $L$, $\omega: A_{g} \rightarrow
\tilde{E}^2$. Furthermore, if $d=3$ $($resp. $d=6)$
$\tilde{E}=E$ $($resp. $\tilde{E}=E_p)$ while if $d=4$,
$\tilde{E}$ is the quadratic twist of
$E/\QQ(\sqrt{p})$ by the unique quadratic extension unramified outside
$p$. \item In any case, there exists an isogeny $\varphi: A_{g} \rightarrow E^2$ defined over $M$. \end{itemize}
\label{prop:splitting} \end{prop}
\begin{proof}
$A_g \simeq E^2$ over $M$ because (on the
representation side) the twist becomes trivial while restricted to
$M$, so the L-series of $A_g$ becomes the square of that of $E$ (over such
field) and by Faltings' isogeny theorem there exists an isogeny
(defined over $M$). If $d=3$, $\varepsilon=\psi^2$ and $M=L$, while if $d=6$, starting with $E_p$ (whose character has order $3$) gives the result. If $d=4$, it is clear (on the representation side) that $L(A_g,s) = L(\tilde{E},s)$ over $L$, where $\tilde{E}$ is the twist of $E$ (viewed over $\bar{\QQ}^{\ker(\varepsilon)} = \QQ(\sqrt{p})$) by the quadratic character $\psi^2$. Then Faltings' isogeny theorem proves the claim. \end{proof}
\begin{prop}
Let $\sigma \in \Gal(\bar{\QQ}/\QQ)$. Then
$\varphi^\sigma:A_g \to E^2$ is equal to
$ \varphi \kappa(\left. \sigma \right|_M) $, where $\kappa$ is some
character of $\Gal(M/\QQ)$ of order $[M:\QQ]$.
\label{prop:conjugateisogeny} \end{prop} \begin{proof} Since $\varphi$ and $\varphi^\sigma$ are isogenies of the same degree there exists an element $a_\sigma \in \End(A_g) \otimes \QQ=K_{g}$ of norm $1$ such that $\varphi^{\sigma} = \varphi a_{\sigma}$. The map
$\kappa(\left. - \right|_M) : \Gal(\bar{\QQ}/\QQ) \rightarrow K^{\times}_{g}$, given by sending $\kappa(\left. \sigma \right|_M) \mapsto a_{\sigma}$ is a character, since the endomorphism $a_{\sigma}$ is defined over $\QQ$.
Clearly $\kappa$ has the predicted order since otherwise the isogeny $\varphi$
could be defined over a smaller extension (given by the fixed field
of its kernel), which is not possible. \end{proof}
In order to explicitly compute Heegner points it is crucial to have a better understanding of the isogenies $\omega$ and $\varphi$.
Let us recall some basic properties of Atkin-Li operators for modular forms with nebentypus, as explained in \cite{Atkin-Li}. Let $N$ be a positive integer, and let $P \mid N$ be such that $\gcd(P,N/P)=1$. Let $N'=\frac{N}{P}$ and decompose $\varepsilon =\varepsilon_P \varepsilon_{N'}$, where each character is supported in the set of primes dividing the sub-index.
\begin{thm}
Assuming the previous hypotheses, there exists an operator
$W_P:S_2(\Gamma_0(N),\varepsilon) \to
S_2(\Gamma_0(N),\overline{\varepsilon_P} \varepsilon_{N'})$ which satisfies the following properties:
\begin{itemize}
\item $W_P^2 = \varepsilon_P(-1) \overline{\varepsilon_{N'}}(P)$.
\item If $g$ is an eigenvector for $T_q$ for some prime $q \nmid N$
with eigenvalue $a_q$, then $W_P(g)$ is an eigenvector for $T_q$
with eigenvalue $\overline{\varepsilon_P}(q) a_q$.
\item If $g \in S_2(\Gamma_0(N),\varepsilon)$ is a newform, then
there exists another newform $h \in
S_2(\Gamma_0(N),\overline{\varepsilon_P}\varepsilon_{N'})$ and a
constant $\lambda_P(g)$ such that $W_P(g) = \lambda_P(g) h$.
\item The number $\lambda_P(g)$ is an algebraic number of absolute
value $1$. Furthermore, if $a_P$, the $P$-th Fourier coefficient
of the newform $g$, is non-zero then \[ \lambda_P(g)=G(\varepsilon_P)/a_P, \] where $G(\chi)$ denotes the Gauss sum of the character $\chi$.
\end{itemize} \label{thm:AtLi} \end{thm}
The number $\lambda_P(g)$ is called the pseudo-eigenvalue of $W_P$ at $g$.
\begin{proof}
See \cite[Propositions 1.1, 1.2, and Theorems 1.1 and 2.1]{Atkin-Li}. \end{proof} In our setting $N=p \cdot m$, $P=p$, $\varepsilon_{N'}$ is trivial, and $W_p$ is an involution (i.e. $W_p^2=1$) acting on the differential forms of $A_g$.
If $\eta$ is an endomorphism of $J(\Gamma_1(N))$ (or one of its quotients), we denote $\eta^*$ the pullback it induces on the differential forms. Given an integer $u$ let $\alpha_u$ be the endomorphism of $J(\Gamma_1(N))$ corresponding to the action of the matrix $\left(\begin{smallmatrix} 1 & u/p\\ 0 & 1\end{smallmatrix}\right)$ on differential forms. Such endomorphism is defined over the cyclotomic field of $p$-th roots of unity.
Let $\tau \in \Gal(K_g/\QQ)$ denote complex conjugation. Recall that $\tau{a_q}=a_q \varepsilon^{-1}(q)$ for all positive integers $q$ prime to $p \cdot m$. Following \cite{Ribet2} we define \[ \eta_{\tau} = \sum_{u \; (\text{mod }{p})}\varepsilon (u) \alpha_u. \] Since $\varepsilon(u) \in \mathcal{O}_{K_g}$, via the map $\theta$ we think of $\eta_\tau$ as an element in $\End_{L}(A_g)$. To normalize $\eta_{\tau} $ we follow \cite{GL}. Let $a_p \in K_g$ be the $p$-th Fourier coefficient of $g$. \begin{lemma}
The element $a_p$ has norm $p$. \end{lemma} \begin{proof}
Looking at the curve $E$ over $\QQ_p$, the coefficient $a_p$ is one
of the roots of the characteristic polynomial attached to the
Frobenius element in the minimal (totally ramified) extension where
$E$ acquires good reduction (see for example Section 3 of
\cite{Tim}). Since the norm of the local uniformizer in such
extension is $p$ (because the extension ramifies completely) the
result follows. \end{proof} We then consider the normalized endomorphism $\frac{\eta_{\tau}}{a_p}$. \begin{rem}
Our choice is a particular case of the one considered in \cite{GL},
since our normalization corresponds to the splitting map
$\beta:\Gal(K_g/\QQ)\to K_g^\times$ given by $\beta(\tau)=a_p$. \end{rem}
\begin{thm} The operator $W_p$ coincides with
$\left(\frac{\eta_{\tau}}{a_p}\right)^*$. \label{thm:A-Lrelation} \end{thm}
\begin{proof}
It is enough to see how it acts on the basis $\{g,\overline{g}\}$ of
differential forms of $A_g$. By Theorem~\ref{thm:AtLi} (since $a_p$
is non-zero), $W_p(g) = \lambda_p \overline{g}$, where $\lambda_p =
G(\varepsilon)/a_p$. On the other hand,
$\eta_\tau(g)=G(\varepsilon)\overline{g}$, by
\cite[Lemma $2.1$]{GL}.
Exactly the same argument applies to $\overline{g}$, using the fact
that $\overline{G(\varepsilon)}=G(\overline{\varepsilon})$, since
$\varepsilon$ is an even character. \end{proof}
\begin{coro}
The Atkin-Li operator $W_p$ is defined over $L$, i.e. it corresponds
to an element in $\End_{L}(A_g) \otimes \QQ$. Its action decomposes
as a direct sum of two $1$-dimensional spaces. \end{coro} Let \[ \omega : A_g \to (W_p+1)A_g \times (W_p-1)A_g. \] Then both terms are $1$-dimensional, and the isogeny $\omega$ gives a splitting as in Proposition~\ref{prop:splitting}.
\begin{rem}
The explicit map $\omega$ satisfies the first statement of
Proposition~\ref{prop:splitting}. In order to get the second
statement we eventually need to compose it with the isomorphism between
$\tilde{E}$ and $E$. Recall that $E =\tilde{E}$ if $d=3$ and
$\tilde{E}$ is a quadratic twist of it otherwise, so in any case the
isomorphism is easily computed. \end{rem}
\subsection{Heegner points} This section follows Section $4$ of \cite{darmon2012birch}, so we suggest the reader to look at it first. Keeping the notations of the previous sections, let $\varepsilon:(\ZZ/p)^\times\to \CC^\times$ be a Dirichlet character. Extend the character to $(\ZZ/{p \cdot m})^\times$ by composing with the canonical projection $(\ZZ/{p \cdot m})^\times \rightarrow (\ZZ/p)^\times$ and define \[ \Gamma^{\varepsilon}_{0}(p \cdot m) :=\left\lbrace
\left(\begin{smallmatrix} a & b\\ c & d\end{smallmatrix}\right) \in \Gamma_0(p \cdot m) :
\varepsilon(a)=1 \right\rbrace. \] Let $X^{\varepsilon}_{0}(p \cdot m)$ be the modular curve obtained as the quotient of the extended upper half plane $\mathcal{H}^{*}$ by this group. This modular curve has a model defined over $\QQ$ and it coarsely represents the moduli problem of parameterizing quadruples $(E,Q,C,[s])$ where \begin{itemize} \item $E$ is an elliptic curve over $\CC$, \item $Q$ is a point of order $m$ on $E(\CC)$, \item $C$ is a cyclic subgroup of $E(\CC)$ of order $p$, \item $[s]$ is an orbit in $C \setminus \left\lbrace 0 \right\rbrace$ under the
action of $\ker(\varepsilon) \subset (\ZZ/p)^{\times}$. \end{itemize}
\begin{rem}
There is a canonical map $\Phi: X^{\varepsilon}_{0}(p \cdot m) \to X_0(p \cdot m)$ which is
the forgetful map in the moduli interpretation. This map has degree
$\ord(\varepsilon)$. \end{rem} As in the classical case, there exists a modular parametrization \[ \xymatrix{
X^{\varepsilon}_{0}(p \cdot m)\ar[d]_\Phi\ar[r]^-{\Psi} {\ar@/_1pc/[rr]_{\Psi_g}}& \Jac(X^{\varepsilon}_{0}(p \cdot m)) \ar[r]^-\pi& A_{g}\\ X_0(p \cdot m)\ar@/_/@{.>}[urr] } \] where $\Psi(P)=\left( P \right) - \left( \infty \right)$ (the usual immersion of the curve in its Jacobian) and $\pi$ is the Eichler-Shimura projection onto $A_{g}$. These maps are defined over $\QQ$, as the cusp $\infty$ is rational. Our strategy is to construct Heegner points on $X^{\varepsilon}_{0}(p \cdot m)$ and push them through the modular parametrization $\Psi_{g}$ to the abelian variety $A_{g}$ and finally project them onto the elliptic curve $E$. To construct points on $X^{\varepsilon}_{0}(p \cdot m)$, we consider the canonical map \[ \Phi:X^{\varepsilon}_{0}(p \cdot m) \to X_0(p \cdot m), \]
and look at preimages of classical Heegner points on $X_0(p \cdot m)$.
Since the conductor $p \cdot m$ satisfies the classical Heegner hypothesis with respect to $K$ there is a cyclic ideal $\id{n}$ of norm $p \cdot m$. Let $c$ be a positive integer such that $\gcd(c,p \cdot m)=1$. Then, a classical Heegner point on $X_0(p \cdot m)$ corresponds to a triple $P_{\id{a}}=(\mathscr{O}_{c},\id{n},[\id{a}]) \in X_{0}(p \cdot m)(H_{c})$, where $[\id{a}] \in \Pic(\mathscr{O}_c)$. Such a point is represented by the elliptic curve $E_{\id{a}}= \CC/ \id{a}$ and its $\id{n}$-torsion points $E_{\id{a}}[\id{n}]$ (which are isomorphic to $\id{a}{\id{n}}^{-1}/\id{a}$) are defined over $H_{c}$.
The action of $\Gal (\overline{\QQ}/ H_{c})$ on $E_{\id{a}}[\id{n}]$ gives a map $\Gal(\overline{\QQ}/H_c) \to (\id{a}{\id{n}}^{-1}/\id{a})^\times$. Composing such map with the character $\varepsilon$ gives \[ \rho: \Gal(\overline{\QQ}/H_{c}) \rightarrow (\id{a}{\id{n}}^{-1}/\id{a})^{\times} \stackrel{\varepsilon}{\rightarrow} \CC^{\times} . \] Its kernel corresponds to an extension $\tilde{H}_{c}$ of degree $\ord(\varepsilon)$ of $H_{c}$. Let $\tilde{H_c}=H_{c} M$.
\begin{prop}
The $\ord(\varepsilon)$ points $\Phi^{-1}(P_{\id{a}})$ lie in
$X^{\varepsilon}_{0}(p \cdot m)(\tilde{H_c})$ and are permuted under
the action of $\Gal(\tilde{H_c}/H_c)$. \end{prop} \begin{proof}
By complex multiplication $\tilde{H_c}$ lies in the composition of
$H_{c}$ and the ray class field $K_{\id{p}}$, where $\id{p}$ is the
unique prime of $K$ dividing $p$. The composition $H_{c}K_{\id{p}}$
equals $H_{c}(\xi_{p})$, where $\xi_{p}$ is a
$p$-th root of unity. Note that
$\QQ(\sqrt{p^ {\ast}}) \subset H_{c}$ and the extension
$H_{c}/K$ is unramified at $p$. Therefore, the unique extension of
degree $\ord(\varepsilon)$ of $H_{c}$ lying inside $H_{c}(\xi_{p})$ is
given by $H_{c} \bar{\QQ}^ {\ker(\psi)}=H_{c}M$. \end{proof} Using the aforementioned moduli interpretation, points on $X^{\varepsilon}_{0}(p \cdot m)$ represent quadruples $\left( \mathscr{O}_{c}, \id{n}, \left[ \id{a} \right],[t] \right)$ where $[t]$ is an orbit under $\ker(\varepsilon)$ inside $(\mathscr{O}_{c}/ (\id{n}/\id{p}))^{\times}$. \begin{rem}
Let $\sigma \in \Gal(\tilde{H_{c}}/K) $. Its action
on Heegner points is given by \[
\sigma \cdot(\mathscr{O}_{c},\id{n},[\id{a}] ,[t])=(\mathscr{O}_{c}, \id{n}, [\id{a}{\id{b}}^{-1}],[dt]), \] where $\sigma \mid_{H_c}=\text{Frob}_{\id{b}}$, and $d=\rho(\sigma) \in (\mathscr{O}_{c}/(\id{n}/\id{p}))^{\times}$. \label{rem:isogeny} \end{rem}
\subsection{Zhang's formula} \begin{thm}[Tian-Yuan-Zhang-Zhang]
Let $K$ be an imaginary quadratic field satisfying the Heegner
hypothesis for $p \cdot m$ and let
$\tilde{\chi} : \mathbb{A}^{\times}_{K} \rightarrow \CC^{\times}$ be
a finite order Hecke character such that
$\tilde{\chi} \mid_{\mathbb{A}^{\times}_{\QQ}} = \varepsilon^{-1}$.
Then $L(g,\tilde{\chi},s)$ vanishes at odd order at
$s=1$. Moreover, if such order equals $1$,
$(A_{g}(\tilde{H_{c}}) \otimes \CC)^{\tilde{\chi}}$ has rank one
over $K_{g} \otimes \CC$.
More precisely, consider the Heegner point
$\left( \left[ \mathfrak{a} \right],\mathfrak{n},1 \right) \in
X^{\varepsilon}_{0}(p \cdot m)(\tilde{H_{c}} )$ and denote by
$P_{c}$ its image under the modular parametrization $\Psi_{g}$. Then \[ P^{\tilde{\chi}}= \sum_{\sigma \in \Gal(\tilde{H_{c}}/K)} \bar{\tilde{\chi}}(\sigma) P_{c}^{\sigma} \in (A_{g}(\tilde{H_{c}})\otimes \CC)^{\tilde{\chi}} \] generates a rank one subgroup over $K_{g} \otimes \CC$. \label{thm:chinos} \end{thm} \begin{proof}
See \cite[Theorem 4.3.1]{tian2003euler}, \cite{zhang2010arithmetic}, and \cite[Theorem 1.4.1]{yuan2013gross}. \end{proof}
Let $c$ be a positive integer relatively prime to $\Disc(K) \cdot p \cdot m$, and let $\chi$ be a ring class character of $\Gal(H_c/K)$. Since $\bar{\kappa}^ 2= \varepsilon^{\pm 1}$, the character $\tilde{\chi} : \Gal(\tilde{H_{c}}/K) \rightarrow \CC^{\times}$ given by $\tilde{\chi}=\chi \bar{\kappa}$ satisfies the hypothesis of Theorem~\ref{thm:chinos} (for either $g$ or its conjugate $\bar{g}$). Summing up, we get the following theorem:
\begin{thm} \label{thm:explicit}
The point $\varphi( P^{ \chi \bar{ \kappa} })$ belongs to $(E^2(H_{c}) \otimes \CC)^{\chi}$. In addition, it is non-torsion if and only if $ L'(E/K, \chi ,1) \neq 0$. \end{thm} \begin{proof} By definition and Proposition~\ref{prop:conjugateisogeny}, \[ \varphi( P^{ {\chi}\bar{ \kappa} })=\sum_{\sigma \in \Gal (\tilde{H_{c}}/K) } \bar{ \chi}(\sigma) \varphi( \kappa(\sigma) P ^\sigma)= \sum_{\sigma \in \Gal (\tilde{H_{c}}/K) } \bar{ \chi}(\sigma) {\varphi( P)}^{\sigma} , \] so it lies in the right space.
Since $\ord(\kappa)=\ord(\psi)$ and
${\bar{\kappa}}^2=\varepsilon^{\pm 1}$ we get
$\bar{\kappa}=\psi^{\pm 1}$. We know that
$g \otimes \psi=\bar{g} \otimes {\psi}^{-1}=f_E$, therefore, using $g$ or $\bar{g}$, we obtain $L(g,\tilde{\chi},s)=L(E,{\chi},s)$.
Theorem~\ref{thm:chinos} and the previous result imply that
$\varphi( P^{ \chi \bar{ \kappa} }) \in (E^2(H_{c}) \otimes
\CC)^{\chi}$ is non-torsion if and only if $ L'(E/K, \chi ,1) \neq 0$.
\end{proof} Once we construct a non-torsion point on $E \times E$ we can project it to some coordinate in order to obtain a non-torsion point on $E$.
\subsection{Heegner systems} As in the classical case, the family of Heegner points constructed using different orders satisfy certain compatibilities.
\begin{prop} Let $\ell$ be a prime such that $\ell \nmid N$ and $\ell$
is inert in $K$. Then for every Heegner point
$P_{c\ell} \in A_{g}(\tilde{H}_{c\ell})$ there exists a Heegner
point $P_{c} \in A_{g}(\tilde{H_{c}})$ with \begin{equation}
\label{eq:kolyvagin} \Tr_{ \tilde{H}_{c\ell}/\tilde{H_{c}}} P_{c \ell}= \theta(a_{\ell}) P_{c} , \end{equation} where $a_{\ell}$ is the $\ell$-th Fourier coefficient of $g$. \end{prop} \begin{proof}
The proof mimics that of the classical case (see \cite[Proposition $3.7$]{Gr89}). \end{proof} To construct a point on $E$, we first apply the isogeny $\varphi$ to a point in $A_g$ and then project onto one of the coordinates (call $\pi_i$ the projection to the $i$-th coordinate). But $K_g$ does not act on $E$! To overcome this problem, we restrict to primes $\ell$ which split completely in $L$. Let $Q_{c}:= \pi_{i}(\Tr_{ \tilde{H_{c}}/H_{c}} \varphi (P_{c})) \in E(H_{c})$. \begin{prop} Let $\ell$ be a prime such that $\ell \nmid N$, $\ell$ is inert in $K$ and $\ell$ splits completely in $L$. Then for every Heegner point $Q_{c\ell} \in E(H_{c\ell})$ there exists a Heegner point $Q_{c} \in E(H_{c})$ such that \[ \Tr_{H_{c \ell}/H_{c}} Q_{c \ell} = a_{\ell} Q_{c} . \] \label{prop:Heegner-compatibility} \end{prop} \begin{proof}
Applying $\pi_{i}(\Tr_{ \tilde{H_{c}}/H_{c}} \varphi)$ to equation
(\ref{eq:kolyvagin}), since $\varphi$ commutes with the trace and
$a_{\ell} \in \QQ$ (because $\ell$ splits completely in
$L$) we get \[ \pi_{i}(\Tr_{ \tilde{H_{c}}/H_{c}} \Tr_{ \tilde{H}_{c\ell}/\tilde{H_{c}}} \varphi(P_{c \ell}))= a_{\ell} Q_{c} .\] Also \[ \pi_{i}(\Tr_{ \tilde{H_{c}}/H_{c}} \Tr_{ \tilde{H_{c\ell}}/\tilde{H_{c}}} \varphi(P_{c \ell}))= \pi_{i}(\Tr_{ H_{c \ell}/H_{c}} \Tr_{ \tilde{H_{c\ell}}/H_{c \ell}} \varphi(P_{c \ell})), \] but since $\pi_{i}$ is defined over $\QQ$, this expression equals $\Tr_{ H_{c \ell}/H_{c}} Q_{c \ell}$ as claimed. \end{proof}
The previous results are enough for proving a Kolyvagin-type theorem. \begin{thm}[Kolyvagin, Bertolini-Darmon] If $\pi_i(\varphi(P^{\chi \bar{\kappa}}))$ is non-torsion, then $\dim_{\CC}(E(H_c))^\chi=1$. \end{thm}
\begin{proof}
The proof is very similar to the one given in
\cite{Bertolini-Darmon} (Theorem 2.2) with the following remarks
(using their notation and terminology): any $p$-descent prime is
automatically unramified in $L$ hence $K(E[p])$ and $L$ are
disjoint. We also require special rational primes $\ell$ to split
completely in $L/\QQ$. Recall that $L$ is totally real, hence such
condition is compatible with the other ones and special primes do
exist. The first assertion of Proposition $3.2$ in
\cite{Bertolini-Darmon} is exactly our
Proposition~\ref{prop:Heegner-compatibility}, and the second one
follows from \cite{Gr89} (proof of Proposition 3.7). With these
modifications, the proof of \cite{Bertolini-Darmon} holds. \end{proof} \section{General case}
While considering the case of many primes ramifying in $K$, it is clear that the potentially multiplicative case works similarly. Some extra difficulties arise in the other cases. To make the exposition/notation easier, we start considering the following two cases:
\noindent {\bf Case 1:} Suppose that the conductor of $E$ equals $p_1^2 \cdots p_r^2\cdot m$ where: \begin{itemize} \item $E$ has potentially good reduction at all $p_i$'s over an
abelian extension, \item all characters $\psi_{p_i}$ have the same order, \item all $p_{i}$'s are ramified in $K$, \item $m$ satisfies the classical Heegner hypothesis. \end{itemize} Let $P=\prod_{i=1}^r p_i$. There are $2^r$ newforms of level $P\cdot m$ which are twists of $f$ (obtained, following the previous section notation, by twisting $f_E$ by all possible combinations of $\{\psi_{p_i},\overline{\psi_{p_i}}\}$). Working with all of them implies considering an abelian variety of dimension $2^r$, but the coefficient field has degree $2$ so such variety is not simple over $\QQ$.
Instead, take ``any'' newform $g \in S_2(\Gamma_0(P\cdot m),\varepsilon)$, and consider the abelian surface $A_g$ attached to it by Eichler-Shimura. The only Atkin-Li operator acting on (the space of holomorphic differentials of) such variety is the operator $W_P$, which again is an involution, so we can split the space in the $\pm 1$ part and proceed as in the previous case considered (where the splitting map is determined by $\beta(\tau)=\prod_{i=1}^r a_{p_i}$).
The ambiguity on the choice of $g$ is due to the following: the operators $W_{p_i}$ act transitively on the set of all newforms $g$. In particular they ``permute'' the different abelian surfaces (note that such operators are not involutions, but have eigenvalues in the coefficient field $K_g$ which is independent of $g$). Although surfaces attached to different choices of $g$ are in general not isomorphic (the traces of the Galois representations are different), they become isomorphic over $M$ hence all of them give the same Heegner points construction.
\noindent{\bf Case 2:} Suppose the conductor of $E$ equals $p^2 \cdot q^2 \cdot m$, where \begin{itemize} \item $E$ has potentially good reduction at $p$ and $q$ over an abelian extension, \item the order of $\psi_p$ equals $4$ and that of $\psi_q$ equals $3$, \item both $p$ and $q$ ramify in $K$, \item $m$ satisfies the classical Heegner hypothesis. \end{itemize} With such assumptions the coefficient field $K_{g}$ equals $\QQ(\sqrt{-1},\sqrt{-3})$. Let $g \in S_2(\Gamma_0(p q m),\varepsilon)$ be any twist of $f$, obtained by choosing local characters $\psi_p$ at $p$ and $\psi_q$ at $q$ (so $\varepsilon = \psi_p^2 \psi_q^2$). By Eichler-Shimura there exists a $4$ dimensional abelian variety $A_g$ defined over $\QQ$ (attached to $g$) and an embedding $K_g \hookrightarrow \End(A_g)\otimes \QQ$. The Atkin-Li operators $W_p$ and $W_q$ do act on the differential forms of $A_g$ although not necessarily as involutions. Since their eigenvalues lie in $K_g$, we can diagonalize them.
Let $\sigma_i$ denote the Galois automorphism of $K_g$ which fixes $\sqrt{-3}$ and $\sigma_{\sqrt{-3}}$ be the one fixing $\sqrt{-1}$ (so their composition is complex conjugation). We have the following analogue of Theorem~\ref{thm:A-Lrelation}. \begin{thm} With the previous notations:
\begin{enumerate}
\item the operator $W_p$ coincides with
$\left(\frac{\eta_{\sigma_i}}{a_p}\right)^*$, \item the operator $W_q$ coincides with
$\left(\frac{\eta_{\sigma_{\sqrt{-3}}}}{a_q}\right)^*$, \item the operator $W_{pq}$ coincides with
$\left(\frac{\eta_{\sigma_i \sigma_{\sqrt{-3}}}}{a_p a_q}\right)^*$.
\end{enumerate} \end{thm} \begin{proof}
The proof mimics that of Theorem~\ref{thm:A-Lrelation}. Consider the
basis of differential forms given by
$\{g,\overline{g},h, \overline{h}\}$, where
$h \in S_2(pqm,\overline{\varepsilon_p}{\varepsilon_q})$ equals $\sigma_i(g)$. By
Theorem~\ref{thm:AtLi}: \[
W_p\, g = \frac{G(\varepsilon_p)}{a_p} \, {h}, \qquad W_p\,
\overline{g}= \frac{\overline{G({\varepsilon_p})}}{\overline{a_p}}
\,\overline{h}, \qquad W_p\, h =
\frac{\overline{G(\varepsilon_p)}}{\overline{a_p}} \, {g}, \qquad
W_p\, \overline{h}= \frac{{G({\varepsilon_p})}}{{a_p}} \, \overline
g. \] A splitting map is given by \begin{align}
\label{eq:splittinggeneralcase}
\beta(\sigma_i)=a_p, & &\beta(\sigma_{\sqrt{-3}})=a_q,& & \beta(\sigma_i \sigma_{\sqrt{-3}})=a_pa_q \psi_{p}(q) \psi_{q}(p). \end{align} By \cite[Lemma 2.1]{GL} we have \begin{align*} \left(\frac{\eta_{\sigma_i}}{a_p}\right)^*g = \frac{G(\varepsilon_p)}{a_p} h, & &\left(\frac{\eta_{\sigma_i}}{a_p}\right)^*\overline g = \frac{\overline{G(\varepsilon_p)}}{\overline{a_p}} \overline h, & & \left(\frac{\eta_{\sigma_i}}{a_p}\right)^*h = \frac{\overline{G(\varepsilon_p)}}{\overline{a_p}} g,\\ \left(\frac{\eta_{\sigma_i}}{a_p}\right)^* \overline{h} = \frac{{G(\varepsilon_p)}}{{a_p}} \overline g. \end{align*} The same computation proves the second statement, and the last one follows from the fact that if $\chi,\chi'$ are two characters of conductors $N$ and $N'$ with $(N:N')=1$, then \begin{equation}
\label{eq:Gaussrelation} G(\chi \cdot \chi') = \chi(N')\chi'(N) G(\chi) G(\chi'). \end{equation} \end{proof} Then we can split $A_g$ into four pieces over $M$ as in the previous section.
Although we considered only two particular cases, the general construction follows easily from them. Just split the primes into three sets: the ones with potentially multiplicative reduction, the ones with potentially good reduction with characters of order $4$ and the ones with potentially good reduction with characters of order $3$ or $6$. Treat each set as in Case $1$, and use Case $2$ to mix them. Note that in any case the abelian surface $A_g$ has dimension $1$, $2$ or $4$. \section{Examples} In this section we show some examples of our construction, which were done using \cite{PARI2}. The potentially multiplicative case is straightforward since we only have to find the corresponding quadratic twist and then construct classical Heegner points. The potentially good case is a little more involved. We consider the following two cases:
\noindent $\bullet$ The case where $\ord(\psi_p)=2$ works exactly the same as the previous one, since we only have to find the quadratic twist.
\noindent $\bullet$ In the case $\ord(\psi_p)=3, 4$ or $6$ we start by applying Dokchitser's algorithm \cite{Tim} (see also the appendix in \cite{Kohen}) to find $\psi_p$ as well as the corresponding Fourier coefficient $a_{p}$ (which give the $q$-expansion of $g$). We compute $A_g$ using the Abel-Jacobi map, and then we split it following Section \ref{subsection:346}.
Each factor is isomorphic to $E$ over $M$. To find the isomorphism explicitly, we compare the lattices of $E$ and the one computed and find one $\alpha \in M$ sending one lattice to the other. \begin{table}[h] \scalebox{0.8}{
\begin{tabular}{||r|c||c|c||c||r|c||c|c||}
\hline N& E & St & Ps& $K$ & $\ord(\psi_p)$& $a_p$ & $\tau$ & $P$\\
\hline $5^2\cdot 29$ & \href{http://www.lmfdb.org/EllipticCurve/Q/725.a1}{.a1}& $\{5,29\}$ & $\emptyset$& $\QQ(\sqrt{-5})$ & & & $\frac{45+\sqrt{-45}}{145}$ & $[8,8]$\\ \hline $5^2\cdot 23$ & \href{http://www.lmfdb.org/EllipticCurve/Q/575.e1}{.e1} & $\{23\}$ & $\{5\}$ & $\QQ(\sqrt{-5})$ &$4$ & $2-i$ & $\frac{15+\sqrt{-5}}{5 \cdot 23}$ & $[\frac{-1637}{2^6}, \frac{-2^8 - 3 \cdot 5^2 \cdot 127
\sqrt{-5} }{ 2^9} ]$\\ \hline $2^2\cdot 7^2$ & \href{http://www.lmfdb.org/EllipticCurve/Q/196.b2}{.b2} & $\emptyset$ & $\{7\}$ & $\QQ(\sqrt{-7})$ & $3$ & $\frac{-5+\sqrt{-3}}{2}$ & $\frac{21+\sqrt{-7}}{7 \cdot 2^3}$ & $[\frac{-139}{4},\frac{581\sqrt{-7}}{8}]$\\ \hline $2\cdot3^2\cdot7^2$ & \href{http://www.lmfdb.org/EllipticCurve/Q/882.a1}{.a1} & $\{2\}$ & $\{7\}$ & $\QQ(\sqrt{-7})$ & $3$ & $\frac{-1+3\sqrt{-3}}{2}$ & $\frac{21+\sqrt{-7}}{28}$ & $[39,15]$\\ \hline $5^2\cdot 7^2$ & \href{http://www.lmfdb.org/EllipticCurve/Q/1225.d2}{.d2} & $\emptyset$ & $\{5\}$ &$\QQ(\sqrt{-35})$ & $4$ & $1-2i$& $\frac{-35+\sqrt{-35}}{70}$ & $[-15,\frac{15+175\sqrt{-35}}{2}]$\\
& & & $\{7\}$ & & $3$ & $\frac{1-3\sqrt{-3}}{2}$& & \\ \hline \end{tabular}} \caption{Examples of ramified primes} \label{table:curvesdata} \end{table}
The computations are summarized in Table~\ref{table:curvesdata}. The table is organized as follows: the first two columns contain the curve conductor and its label (following \cite{lmfdb} notation). The next two columns list the Steinberg primes and the principal series primes of the curve (following \cite{Pacetti} algorithm). The fifth column contains the imaginary quadratic field. For the computations we just considered the whole ring of integers. The sixth and seventh columns contain the order of the character and the number $a_p$ for the principal series primes ramifying in $K$. Finally the last two columns show the Heegner points considered in the upper-half plane and the point constructed in $E(K)$.
Some remarks regarding the examples considered: \begin{itemize} \item The first example corresponds to a potentially multiplicative
case. The class number of $\mathcal{O}_K$ is $2$ and
$H=\QQ(\sqrt{5},i)$. If $\chi_5$ denotes the non-trivial character
of the class group, we can trace with respect to it and get the point
$[9, \frac{-9+15 \sqrt{5}}{2}] \in E(H)^{\chi_{5}}$. \item The second and third examples correspond to elliptic curves with
only one potentially good reduction prime ramifying in $K$. The
former has $\ord(\varepsilon)=2$ while the latter has
$\ord(\varepsilon)=3$. \item The fourth example is quite interesting, since the prime $2$
splits in $K$ (so we use an Eichler order at $2$), the prime $3$ is
inert in $K$ (so we use a Cartan order at $3$), and the prime $7$ is
ramified in $K$. This is a mixed case of the Cartan-Heegner
hypothesis (as in \cite{Kohen}) and the present one. We compute the
$q$-expansion of $g$ (as explained in the aforementioned article) as
a form in $S_{2}(\Gamma_{0}(2\cdot 7^2) \cap \Gamma_{ns}(3))$ and
then twist by the character $\psi_7$ (of order $3$) to get a form in
$S_{2}(\Gamma^{\varepsilon}_{0}(2\cdot 7) \cap \Gamma_{ns}(3))$. The
results of Section~\ref{subsection:346} apply to give the
corresponding splitting. \item The last example corresponds to an elliptic curve with two
primes of potentially good reduction ramifying in $K$, hence the
coefficient field is $K_{g}=\QQ(\sqrt{-1},\sqrt{-3})$. \end{itemize}
\appendix \section{Computation of a Darmon point (by Marc Masdeu)}
Let $E$ denote the elliptic curve \cite[\href{http://www.lmfdb.org/EllipticCurve/Q/147.c2}{147.c2}]{lmfdb}, of conductor $3\cdot 7^2$ which has potentially good reduction over an abelian extension at the prime $7$. Let $K = \QQ(\sqrt{35})$, which has class number $2$. The prime $3$ is inert in $K$, while $7$ ramifies. It is easy to see that $\operatorname{sign}(E,K)=-1$.
Let $p = 3$ and consider the Dirichlet character $\chi$ of conductor $7$ which maps $3\in (\ZZ/7\ZZ)^\times$ to $\zeta_6=e^{\pi i/3}$. Let $\Gamma$ denote the group \[
\Gamma = \Gamma_0^\chi(7)[1/3] = \Big\{\smtx abcd\in \SL_2\left(\ZZ\left[1/3\right]\right)~|~ c \in 7\ZZ[1/3],\ \chi(a) = 1\Big\}. \]
In the page \url{http://github.com/mmasdeu/} there is code available to make computations with such groups.
There is a $2$-dimensional irreducible component in the plus-part of $H^1(\Gamma_0^\chi(21),\ZZ)$, which corresponds to the abelian surface $A_g$. Let $\{g_1, g_2\}$ be an integral basis of this subspace, normalized such that its basis vectors are not multiples of other integral vectors. Following the constructions of~\cite{MR3384519} with the non-standard arithmetic groups, each of these vectors yield a cohomology class \[ \varphi^{(i)}_E\in H^1(\Gamma, \Omega^1_{\mathcal{H}_3}),\quad i =1,2. \] Here $\mathcal{H}_3$ denotes the $3$-adic upper half-plane and $\Omega^1_{\mathcal{H}_3}$ is the module of rigid-analytic differentials with $3$-adically bounded residues.
The ring of integers $\mathcal{O}_K$ of $K$ embeds into $M_2(\ZZ)$ via \[ \sqrt{35}\mapsto \psi(\sqrt{35}) = \left(\begin{array}{rr} 15 & 10 \\ -19 & -15 \end{array}\right). \]
The fundamental unit of $K$ is $u_K=\sqrt{35}+6$, which is mapped to the matrix \[ \psi(u_K) = \left(\begin{array}{rr} 21 & 10 \\ -19 & -9 \end{array}\right). \]
In order to obtain an element of $\Gamma_0^\chi(7)$ we need to consider $u_K^{14}$, which maps to \[ \gamma_K = \psi(u_K)^{14} = \left(\begin{array}{rr} -3057309462214237 & -4524404717310744 \\ 2852342104391556 & 4221080735198699 \end{array}\right)\in \Gamma_0^\chi(7). \]
The matrix $\gamma_K$ fixes a point $\tau_K$ in $\mathcal{H}_3$, \[ \tau_K = 680113883076491926203393 + 188920523076803312834276\,\alpha_3 + O(3^{50}),
\] where $\alpha_3$ denotes a square root of $35$ in $K_3$, the completion of $K$ at $3$.
We present the above groups using Farey symbols so as to solve the word problem for them. Although the homology class of $\gamma_K\otimes \tau_K$ might not lie in $H_1(\Gamma_0^\chi(7),\operatorname{Div}^0\mathcal{H}_3)$, its projection into the $A_g$ isotypical component does. It can be seen that such projection is given by the operator $(T_2^2-3T_2+3)(T_2+3)$, where $T_2$ is the Hecke operator at $2$ (just by computing the characteristic polynomial of the Hecke operator $T_2$ in the whole space and computing its irreducible factors). This allows us to represent $(T_2^2 - 3T_2 + 3)(T_2+3)(\gamma_K\otimes \tau_K)$ by a cycle of the form \[ \smtx{-6}{1}{-7}{1}\otimes D_1 + \smtx{15}{-4}{49}{-13}\otimes D_2 + \smtx{1}{1}{0}{1}\otimes D_3 + \smtx{22}{-9}{49}{-20}\otimes D_4 + \smtx{-13}{5}{-21}{8}\otimes D_5, \] where $D_i$ are divisors of degree $0$ obtained by the aforementioned code (each divisor has support consisting of more than a thousand points in $\mathcal{H}_3$).
This class was integrated against the cohomology classes $\varphi^{(i)}_E$ using an overconvergent lift as explained in~\cite{MR3384519} giving a point in $A_g(\CC_3)$ which can be projected onto $E(\CC_3)$ by choosing an appropriate linear combination of the basis elements. In the generic case any projection would work. We have taken in this case the projection onto $g_1$. Concretely, the integral corresponding to $\varphi^{(1)}_E$ resulted in the $3$-adic element \[ J = 2 + (\alpha_3 + 2)\cdot 3 + 3^2 + (2\cdot \alpha_3 + 1)\cdot 3^3 + (\alpha_3 + 1)\cdot 3^5 + (\alpha_3 + 2)\cdot 3^6 + (\alpha_3 + 1)\cdot 3^7 + \cdots + O(3^{120}) \] If we apply Tate's uniformization (at $3$) to such point, we obtain a point in $E(K_3)$ which coincides up to the working precision of $3^{120}$ with \[ 14\cdot 13\cdot P = 14\cdot 13\cdot \left(\frac{164850\sqrt{7}}{2809} + \frac{610894}{2809}, \frac{63872781\sqrt{35}\sqrt{7}}{297754} + \frac{96772060\sqrt{35}}{148877} - \frac{1}{2} \right). \] Note that $P \in E(H)$, where $H=K(\sqrt{7})=\QQ(\sqrt{35},\sqrt{7})$ is the Hilbert class field of $K$ as would be predicted by the conjectures. The factor $14$ appears because we took the $14$th power of the fundamental unit, while the factor $13$ is due to the fact that the point would naturally lie in the elliptic curve \href{http://www.lmfdb.org/EllipticCurve/Q/147.c1}{147.c1}, which is $13$-isogenous to $E$.
Finally, if one takes the trace of $P$ to $K$ one obtains: \[ P_K = P + P^\sigma = \left(\frac{63367}{2000} , \frac{5823153}{200000}\sqrt{35} - \frac 12\right),\quad \Gal(H/K)=\langle \sigma\rangle, \] and one can check that $P_K$ is non-torsion and thus generates a subgroup of finite index in $E(K)$.
\end{document} |
\begin{document}
\begin{abstract} We construct several new families of Fano varieties of K3 type. We give a geometrical explanation of the K3 structure and we link some of them to projective families of irreducible holomorphic symplectic manifolds. \end{abstract} \maketitle
\tableofcontents
\section{Introduction}
Fano varieties and Irreducible Holomorphic Symplectic manifolds (for short, IHS) are two of the most studied classes of varieties in algebraic geometry. They are very different in nature (for example, they have different Kodaira dimensions) and they are often studied using different tools. Indeed, Fano varieties are at the core of birational geometry, while IHS manifolds (sometimes called hyperk\"ahler when the context is more differential-geometric) can be considered as a higher dimensional analogue of K3 surfaces, with lattice theory as one of the most relevant operative tools.
One of the most important properties of Fano varieties is their \emph{boundedness}: indeed it is well known that in every dimension there exists a finite number of families of Fano varieties up to deformations. This still holds if we allow some mild singularities, see \cite{birkar} for an up--to--date survey. It is therefore natural to aim for a classification, but such a problem is currently out of reach. A complete answer is known when the dimension is up to three, see for example \cite{ip99} for the smooth case. In the singular case the classification is still an open problem, even in low dimension and with mild singularities (for example terminal).
From dimension four onwards, only partial results are known. In particular a known explicit bound in terms of the canonical volume is assumed to be hugely non-sharp (being a number as large as $(n+2)^{(n+2)^{n2^{3n}}}$), already for $n=2$. The strategy for a partial classification usually is to consider special subclasses of Fano varieties, or to fix some other invariant, such as the \emph{index}. Recall that this is the integer $\iota_X$ which is the maximal number for which the anticanonical class is divisible in the Picard group.
It is a classical result that whenever a Fano $X$ is smooth, the index satisfies $\iota_X \leq \ddim X+1$, with the equality attained only in the case of projective space. Prime Fano varieties, that is Fano varieties with Picard rank $\rho=1$, of index $\ddim X -2 \leq \iota_X \leq \ddim X+1$ are completely classified, as they are when $\iota_X \geq \frac{\ddim X+1}{2}$ and $\rho_X >1$. Again see \cite{ip99} for a complete list of results. Mukai's conjecture further bounds the Picard rank in terms of the index: namely the conjecture states that for a smooth Fano $\rho_X(\iota_X-1) \leq \ddim X.$ The general philosophy is therefore that high index Fano varieties are somewhat easier to classify than low index.\\ On the contrary the main problem in the study of IHS is the lack of examples.
Similarly to the case of Calabi-Yau manifolds, no result of boundedness is known in general for IHS manifolds (although there are some partial results if one fixes for example the Beauville-Bogomolov-Fujiki form). However, finding examples is definitely harder than in the Calabi-Yau case. The known deformation types include two series of examples found by Beauville for every even dimension (Hilbert scheme of points on a K3 surface and a similar construction, called \emph{generalised Kummer variety} on an abelian surface), and two sporadic examples in dimension 6 and 10, found by O'Grady. Even if we fix the deformation type and we look for \emph{polarised} families (in analogy with the K3 case) the situation does not improve much: very few examples of projective families are known. A survey of this story can be found for example in \cite{beauville}.\\ The interplay between special classes of Fano varieties and IHS manifold is not a new story: a main example is the one of a maximal family of IHS fourfolds (deformation equivalent to the Hilbert Scheme of two points on a K3 surface) as the Fano variety of lines of a smooth cubic fourfold, due to Beauville and Donagi. We remark that this is not the unique IHS that can be linked to a cubic fourfold, as the recent constructions of Lehn-Lehn-Sorger-van Straten, \cite{llsvs} (an 8-fold of K3$^{[4]}$-type) and Laza-Sacc\`a-Voisin, \cite{lsv} (example of OG10 manifold) highlight. The cubic fourfold is not the only Fano to which we can associate polarised families of IHS: this is indeed a common feature of a special subclass of Fano varieties, called \emph{Fano varieties of K3 type} (FK3 for short) whose study is the central topic of this paper. We give here the key definitions. \begin{definition} Let $X$ be a smooth, projective $n$-dimensional Fano variety and $j$ be a non-negative integer. 
The cohomology group $H^j(X, \mathbb{C}) \cong \bigoplus_{p+q=j} H^{p,q}(X)$ (with $j \geq k$) is said to be of $k$ Calabi-Yau type if \begin{itemize} \item $h^{\frac{k+j}{2},\frac{j-k}{2}}=1$; \item $h^{p,q}=0$, for all $p+q=j, \ p <\frac{k+j}{2}$. \end{itemize} $X$ is said to be of $k$ (pure) Calabi-Yau type (k--FCY or Fano of k-CY type for short) if there exists at least a positive $j$ such that $H^j(X, \mathbb{C})$ is of $k$ Calabi-Yau type. Similarly, $X$ is said to be of mixed $(k_1, \ldots, k_s)$ Calabi-Yau type if the cohomology of $X$ has different level CY structures in different weights. \end{definition}
In the above definition, we consider all sub-Hodge structures, even those naturally arising using Lefschetz's Theorems (for us, a sixfold with a 2 Calabi Yau structure in $H^4$ and $H^4\cong H^6\cong H^8$ is a $(2,2,2)$ Calabi-Yau type). In the paper, we will say that a Fano variety is \emph{central} if all its cohomology groups have level 0.\\ A Fano variety of K3 type (FK3) is nothing but a 2-FCY. Fano varieties of CY type were first introduced and studied by Iliev and Manivel in \cite{ilievmanivel}. The authors focus on the case $k=3$, adding moreover an extra condition on the $H^1(T_X)$ (which we do not ask, since it would rule out already the cubic fourfold and many other interesting examples). They classify 3-FCY that can be obtained by slicing homogeneous spaces with linear and quadratic equations. We remark that our definition is purely Hodge-theoretical, but there are deep links with the concept of CY subcategories, see for example \cite{kuzicy}. In particular, constructing examples of Fano varieties of K3 and CY type might help in finding new playground for testing Kuznetsov's conjecture on rationality.\\ We are especially interested in the FK3 case, due to its deep relation with IHS manifolds. Indeed, a result of Kuznetsov and Markushevich in \cite{kuzm} shows that if $\mathfrak{M}$ is a moduli space of stable or simple sheaves on $X$, then any form in $H^{n-q-2}(X, \Omega^{n-q})$ defines a closed 2-form in $H^0(\mathfrak{M}^{\textrm{smooth}}, \Omega^2)$. This is indeed a good starting point in the hunt for examples of IHS. In particular, let us mention the IHS linked to the Debarre-Voisin twentyfold hypersurface, or to a Gushel--Mukai fourfold, or to a section of a product of $\mathbb{P}^3$, all examples of FK3 varieties, see \cite{debarrevoisin}, \cite{dk16}. \cite{ilievmanivel2}. \\ Although FK3 are definitely easier to hunt than IHS, there are not many known examples in the literature. 
For example, as complete intersections in (weighted) projective spaces one finds only the cubic fourfold, see \cite{ps18}. More examples are found if one allows terminal and $\mathbb{Q}$-factorial singularities, see \cite{frz19} but no new examples of IHS are produced anyway. In \cite{eg1} we conjectured that even taking complete intersection in Grassmannian one does not get any new example other than a complete intersection with four linear hypersurfaces in the Grassmannian $\Gr(2,8)$ and the above mentioned examples. This paper deals with the construction of examples of FK3 as zero locus of general global section of homogeneous vector bundles in Grassmannians or products of such. This is motivated by the list of K\"uchle, see \cite{kuchle}, of index 1 Fano fourfolds obtained in such a way, where a few more interesting FK3 are found. Therefore the aim of this paper is twofold: \begin{aim*} \begin{enumerate}\item Construct new examples of Fano varieties of K3 type; \item Construct examples of polarised families of IHS from our FK3. \end{enumerate} \end{aim*} This paper is a first step of this project. One of its main aims is to show that there might be a lot of examples \emph{out there}. Even if obtaining a complete classification of all Fano of K3 type might be out of reach, a classification might be attainable if we restrict ourselves to some special subcases, for example Fano obtained as zero locus on Grassmannians and homogeneous varieties. The main problem here is that in general translating the (Hodge-theoretical) requirement of being of K3 type into algebraic conditions is not easy. Using some tools that we developed in \cite{eg1} we were anyway able to find some numerological condition useful to produce examples of FK3, see Numerology \ref{num}. Unfortunately the conditions in \ref{num} are still too general for replicating a classification-type argument as the original one from K\"uchle. 
However, \ref{num} has the advantage of highlighting the connection between FK3 and \emph{central} Fano varieties, that is Fano such that $h^{p,q}\neq 0$ if and only if $p=q$. When this happens, we say that all the cohomology groups of $X$ have \emph{level} (lv) 0, see \ref{num}. Indeed a future problem we are interested in is the following, possibly up to restriction to some special subcases, with zero locus of sections of homogeneous vector bundles as a first step. \begin{problem} Classify Fano varieties such that lv $(H^j(X,\mathbb{C}))=0$ for all $j$. \end{problem} \subsection{How we subdivide the examples}
We first write down the list of examples that we have found. Later on in the paper we will explain the numerology behind our list, and give a detailed geometrical description of our examples. Our purpose is twofold. Indeed to a Fano of K3-type we want to associate (whenever possible) both a K3 category and an IHS manifold. For the definition of K3 or CY (sub) category we follow \cite{kuzicy}. Before doing this, we need to prove first that the families of Fano that we consider are of K3 type. This is done usually with either Riemann-Roch type computations as for example in \ref{m7} or using our Griffiths ring-type construction as in Proposition \ref{s1}, or via a Borel-Bott-Weil computation, as in Proposition \ref{t129}.
In particular we divide our list into three distinct blocks. We say that a FK3 $X$ is of \emph{blow-up} type (\textbf{B}) if there exists a pair $(Y,S)$, with $S \subset Y$, $Y$ Fano, $S$ K3 surface such that $X \cong Bl_S Y$. Examples of this type are already included in K\"uchle list, \cite{kuchle}, called \emph{c7} and \emph{d3}. We say that a FK3 $X$ is of \emph{Mukai type} (\textbf{M}) if we can reduce systematically the study of its derived category to Mukai's classification of Fano threefolds. We say that a FK3 $X$ is \emph{sporadic} (\textbf{S}) if it does not fall in one of the two previous categories. We collect all our list of examples of FK3 in Table \ref{table}. \\
For FK3 of blow-up and Mukai type the question on the existence of a K3-subcategory always admits a positive answer. This is the content of Propositions \ref{lem:blowup}, \ref{rennemo} and Theorem \ref{cayley}. However the question of existence of an IHS linked to any FK3 is far from being answered. We give an example in Proposition \ref{gp2}. For the FK3 of sporadic type, we do not have any information a priori. For all of them the question on the existence of a K3-subcategory is open, and we even have to cook up ad-hoc methods to show that they are of K3 type (in the Hodge theoretical sense). Here as well there is no easy answer from the IHS viewpoint. A new construction is given for example in Proposition \ref{o2}. Special attention must be placed upon examples (\textbf{S6}) and (\textbf{S7}). Indeed they are cut by irreducible vector bundles which are not linear. We observe as well the appearance of mixed structures of $(2,3)$-CY type. The last part of the paper is devoted to the study of these varieties. The results about IHS are collected in Table \ref{table2}. We point out that we believe that for any of the examples in Table \ref{table} we will eventually be able to construct an example of polarised IHS. We added in both our tables two examples found independently by Iliev and Manivel in \cite{ilievmanivel2}, while our work was still in the very early stage. These are the families \textbf{B1} and \textbf{S3}. Although they were already known we decided to include them anyway in our list, since they fit perfectly in our pattern.
We highlight now the main results and the structure of this paper. \subsection{Results and Structure} This paper is devoted to the construction of a meaningful bunch of examples of Fano varieties of K3 type. We mainly exploit our numerological condition in \ref{num}, coming from a similar analysis to the one we carried out in \cite{eg1}. Our main result can be summarised in \begin{thm}There exist 23 examples of families of Fano varieties of K3 type obtained as zero locus of general global section of homogeneous vector bundles over Grassmannians or products of such. These Fano varieties have dimension $4 \leq n \leq 20$, Picard rank $1 \leq \rho_X \leq 3$ and index $\frac{n-1}{2}\leq \iota_X \leq \frac{n}{2}$. \end{thm} See Table \ref{table} for the list of these Fano varieties. We point out that since they have an index which is comparatively high with respect to the dimension (close to Wisniewski's bound), there could be hope for a classification. For each of these Fano varieties we first needed to prove that they are of K3 type. We either explain geometrically in a systematic way (whenever possible) the presence of K3 structure (both from a Hodge-theoretical and derived category viewpoint) or we give an ad-hoc description for the sporadic cases. We point out that new examples may and will be discovered and analysed in a series of future works.\\ Some of the Fano we analyse have new and interesting behaviours. We collect some of the results here. \begin{thm} There exist prime Fano varieties with multiple CY structures (see Proposition \ref{3k3}) and with mixed Calabi-Yau (2,3) structure (see Proposition \ref{23cy}). \end{thm} To the best of our knowledge, these are the first examples of known prime Fano varieties with this property. The prime hypothesis eliminates the possibility for these CY structures to come from a blow-up, a projective bundle or other related constructions. We link some of these Fano varieties to projective families of IHS manifolds. 
Unfortunately, up to now we have only found new ways of describing old examples, but we believe that a further extensive examination of our list could lead to new constructions. We collect our results here. \begin{thm}We show that the Hilbert square on a K3 of genus 8 is isomorphic to the zero locus of a certain bundle on $\Gr(4,6) \times \Gr(2,6)$, see Proposition \ref{gp2}. We show that the Debarre-Voisin IHS 4-folds are isomorphic to the space of special rational fourfolds on varieties of type $\TT(2,10)$, see Proposition \ref{t2} and to the compactification of the space of $(\mathbb{P}^1)^3$ on a linear section of $\Ml(3,8)$, see Theorem \ref{hk}. \end{thm} These results are collected in Table \ref{table2}. We spend a few words on the structure of this paper. In \textbf{Section 2} we explain how our numerological condition creates the list and we explain some straightforward geometric tricks and a general strategy to attack these Fano varieties. In \textbf{Section 3} we perform a case--by--case analysis of the most interesting examples and we prove our main results. We finish with a bunch of \textbf{Appendices}, where we describe three related cases we encountered: some extra Fano varieties of 3CY type, a trio of infinite series of Calabi--Yau varieties and a Fano variety with a fake K3 structure.
\section{The quest for examples}
\textbf{Notation for the paper and for the tables}
With $\mathcal{R}$ and $\mathcal{Q}$ we denote (respectively) the rank $k$ tautological and the rank $n-k$ quotient bundle on the Grassmannian $\Gr(k,n)$. We fix the convention that $\mathcal{O}_G(1) =\mathrm{det}(\mathcal{Q})=\mathrm{det}(\mathcal{R}^{\vee})$.
$\mathrm{S}_i\Gr(k,n)$ denotes the $i$-th symplectic Grassmannian. The most relevant cases for us are for $i=1$ and $i=2$. For $i=1$ this variety is nothing but the usual symplectic Grassmannian (usually called Lagrangian when $2k=n$), for $i=2$ it is the \emph{bisymplectic Grassmannian}, which will be better defined and characterised later in the paper. If $i=1$ we will simply write $\SGr(k,n)$. $\overline{\SGr(3,6)}$ in the table will denote a linear section of $\SGr(3,6)$. $\OGr(k,n)$ denotes the orthogonal Grassmannian and by $\mathbb{S}_{n}$ we denote one of the two connected components of $\OGr(n,2n)$ in its spinor embedding.
$\TT(k,n)$ denotes the subvariety of $\Gr(k,n)$ cut by the zero locus of a general three-form $\sigma \in \bigwedge^3 V_n^\vee$. According to $k$, $\TT(k,n)$ can be represented as the zero locus of a general global section of a different vector bundle. As an example, if $k=3$, $\TT(3,n)$ is nothing but a linear section of the Grassmannian $\Gr(3,n)$, if $k=2$, $\TT(2,n)$ is the congruence of lines given by the bundle $\mathcal{Q}^*(1)$ and if $k=4$ the bundle is of course $\bigwedge^3 \mathcal{R}^{\vee}$.\\
We use $X_1 \subset G$ to denote a linear section of the variety $G$ (and similarly for higher degree or multidegree). Whenever there might be ambiguity or we want to emphasize the choice of the linear subspace we might write $X_H$. Similarly, sometimes we will use the shorthand $X_{\mathcal{F}} \subset G$ to denote the zero locus of a general global section of the vector bundle $\mathcal{F}$ over $G$.\\
The notation $H^n_{\van}(X)$ (and similarly for the $(p,q)$ part) will denote the vanishing subspace of the cohomology group, see \cite[2.27]{voisin2} for a definition. \\
If $X$ and $Y$ are smooth projective varieties we will use the shorthand $D^b(X) \hookrightarrow D^b(Y)$ to mean that one can construct a semiorthogonal decomposition for $D^b(Y)$ where $D^b(X)$ appears as one of the factors, up to a fully faithful functor.\\
The notation $S_g$ means a K3 surface of genus $g$. With $Q_k$ we indicate the $k$-dimensional quadric hypersurface.
\begin{table}[ht] \centering \begin{tabular}{@{} *9l @{}l @{}l @{}l @{}l @{}l @{}} \toprule no. & \emph{$X \subset Y$}& $\ddim X$ & $\iota_X$ & $\rho_X$ & Comments \\ \midrule B1 & $X_{(2,1,1)} \subset \mathbb{P}^3 \times \mathbb{P}^1 \times \mathbb{P}^1$ &4& 1 & 3& $X \cong Bl_{S_7} (\mathbb{P}^3 \times \mathbb{P}^1)$\\ B2 & $X_{(2,1)} \subset \Gr(2,4) \times \mathbb{P}^1$& 4&1&2& $X \cong Bl_{S_5} \Gr(2,4)$\\ M1 & $X_{(1,1,1)} \subset \mathbb{P}^3 \times \mathbb{P}^3 \times \mathbb{P}^3$& 8& 3&$ 3$ &$D^b(S_{3} )\hookrightarrow D^b(X)$ \cite[Section 4]{ilievmanivel2} \\ M2 & $X_{(1,1,1)} \subset Q_3 \times \mathbb{P}^2 \times \mathbb{P}^2$ & 6 & 2& 3& $D^b(S_{4} )\hookrightarrow D^b(X)$\\ M3 & $X_{(1,1)} \subset \Gr(2,5)\times Q_5$& 10 &4& 2& $D^b(S_6) \hookrightarrow D^b(X)$ \\ M4 & $X_{(1,1)} \subset \SGr(2,5) \times Q_4$&8&3& 2&$D^b(S_6) \hookrightarrow D^b(X)$ \\ M5& $X_{(1,1)} \subset \Ml(2,5) \times Q_3$& 6&2&2& $D^b(S_6) \hookrightarrow D^b(X)$\\ M6& $X_{(1,1)} \subset \mathbb{S}_{5} \times \mathbb{P}^7$& 16& 7& 2&$D^b(S_7) \hookrightarrow D^b(X)$\\ M7 & $X_{(1,1)} \subset \Gr(2,6) \times \mathbb{P}^5$& 12 & 5& 2 & $D^b(S_{8}) \hookrightarrow D^b(X)$\\ M8& $X_{(1,1)} \subset \SGr(2,6) \times \mathbb{P}^4$& 10& 4& 2&$D^b(S_8) \hookrightarrow D^b(X)$\\ M9& $X_{(1,1)} \subset \Ml(2,6) \times \mathbb{P}^3$& 8 &3& 2 & $D^b(S_8) \hookrightarrow D^b(X)$\\ M10 & $X_{(1,1)} \subset \SGr(3,6) \times \mathbb{P}^3$&8&3& 2&$D^b(S_9) \hookrightarrow D^b(X)$ \\ M11 & $X_{(1,1)} \subset \overline{\SGr(3,6)} \times \mathbb{P}^2$&6&2& 2&$D^b(S_9) \hookrightarrow D^b(X)$ \\ M12& $X_{(1,1)} \subset \mathrm{G}_2 \times \mathbb{P}^2$& 6& 2& 2& $D^b(S_{10}) \hookrightarrow D^b(X)$\\ M13& $X_{(1,1)}\subset \Gr(2,8)\times \mathbb{P}^3$ & 14 & 1 & 2 & $D^b(S_3) \hookrightarrow D^b(X)$\\ S1 & $X_{1^4} \subset \Gr(2,8)$& 8 & 4& 1 & $D^b(S_3) \hookrightarrow D^b(X)$ \\ S2& $X_1 \subset \OGr(3,8)$ &8&3& 2& $D^b(S_7) \hookrightarrow D^b(X)$\\ S3& $X_1 \subset \SGr(3,9)$ &14&6& 1 & 
\cite[Section 5]{ilievmanivel2}\\
S4& $X_1 \subset \Ml (3,8)$ &8&3& 1 & \\ S5& $X_1 \subset \TT(2,9)$ &6&2& 1 &\\ S6& $\TT(2,10)$ &8&3& 1 &$ 3 \times $ K3 structure\\ S7& $X_1 \subset \TT(2,10)$ &7&2& 1 & $ 2 \times $ K3 structure, $ 1 \times $ 3CY\\ S8& $X_{L} \subset \TT(k,10)$ &&& 1 & invariants depending by $k$ and $L$\\
\bottomrule
\hline \end{tabular} \captionof{table}{Fano of K3 type with invariants} \label{table}
\end{table}
\begin{table}[ht] \centering \begin{tabular}{@{} *9l @{}l @{}l @{}l @{}l @{}l @{}} \toprule no. & \emph{$X \subset Y$} & IHS $Z$& Comments\\ \midrule
M1 & $X_{(1,1,1)} \subset \mathbb{P}^3 \times \mathbb{P}^3 \times \mathbb{P}^3$ & \cite[Section 4]{ilievmanivel2} & $Z \cong Hilb^2 S_3$ \\ M7 & $X_{(1,1)} \subset \Gr(2,6) \times \mathbb{P}^5$& Prop.\ref{gp2}& $Z \cong Hilb^2 S_8$\\ S2& $X_1 \subset \OGr(3,8)$ & Prop.\ref{o2}&$Z \cong S_7$\\ S3& $X_1 \subset \SGr(3,9)$ & \cite[Section 5]{ilievmanivel2}& $Z \cong Z_{DV}$\\ S4& $X_1 \subset \Ml (3,8)$ & Thm. \ref{hk} &$Z \cong Z_{DV}$\\ S6&$\TT(2,10)$ & Prop. \ref{t2}&$Z \cong Z_{DV}$\\
\bottomrule
\hline \end{tabular} \captionof{table}{Projective families of IHS linked to FK3} \label{table2}
\end{table}
\subsection{What are we looking for?}
Many of the examples in the above table are obtained by chasing up the same numerology. Indeed from arguments similar to the one used in \cite{eg1} one can come up with a numerical criterion (cf. \cite{ilievmanivel} and \cite{kuzicy} for comparison and similar criteria). For a smooth projective variety we define the \emph{level} of $H^j(X, \mathbb{C})$ as the largest difference $|p-q|$ for which $H^{p,q}(X) \neq 0$, with $p+q=j$.
It is obvious that lv$(H^j(X, \mathbb{C})) \leq $ wt $(H^j(X, \mathbb{C})) \leq \ddim X$. For a Fano variety by Kodaira vanishing the first inequality is always strict. For example, if $X$ is a Fano of dimension $n$, then $\mathrm{lv} (H^n(X,\mathbb{C})) \leq \ddim X-2$. Moreover we say that a variety $X$ is \emph{central} if all of its $H^j$ have level zero, or equivalently if $h^{p,q}(X)=0$ for $p \neq q$. \begin{criterion}\label{num} Let $Y$ be a smooth projective Fano variety of dimension $2t+1$ and index $\iota_Y$.
Assume that $t$ divides $\iota_Y $ and that lv$(H^{2t+1}(Y))\leq 1$. Then a generic $ X \in | -\frac{1}{t} K_Y |$ is a Fano variety of K3 type, with the K3-type structure located in degree $2t$. \end{criterion} The above criterion is not necessary. Notable exceptions are \textbf{(S1)} (where the divisibility relation does not hold) and \textbf{(S6)}, where the decomposition in irreducibles of the bundle that cuts the variety has no linear factor (albeit the variety has the correct ratio between dimension and index), and moreover two K3 sub-Hodge structures are present, in degrees 6 and 8.\\ To the best of our knowledge the above numerology admits no counterexample. However the cohomological vanishings required to potentially prove the statement are ad-hoc, and there seems to us no easy way to transform the above statement into a proper theorem. However it is a cheap and easy way to produce several candidates, which turn out to be all of the desired type. We do not feel confident enough to state it as a conjecture, as it stands. There could be ways of turning it into a statement or a conjecture. For example we could ask for $Y$ to have a rectangular Lefschetz decomposition in the categorical sense. Or, whenever $Y$ itself is cut by a section of a homogeneous vector bundle $\mathcal{F}= \bigoplus \mathcal{F}_i$ on $\Gr(k,n)$, we might ask that the slope $\mu (-\frac{1}{t} K_Y) > \mu(\mathcal{F}_i)$ for all $i$. However, for the purpose of the current paper, we prefer to leave it as it is, and we plan to formalise this statement in a future work. \subsubsection{Some numerology (and how the list is created)} The list of FK3 in the tables has no presumption of being complete. The main problem is the condition on the level of Hodge theory of the ambient variety, which is quite hard to control. The first case to investigate is the one of complete intersections in homogeneous varieties. 
We conjectured in \cite{eg1} that there are no more FK3 as complete intersection in $\Gr(k,n)$ other than the well-known cubic fourfold, the Gushel-Mukai fourfold, the Debarre-Voisin twentyfold hypersurface and a codimension four linear section of the Grassmannian $\Gr(2,8)$. We have not been able to prove this conjecture yet, but no counterexample has been found either.\\ We tried as well hypersurfaces in other homogeneous varieties other than $\Gr(k,n)$, for example using the list of Konno in \cite{konno2}, but none of them satisfied the above condition. For the complete intersections in homogeneous spaces, we do not have any reasonable conjecture. Atanas Iliev informed us that a FK3 variety can be obtained by taking a 6-codimensional linear section of the $E_6$ variety $\mathbb{O}\mathbb{P}^2$, but we have not pursued this direction yet.\\ Already in this paper we analyse some extra cases that do not fit in our numerological pattern. This is for example the case of $\TT(k,10)$ (and its linear section). However, since this is the only reasonable systematic way to produce examples, we decided to write a few lines to explain how the list was found and why it stops. To do this, we decided to use as key varieties $Y$ examples that automatically satisfied the Hodge theoretical condition in \ref{num}.
Let $G$ be one of the varieties below. Consider the positive integer $m$ such that $\omega_G\cong \mathcal{O}_G(-m)$ and $D=\ddim G$. The equations in \ref{num} become \begin{equation}\label{condition}2t+1=D \textrm{ and } at=m. \end{equation} \subsubsection*{$\Gr(k,k+l)$} For the Grassmannian $\Gr(k,k+l)$ the dimension is $D=lk$ and the index equals $k+l$. First notice that $D$ must be odd. The equations are $2t+1=kl$ and $at=k+l$, for some $a$. Substituting we get $\frac{a(kl-1)}{2}=k+l$ and thus $akl=a+2k+2l$. Since $a\geq 1$ we have $kl \leq a+2k+2l$. It is easy to see that there are no solutions if $k\geq 5$, and for obvious reasons the cases $k=2,4$ are excluded. In the case $k=3$ substituting we get $l=\frac{a+6}{3a-2}$. This implies $a< 3$ for the previous number to be an integer. The case $a=2$ gives an even dimensional Grassmannian, so we discard it. The case $a=1$ corresponds to $G=\Gr(3,10)$. The associated FK3 is the Debarre-Voisin variety. \subsubsection*{$\SGr(k,k+l)$} The symplectic Grassmannian $\SGr(k,k+l)$ has dimension $kl- {k \choose 2}$ and index equal to $l+1$. If we substitute this in the equation above and look for solutions we find as triples $(k,l,a)=(2,3,2),(3,6,1), (5,3,2), (10,6,1)$. However, if $\omega$ is a non-degenerate skew symmetric $(k+l) \times (k+l)$ matrix, there are no $k$-dimensional isotropic subspaces if $k>l$ and $k+l$ even. We can therefore discard the last two triples and we are left with $X_2 \subset \SGr(2,5)$ (Gushel-Mukai fourfold) and $X_1 \subset \SGr(3,9)$, already considered in
\cite{ilievmanivel2}.
\subsubsection*{$\Ml(k,k+l)$}
The bisymplectic Grassmannian $\Ml(k,k+l)$ has dimension $kl-k(k-1)$ and index equal to $l-k+2$. If we substitute in the equation above and look for solutions we find as triples $(k,l,a)=(3,5,1), (5,5,1)$. The second one can be identified with a (multi)-linear section of $(\mathbb{P}^1)^5$, see \cite{kuznetsovpicard}, the first one, an 8-fold linear section of $\Ml(3,8)$, is new.\\
A similar computation can be done for the tri-symplectic Grassmannian $\mathrm{S}_3\Gr(k,k+l)$. This is relevant since two K3 surfaces by Mukai (genus 6 and genus 12) can be considered as (respectively) quadratic and linear sections of it. However, no more examples have been found. \subsubsection*{$\OGr(k,k+l)$} The orthogonal Grassmannian $\OGr(k,k+l)$ has dimension $kl-{k+1 \choose 2}$ and index $l-1$ (with respect to the Pl\"ucker line bundle $\mathcal{O}_G(1)$, albeit non-irreducible in the Picard group). The only admissible triple is $(3,5,1)$. This is a linear section of the orthogonal Grassmannian $\OGr(3,8)$. \subsubsection*{$Z_{\mathcal{Q}^*(1)}$} This variety is the zero locus of a general global section of the bundle $\mathcal{Q}^*(1)$ on $\Gr(k, k+l)$. If $k=2$, it is $\TT(k,k+l)$. It has dimension $l(k-1)$ and index $k+1$. There are two admissible triples, $(2,7,1), (6,3,1)$. However the second one can be identified with $X_1 \subset \SGr(3,9)$. The first one, $X_1 \subset \TT(2,9)$, is new. Notice that we find as well the generic K3 of genus 4 as $(2,3,3)$ since the zero locus of $Q^*(1)$ on $\Gr(2,5)$ is a quadric threefold. There are as well some FK3 obtained by $\TT(2,n)$. However, they do not fall in this pattern, and we will examine them separately. \subsubsection*{\textit{Other varieties}} We tried other bundles to produce varieties of K3 type, such as $\mathcal{R}^{\vee}(1)$ or the locus of $\Sym^2 \mathcal{R}^{\vee} \oplus \bigwedge^2 \mathcal{R}^{\vee}$ (the \emph{orthosymplectic Grassmannian}). Even with no guarantee on the weight of the Hodge structure, our attempt was motivated by some examples in the list of K\"uchle, see \cite{kuznetsovpicard}. However, we found no new examples. \subsubsection*{\textit{Products}} Products of projective spaces do produce a handful more of examples. One can easily see that no more than 5 projectives can be involved, with the extremal case being $X_{(1^5)} \subset (\mathbb{P}^1)^5$. 
Other examples are $X_{(1,1,1)} \subset (\mathbb{P}^3)^3$, and $X_{(2,1,1)} \subset \mathbb{P}^3 \times \mathbb{P}^1 \times \mathbb{P}^1$. In the products of Grassmannians when $k>1$, no further example is found. Indeed a product of Grassmannians has index gcd$(k_i+l_i)$. Substituting in the equations, one first finds that no more than two Grassmannians can be used, and only one of them can have $k>1$. The possible cases are $X_{(1,1)} \subset \Gr(2,6) \times \mathbb{P}^5$ and $X_{(2,1)} \subset \Gr(2,4) \times \mathbb{P}^1$. Identical computations yield all the remaining cases.
\subsection{Geometric tools and tricks} \subsubsection{A blow-up lemma} We state here a blow up lemma. Although it merely descends from definitions, it is worth to recall it. It is worth to point out that a similar lemma is used in \cite{kuznetsovpicard}. \begin{lemma}\label{lem:blowup} Let $X=X_{(d, 1)} \subset Z \times \mathbb{P}^1$. Then $X \cong Bl_S Z,$ where $S$ is the intersections of $2$ divisors of degree $d$ on $ Z$. \end{lemma} \begin{proof} Let $\mathbb{P}^1=\Proj(\mathbb{C}[y_0,y_1])$ and $V^{\vee} \cong \mathbb{C}[y_0,y_1]_1$. (that is, homogeneous forms of degree 1). Denote by $W^{\vee} \cong H^0(\mathcal{O}_{Z}(d)) $. $X$ is given by definition by a choice of $\lambda \in W^{\vee} \otimes V^{\vee}$, or equivalently by a map (that we will still denote by $\lambda$) $\lambda: V \longrightarrow W^{\vee}.$ This map gives a 2-dimensional subspace of $ W^{\vee}$, or equivalently a pencil of divisors in $ Z$. The base locus of this pencil coincides with the $S$ defined in the lemma. The (only) incidence equation for the blow up of $Z$ in $S$ is $y_0f_d+y_1g_d$ and this is of course the same equation defining $X$. This proof admits an obvious generalisation when $\rho(Z) >1$. \end{proof} \subsubsection{Higher codimension case and Cayley trick(s)} The above blow-up lemma admits a higher-codimensional generalisation. Indeed, when $X$ is the zero locus of a $(1,1)$ divisor in $U \times \mathbb{P}^{r-1}$ (with the obvious generalisation if $\rho(U) >1$) then $X$ can be given either by an element of $ W^{\vee} \otimes V_r^{\vee}$ or as a map $$\lambda: V_r \longrightarrow W^{\vee}.$$ If $r>2$ we cannot identify $X$ with any birational modification of the pair $(U,S)$, where $S$ is the base locus of the above linear system. However $X$ and $S$ share a deep relation, known as the \emph{Cayley trick}. More precisely the result is the following \begin{thm}[Thm. 
2.10 in \cite{orlov}, Thm 2.4 in \cite{kimkim}] \label{cayley} Let $q:E \rightarrow U$ be a vector bundle of rank $r\ge 2$ over a smooth projective variety $U$ and let $S=s^{-1}(0)\subset U$ denote the zero locus of a regular section $s \in H^0(U,E)$ such that $ \dim S = \dim U - \mathrm{rank}\, E$. Let $X=w^{-1}(0) \subset \mathbb{P} E^\vee$ be the zero locus of the section $w\in H^0(\mathbb{P} E^\vee, \mathcal{O}_{\mathbb{P} E^\vee}(1))$ determined by $s$ under the natural isomorphism $H^0(U,E)\cong H^0(\mathbb{P} E^\vee, \mathcal{O}_{\mathbb{P} E^\vee}(1))$. Then we have the semiorthogonal decomposition $$ D^b(X)= \langle q^*D^b(U), \cdots, q^*D^b(U) \otimes_{\mathcal{O}_X} {\mathcal{O}_X}(r-2), D^b(S) \rangle .$$ \end{thm} When this happens, we will write $D^b(S) \hookrightarrow D^b(X)$. There is as well an (older) analogue Hodge-theoretic statement, cf. Prop. 4.3 in \cite{konno}, stating that the vanishing cohomologies of $S$ and $X$ are isomorphic up to a shift. When the hypotheses of the above Theorem are verified, this therefore proves at once that $X$ is of K3-type.\\ The Cayley trick can be generalised in the following way, using the formalism of Homological projective duality. \begin{proposition} \label{rennemo} Let $Y_1$ and $Y_2$ be a pair of varieties with Lefschetz decompositions and embedded in $\mathbb{P}(V)$. Let $Z_H$ be the intersection of $Y_1 \times Y_2$ with a general (1,1)-divisor $H$. Let $f_H$ be the map that $H$ naturally defines from $\mathbb{P}(V)$ to $\mathbb{P}(V^\vee)$. Let $X_H = Y_1 \cap f_H^{-1}(Y_2^\vee$), where $Y_2^\vee$ is the Homological Projective dual to $Y_2$. Then $D(X_H) \hookrightarrow D(Z_H)$. \end{proposition} \begin{proof} Let $D(Y_2)=\langle A_0,A_1(1),\dots A_{m}(m) \rangle$ be the given Lefschetz decomposition of $Y_2$. The divisor $H$ parametrizes, for every point of $Y_1$, an hyperplane section of $Y_2$, hence it defines a map $f_H\,:Y_1\,\rightarrow\,\mathbb{P}(V^\vee)$. 
In this way, $Z_H$ is identified with the pullback through $f_H$ of the universal hyperplane section $\mathcal{Y}_2\subset Y_2\times \mathbb{P}(V^\vee)$. Now, by \cite[Lemma 3.3]{kuzi_sod2} we have $$D(\mathcal{Y}_2)=\langle D(Y_2^\vee), A_1(1)\boxtimes D(\mathbb{P}(V^\vee)),\dots, A_m(m)\boxtimes D(\mathbb{P}(V^\vee)) \rangle.$$ By applying base change \cite[Thm 5.6]{kuzi_sod} to the diagram $$\xymatrix{ Z_H \ar[d]^\iota \ar[r] & \mathcal{Y}_2\ar[d]^{\pi_2} \\ Y_1\ar[r]^{f_H} & \mathbb{P}(V^\vee), } $$ we obtain: $$D(Z_H)=\langle D(Y^\vee_2 \times_{\mathbb{P}(V^\vee)} Y_1), A_1(1)\boxtimes D(Y_1),\dots, A_m(m)\boxtimes D(Y_1)\rangle.$$ And the variety in the first factor here is precisely $X_H=Y_2^\vee \times_{\mathbb{P}(V^\vee)} Y_1=Y_1 \cap f_H^{-1} (Y_2)^\vee.$ \end{proof}
\section{Case-by-case analysis}
\subsection{Identifications} Before analysing in detail the examples in our list, we want to eliminate some varieties that are well-known examples in disguise. We recall some results of Kuznetsov, that we conveniently bundle together. Recall that the variety $\Ml(k,n)$ is the \emph{bisymplectic Grassmannian}. It can be thought of either as the intersection of two symplectic Grassmannians $\SGr(k,n)$ inside $\Gr(k,n)$ or as the zero locus over $\Gr(k,n)$ of a general global section of the bundle $\bigwedge^2 \mathcal{R}^{\vee} \oplus \bigwedge^2 \mathcal{R}^{\vee}$. We will better describe this variety later in the paper. \begin{thm}[Thm 3.1 and Cor. 3.5 in \cite{kuznetsovpicard}]\label{kuzzolo}The following hold: \begin{itemize} \item There is an isomorphism $\Ml(n,2n) \cong \prod (\mathbb{P}^1)^n$; \item The variety $X_{(1,1,1,1,1)} \subset \prod (\mathbb{P}^1)^5$ is isomorphic to $W=Bl_S(\prod (\mathbb{P}^1)^4)$, where $S=S_{(1,1,1,1)^2}$ is a non-generic K3 surface of genus $g=13$, given as the intersection of two divisors of multidegree $(1,1,1,1)$. \end{itemize} \end{thm} Some of the Fano of K3 type that we found in our search can be actually identified with the $W$ above. For this reason they are not included in our main table. More precisely we have \begin{lemma} Let $W$ be the Fano of K3 type in \cite{kuznetsovpicard} defined above. Then the following Fano varieties of K3 type \begin{itemize} \item $X_{(1,1,1,1,1)} \subset Q_2 \times Q_2 \times \mathbb{P}^1$; \item $X_{(1,1,1,1,1)} \subset \Ml(4,8) \times \mathbb{P}^1;$ \item $X_{(1,1,1,1,1)} \subset \Ml(3,6) \times \Ml(2,4);$ \end{itemize} are isomorphic to $W$. \end{lemma} \begin{proof} The first case is obvious, since $Q_2 \cong \mathbb{P}^1 \times \mathbb{P}^1$. For the other two cases, by definition and Kuznetsov's result $\Ml(n,2n)$ coincides with $\prod (\mathbb{P}^1)^n$. \end{proof} There is one more identification between two numerological candidates. 
\begin{lemma} $X_{(1,1,1)} \subset \mathbb{S}_3 \times \mathbb{P}^1 \times \mathbb{P}^1 \cong X_{(2,1,1)} \subset \mathbb{P}^3 \times \mathbb{P}^1 \times \mathbb{P}^1$. \end{lemma} \begin{proof} It follows from the well known identification $ \mathbb{S}_3 \cong \mathbb{P}^3$, see for example \cite{kuznetsovs}. The difference in the degree is explained since the line bundle giving the spinor embedding for $\mathbb{S}_3 $ is the square root of the Pl\"ucker one. \end{proof}
\subsection{Blow-up and Mukai type} To prove that each of the varieties of type M and B is of K3 type one can use the Cayley trick statement, as in Theorem \ref{cayley}. Indeed the (stronger) derived category statement implies the Hodge theoretical one. Indeed this can be seen by writing down such a semiorthogonal decomposition as prescribed by \ref{cayley} and then taking Hochschild homology. Alternatively one can use Riemann-Roch and standard exact sequences to compute the relevant Hodge numbers. We did these calculations as sanity checks for all our examples, however we believe it is neither worth nor interesting to list all of them, since they are quite similar. Therefore we will include just one example, namely Proposition \ref{gq1}, where Theorem \ref{cayley} does not apply in a straightforward way. For the families B1 and B2, Lemma \ref{lem:blowup} settles the matter. \\ In terms of construction of polarised families of IHS, we investigate another construction of the Hilbert scheme of points on a genus 8 K3, see Proposition \ref{gp2}. We believe that each of the examples in our list of Fano could lead to similar constructions: this would be especially interesting, considering the lack of examples of polarised families of Hilbert schemes of points on K3 surfaces.
\subsection{M3: a (different) computation in intersection theory} The variety M3 is $X_{(1,1)} \subset \Gr(2,5) \times Q_5$. It has dimension 10 and index 4. It is neither a blow up with a center in a K3 surface, nor can we apply the Cayley trick. However we can show that it is a Fano of K3 type using Proposition \ref{rennemo}. Indeed we have \begin{lemma} Let $S_6$ be a K3 surface of genus 6 in the Mukai model and $X$ our M3 as defined in the table. Then $D^b(S_6) \hookrightarrow D^b(X)$. \end{lemma} \begin{proof} It suffices to apply Proposition \ref{rennemo}, since the Grassmannian $\Gr(2,5)$ (or even a quadric hypersurface) is projectively self-dual. The intersection of the Grassmannian $\Gr(2,5)$ with a 5-dimensional quadric (or, equivalently, the intersection of $\Gr(2,5)$ with a quadric and 3 hyperplanes in its Pl\"ucker embedding) is a K3 of genus 6 and degree 10 by Mukai's classification. To conclude one needs to argue that the orthogonal complement to the derived category of $D^b(S_6)$ in $D^b(X)$ is generated by an exceptional collection, and then take Hochschild homology (which is additive on semi-orthogonal decompositions), together with the Hochschild-Kostant-Rosenberg isomorphism, cf. \cite[Theorem 7.5, 8.3]{kuzhkr}. \end{proof}
As an alternative method we can show that M3 is of K3 type using a lengthy (but rather standard) play with long exact sequences and cohomological vanishings. \begin{proposition}\label{gq1}Let $X=X_{(1,1)} \subset \Gr(2,5) \times Q_5$. Then $X$ is of K3 type. \end{proposition} The proof of the above proposition can be split into two lemmas. The first one is a Chern class computation, the second one is essentially an application of Bott's theorem. \begin{lemma} \label{erchar}The topological Euler characteristic of $X$ is $e(X)=72$. \end{lemma} \begin{proof}This is a lengthy (but direct) exercise in intersection theory, and we will spare the reader the details. Let us denote $Y=\Gr(2,5) \times Q_5$. Denote by $\alpha_1=c_1(\mathcal{O}_Q(-1))$ and $\beta_1=c_1(\mathcal{O}_G(-1))$. Denote by $\beta_2=c_2(\mathcal{R})$. One has $H^4(\Gr(2,5), \mathbb{Z}) = \langle \beta_1^2, \beta_2 \rangle$. One easily computes $c(Q)$, $c(G)$ and $c(Y)=c(G)c(Q)$. In particular by Gauss-Bonnet $c_{11}(Y)=-6 \alpha_1^5 \beta_1^6$ and $$e(Y)= \int_{Y} -6 \alpha_1^5 \beta_1^6=60.$$
We then use the normal sequence associated to $X$ $$ 0 \to T_X \to TY|_X \to \mathcal{O}_X(1,1) \to 0.$$
This implies $c(TY|_X)=c(X)(1-\alpha_1-\beta_1)$. We can compute recursively the Chern classes of $X$, with in particular $$c_{10}(X)=(9\alpha_1^5\beta_1\beta_2^2+9\alpha_1^4\beta_1^2\beta_2^2)|_X.$$ To compute the restriction we evaluate against the class of $X$, and we have $ c_{10}(X) \cdot X=18\alpha_1^5\beta_1^2\beta_2^2.$ Using the relation in $A(G)$ given by $2\beta_1^5=5\beta_1\beta_2^2$ we get $$c_{10}(X) \cdot X= \frac{2 \cdot 18}{5}\alpha_1^5 \beta_1^6=\frac{6}{5}e(Y)=72.$$ \end{proof}
\begin{lemma} For $0 \leq i \leq 3$ we have $h^{i, 10-i}(X)=0$. Moreover $h^{6,4}(X)=h^{4,6}(X)=1$. \end{lemma} \begin{proof} As before let us denote $Y=\Gr(2,5) \times Q_5$, and with $\mathcal{L} \cong \mathcal{O}_Y(1,1)$ (and its restriction to $X$ as well). We use the following two exact sequences \begin{equation} \label{seq1}
0 \to \Omega_X^{k-1} \otimes \mathcal{L}^{\vee} \to \Omega^k_{Y|X} \to \Omega^k_X \to 0 \end{equation} and
\begin{equation}\label{seq2} 0 \to \Omega^k_Y \otimes \mathcal{L}^{\vee} \to \Omega^k_Y \to \Omega^k_{Y|X}\to 0, \end{equation} possibly twisting for some positive multiple of $\mathcal{L}^{\vee}$ when required. The computation is rather lengthy and technical, and we will skip most of the details. To find similar computations the reader can refer to \cite{eg1}. For the results on the cohomological vanishings for both $\Gr(2,5)$ and $Q_5$ one can consult for example \cite{peternell}, \cite{snow}.\\ The first vanishing $h^{0,10}(X)$ is obvious. Let us show the first non-obvious one, that is $h^{1,9}(X)=0$. Consider the two sequences \ref{seq1} and \ref{seq2} above with $k=1$. Using the K\"unneth formula one easily sees that the cohomology of $\Gr(2,5) \times Q_5$ is of Lefschetz-type. Moreover from Kodaira vanishing and since $H^{10}(X, \mathcal{L}) \cong H^{0}(X, \mathcal{O}_X(-3,-3))=0$ one reduces to
$$ 0 \to H^9(\Omega^1_{Y|X}) \to H^9 (\Omega^1_X) \to 0 $$ and $$ 0 \to H^9(\Omega^1_{Y|X}) \to H^{10}(\Omega^1_Y \otimes \mathcal{L}^{\vee}) \to 0.$$
However, if we denote with $\pi_1$ (resp. $\pi_2$) the projection on $\Gr(2,5)$ (resp. $Q_5$) we have $\Omega^1_Y \cong \pi_1^* \Omega^1_{\Gr(2,5)} \oplus \pi_2^* \Omega^1_Q$, and from the K\"unneth formula for the box product and the well known vanishings for the twisted cohomologies of $\Gr(2,5)$ and $Q_5$ we have $$H^{10}(\Omega^1_Y \otimes \mathcal{L}^{\vee})\cong H^9(\Omega^1_{Y|X}) \cong H^9 (\Omega^1_X)=0.$$ For $h^{2,8}(X)$ we use the sequences \ref{seq1} and \ref{seq2} with $k=2$ and $k=1$ twisted by $\mathcal{L}^{\vee}$. Indeed one has from \ref{seq1}
$$ 0 \to H^8(\Omega^2_{Y|_X}) \to H^8(\Omega^2_X) \to H^9(\Omega^1_X) \to H^8(\Omega^2_{Y|_X}) \to 0.$$ The two external terms can be checked to be 0 using \ref{seq2}, again together with the K\"unneth formula and the usual vanishings (using the decomposition for $\Omega^2_Y$). Using the twisted version of \ref{seq1} and \ref{seq2} we reduce to the isomorphism $H^8(\Omega^2_X) \cong H^{10}((\mathcal{L}_X^{\vee})^{\otimes 2})=0$. The same argument works as well for $h^{3,7}(X)=0$, where for $h^{4,6}(X)$ we get $$H^6(\Omega^4_X) \cong H^{10}((\mathcal{L}_X^{\vee})^{\otimes 4})\cong H^0(\mathcal{O}_X) \cong \mathbb{C}.$$ \end{proof}
The last Lemma is enough to prove that $X$ is of K3 type. In particular, when combined with Lemma \ref{erchar} we explicitly compute all the Hodge numbers. The following corollary is in fact proved bundling the two results above, together with the Lefschetz theorem on hyperplane sections and a direct application of the K\"unneth formula. \begin{corollary} Suppose $p+q \neq 10$. The only non-zero Hodge numbers $h^{p,q}$ of $X$ are $$h^{0,0}=h^{10,10}=1, \ h^{1,1}=h^{9,9}=2, \ h^{2,2}=h^{8,8}=4, \ h^{3,3}=h^{7,7}=6, \ h^{4,4}=h^{6,6}=8.$$ For $p+q=10$ the only non-zero Hodge numbers are $$h^{6,4}=h^{4,6}=1, \ h^{5,5}=28,$$ with moreover the dimension of the vanishing cohomology subspace $h^{5,5}_{\van}=19$. \end{corollary}
\subsection{M7: another construction of $S_8^{[2]}$} The 12-fold $X_{M7}$ is given by the zero locus of a (1,1) section on $\Gr(2,6) \times \mathbb{P}^5$. Let $S_8=\Gr(2,6) \cap H_1 \cap \ldots \cap H_6$. Then $S_8$ is a general K3 surface of genus 8 in Mukai's model. From the Cayley trick argument one has that $D^b(S_8) \hookrightarrow D^b(X_{M7})$. On the Hodge-theoretical level indeed we have: \begin{lemma}\label{m7} Let $X_{M7}$ as above. Then $X_{M7}$ is of K3 type with $h^{6,6}=31$ and the vanishing subspace $h_{\van}^{6,6}=19$. \end{lemma} \begin{proof} Since $\Gr(2,6) \times \mathbb{P}^5$ is a central variety, it is enough to compute the Euler characteristics $\chi(\Omega^i)$ for $i=5,6$. This can be done for example via Riemann-Roch or using Macaulay2. \end{proof} As expected, we can associate to $X_{M7}$ an IHS, which is linked to the genus 8 K3. To do this, let $Z$ be given by the zero locus of a general global section of the bundle $\bigwedge^2 \mathcal{R}^{\vee}_{4,6} \otimes \mathcal{R}^{\vee}_{2,6}$ on $\Gr(4,6) \times \Gr(2,6)$. We have the following proposition. \begin{proposition} $Z$ is an IHS fourfold. \end{proposition} \begin{proof}Recall the formula for the first Chern class of a product $c_1(\bigwedge^2 \mathcal{R}^{\vee}_{4,6} \otimes \mathcal{R}^{\vee}_{2,6})= \mathrm{rk}(\mathcal{R}^{\vee}_{2,6})\cdot c_1(\bigwedge^2 \mathcal{R}^{\vee}_{4,6} )+\mathrm{rk}(\bigwedge^2 \mathcal{R}^{\vee}_{4,6})\cdot c_1( \mathcal{R}^{\vee}_{2,6}).$ By adjunction it follows that for a general section $Z$ is a smooth fourfold with $c_1=0$. We compute now its holomorphic Euler characteristic $\chi(\mathcal{O}_Z)$. This can be done for example via a Riemann-Roch computation, since $$\chi(\mathcal{O}_Z) = \frac{c_2^2-c_4}{720}.$$ We will use a Macaulay2 code in order to speed up the calculation. 
\begin{verbatim} loadPackage "Schubert2" k1=2, l1=4, k2=4, l2=2; G26=flagBundle({k1,l1}, VariableNames=>{r1,q1}); (R1,Q1)=G26.Bundles; V=abstractSheaf(G26, Rank=>6); G46=flagBundle({k2,l2}, V, VariableNames=>{r2,q2}); (R2,Q2)=G46.Bundles; p=G46.StructureMap; R1G46=p^*(dual R1); F=R1G46**exteriorPower_2 dual R2; Z=sectionZeroLocus F; chi(OO_Z); \end{verbatim} Running the previous code one verifies $\chi(\mathcal{O}_Z)=3$. In particular the statement follows by simply applying the Beauville-Bogomolov decomposition theorem. \end{proof} The deformation type of $Z$ can be shown to be the expected one as follows. \begin{proposition}\label{gp2} $Z$ is isomorphic to Hilb$^2(S_8)$. \end{proposition} \begin{proof} Let $h \in \bigwedge^2V_6^* \otimes V_6^*$ be the tensor defining $X_{M_7}$. As above, we can consider $h$ as a morphism $$h: V_6 \to \bigwedge^2 V_6^*.$$ A point in Hilb$^2(S_8)$ is therefore given by a pair $(u_1, u_2)$, $u_i \in \bigwedge^2 V_6$ on both of which $h$ vanishes. Consider $W \subset \bigwedge^2 V_6$ spanned by $u_1, u_2$. Consider further the restricted morphism $\overline{h}^t: W \to V^\vee_6$. This has rank 2, and we can take $P= \mathrm{Im}(\overline{h}^t)$. By construction $h$ vanishes on the pair $(W,P) \in \Gr(4,6) \times \Gr(2,6)$, thus defining a point in $Z$. From this construction, it is clear that $W$ determines $P$. Moreover, the map we constructed inside $\Gr(4,6)$ can be seen as the same map (after duality) which associates to Hilb$^2(S_8)$ a line in the Pfaffian cubic fourfold, hence it is an isomorphism. \end{proof} We point out the similarities between this construction and \cite[Proposition B.6.3]{kps}. Here it is proved how the variety of lines (resp. conics) of a smooth cubic threefold (resp. a generic Fano threefold of genus 8) is isomorphic to a section of the bundle $\bigwedge^2 \mathcal{R}^{\vee}_{4,5} \otimes \mathcal{R}^{\vee}_{2,5}$ over $\Gr(4,5) \times \Gr(2,5)$. 
In turn, their proof can be modified to give an alternative proof of \ref{gp2}. \subsection{Sporadic examples} This subset of the list is the most interesting one. Indeed for these Fanos we cannot produce a systematic method as in the \emph{Mukai} case. For each one of them already proving that they are of K3 type requires an ad-hoc strategy. Our most interesting results come indeed from this section: indeed we reinterpret the Debarre-Voisin IHS fourfold as a moduli space of relevant objects on a Fano of K3 type in two different ways, namely as in Theorem \ref{hk} and Proposition \ref{t2}. Moreover we produce the first examples of a Fano with multiple K3 structures, cf. Proposition \ref{3k3}, and with a mixed $(2,3)$ CY structure, cf. Proposition \ref{23cy}. Moreover we do not limit ourselves to the computation of the Hodge numbers: we give indeed geometrical descriptions of many of the examples we consider, since we believe them to be a rich and beautiful source of geometries. \subsection{S1: codimension 4 linear section of $\Gr(2,8)$} We already considered this example in our previous work \cite{eg1}, therefore we will not spend too much time on it. It is described in a surprisingly simple way as a codimension 4 linear section of the Grassmannian $\Gr(2,8)$. \begin{proposition}\label{s1} Let $X_{1,1,1,1} \subset \Gr(2,8)$ be given by a generic section of $\mathcal{O}_G(1)^{\oplus 4} $. Then $X$ is an 8-fold of K3 type, with $h^{4,4}_{\van}(X)=19$. \end{proposition} We remark that there is another FK3 closely related to S1. This is $X_{(1,1)} \subset \Gr(2,8) \times \mathbb{P}^3$. In our main table this is listed as M13. We chose this notation since, although there is no related K3 in the Mukai model, it shares many similarities with the other Fanos in the \textbf{M} group. In particular one can directly apply the Cayley trick to prove that this Fano is of K3 type.\\
As already remarked in our previous work, the projective dual of $X_{1,1,1,1} \subset \Gr(2,8)$ is a quartic K3 surface $S_3 \subset \mathbb{P}^3$. An embedding of the derived category of the quartic K3 inside the derived category of the above linear section is proved in \cite{segalthomas}, Thm 2.8.\\ We already conjectured that this complete intersection in $\Gr(k,n)$ should be the only FK3 obtained in this way. We repeat the precise formulation of this conjecture here. \begin{conj} Let $X=X_{d_1, \ldots, d_c} \subset \Gr(k,n)$ be a smooth Fano complete intersection of even dimension. Then $X$ is not of K3-type unless $$(\lbrace d_i \rbrace, k,n)=(\lbrace 3 \rbrace,1,6),(\lbrace 2,1\rbrace,2,5), (\lbrace 1,1,1,1\rbrace,2,8), (\lbrace 1 \rbrace, 3,10).$$ \end{conj}
\subsection{S2: a K3 of genus 7 from $\OGr(3,8)$} This sporadic example is a linear section $X= \OGr(3,8) \cap H$ of the orthogonal Grassmannian $\OGr(3,8)$. It is worth to spend few words on the ambient variety. In general the orthogonal Grassmannian $\OGr(n-1, 2n)$ behaves differently from $\OGr(k,2n)$, which for $k \neq n-1$ is a prime Fano variety. Indeed $\OGr(n-1, 2n)$ can be realised as a $\mathbb{P}^{n-1}$ bundle over (both of) $\mathbb{S}_{n}^i$, the latter denoting the two connected component of the maximal orthogonal Grassmannian $\OGr(n, 2n)$ in the spinor embedding. In particular the Picard group of $\OGr(n-1, 2n)$ has rank 2 with the Pl\"ucker line bundle $\mathcal{L} := \mathcal{O}_{\mathbb{S}^1} (1)\boxtimes \mathcal{O}_{\mathbb{S}^2}(1)$ is very ample. $\OGr(n-1, V_{2n})$ is non-degenerate in the Pl\"ucker embedding, and $$H^0(\OGr(n-1, 2n), \mathcal{L}) \cong \bigwedge^{n-1} V_{2n}^{\vee}.$$ With $X=X_1 \subset \OGr(3,8)$ in the introductory table we mean the zero locus of a generic global section of $\mathcal{L}$. Such $X$ is an 8-fold of index $\iota=3$. Since it is a linear section of a central variety, to compute its Hodge numbers it suffices to compute the Euler characteristics $\chi(\Omega^i_X)$, together with the knowledge of the cohomology of $\OGr(3,8)$. A full computation by the means of Borel-Bott-Weil theorem, can be found in the PhD thesis of the first author. We recall here the result. \begin{lemma}[cf. \cite{thesis}, Proposition A.1.1] $X$ is a Fano 8-fold of K3 type with $h^{4,4}(X)=24$, and its vanishing subspace of rank 19. \end{lemma} We explain now a link between this 8-fold $X$ and a K3 of genus 7. Recall from the work of Mukai that a generic K3 of such genus can be obtained by cutting $\mathbb{S}_{10}$ with 8 hyperplanes. Here we use a different description of the aforementioned K3. Let $X \subset \OGr(3,8)$ defined by $V(\sigma)$, $\sigma \in H^0(\mathcal{L})$. 
Let $\mathbb{S}_8$ be (one of the two connected component of) the orthogonal Grassmannian $\OGr(4,8)$, denote with $\mathcal{R}$ the restriction of the tautological bundle. Since $\sigma$ can be seen as an element in $H^0(\mathbb{S}_8, \bigwedge^3\mathcal{R}^{\vee})$ we can denote by $S= V(\sigma) \subset \mathbb{S}_8$. It is easy to check that $S$ is a K3 of genus 7 (notice that $\mathbb{S}_8$ is nothing but a 6-dimensional quadric hypersurface in disguise, either using triality or checking dimension and invariants). Such $S$ is responsible for the interesting part of the derived category (and therefore the Hodge theory of $X$). Indeed we quote the following result of Ito-Miura-Okawa-Ueda. Denote $\pi$ the restriction of the projection $p$ from $X$ to (one of the two) $\mathbb{S}_8$. \begin{lemma}[Lemma 2.1 in \cite{ito}] The morphism $\pi$ is a $\mathbb{P}^2$-bundle over $\mathbb{S}_8 \smallsetminus S$ and a $\mathbb{P}^3$-bundle over $S$, locally trivial in the Zariski topology. \end{lemma} In turn we can use an adapted version of Orlov's blow-up formula to this case. This is indeed a generalisation of the Cayley trick. We borrow this result from the forthcoming \cite{nested}, where it will be shown in full details and generality. For this reason, the proof will be omitted here.\\ First, in the notation above, denote by $\iota: S \subset \mathbb{S}_8 $. The above Lemma is equivalent to the following commutative diagram
$$\xymatrix{ F \ar@{^{(}->}[r]^j \ar[d]_p & X \ar[d]^\pi \\ S \ar@{^{(}->}[r]^\iota & \mathbb{S}_8,}$$
with $F$ a smooth projective subvariety, $j:F \subset X$ of codimension $d=4+2-3=3$ and a locally free sheaf $\mathcal{F}$ of rank $4$ on $S$ such that $p:F \simeq \mathbb{P}_S(\mathcal{F}) \to S$. We denote by $\mathcal{O}_F(H)$ the relative ample bundle of $p$ and we assume that there is a line bundle $\mathcal{O}_X(H)$ such that $\mathcal{O}_X(H)_{\vert F} \simeq \mathcal{O}_F(H)$ and that there is a vector bundle $\mathcal{E}$ of rank $d$ on $\mathbb{S}_8$ such that $F$ is the zero locus of a general section of $\pi^*\mathcal{E} \otimes \mathcal{O}_X(-H)$.
We define the functors $\Phi_l:\Db(S) \to \Db(X)$ by the formula $\Phi_l(A)= j_* (p^* A \otimes \mathcal{O}(lH))$.
\begin{proposition}\label{o2} In the configuration above, $\Phi_l$ is fully faithful for any integer $l$, and there is a semiorthogonal decomposition: $$\Db(X)=\sod{\Phi_{-1}\Db(S),\pi^*\Db(\mathbb{S}_8),\ldots,\pi^*\Db(\mathbb{S}_8)\otimes \mathcal{O}_X(2H)}$$ \end{proposition}
\subsection{S3: bisymplectic Grassmannian $\Ml(3,8)$ and Debarre-Voisin IHS} The variety $\Ml(k,n)$ is given by the vanishing of a global section of the bundle $\bigwedge^2 \mathcal{R}^{\vee} \oplus \bigwedge^2 \mathcal{R}^{\vee}$ on the Grassmannian $\Gr(k,n)$. Equivalently, given a pencil $\lambda: \mathbb{C}^2 \to \bigwedge^2 V_n^{\vee}$ it parametrises k-dimensional subspaces isotropic for all skew-forms in the pencil. In \cite{kuznetsovpicard} this variety is studied by Kuznetsov when $k=n/2$ and by Benedetti in \cite{ben18} with a strong emphasis in the case $k=2$. Let us recall some key facts of the construction. Assume that $n=2m$ is of even dimension. To a general pencil $\lambda$ are canonically associated $m$ degenerate skew-forms $\lbrace \lambda_1, \ldots, \lambda_m \rbrace$, given by the intersection bewteen the line $L_{\lambda} \subset \mathbb{P}(\bigwedge^2 V^{\vee})$ and the (Pfaffian) discriminant hypersurface $D$, corresponding to degenerate skew-forms. Denote by $K_i$ the kernel of $\lambda_i$. The smoothness of $\Ml$ is equivalent to the $\lambda_i$ being distinct, and moreover we can decompose $V=K_1 \oplus \ldots \oplus K_m$ as a direct sum.\\ Kuznetsov gives as well the canonical form for the pencil, espressing the two generators (up to dividing by 2) as $$\omega_1= x_{1,2}+x_{3,4}+\ldots +x_{n-1,n}, \ \ \omega_2= a_1x_{1,2}+a_2x_{3,4}+\ldots +a_m x_{n-1,n},$$ with the $a_i$ pairwise distinct, and $x_{i,j}:= x_i \wedge x_j$. This way, we can identify $K_1:=\langle e_1, e_2 \rangle$, $K_2:=\langle e_3, e_4 \rangle$ and so on. When $m=k$ one has $\Ml(k, 2k) \cong \prod (\mathbb{P}^1)^k$, see the theorem already recalled in \ref{kuzzolo}. When $m\neq k$ however we do not have such a nice description as a product. For $k=2$ for example $\Ml(2,n)$ is an intersection of $\Gr(2,n)$ with a linear subspace of codimension 2. \\ Let us now focus on the case $\Ml(3,8)$. We compute first the cohomology of a linear section of $\Ml(3,8)$.
\begin{proposition} \label{ah}A linear section $X_1=V(\sigma_1) \subset \Ml(3,8)$ is of K3 type. \end{proposition} \begin{proof}
The first thing to prove is that $\Ml(3,8)$ is a central variety. This can be done via a direct computation, for example using the Borel-Bott-Weil theorem. There is however another (much easier) and more conceptual argument, which proves the analogous statement for all $\Ml(k,n)$. Indeed in \cite[Proposition 2.10]{ben18} it is proved that there is a torus $T \cong (\mathbb{C}^*)^n$ acting on $\Ml(k,n)$ whose fixed locus consists of only $2^k \binom{n}{k}$ points. This implies, thanks to \cite[Theorem 2]{sommese}, that $\Ml(k,n)$ is a central variety, with $2^k \binom{n}{k}$ being its topological Euler characteristic.\\ The Lefschetz theorem on hyperplane sections enables us to describe the cohomology of $X$ except for the Hodge groups $h^{p,q}(X)$ with $p+q=8$. We can determine these dimensions by computing the Euler characteristics $\chi(\Omega_X^i)$. The latter can be computed via a direct but lengthy computation, and computer algebra systems such as Macaulay2 can speed up everything. One has in particular \begin{align*}&\chi(\Omega^1_X)=\chi(\Omega^1_{\Ml(3,8)})=1\\ &\chi(\Omega^2_X)=\chi(\Omega^2_{\Ml(3,8)})=2\\ &\chi(\Omega^3_X)=\chi(\Omega^3_{\Ml(3,8)})+1=7\\ &\chi(\Omega^4_X)=26. \end{align*} \end{proof} This gives as well all the Hodge numbers. We collect them in the next corollary for the reader's convenience. \begin{corollary}The only non-zero Hodge numbers $h^{p,q}$ of $\Ml(3,8)$ are $$h^{0,0}=h^{9,9}=1, \ h^{1,1}=h^{8,8}=1, \ h^{2,2}=h^{7,7}=2, \ h^{3,3}=h^{6,6}=6, \ h^{4,4}=h^{5,5}=6.$$ \end{corollary}
\begin{corollary} \label{hodges2}Suppose $p+q \neq 8$. The only non-zero Hodge numbers $h^{p,q}$ of $X$ are $$h^{0,0}=h^{8,8}=1, \ h^{1,1}=h^{7,7}=1, \ h^{2,2}=h^{6,6}=2, \ h^{3,3}=h^{5,5}=6.$$ For $p+q=8$ the only non-zero Hodge numbers are $$h^{3,5}=h^{5,3}=1, \ h^{4,4}=26,$$ with moreover $h^{4,4}_{\van}=20$. \end{corollary}
We want now to associate to our Fano of K3 type $X$ an IHS $Z$. To do this, at first notice that $\Ml(3,8)$ is degenerate in the Pl\"ucker embedding in $\mathbb{P}(\bigwedge^3 V_8)$. It lies indeed in $\mathbb{P}(U)$, where
$$U:= \ker (\varphi: \bigwedge^3 V_8 \stackrel{(\lrcorner_1, \lrcorner_2)}{\longrightarrow} V_8 \oplus V_8),$$ where $\lrcorner_i$ denotes the contraction with the 2-skew form $\omega_i$. Equivalently, we have that $\Ml(3,8)$ is defined by a general $\sigma_1 \in U^{\vee}$.\\
Consider now the Grassmannian $\Gr(6,8)$. Denote by $\bar{\lrcorner_i}$ the contraction with the restriction of the two form $\omega_i|_W$ to a 6-space $W$. For the generic $[W] \in \Gr(6,8)$ the map $$\overline{\varphi}: \bigwedge^3 W \stackrel{(\bar{\lrcorner_1}, \bar{\lrcorner_2})}{\longrightarrow} W \oplus W$$ remains surjective, since the rank of $\omega_1|_W$ and $\omega_2|_W$ is still maximal.
However, when $W$ is such that every element of the pencil restricted to such $W$ has rank 4, then the above map is not surjective anymore. As a special example, one can take a subspace given by $x_1=x_3=0$. Then for example the vector $(e_5, d e_5)$, $d \neq 1$ is not in the image of $\overline{\varphi}$.
To identify in general the locus $D$ where $\overline{\varphi}$ is not surjective let us write (in the notation above) $V_8=K_1 \oplus K_2 \oplus K_3 \oplus K_4$. We can then describe $D$ as $$D:= \lbrace W_6 \subset V_8 \ | \ \ddim (W_6 \cap K_i) \geq 1, \ \forall i \rbrace.$$ $D$ is therefore isomorphic to a $\Gr(2,4)$-bundle over $(\mathbb{P}^1)^4 \cong \Ml(4,8)$, and over it we have a cokernel sheaf $\mathcal{G}$ of rank 4 on its support, given by the Kernel of the rank 4 map $W \to W^* \oplus W^*$. Summing up, we have the following result \begin{proposition} On $\Gr(6,8)$ there is an exact sequence of sheaves $$ 0 \to F \to \bigwedge^3 \mathcal{R} \to \mathcal{R} \oplus \mathcal{R} \to \mathcal{G} \to 0.$$ \end{proposition} \begin{corollary} $F^\vee$ is a globally generated vector bundle of rank 8 and $H^0(F^{\vee})= U^{\vee}$. \end{corollary} \begin{proof} Dually, there is a surjective morphism of sheaves $\bigwedge^3 \mathcal{R}^\vee \to F^\vee$, which is surjective on stalks. Hence, global sections of $F^\vee$ which are images of global sections of $\bigwedge^3 \mathcal{R}^\vee$ are sufficient to generate stalks, so that $F^\vee$ is globally generated. \end{proof} Moreover, since $\mathcal{G}$ is a torsion sheaf supported in codimension 4 we have the following corollary. \begin{corollary} $c_1(F^{\vee})=8h.$ \end{corollary} \begin{proposition} Let $Z \subset \Gr(6,8)$ defined by the zero locus of a general global section of the vector bundle $F^{\vee}$. Then $Z$ is a fourfold with canonical class $\omega_Z \cong \mathcal{O}_Z$. \end{proposition} \begin{thm} \label{hk}Let $Z$ as above, and let $Z_{DV} \subset \Gr(6,10)$ the Debarre-Voisin IHS. Then $Z$ is isomorphic to $Z_{DV}$. Moreover, $Z$ can be interpreted as (the compactification of) the space of $\Ml(3,6) \cong (\mathbb{P}^1)^3$ inside $X_1 \subset \Ml(3,8)$. 
\end{thm} \begin{proof} With a non canonical choice of a two-space $\langle v,w\rangle = V_2\subset V_{10}$, the three form $\omega$ defining $Z_{DV}$ can be written as $\omega=\omega_8+v^\vee\wedge \sigma_1+w^\vee\wedge \sigma_2$, where $\omega_8$ is a three form on an eight dimensional vector space $V_8$ and $\sigma_i$ are two forms on the same space. The natural projection from $\mathbb{P}(V_{10})$ to $\mathbb{P}(V_8)$ induces a rational map from $\Gr(6,10)$ to $\Gr(6,8)$. For this map, there are three kinds of six-spaces: \begin{itemize} \item[Type 0] Six spaces which do not intersect the fixed two space $V_2$. \item[Type 1] Six spaces meeting the fixed $V_2$ in a line $U_1$. \item[Type 2] Six spaces containing the fixed $V_2$. \end{itemize} By a dimension count and the genericity assumption on $Z_{DV}$, spaces of type 2 do not occur inside $Z_{DV}$. Spaces of type 1 are given by the Schubert cycle $\sigma_{3,0^5}(V_2)$, and inside $Z_{DV}$ this is a curve of degree $132$, which is smooth since one can check that the Schubert cycle we use to obtain it is smooth as well. The blow up of $Z_{DV}$ along this curve maps into a subvariety of $\Gr(6,8)$ given by six planes where the three form $\omega_8$ is given as the sum of $\sigma_1$ and $\sigma_2$ wedged with the dual of some vectors of the six space itself. This is precisely the variety $Z$ for the forms $\omega_8,\sigma_1,\sigma_2$. The local picture in the exceptional divisor is given by sending a six plane $U_1\subset U_6$ to the set of all possible six planes in $V_8$ containing $U_6/U_1$, which is a $\mathbb{P}^2$. The image $\pi(U_6)$ of a six space $U_6\in Z_{DV}$ contains three spaces parametrized by $X_1 \subset \Ml(3,8)$ where the form $\omega_8$ restricts to zero, hence also the two forms $\sigma_1,\sigma_2$ are zero. That is, a point of $Z$ parametrizes a copy $\Ml(3,6)\cong (\mathbb{P}^1)^3$ contained in $X_1$ as claimed above.
We proved that $Z$ has trivial canonical bundle and, if the rational map we defined above from $Z_{DV}$ has degree one, $Z$ and $Z_{DV}$ would be birational minimal models, hence the map given by the blow-up of $Z_{DV}$ along the curve composed with the projection would be a flop. But a flop is not defined in codimension at most two on an IHS fourfold, hence the map was already well defined and is an isomorphism. Let us prove that this map has indeed degree one: Let $V_6$ and $W_6$ be two points of $Z_{DV}$ with the same projection. Therefore, their basis differ only for multiples of v and w and, after a linear combination, we can suppose that at most two elements differ by these vectors. Let us treat first the case of a single vector: let $V_6=\langle v_1,v_2,v_3,v_4,v_5,v_6 \rangle$ and let $W_6=\langle v_1,v_2,v_3,v_4,v_5,v_6+av+bw \rangle$. As the choice of $V_6$ varies, the coefficients $a,b$ are not constant, hence we can suppose $a=1,b=0$ (which happens in codimension one). Thus on $W$ we have $\omega(v_6+v,x,y)=v\wedge \sigma_1(x,y)$. So, if the six space annihilates such a three form, it must be isotropic for $\sigma_1$, which is clearly impossible on a six space, unless the two form degenerates, which happens in codimension two.\\ On the other hand, if $W_6=\langle v_1,v_2,v_3,v_4,v_5+w,v_6+v \rangle$ we have $\omega(v_6+v,x,y)=v\wedge \sigma_1(x,y)$ and $\omega(v_5+w,x,y)=w\wedge \sigma_2(x,y)$. This implies that the residual four space is isotropic with respect to both forms, which is a codimension twelve condition on the six spaces themselves. Indeed, this is $\Ml(4,8)\cong (\mathbb{P}^1)^4$ inside $\Gr(4,8)$. Hence, by the genericity assumption on $\omega$, this does not happen in our case.
\end{proof}
\subsection{S5: a section of a non-central variety} This sporadic Fano of K3 type is rather different from the others. It is a linear section of a certain 7-fold of index 3 that we call $\TT(2,9)$, which is not even central, let alone homogeneous. This 7-fold is the zero locus of a general global section of the bundle $\mathcal{Q}^*(1)$ on the Grassmannian $\Gr(2,9)$. By Borel-Bott-Weil we interpret $H^0(\Gr(2,9), \mathcal{Q}^*(1)) \cong \bigwedge^3 V_9^{\vee},$ therefore $\TT(2,9)$ (sometimes shortened as $\TT$ in the following proofs) is given by the locus of two-spaces in a 9-dimensonal space which are annihilated by a 3-form. This 7-fold, which is indeed a \emph{congruence of lines} has been considered in the recent work (\cite{faenzi}, Ex. 4.14). As we said, the variety $\TT(2,9)$ is not central, therefore we cannot apply any trick as in Proposition \ref{blowup} to compute the Hodge numbers of its linear section. Therefore we will need to go through a proper Borel-Bott-Weil computation.\\
We will start by stating the final result on the Hodge numbers. \begin{proposition} \label{t129}The Hodge numbers of $\TT(2,9)$ are \begin{center} {\small \[\begin{matrix} &&&&&&&&1 &&&&&&&&\\ &&&&&&&0&&0&&&&&&&\\ &&&&&&0 &&1&&0&&&&&&\\
&&&&& 0 && 0 && 0 &&0&&& \\ &&&&0 &&0 && 2 &&0 && 0 &&&&\\ &&&0&&0 & & 2 & &2 && 0 && 0 &&&\\ &&0 && 0 && 0 &&2 &&0 &&0 && 0&&\\ &0 && 0 && 0 && 2&& 2 &&0 &&0 && 0&\\ &&0 && 0 && 0 &&2 &&0 &&0 && 0&&\\ &&&0&&0 & & 2 & &2 && 0 && 0 &&&\\ &&&&0 &&0 && 2 &&0 && 0 &&&&\\ &&&&& 0 && 0 && 0 &&0&&& \\ &&&&&&0 &&1&&0&&&&&&\\ &&&&&&&0&&0&&&&&&&\\ &&&&&&&&1 &&&&&&&& \end{matrix}\]} \end{center} \end{proposition} From the above diamond it immediately follows that holomorphic Euler characteristics for $\TT$ are $\chi(\Omega^1_{\TT})=-1, \ \chi(\Omega^2_{\TT})=0, \ \chi(\Omega^3_{\TT})=2$. These can be easily double-checked using Macaulay2. Moreover the topological Euler characteristic $e_{\textrm{top}}(\TT)=0$ (cf. \cite{faenzi}, Ex. 4.14). \begin{corollary} \label{cor29}Let $X= \TT(2,9) \cap H$ be a linear section of $\TT(2,9)$. This is a Fano of K3 type with Hodge diamond \begin{center} {\small \[\begin{matrix} &&&&&&&&1 &&&&&&&&\\ &&&&&&&0&&0&&&&&&&\\ &&&&&&0 &&1&&0&&&&&&\\
&&&&& 0 && 0 && 0 &&0&&& \\ &&&&0 &&0 && 2 &&0 && 0 &&&&\\ &&&0&&0 & & 2 & &2 && 0 && 0 &&&\\ &&0 && 0 && 1 &&22 &&1 &&0 && 0&&\\ &&&0&&0 & & 2 & &2 && 0 && 0 &&&\\ &&&&0 &&0 && 2 &&0 && 0 &&&&\\ &&&&& 0 && 0 && 0 &&0&&& \\ &&&&&&0 &&1&&0&&&&&&\\ &&&&&&&0&&0&&&&&&&\\ &&&&&&&&1 &&&&&&&& \end{matrix}\]} \end{center} The vanishing subspace is $h^{2,2}_{\van}(X)=20$. The holomorphic Euler characteristics for $X$ are $\chi(\Omega^1_{X})=-1, \ \chi(\Omega^2_{X})=1, \ \chi(\Omega^3_{\TT})=-18$. Moreover the topological Euler characteristic $e_{\textrm{top}}(X)=24$.\end{corollary} \begin{proof} The Hodge numbers for $X$ follows from those of $\TT(2,9)$ together with the computations of $\chi(\Omega^i)$, which can be easily done a priori via Riemann-Roch and the help of computer algebra. \end{proof} \subsubsection{Borel-Bott-Weil computation for $\TT(2,9)$} Borel-Bott-Weil theorem is a powerful tool for computing cohomologies of vector bundles on homogeneous spaces. Together with some well-known sequences it is often sufficient to compute Hodge numbers for varieties cut by general global sections of homogeneous vector bundles. Although rather long and involved, the procedure is mostly algorithmic. We will include the general setup (skipping most details for the sake of readability) in order to give the reader a toolbox for further computations. \subsubsection*{General BBW strategy} Let $\Gr(k,n)$ be the Grassmannian of $k$-dimensional subspaces of $V_n$. Consider two dominant weights $\alpha = (\alpha_1, \dots ,\alpha_{n-k})$ and $\beta = (\beta_1, \dots ,\beta_{k})$ for the Schur functors $\Sigma$ applied to $\mathcal{Q}$ and $\mathcal{R}$ and their concatenation $\gamma = (\gamma_1,\dots,\gamma_n)$. Let $\delta$ the decreasing sequence $\delta = (n-1, \dots , 0)$ and consider $\gamma + \delta$. 
Write $\sort(\gamma + \delta)$ for the sequence obtained by arranging the entries of $\gamma + \delta$ in non-increasing order, and define $\tilde{\gamma} = \sort(\gamma + \delta)- \delta$. If $\gamma + \delta$ has repeated entries, then $$H^i(\Gr(k,n),\Sigma_{\alpha} \mathcal{Q} \otimes \Sigma_{\beta} \mathcal{R})=0$$ for all $i \ge 0$. Otherwise, writing $l$ for the \emph{number of disorders}, that is the number of pairs $(i, j)$ with $1 \le i < j \le n$ and $\gamma_i - i < \gamma_j - j$ we have$$H^l(\Gr(k,n),\Sigma_{\alpha} \mathcal{Q} \otimes \Sigma_{\beta} \mathcal{R} ) = \Sigma_{\tilde{\gamma}} V$$ and $H^i(\Gr(k,n),\Sigma_{\alpha} \mathcal{Q}\otimes \Sigma_{\beta}\mathcal{R} )=0$ for $i \ne l$. Let now $Z \subset \Gr(k,n)$ a variety which is the zero locus of a general section of a rank $r$ globally generated vector bundle $F^{\vee}$. We have the Koszul complex for $Z$, which is indeed a resolution \begin{equation}\label{koszul} 0 \to \det(F) \to \bigwedge^{r-1} F \to \ldots \to F \to \mathcal{O}_G \to \mathcal{O}_Z \to 0. \end{equation} If $H$ is another globally generated vector bundle on $\Gr(k,n)$ we can tensor the above sequence by $H$: we have the spectral sequence
$$\mathbf{E}_1^{-q,p} =H^p(\Gr(k,n), H \otimes \bigwedge^q F)\Rightarrow H^{p-q}(Z, H|_Z),$$ if moreover both $F$ and $H$ are homogeneous we can compute all terms on the left by BBW formula. We can now compute the Hodge numbers for our $X$. Notice that the $F$ in the Koszul complex above is the dual of bundle we start with. In this case it will be $\mathcal{Q}(-1)$. \subsubsection*{The Hodge numbers $h^{1,i}(\TT(2,9))$} We apply the above formula together with the conormal sequence, which since $N^{\vee}_{\TT/\Gr} \cong F$ becomes
$$ 0 \to F|_{\TT}\to \Omega^1_G|_{\TT} \to \Omega^1_{\TT} \to 0.$$
We can compute the cohomologies of the first two bundles using the above strategy. $F|_{\TT}$ turns out to be acyclic, whereas the only non-zero cohomology of $\Omega^1_G|_{\TT}$ is $H^1(\Omega^1_G|_{\TT}) \cong H^1(\Omega^1_G) \cong \mathbb{C}$. It follows that the Hodge numbers $h^{1,i}(\TT)=0$, $i \neq 1$ and $h^{1,1}(\TT)=1$. \subsubsection*{The Hodge numbers $h^{2,i}(\TT(2,9))$} In order to compute these other Hodge numbers we need to raise the conormal sequence to the second exterior power, that is
$$ 0\to \Sym^2 F|_{\TT} \to (F \otimes \Omega^1_G)|_{\TT} \to \Omega^2_{G}|_{\TT} \to \Omega^2_{\TT} \to 0. $$ $\Sym^2 F \otimes \bigwedge^i F$ is acyclic for $i\neq 7$. This can be checked using first the Littlewood-Richardson formula to determine the irreducible decomposition of each of these bundles, and then applying several iterations of the BBW formula. For $i=7$ it is $\Sigma_{3,1^6}\mathcal{Q}\otimes \Sigma_{9,9}\mathcal{R}$ that has
$H^{12}(\Sym^2 F \otimes \bigwedge^7 F) \cong \mathbb{C}$ (and therefore $H^5(\Sym^2 F|_{\TT}) \cong \mathbb{C}$). The bundle $\Omega^1 \otimes F \otimes \bigwedge^i F$ is acyclic for all $i$. The bundle $\Omega^2 \otimes \bigwedge^i F$ is not acyclic for $i=0$ (and $H^2(\Omega^2_G|_{\TT}) \cong\mathbb{C}^2$) and for $i=3$. Indeed in the case $i=3$ its decomposition in irreducibles contains the summand $ \Sigma_{3,3,3,2,2,1,1}\mathcal{Q}\otimes \Sigma_{7,5} \mathcal{R}$. This gives $H^6(\Omega^2 \otimes \bigwedge^3 F)= \mathbb{C}$. Putting all these data together one obtains $H^2(\Omega^2_{\TT})=H^3(\Omega^2_{\TT})\cong \mathbb{C}^2$ with the other Hodge numbers $h^{2,i}=0$. \subsubsection*{The Hodge numbers $h^{3,i}(\TT(2,9))$} By Riemann-Roch one gets $\chi(\Omega^3_{\TT})=2$. Thanks to the knowledge of $h^{i,3}(\TT)$ for $i \neq 3,4$, this implies $ h^{3,3}(\TT)=h^{4,3}(\TT).$
We use the third power of the conormal sequence, namely
$$0 \to \Sym^3 F |_{\TT} \to (\Omega^1 \otimes \Sym^2F)|_{\TT} \to (\Omega^2 \otimes F)|_{\TT} \to \Omega^3_G|_{\TT} \to \Omega^3_{\TT} \to 0.$$
One strategy is to split the sequence above into three short ones, namely
\begin{equation}0 \to \Sym^3 F |_{\TT} \to (\Omega^1 \otimes \Sym^2F)|_{\TT} \to J_2 \to 0 ,\end{equation}
\begin{equation} 0 \to J_2 \to (\Omega^2 \otimes F)|_{\TT} \to J_1 \to 0 ,\end{equation}
\begin{equation} \label{finaleq} 0\to J_1 \to \Omega^3_G|_{\TT} \to \Omega^3_{\TT} \to 0. \end{equation}
The only cohomological contributions come from \begin{enumerate}[(a)] \item $H^{12}(\Sym^3 F \otimes \bigwedge^6 F)= \mathbb{C}^{81} \cong \End(V_9) \cong \mathfrak{gl}(V_9)$; \item $H^{12}(\Sym^3 F \otimes \bigwedge^7 F) = \mathbb{C}^{84} \cong \bigwedge^3 V_9$;
\item $H^{13}(\Omega^1 \otimes \Sym^2 F \otimes \bigwedge^7 F) = \mathbb{C} \cong H^6 ((\Omega^1 \otimes \Sym^2 F)|_{\TT})$;
\item $H^6(\Omega^2 \otimes F \otimes \bigwedge^2 F) = \mathbb{C}\cong H^4((\Omega^2 \otimes F)|_{\TT})$;
\item $H^{10}(\Omega^2 \otimes F \otimes \bigwedge^5 F)= \mathbb{C} \cong H^5((\Omega^2 \otimes F)|_{\TT})$;
\item $H^3(\Omega^3) = \mathbb{C}^2 \cong H^3(\Omega^3_G|_{\TT})$;
\item $H^7(\Omega^3 \otimes \bigwedge^3 F)= \mathbb{C} \cong H^4(\Omega^3_G|_{\TT} )$;
\item $H^{11}(\Omega^3 \otimes \bigwedge^6 F) = \mathbb{C} \cong H^5(\Omega^3_G|_{\TT} )$. \end{enumerate}
Except in the case of (a) and (b) one can compute immediately the cohomology of the restriction of the bundles to $\TT$. The only non obvious case is given by the exact sequence
$$ 0 \to H^5 (\Sym^3 F |_{\TT} ) \to \bigwedge^3 V \stackrel{\phi_f}{\to} \End(V_9) \to H^6 (\Sym^3 F |_{\TT}) \to 0.$$ The situation is analogous to (\cite{klm}, Appendix B). Indeed the dual of the map $\phi_f$ is the map $\varphi_f: \End(V_9) \to \bigwedge^3 V_9^{\vee}$ mapping $u \mapsto u(f)$, where $f$ is the defining section for $\TT$ and $u$ is the Lie action. This is because one can do the same computation in a family, and use the $GL(V)$ equivariance to ensure that $\varphi_f$ depends linearly on $f$. Since up to a scalar there is a unique equivariant map from $\bigwedge^3 V^{\vee}$ to $\Hom(\End(V), \bigwedge^3 V^{\vee})$ we can conclude. Therefore for general $f$ the map $\varphi_f$ is injective (this can be verified for example using the general form for $f$ given in (\cite{faenzi}, 4.14) with sufficiently general coefficients), and therefore $\phi_f$ is surjective as required.\\ If we plug in this cohomological information in the long exact sequence associated to the sequence \ref{finaleq} we get several non-zero cohomology groups. In particular the final groups in this sequence are $$ \ldots \to \mathbb{C} \stackrel{\epsilon}{\to} H^4 (\Omega^3_{\TT}) \stackrel{\mu}{\to} \mathbb{C}^2 \stackrel{\nu}{\to} \mathbb{C} \to 0$$ Therefore $h^{3,3}(\TT)=h^{3,4}(\TT)= \ddim (\ker \mu) + \ddim (\mathrm{Im} \ \mu)$ and by standard properties of long exact sequences $h^{3,3}(\TT)=h^{3,4} \leq 2$. On the other hand by Hard Lefschetz $h^{3,3}(\TT)=h^{3,4} \geq 2$. This concludes the proof of the proposition. \subsubsection{Geometry of $\TT(2,9)$ and $X$} This rather atypical (for our setting) Hodge structure for $\TT(2,9)$ has a geometrical explanation.\\ First consider a linear section $X_H \subset \Gr(3,9)$. It is a Fano 17-fold of index 8. One can compute that its central Hodge structure has level 1, with the same numerology as a genus 2 curve. Consider the configuration in the diagram below. 
The map $p: \mathrm{Fl}(2,3,9) \to \Gr(3,9)$ is a $\mathbb{P}^2$ bundle, given by the choice of $V_2 \subset V_3$. It remains as well a $\mathbb{P}^2$ bundle if we restrict $p$ from $X_{p^* H} \to X_H$. The Hodge structure of $X_H \subset \Gr(3,9)$ is therefore repeated three times in $X_{p^* H}$. Consider as well the projection $\phi$ from $\mathrm{Fl}(2,3,9) \cong \mathbb{P}_{\Gr(2,9)}(\mathcal{Q}(-1))$ to $\Gr(2,9)$, that is a $\mathbb{P}^6$-bundle. Restricting $\phi$ to $X_{p^* H}$ this gives a $\mathbb{P}^5$ bundle generically on $\Gr(2,9)$, that degenerates to a $\mathbb{P}^6$ on the zero locus $Z_H$ of a section of the dual of $\mathcal{Q}(-1)$, that is $\TT(2,9)$. \begin{equation} \label{diagrammoneswaggone}\xymatrix{ & F \ar[dl]^\phi & X_{p^* H} \ar[dr]^p \ar[dl]^ \phi \ar@{^{(}->}[r] & \mathrm{Fl}(2,3,9) \ar[dr]^p \\ Z_H \ar@{^{(}->}[r] & \Gr(2,9) & & X_H \ar@{^{(}->}[r] & \Gr(3,9) }\end{equation} One can prove that the Hodge structure of $\TT(2,9)$ can be pushed down from $X_{p^* H}$, which in turn can be calculated from $X_H \subset \Gr(3,9)$. This can be considered as an alternative (and a bit more geometrical) proof of Proposition \ref{t129}. The precise details of this construction and extension to the derived category case will appear in \cite{nested}. In particular a similar argument, albeit in a more complicated version, can be used to derive directly Corollary \ref{cor29} and geometrically explain the K3 structure. We do not produce here a result interpreting some moduli space on $X$ as an IHS: however we expect a similar result to Proposition \ref{t2} to hold here as well.
\subsection{S6: $3 \times $K3 structure } This sporadic Fano has some interesting features. First of all, unlike all our other examples, it is not a section of another Fano by the zero locus of a line bundle. Then it is a Fano of K3 type in two different ways. \\ The variety $\TT(2,10)$ is the zero locus of a general global section of the bundle $\mathcal{Q}^*(1)$ on the Grassmannian $\Gr(2,10)$. As in the previous case \textbf{S5} we have $$H^0(\Gr(2,10), \mathcal{Q}^*(1)) \cong \bigwedge^3 V_{10}^{\vee},$$ therefore $\TT(2,10)$ is given by the locus of two-spaces in a 10-dimensonal space which are annihilated by a 3-form. It is straightforward to check that $\TT(2,10)$ is a Fano 8-fold of index $\iota=3$. We compute first its Hodge numbers
\begin{proposition} \label{3k3}The Hodge numbers of $\TT(2,10)$ are \begin{center} {\small \[\begin{matrix} &&&&&&&&1 &&&&&&&&\\ &&&&&&&0&&0&&&&&&&\\ &&&&&&0 &&1&&0&&&&&&\\
&&&&& 0 && 0 && 0 &&0&&& \\ &&&&0 &&0 && 2 &&0 && 0 &&&&\\ &&&0&&0 & & 0 & &0 && 0 && 0 &&&\\ &&0 && 0 && 1 &&22 &&1 &&0 && 0&&\\ &0 && 0 && 0 && 0&& 0 &&0 &&0 && 0&\\ 0 && 0 && 0 && 1&& 23 &&1 &&0 && 0&&0\\ &0 && 0 && 0 && 0&& 0 &&0 &&0 && 0&\\ &&0 && 0 && 1 &&22 &&1 &&0 && 0&&\\ &&&0&&0 & & 0 & &0 && 0 && 0 &&&\\ &&&&0 &&0 && 2 &&0 && 0 &&&&\\ &&&&& 0 && 0 && 0 &&0&&& \\ &&&&&&0 &&1&&0&&&&&&\\ &&&&&&&0&&0&&&&&&&\\ &&&&&&&&1 &&&&&&&& \end{matrix}\]}
\end{center} \end{proposition}
As we can see from the above proposition, $\TT(2,10)$ has a Hodge structure of K3 type both in $H^6$ (and therefore in $H^{10}$ by duality) and in $H^8$, making it a rather peculiar example. Indeed by Hard Lefschetz the K3 structure in $H^6$ immediately implies the presence of a K3 sub-structure in $H^{10}$. The surprising bit is that this is the whole of $H^8$, with the exception of a primitive cycle. The computation of the above Hodge numbers is done via a Borel-Bott-Weil computation, as in the previous section. Since these are rather long computations (and not really different from the previous case) we will just sketch it. \begin{proof} Let $F$ be the dual of the bundle that cuts $\TT$. The computations of the Hodge numbers up to $h^{2,i}$ do not present any challenge. In the third exterior power of the conormal exact sequence
$$0 \to \Sym^3 F |_{\TT} \to (\Omega^1 \otimes \Sym^2F)|_{\TT} \to (\Omega^2 \otimes F)|_{\TT} \to \Omega^3_G|_{\TT} \to \Omega^3_{\TT} \to 0$$
we have that $(\Omega^2 \otimes F)|_{\TT}$ is acyclic, for $(\Omega^1 \otimes \Sym^2F)|_{\TT}$ the unique cohomology group is $H^7((\Omega^1 \otimes \Sym^2F)|_{\TT}) \cong \mathbb{C}$ and for the third cotangent we have $H^3( \Omega^3_G|_{\TT}) \cong \mathbb{C}^2$. The only tricky part comes when considering $\Sym^3 F |_{\TT}$. Indeed from the spectral sequence associated to the Koszul resolution for $\Sym^3 F |_{\TT}$ one finds an exact sequence $$ 0 \to H^{13}(K_7) \to H^{14}( \bigwedge^8 F \otimes \Sym^3 F) \to H^{14}( \bigwedge^7 F \otimes \Sym^3 F) \to H^{14}(K_7) \to 0 $$
where $K_7$ is the sheaf which we used to complete the sequence $0 \to \bigwedge^8 F \otimes \Sym^3 F \to \bigwedge^7 F \otimes \Sym^3 F$. The above sequence is equal to:
$$ 0 \to H^{13}(K_7) \to \bigwedge^3 V_{10} \to \End(V_{10}) \to H^{14}(K_7) \to 0 $$
As in the previous section case, one can argue that the middle map is surjective, and therefore chasing the sequence one gets that the unique cohomology group for $\Sym^3 F |_{\TT}$ is $H^6( \Sym^3 F |_{\TT}) \cong \mathbb{C}^{20}$. Collecting all these data together in the above long exact sequence we get $h^{3,3}(\TT)=22$ and $h^{5,3}(\TT)=1$. The missing number can be obtained from the computation of the Euler characteristic. \end{proof}
This strange Hodge structure can be explained with a construction absolutely equivalent to the one of \eqref{diagrammoneswaggone}, with of course $\mathrm{Fl}(2,3,10)$ being the relevant Flag. In particular, one can repeat the construction of 3.9.2 and do the computations in $K_0(\textrm{Var})$ as an alternative way of computing Hodge numbers. Indeed this is the same Hodge structure coming from the Debarre-Voisin twentyfold $Y_1 \subset \Gr(3,10)$. It is therefore not surprising that we can relate the IHS fourfold $Z_{DV} \subset \Gr(6,10)$ to $\TT(2,10)$. \\ Define first $Z_{\mathcal{O}(1)^4}$ to be the zero locus of four general linear sections in the Grassmannian $\Gr(2,6)$. Moreover we denote by $\TT_{,\omega}(2,10)$ a distinguished element of the family defined by a specified 3-form $\omega$. \begin{proposition} \label{t2} The Debarre-Voisin fourfold $F_{\omega}$ is birational to the moduli space (contained in the Hilbert scheme) of fourfolds $Z_{\mathcal{O}(1)^4}$ contained in the variety $\TT_{,\omega}(2,10)$. \end{proposition} \begin{proof} Let $W$ be a general point in the Debarre-Voisin fourfold given by a general three form $\omega$. Let us consider the subscheme of $\TT_{,\omega}(2,10)$ given by all two spaces contained inside $W$. This does not coincide with the full Grassmannian $\Gr(2,6)$, as the condition $\omega(W)=0$ does not imply $\omega \lrcorner \bigwedge^2 U=0$ for all $U\subset W$ two-spaces. Notice that this is not the case if one considers three spaces contained in $W$, that is the construction of the Debarre-Voisin IHS fourfold as a moduli space of $\Gr(3,6)$ contained in the respective twentyfold.\\ On $\Gr(k,10)$ for all $k$ we have a sequence $0\to \mathcal{R} \to V_{10}\otimes\mathcal{O}\to (V_{10}\otimes\mathcal{O})/\mathcal{R}\to 0$ which dually gives a sequence $0 \to \mathcal{R}^\perp \to V_{10}^\vee\otimes\mathcal{O}\to \mathcal{R}^\vee \to 0$. 
This gives a filtration of $\bigwedge^3 V_{10}^\vee\otimes\mathcal{O}$ with factors $\bigwedge^3 \mathcal{R}^\perp,$ $\bigwedge^2\mathcal{R}^\perp\otimes \mathcal{R}^\vee$, $\mathcal{R}^\perp\otimes \bigwedge^2\mathcal{R}^\vee$ and $\bigwedge^3\mathcal{R}^\vee$. The three-form $\omega$ is a section of the last factor $\bigwedge^3\mathcal{R}^\vee$ on $\Gr(6,10)$. On the zero locus of such a section, this lift to a section of $\mathcal{R}^\perp\otimes\bigwedge^2\mathcal{R}^\vee$, which corresponds to a map $V_{10}/W\rightarrow \bigwedge^2 W^\vee.$ The image of such a map is a four dimensional space $H_4$ of two forms on $W$, for every six space $W$ in the Debarre-Voisin twentyfold given by $\omega$.\\
Let $U\subset W$ be a point of $\TT_{,\omega}(2,10)$. The space $U$ is isotropic for all two forms in $H_4$, indeed if this were not the case we would have a two form $\sigma\in H_4$ such that $\sigma_{|U}$ is non degenerate and, by how forms in $H_4$ are obtained, this would imply $\omega \lrcorner \bigwedge^2 U\neq 0$. On the contrary, in an appropriate basis, it is not difficult to show that $\omega \lrcorner \bigwedge^2 U =0$ is implied by $\sigma(U)=0$ for all $\sigma\in H_4$. Thus, the scheme of subspaces $U\subset W$ with fixed $W$ is parametrized by a fourfold $Z_{\mathcal{O}(1)^4} \subset \Gr(2,W)$, which is a Fano fourfold of index two, rational by \cite[Thm. 2.2.1]{fei}, with central cohomology $(h^{1,1}, h^{2,2})=(1,8)$. This gives a rational map between the Debarre-Voisin fourfold and the space of $Z_{\mathcal{O}(1)^4}$ contained in $\TT(2,10)$ (and in a fixed $\Gr(2,6)$). As by changing the point of the Debarre-Voisin fourfold we change the ambient Grassmannian $\Gr(2,6)$, it is clear that such a map is generically injective, hence birational.
\end{proof}
\subsection{S7: a mixed (2,3) CY structure} A curious yet interesting thing happens when we take a linear section $X_H$ of the above $\TT(2,10)$. Indeed by Lefschetz's hyperplane section theorem we know that the K3 structure of $\TT(2,10)$ in $H^6$ and $H^8$ must transfer to its linear section: what is most interesting is that the $H^7$ presents as well a Calabi-Yau structure of level three. To the best of our knowledge, this is the first example of a prime variety that has 2 different examples of CY-structure, of course in different weights. The precise result is \begin{proposition}\label{23cy} The Hodge numbers of a linear section $X_H \subset \TT(2,10)$ are \begin{center} {\small \[\begin{matrix} &&&&&&&&1 &&&&&&&&\\ &&&&&&&0&&0&&&&&&&\\ &&&&&&0 &&1&&0&&&&&&\\
&&&&& 0 && 0 && 0 &&0&&& \\ &&&&0 &&0 && 2 &&0 && 0 &&&&\\ &&&0&&0 & & 0 & &0 && 0 && 0 &&&\\ &&0 && 0 && 1 &&22 &&1 &&0 && 0&&\\ &0 && 0 && 1 && 44&& 44 &&1&&0 && 0&\\ &&0 && 0 && 1 &&22 &&1 &&0 && 0&&\\ &&&0&&0 & & 0 & &0 && 0 && 0 &&&\\ &&&&0 &&0 && 2 &&0 && 0 &&&&\\ &&&&& 0 && 0 && 0 &&0&&& \\ &&&&&&0 &&1&&0&&&&&&\\ &&&&&&&0&&0&&&&&&&\\ &&&&&&&&1 &&&&&&&& \end{matrix}\]} \end{center} \end{proposition} The above proposition can be proved with a Borel-Bott-Weil computation similar to the ones above. We will not add further details here in order to preserve the readability of the current paper. We will indeed give a sketch of a geometrical explanation of why such numbers appear.\\ Indeed as an expert reader might notice, the 3CY structure in our $X_H$ has the same dimension of the 3CY structure appearing in the $H^{23}$ of a linear section $X_1 \subset \Gr(3,11)$. We will give now an explanation on why and how this 3CY structure gets projected from such varieties to our $X_H \subset \TT(2,10)$. This will be only sketched, since the details (in a more general context) will appear in the forthcoming \cite{nested}. The first steps are the following lemmata. \begin{lemma} A linear section $X_1 \subset \Gr(3,11)$ is a Fano 23-fold of 3CY type. Indeed its non-zero Hodge numbers of weight 23 are $(h^{10,13},h^{11,12}, h^{12,11}, h^{13,10})=(1,44,44,1)$. \end{lemma} This lemma can easily be proved, for example using our results in \cite{eg1}. We notice that such a variety is of 3CY even in the (stronger) categorical sense, see \cite[4.5]{kuzicy}. The orthogonal complement to the Calabi-Yau category is generated by 150 exceptional objects. The following Lemma is less obvious \begin{lemma}\label{symphodge} A linear section $Y_1 \subset \SGr(3,10)$ is a Fano 17-fold of 3CY type. Indeed its non-zero Hodge numbers of weight 17 are $(h^{7,10},h^{8,9}, h^{9,8}, h^{10,7})=(1,44,44,1)$. 
\end{lemma} It can be proved for example with similar calculations to Corollary \ref{hodges2}, since we already know that the symplectic Grassmannian $\SGr(k, n)$ is a central variety. However one can prove that the statement is more than merely numerological. Indeed one can show the existence of a fully faithful functor $\Phi: D^b(Y_1) \to D^b(X_1)$ and a semiorthogonal decomposition of $D^b(X_1)$ with $\Phi D^b(Y_1)$ as first component, together with a bunch of exceptional objects. This obviously proves the Hodge-theoretical statement as well. This in turn explains the 3CY structure in $X_H \subset \TT(2,10)$. Indeed it is possible to write a diagram like the one for $\TT(2,9)$ in \ref{diagrammoneswaggone}, appropriately modified; in particular we have to pass through the symplectic partial flag $\mathrm{SFL}(2,3,9)$. The construction is more involved, but it is enough to explain that this mixed (2,3) Calabi-Yau structure ultimately comes from a hyperplane section of (respectively) $\Gr(3,10)$ and $\Gr(3,11)$. An interesting problem is therefore to look for other examples of varieties with mixed CY structure that are not induced by these construction tricks outlined in \cite{nested}.
\subsection{S8: other K3 structures as $X_L \subset \TT(k,10)$} A similar construction can be applied to $\TT(4,10)$, $\TT(5,10)$ and their linear sections. Indeed both of them will inherit a bunch of K3 type structures as in \ref{diagrammoneswaggone}. As an example, in the case of $\TT(4,10)$ the diagram will be \begin{equation}\xymatrix{ & F \ar[dl]^\phi & X_{\pi^* H} \ar[dr]^p \ar[dl]^ \phi \ar@{^{(}->}[r] & \mathrm{Fl}(3,4,10) \ar[dr]^p \\ Z_H \ar@{^{(}->}[r] & \Gr(4,10) & & X_H \ar@{^{(}->}[r] & \Gr(3,10) }\end{equation} The map $p$ is a $\mathbb{P}^6$ bundle, whereas $\phi$ is generically a $\mathbb{P}^2$ bundle specialising to a $\mathbb{P}^3$ bundle over $Z_H$. This suggests that $\TT(4,10)$ should have a $7\times$K3 type structure, and a Borel-Bott-Weil calculation confirms this. A similar construction, albeit more complicated, can be performed as well for $\TT(5,10)$, where the fibers of the map on the right hand side of the diagram are $\Gr(2,7)$. Moreover on the left side of the diagram there are three types of fibers, corresponding to (generically) smooth hyperplane sections of $\Gr(2,5)$, singular sections in codimension 3 and the whole of $\Gr(2,5)$ in codimension 10. Of course the linear sections of both $\TT(4,10)$ and $\TT(5,10)$ inherit some structure of K3 type by Lefschetz theorem (depending of course on the dimension of the linear subspace). It is interesting to notice that codimension 1 and 2 linear sections will be of mixed CY type, with an argument equivalent to the one of the previous section. Finally we remark that even $\TT(6,10)$ and $\TT(1,10)$ admit structures of K3 type: the first one is nothing but the IHS fourfold of Debarre-Voisin, while the second one can be used to construct the Peskine variety in $\mathbb{P}^9$, although formally the latter is given by a degeneracy locus. In \cite{nested} we will indeed use this approach to compute the Hodge numbers of this special variety. 
\section*{Appendix A: some extra Fano of 3-CY type} The methods of this paper can be used to produce Fano of $k$-CY type for every $k$. Another interesting case is when the variety is 3CY. This has already been considered by Iliev and Manivel in \cite{ilievmanivel}. They classified the Fano of 3CY type that can be obtained as linear or quadratic section of homogeneous space, under the additional assumption that the $H^1(T_X)$ was to be isomorphic to one of the Hodge groups of $X$. Many more examples can be found using our method, especially if this condition is not assumed. We do not write the full list here, since we believe it would not fit well with the rest of the paper. However it is worth pointing out that many of the examples can be produced as linear sections of symplectic and bisymplectic Grassmannian, with an explanation as in Lemma \ref{symphodge}. \\ Indeed such examples include $ X_1 \subset \SGr(3,10)$ and $X_1 \subset \SGr(4,9)$ in the symplectic Grassmannian and $X_1 \subset \Ml(3,9), X_1 \subset \Ml(4,9)$ and $X_2 \subset \Ml(2,6)$ for the bisymplectic. We point out that the Hodge structure of the linear section of $\SGr(3,10)$ and $\Ml(3,9)$ comes from a hyperplane section of $\Gr(3,11)$ (which is as well of 3CY type) with an argument similar to Lemma \ref{symphodge} to be fully spelled out in \cite{nested}. A different but not dissimilar argument can be made for $X_2 \subset \Ml(2,6)$ and explain how this structure of 3CY comes from $X_2 \subset \Gr(2,6)$. In the symplectic Grassmannian we find as well $X_2 \subset \SGr(4,7)$. In the Orthogonal Grassmannian we find the examples of linear sections of $\OGr(3,9), \OGr(4,9)$ and $\OGr^+(5,10)$. The latter is equivalent to a quadratic section of $\mathbb{S}_{10}$ in the spinor embedding (since the line bundle required is the square root of the Pl\"ucker one). 
This is already in the list of Iliev and Manivel, so we will not include it.\\ Another interesting example is a section $X_H \subset \mathrm{SO}(3,8)$ of the \emph{ortho-symplectic} Grassmannian. The latter is given by the zero locus of $\bigwedge^2 \mathcal{R}^\vee \oplus \Sym^2 \mathcal{R}^\vee$ on $\Gr(3,8)$. We use the notation $X_H$ to point out that, as in the case of Orthogonal Grassmannian $\OGr(3,8)$, $\mathrm{SO}(3,8)$ has Picard rank equal to 2. We checked that there are no other examples of Fano of CY3 type in the orthosymplectic Grassmannian. \\ The cohomology of the orthosymplectic Grassmannian can be computed using a torus action on it (as remarked also in \cite{benphd}), and then Lefschetz's theorem and Borel-Bott-Weil theorem allow us to compute the cohomology of its linear sections in many cases. First, notice that two general symmetric and skew symmetric forms $s,\lambda$ on a space of dimension $2n$ can be put in the following canonical form: $$s =\sum_{i=1}^{2n} s_ix_i^2; \ \ \lambda =\sum_{i=1}^{n} x_{2i-1}\wedge x_{2i}.$$
In this way, the stabilizer of these forms contains as maximal torus the group $(\mathbb{C}^*)^n$. In a similar fashion to \cite[Prop 4.2.1]{benphd}, one can prove that this maximal torus has only isolated fixed points (more precisely, $2^k{n\choose k}$) and therefore the cohomology of the orthosymplectic grassmannian is concentrated in the $(p,p)$ part (and the characteristic of the cotangent sheaf and its exterior powers give us the desired cohomology).\\ From this, we obtain the following cohomology for $X_H$: \begin{center} {\small \[\begin{matrix} &&&&&&&&1 &&&&&&&&\\ &&&&&&&0&&0&&&&&&&\\ &&&&&&0 &&2&&0&&&&&&\\
&&&&& 0 && 0 && 0 &&0&&& \\ &&&&0 &&0 && 7 &&0 && 0 &&&&\\ &&&0&&1 & & 45 & &45 && 1 && 0 &&&\\ &&&&0 &&0 && 7 &&0 && 0 &&&&\\ &&&&& 0 && 0 && 0 &&0&&& \\ &&&&&&0 &&2&&0&&&&&&\\ &&&&&&&0&&0&&&&&&&\\ &&&&&&&&1 &&&&&&&& \end{matrix}\]} \end{center} We point out that $X_H \subset \mathrm{SO}(3,8), \ X_2 \subset \SGr(4,7)$ and $X_2 \subset \Ml(2,6)$ are particularly interesting as Fano of 3CY type, since they are of dimension 5 (the minimal possible) and therefore relevant for testing a modified version of Kuznetsov's conjecture on rationality and derived categories. We collect in the next table the 3CY structure mentioned in the above discussion. We mention that in analogy with the K3 case, $\TT(2,11)$ can be considered as well as an example of 3$\times$CY structure. Taking other $k$ and appropriate number of linear sections is possible as well to produce examples of \emph{mixed} $(3,j)$ CY structure. However we do not include them in the following table. Of course more examples could be found by considering products and such as in the FK3 case, but we decided to not consider them here in order to keep the length of this paper within an acceptable limit. We of course do not include the examples already considered in \cite{ilievmanivel}.
\begin{center} \begin{tabular}{@{} *9l @{}l @{}l @{}l @{}l @{}l @{}} \toprule Type &dim. &$\iota_X$& $h^{n-1/2,n+1/2}$ \\ \midrule $X_1 \subset \OGr(3,9)$ & 11 &4& 49\\ $X_1 \subset \OGr(4,9)$ & 9 &3&70\\
$X_1 \subset \SGr(3,10)$ & 17 &7&44 \\ $X_1 \subset \SGr(4,9)$ & 13 &5&45 \\ $X_1 \subset \Ml(3,9)$ & 11 &4& 44\\ $X_1 \subset \Ml(4,9)$ & 7 &2&45 \\ $X_2 \subset \Ml(2,6)$ & 5 &2&67 \\ $X_H \subset \mathrm{SO}(3,8)$ & 5 &1& 45 \\ $X_2 \subset \SGr(4,7)$ & 5 &2& 72 \\
\bottomrule
\end{tabular} \captionof{table}{3CY structure in $\OGr, \SGr, \Ml$ and $\mathrm{SO}$} \label{table3} \end{center}
\section*{Appendix B: infinite CY series} During our search we identified some interesting classes of varieties. Although not directly related to the main story of this paper, they have some interesting features that are worth underlining. As an example we identified some interesting infinite families of varieties (of every dimension) with trivial canonical bundle obtained using the same bundles in different Grassmannians. We checked that these varieties are actually Calabi-Yau for low dimension (up to 6). We expect this to hold in general. \\ We describe now these series of varieties, according to the type of bundles involved. \begin{align*} &A(k,l):= \mathcal{Q}(1) \oplus \bigwedge^2\mathcal{R}^{\vee} \textrm{ on } \Gr(k, k+l);\\ &B(k,l):= \mathcal{Q}^{\vee}(1) \oplus \Sym^2 \mathcal{R}^{\vee}\textrm{ on } \Gr(k, k+l);\\ &C(k, k+1):=\Sym^2 \mathcal{R}^{\vee}\oplus \bigwedge^2\mathcal{R}^{\vee}\oplus\mathcal{O}(1) \textrm{ on } \Gr(k,2k+1). \end{align*} $A(k,l)$ has dimension $l(k-1)-\binom{k}{2}$, $B(k,l)$ has dimension $l(k-1)-\binom{k+1}{2}$ and $C(k)$ has dimension $k-1$. Notice that $A(k,l)$ can naturally be seen as $Z_{\mathcal{Q}(1)} \subset \SGr(k,k+l)$, $B(k,l)$ as $Z_{\mathcal{Q}^{\vee}(1)} \subset \OGr(k, k+l)$ and $C$ is a linear section of the ortho-symplectic Grassmannian. In particular, as in \cite{kuznetsovc5} in the case of bisymplectic Grassmannian, one can prove that \[ C(k,k+1) \cong X_{(2, \ldots, 2)} \subset (\mathbb{P}^1)^k.\]\\ When $k=2$, the zero locus of a general global section of $A(2,l)$ is indeed a deformation of a complete intersection given by $(\mathcal{O}(1))^{l+3} $ on $\Gr(2, l+3)$. Indeed first notice that on $\Gr(2, l+3)$ we have $(\mathcal{O}(1))^{l+3} \cong \mathcal{Q}(1) \oplus \mathcal{R}(1)\cong \mathcal{Q}(1) \oplus \mathcal{R}^{\vee} $. 
Then notice that the zero locus of a general global section of $ \mathcal{Q}(1) \oplus \mathcal{R}^{\vee}$ on $\Gr(2, l+3)$ is isomorphic to the zero locus of a general global section of $\mathcal{Q}(1) \oplus \mathcal{O}(1)$ on $\Gr(2, l+2)$. This follows from the standard fact that $Z_{\mathcal{R}^{\vee}} \subset \Gr(k, n) \cong \Gr(k, n-1)$ and that under this isomorphism $\mathcal{Q}(1)_{k,n}$ projects to $\mathcal{Q}(1)_{k,n-1} \oplus \mathcal{O}(1)$.\\ For dimension $d=2,3,4$ we refer to \cite{benedetti}, \cite{inoue2016complete}. For $d=5,6$ the Calabi-Yaus in the series $A$ and $B$ are the following. We do not include $B(5,5)$, since it can be seen as a deformation of the double spinor variety studied by Manivel in \cite{manivelspinor}. The Hodge numbers are computed in Proposition 3.3, together with the fact that this family is locally complete. Since $C$ is indeed a well-known class of varieties in disguise, we will not compute the Hodge numbers for the first values of the series. In the following list of invariants we do not include trivially known Hodge numbers such as $h^{0, n}$ either. Moreover the numbers not listed are always $0$.
\begin{center} \begin{tabular}{@{} *9l @{}l @{}l @{}l @{}l @{}l @{}} \toprule dim. & Type &$h^{1,1}$& $h^{2,2}$&$h^{4,1}$&$h^{3,2}$\\ \midrule
5 & $A(2,6)$ & 1&2& 163 & 1784 \\ 5 & $A(3,4)$ & 1&2& 148&1619 \\ 5 & $B(4,5)$ & 1&2& 165&1806 \\
\bottomrule
\end{tabular} \captionof{table}{First values of infinite series for fivefolds} \label{table5} \end{center}
\begin{center} \begin{tabular}{@{} *9l @{}l @{}l @{}l @{}l @{}l @{}} \toprule dim. & Type &$h^{1,1}$& $h^{2,2}$&$h^{5,1}$&$h^{4,2}$&$h^{3,3}$\\ \midrule
6 & $A(2,7)$ & 1&2& 251&5202&14004 \\ 6 & $A(4,4)$ & 1&1& 251&5181&13960\\ 6 & $B(2,9)$ & 1&2& 120 &2254&6274\\ 6 & $B(3,6)$ & 1&2& 125&2380&6596 \\
\bottomrule
\end{tabular} \captionof{table}{First values of infinite series for sixfolds} \label{table4} \end{center}
An interesting question, which however falls beyond the scope of this paper, is to investigate whether the varieties constructed in this way are generic in moduli, that is whether all of their deformations are embedded in the same Grassmannian. This can be done by a direct computation of $h^1(TG|_X)$ using Koszul complex and Borel-Bott-Weil theorem, however these calculations are quite demanding in each specific case, and a general argument is out of reach.
\section*{Appendix C: fake K3 structure} The numerical condition in \eqref{condition} restricted most of our search to vector bundles in which one of the irreducible summands is linear. One can of course try to rearrange this condition in order to eliminate the constraint. Indeed this is geometrically meaningful, as for example $\TT(2,10)$ shows (it is a zero locus of an indecomposable bundle that is non-linear, with slope $\mu=c_1(E)/r(E)=7/8$). It is possible, and we plan to do so, to fully investigate this case. \\ During a preliminary search we found this example, the zero locus $X_{\mathcal{R}^\vee(1)} \subset \Gr(2,6)$. It is a sixfold of index 3, defined by a bundle of slope $\mu=3/2$, satisfying all our preliminary numerological conditions. Although it is not of K3 type, it is rather curious, and we decided to add it anyway. Indeed its Hodge numbers are
\begin{center} {\small \[\begin{matrix} &&&&&&&&1 &&&&&&&&\\ &&&&&&&0&&0&&&&&&&\\ &&&&&&0 &&1&&0&&&&&&\\
&&&&& 0 && 0 && 0 &&0&&& \\ &&&&0 &&0 && 2 &&0 && 0 &&&&\\ &&&0&&0 & & 0 & &0 && 0 && 0 &&&\\ &&0 && 0 && 0 &&22 &&0 &&0 && 0&&\\ &&&0&&0 & & 0 & &0 && 0 && 0 &&&\\ &&&&0 &&0 && 2 &&0 && 0 &&&&\\ &&&&& 0 && 0 && 0 &&0&&& \\ &&&&&&0 &&1&&0&&&&&&\\ &&&&&&&0&&0&&&&&&&\\ &&&&&&&&1 &&&&&&&& \end{matrix}\]} \end{center}
The absence of the 1 in $h^{4,2}$ is explained by a Borel-Bott-Weil computation, since an inconvenient cancellation in the spectral sequence occurs. It is possible that some higher-dimensional analogue of this \textit{false positive} may occur, although we expect this to be quite an exception and not the general rule.
\end{document} |
\begin{document}
\title[The Perpendicular Bisector Construction in $n$-Dimensional geometry]{The Perpendicular Bisector Construction in $n$-Dimensional Euclidean and Non-Euclidean Geometries}
\author{Emmanuel Tsukerman}
\date{\today} \begin{abstract} The {}``Perpendicular Bisectors Construction'' is a natural way to seek a replacement for the circumcenter of a noncyclic quadrilateral in the plane. In this paper, we generalize this iterative construction to a construction on polytopes with $n$ vertices in $(n-2)$-dimensional Euclidean, Hyperbolic and Elliptic geometries. We then show that a number of nice properties concerning this iterative construction continue to hold in these geometries. We also introduce an analogue of the isoptic point of a quadrilateral, which is the limit point of the Perpendicular Bisectors Construction, in $\mathbb{R}^{n}$ and prove some of its properties. \end{abstract} \maketitle
\section{Background}
A natural way to seek a replacement for the circumcenter of a cyclic planar quadrilateral in the case when the quadrilateral is noncyclic is to proceed with the following iterative construction: \begin{itemize} \item For every $3$ vertices of a quadrilateral $Q^{(1)}$, determine the circumcenter. The resulting 4 points form a new quadrilateral $Q^{(2)}$. The construction can then be iterated on $Q^{(2)}$ and then on $Q^{(3)}$, etc. \end{itemize} This construction is known as the {}``Perpendicular Bisectors Construction'' since the sides of $Q^{(i+1)}$ are determined using the perpendicular bisectors of the sides of $Q^{(i)}$.
The construction is so natural that it was looked at before a number of times. In particular, the following problem about the Perpendicular Bisectors Construction was proposed by Josef Langr \cite{Langr} in 1953:
\emph{The perpendicular bisectors of the sides of a quadrilateral $ABCD$ form a quadrilateral $A_{1}B_{1}C_{1}D_{1}$ and the perpendicular bisectors of the sides of $A_{1}B_{1}C_{1}D_{1}$ form a quadrilateral $A_{2}B_{2}C_{2}D_{2}$. Show that $A_{2}B_{2}C_{2}D_{2}$ is similar to $ABCD$ and find the ratio of similitude.}
Given that the problem is relatively simple, it is surprising that no solutions were published in English for over half a century. The problem was mentioned by C.S. Ogilvy (\cite{Ogilvy}, p. 80) as an example of an unsolved problem. According to an article on Alexander Bogomolny's \emph{Cut-the-knot }website \cite{cut-the-knot}, \emph{{}``B. Grünbaum \cite{Grunbaum} wrote about the problem in 1993 as an example of an unproven problem whose correctness could not be doubted... {[}D. Schattschneider{]} proved several particular cases of the problem, but the general problem remained yet unsolved. It looks like, by that time, the problem made it into the mathematical folklore. It reached Dan Bennett by the word of mouth and its simplicity had piqued his interest. He published a solution \cite{D. Bennet} in 1997 to a major part of the problem under an additional assumption that was promptly removed by J. King \cite{King} who (independently) also supplied a proof based on the same ideas''}. A paper by G.C. Shepard \cite{Shepard} also found an expression for the ratio, and several simpler forms of the expression are given by Radko and Tsukerman in \cite{RadkoTsukerman}.
In the same paper, Radko and Tsukerman show that the construction (or, if $ABCD$ is non-convex, the reverse construction) has a limit called the \textit{isoptic point}, due to its property of {}``being seen'' at equal angles from each of the triad circles of the quadrilateral. This point has many beautiful properties, such as having a parallelogram pedal, being the unique intersection of the $6$ circles of similitude of a quadrilateral and having many of the properties expected of a replacement of the circumcenter.
\section{Main Results}
We introduce a generalization to the Perpendicular Bisectors Construction, which we apply to polytopes with $n$ vertices in $(n-2)$-dimensional Euclidean, Hyperbolic and Elliptic geometries. We prove the remarkable property that for any dimension and any geometry previously mentioned, the $i$th generation polytope $P^{(i)}$ and $(i+2)$th generation polytope $P^{(i+2)}$ are in perspective for each $i$. After showing how the iterative construction in any of the geometries can be reversed via isogonal conjugation, we show that in the case of Euclidean geometry, all $P^{(2k)}$ are homothetic and all $P^{(2k+1)}$ are homothetic, and the center of homothecy is the same for both families of polytopes. Finally, we define an analogue of the isoptic point in $\mathbb{R}^{n}$ and prove some of its properties.
\section{Preliminaries and Notation}
We consider $d$-dimensional Euclidean, Hyperbolic or Elliptic space, where $d=n-2$. Recall that a hyperplane is a $(d-1)$-flat and that the mediator hyperplane of a segment $P_{1}P_{2}$, denoted $PB(P_{1}P_{2})$ throughout, is the hyperplane passing through the midpoint of $P_{1}P_{2}$ orthogonal to that segment. By a hypersphere, we will specifically mean a $(d-1)$-sphere. A facet of a polytope is a face with affine dimension $d-1$.
Our approach to proving the perspectivity $P^{(i)}$ and $P^{(i+2)}$ will naturally involve projective geometry. Specifically, we will view Euclidean, Hyperbolic and Elliptic geometries as embedded inside of real projective $d$-space. For the convenience of the reader, we now give a brief overview of how to do so.
Recall that a correlation in real projective $n$-space $\mathbb{R}P^{n}$ is a one-to-one linear transformation taking points into hyperplanes and vice versa. A \textit{polarity} is an involutory correlation, and we call the image of a point $P$ under a polarity its \textit{polar}, and the image of a hyperplane $q$ its \textit{pole}. We shall utilize the following facts: \begin{enumerate} \item A point $P$ is on the polar of a point $Q$ under a given polarity if and only if $Q$ is on the polar of $P$ under this same polarity. Similarly, a hyperplane $p$ is incident to the pole of hyperplane $q$ if and only if $q$ is incident to the pole of $p$. We call such $P$ and $Q$ \textit{conjugate points} and such $p$ and $q$ \textit{conjugate hyperplanes}. A point that lies on its own polar is called a \textit{self-conjugate point}. Similarly, a hyperplane incident with its own pole is a \textit{self-conjugate hyperplane}. \item A nonempty set of self-conjugate points with respect to a given polarity is a quadric and any quadric is a set of self-conjugate points with respect to some polarity. \end{enumerate} As an illustration, given a point $P$ outside of a conic in the projective plane, there are two tangents passing through $P$. The polar of $P$ is the line incident to the two points of tangency.
\begin{figure}
\caption{Pole-polar relation in the projective plane \cite{Mathworld}.}
\end{figure}
To obtain Euclidean, Hyperbolic and Elliptic geometries as subgeometries of projective geometry we fix a polarity and an associated quadric $\Gamma$, depending on the geometry. We make the following identifications in the projective plane, and the more general identifications for projective $n$-space are similar. \begin{enumerate} \item For Hyperbolic geometry, the points inside of the quadric are the (ordinary) points of the geometry, points on the quadric are \textit{ideal points} and points outside of the quadric are \textit{hyperideal points}. Hyperbolic lines are the parts of the projective lines having ordinary points. Two hyperbolic lines are parallel (ultraparallel) if the corresponding projective lines intersect in ideal (hyperideal) points. They are perpendicular if they are conjugate with respect to $\Gamma$. \item For Elliptic geometry, the ordinary points are the points of the projective plane and the lines are the lines of the projective plane. Two elliptic lines are perpendicular if the corresponding projective lines are conjugate with respect to $\Gamma$. \item In Euclidean geometry, the ordinary points are the points of the projective plane not on $\Gamma$ and the ideal points are the points on $\Gamma$. Two lines are perpendicular if their ideal points correspond under the absolute projectivity. \end{enumerate} We refer the reader to \cite{Cederberg} for a more comprehensive discussion on the subgeometries of projective space, e.g., on the definition of angles, distances, etc.
$ $
We list here some of the notation which will be employed throughout: \begin{lyxlist}{00.00.0000} \item [{$N^{(i)}$}] \noindent is the $i$th generation set of vertices constructed via the iterative process. \item [{$(P_{1}\cdot\cdot\cdot P_{n-1})$}] \noindent will denote the unique hypersphere through points $P_{1},...,P_{n-1}$. \item [{$PB(P_{1}P_{2})$}] \noindent will denote the mediator of line segment $P_{1}P_{2}$. \item [{$PB(H_{1},H_{2})$}] \noindent will denote a common perpendicular of hyperplanes $H_{1}$ and $H_{2}$. In Euclidean geometry, this will simply mean that $PB(H_{1},H_{2})$ is perpendicular to both $H_{1}$ and $H_{2}$. \item [{$Iso_{P_{1}\cdot\cdot\cdot P_{n-1}}P_{n}$}] \noindent denotes the isogonal conjugate of the point $P_{n}$ in the simplex $P_{1}\cdot\cdot\cdot P_{n-1}$. \item [{$P^{(i)}\sim P^{(j)}$}] \noindent denotes that a polytope $P^{(i)}$ with vertices $N^{(i)}$ can be chosen to have the same combinatorial type as a polytope $P^{(j)}$ with vertices $N^{(j)}$, and $P^{(i)}$ and $P^{(j)}$ are similar.
\item [{$|P^{(i)}|$}] \noindent denotes the volume of $P^{(i)}$. \end{lyxlist}
\section{The Generalized Iterative Process}
Consider a set $N^{(1)}$ of $n$ points $V_{1},V_{2},...,V_{n}$ in $(n-2)$-dimensional space $V$. For convenience, we will say that $V_{i}=V_{n+i}$ for each $i$. When $V$ is Euclidean geometry, we will require that any $n-1$ of the points be affinely independent, and in Hyperbolic geometry, we will also require that any $n-1$ of the points can be circumscribed in a hypersphere, i.e. the circumcenter of the hypersphere is an ordinary, rather than ideal or hyperideal, point. Our generalization of the iterative process is as follows. \begin{itemize} \item For each vertex $V_{i}$, $i=1,...,n$, construct the center $V_{i}^{(2)}$ of a hypersphere $(V_{i+1}\cdot\cdot\cdot V_{i+n-1})$. \end{itemize} The vertices $V_{i}^{(2)}$, $i=1,...,n$, determine a new set of $n$ points, which we will denote by $N^{(2)}$. \begin{itemize} \item The construction is then repeated on $N^{(2)}$ to produce $N^{(3)}$, etc. \end{itemize} It is easy to see that $N^{(2)}$ degenerates to a single point if and only if the points of $N^{(1)}$ are conhyperspherical, meaning that they can be inscribed in a hypersphere.
Moreover, \begin{lem} \label{lem:affinely dependent}In $(n-2)$-dimensional Euclidean geometry, the set $N^{(2)}$ contains a point at infinity (an ideal point) if and only if some $n-1$ points of $N^{(1)}$ are affinely dependent.\end{lem} \begin{proof} Assume that some $n-1$ points $V_{i}=(x_{i1},...,x_{id}),$ $i=1,...,n-1$, of $N^{(1)}$ are affinely dependent. The equation of the hypersphere passing through such $n-1$ points is given by \[
\left|\left(\begin{array}{cccccccc} x_{1}^{2}+x_{2}^{2}+...+x_{d}^{2} & x_{1} & x_{2} & \cdot & \cdot & \cdot & x_{d} & 1\\ x_{11}^{2}+x_{12}^{2}+...+x_{1d}^{2} & x_{11} & x_{12} & \cdot & \cdot & \cdot & x_{1d} & 1\\ x_{21}^{2}+x_{22}^{2}+...+x_{2d}^{2} & x_{21} & x_{22} & \cdot & \cdot & \cdot & x_{2d} & 1\\ \cdot & \cdot & \cdot & \cdot & & & \cdot & \cdot\\ \cdot & \cdot & \cdot & & \cdot & & \cdot & \cdot\\ \cdot & \cdot & \cdot & & & \cdot & \cdot & \cdot\\ x_{n-1,1}^{2}+x_{n-1,2}^{2}+...+x_{n-1,d}^{2} & x_{n-1,1} & x_{n-1,2} & \cdot & \cdot & \cdot & x_{n-1,d} & 1
\end{array}\right)\right|=0 \]
By expanding minors across the first row, we can find the coefficient of the quadratic terms of the hypersphere to be \[
\left|\left(\begin{array}{ccccccc} x_{11} & x_{12} & \cdot & \cdot & \cdot & x_{1d} & 1\\ x_{21} & x_{22} & \cdot & \cdot & \cdot & x_{2d} & 1\\ \cdot & \cdot & \cdot & & & \cdot & \cdot\\ \cdot & \cdot & & \cdot & & \cdot & \cdot\\ \cdot & \cdot & & & \cdot & \cdot & \cdot\\ x_{n-1,1} & x_{n-1,2} & \cdot & \cdot & \cdot & x_{n-1,d} & 1
\end{array}\right)\right|, \]
which is zero, so the center of the hypersphere is an ideal point.
Conversely, if the center of the hypersphere is ideal, the coefficient of the quadratic terms must be zero. \end{proof} We will call $N^{(k)}$ degenerate if it contains $n-1$ points which are affinely dependent. \begin{lem} In $(n-2)$-dimensional Euclidean geometry, if the set $N^{(1)}$ is nondegenerate and is not conhyperspherical, then any $n-1$ points of $N^{(2)}$ are affinely independent.\end{lem} \begin{proof} For convenience, we denote the hypersphere $(V_{i+1}\cdot\cdot\cdot V_{n+i-1})$ by $(V_{i}^{(2)})$. Assume by contradiction that some $n-1$ points $V_{1}^{(2)},V_{2}^{(2)},...,V_{n-1}^{(2)}$ of $N^{(2)}$ are affinely dependent. Then they must lie on a $(d-1)$-flat. We can then set up our coordinate system so that hypersphere $(V_{i}^{(2)})$ has the expression \[ (x_{1}-c_{ix_{1}})^{2}+(x_{2}-c_{ix_{2}})^{2}+...+(x_{d-2}-c_{ix_{d-2}})^{2}+(x_{d-1}-c_{ix_{d-1}})^{2}+x_{d}^{2}=r_{i}^{2}. \]
The intersection of any two hyperspheres $(V_{i}^{(2)})\cap(V_{j}^{(2)})$ contains $n-2$ points from $N^{(1)}$, distinct by hypothesis, so that the hyperspheres are non-tangential. In addition, the intersection lies on a hyperplane of the form \[ x_{1}(2c_{ix_{1}}-2c_{jx_{1}})+...+x_{d-1}(2c_{ix_{d-1}}-2c_{jx_{d-1}})=r_{j}^{2}-r_{i}^{2}+(c_{ix_{1}}^{2}-c_{jx_{1}}^{2})+...+(c_{ix_{d-1}}^{2}-c_{jx_{d-1}}^{2}). \]
It is easy to see then that the points $V_{n-2},V_{n-1},V_{n}\in\bigcap_{i=1}^{d-1}(V_{i}^{(2)})$ lie on a $2$-flat parallel to the $2$-flat on which $V_{1},V_{n-1},V_{n}\in\bigcap_{i=2}^{d}(V_{i}^{(2)})$ lie. Since the two planes intersect, they must be equal. Therefore $V_{1},V_{n-2},V_{n-1},V_{n}$ are affinely dependent, a contradiction. \end{proof} From now on, we will tacitly assume that $N^{(1)}$ is nondegenerate and not conhyperspherical.
Our approach to proving the perspectivity $N^{(i)}$ and $N^{(i+2)}$ will naturally be through projective geometry. Specifically, we will view Euclidean, Hyperbolic and Elliptic geometries as embedded inside of real projective $n$-space. See the preliminaries section for an overview of the relevant facts on projective geometry.
Let $\Gamma$ be a quadric in real projective $(n-2)$-space $\mathbb{R}P^{n-2}$. Choose a polarity that fixes $\Gamma$. Let $H_{i}$ and $H_{i}'$ for $i=1,...,m$ be $m$ pairs of distinct hyperplanes and let $H_{i}\cap H_{i}'=h_{i},\mbox{ }i=1,...,m$. In addition, let $H_{i}''$ be the polar of $h_{i}$ for each $i=1,...,m$. We then have \begin{lem} \label{lem:quadric and polars}The $m$ $(n-4)-$flats $h_{1},...,h_{m}$ lie on a hyperplane if and only if the $m$ lines $H_{1}'',...,H_{m}''$ are concurrent.\end{lem} \begin{proof} Assume first that $h_{1},...,h_{m}$ lie on a hyperplane $L$. Then $L$ is a conjugate hyperplane with respect to each $H_{i}''$. Therefore the $H_{i}''$ all pass through the pole of $L$, which is a point.
Conversely, assume that the $H_{i}''$ are concurrent at a point $P$. Then $P$ is conjugate to each $h_{i}$, so the $h_{i}$ all lie on the polar of $P$, which is a hyperplane.
\begin{figure}
\caption{Lemma \ref{lem:quadric and polars} on $\mathbb{S}^{2}$. Each $H_{i}''$ $(i=1,2,3)$ is the common perpendicular of $H_{i}$ and $H_{i}'$. The points $h_{1},h_{2},h_{3}$ are collinear implying that $H_{1}'',H_{2}'',H_{3}''$ are concurrent, and conversely. }
\end{figure}
\end{proof} The analogue of lemma \ref{lem:quadric and polars} in Euclidean geometry that is of interest to us is the following trivial statement. As before, we have $m$ pairs of hyperplanes $H_{i}$ and $H_{i}'$ for $i=1,...,m$. Then $H_{i}$ and $H_{i}'$ are parallel if and only if some $m$ lines $H_{1}'',...,H_{m}''$, with each $H_{i}''$ perpendicular to both $H_{i}$ and $H_{i}'$, are concurrent. We are now ready to prove the following Theorem: \begin{thm} \label{thm: Perspectivity in a point}In $(n-2)$-dimensional Euclidean, Hyperbolic and Elliptic geometries, the sets of $n$ points $N^{(k)}$ and $N^{(k+2)}$ are perspective in a point.\end{thm} \begin{proof} Without loss of generality, assume that $k=1$. For simplicity, we will denote $N^{(1)}$ by $N$, and the points $V_{i}^{(1)}$ similarly. Let $N_{a,b}=\{V_{1},V_{2},...,V_{n}\}\setminus\{V_{a},V_{b}\}$ and let $H_{a,b}$ be the supporting hyperplane of $N_{a,b}$. Define $H_{a,b}^{(2)}$ similarly. By construction, line $V_{a}^{(1)}V_{b}^{(1)}$ is a common perpendicular to $H_{a,b}$ and $H_{a,b}^{(2)}$. As we vary $b\in\{1,...,n\}\setminus\{a\}$, we obtain $n-1$ such lines all concurrent at point $V_{a}^{(1)}$. By the converse of lemma \ref{lem:quadric and polars} with $m=n-1$, the elements of the set $\{H_{a,b}\cap H_{a,b}^{(2)}\vert b\in\{1,...,n\}\setminus\{a\}\}$ lie on a hyperplane. Now consider the simplices $S_{a}=V_{a+1}\cdot\cdot\cdot V_{n+a-1}$ and $S_{a}^{(2)}=V_{a+1}^{(2)}\cdot\cdot\cdot V_{n+a-1}^{(2)}$. The facets of $S_{a}$ and $S_{a}^{(2)}$ are $H_{a,b}$ and $H_{a,b}^{(2)}$ with $b\neq a$, respectively. We apply the generalized Desargues theorem for $d$-dimensional space to the two simplices (see \cite{Bell}), to conclude that they are perspective in a point. Call this point $W$. By considering another pair of simplices, we conclude that they too must be perspective in $W$, because the simplices of the same generation share parts, so that $N$ and $N^{(2)}$ are in perspective about $W$.
\begin{figure}
\caption{Two special cases of Theorem \ref{thm: Perspectivity in a point}: On the left is $\mathbb{S}^{2}$ and on the right is $2$-dimensional Hyperbolic space viewed in the Poincar\'{e} disk model. The points $A_{j},B_{j},C_{j}$ and $D_{j}$ are the members of the set $N^{(j)}$. The point $W$ is the point about which $N^{(1)}$ and $N^{(3)}$ are in perspective. }
\end{figure}
\end{proof} From the proof, it is not hard to see that \begin{cor} \label{thm:odd and even homothetic}In $(n-2)$-dimensional Euclidean geometry, all sets of the form $N^{(2i+1)}$ are homothetic, and all sets of the form $N^{(2k)}$ are homothetic. \end{cor} \begin{flushleft} We will now show how to reverse the iterative construction, so that given $N^{(i+1)}$ we can determine $N^{(i)}$. Recall that the isogonal conjugate of a point $P$ with respect to a triangle $\triangle ABC$ in the plane is the point of intersection of the three lines obtained by reflecting line $PA$ in the angle bisector of $\angle A$, line $PB$ in the angle bisector of $\angle B$ and line $PC$ in the angle bisector of $\angle C$. In case that $P$ lies on the circumcircle of $\triangle ABC$, the isogonal conjugate is an ideal point. For a more thorough discussion of isogonal conjugation in $\mathbb{R}^{2}$ and $\mathbb{R}^{3}$, we refer the reader to \cite{Grinberg} and \cite{Court} respectively. \par\end{flushleft}
\begin{flushleft} For our purposes, we will not be using this definition of the isogonal conjugate due to the ease and generality of the following definition, which is equivalent to the former in $\mathbb{R}^{2}$ and $\mathbb{R}^{3}$: \par\end{flushleft} \begin{defn*} \begin{flushleft} Let $S=P_{1}\cdot\cdot\cdot P_{d+1}$ be a simplex in $d$-dimensional space $V$, $P$ be a point not equal to $P_{1},...,P_{d+1}$ and $P_{1}',...,P_{d+1}'$ be the $d+1$ reflections of $P$ in the facets of $S$. Then the isogonal conjugate of $P$ with respect to $S$, denoted by $Iso_{S}P=Iso_{P_{1}\cdot\cdot\cdot P_{d+1}}P$, is the center of the hypersphere $(P_{1}'\cdot\cdot\cdot P_{d+1}')$. \par\end{flushleft} \end{defn*} \begin{flushleft} The following property shows that isogonal conjugation is an involution. \par\end{flushleft} \begin{lem} \label{lem:Iso Iso P =00003D P}With respect to any simplex $S$ in Euclidean, Hyperbolic or Elliptic geometry, $Iso_{S}Iso_{S}P=P$.\end{lem} \begin{proof} Let $Q=Iso_{S}P$ and for $i=1,...,d+1$, let $P_{i}'$ and $Q_{i}'$ be the reflection of $P$, respectively $Q$, in the facet opposite to $P_{i}$. We then have $PQ_{i}'=QP_{i}'$ for each $i=1,...,d+1$. As $Q$ is the center of the hypersphere $(P_{1}'\cdot\cdot\cdot P_{d+1}')$, we also have $QP_{i}'=QP_{j}'$ for every $i,j\in\{1,...,d+1\}$. Since $PQ_{j}'=QP_{j}'$, $PQ_{j}'=QP_{i}'=PQ_{i}'$, so that $P$ is equidistant from all the $Q_{k}'$. Therefore $P$ is the center of the hypersphere $(Q_{1}'Q_{2}'\cdot\cdot\cdot Q_{d+1}')$. \end{proof} Recall that we are using the notation that $V_{i}^{(2)}=V_{i+n}^{(2)}$. 
The following Theorem allows us to reverse the iterative process: \begin{thm} \label{Thm:Reversing with Isogonal Conjugation}In $n$-dimensional Euclidean, Hyperbolic and Elliptic geometry, $Iso_{V_{i+1}^{(2)}\cdot\cdot\cdot V_{n+i-1}^{(2)}}V_{i}^{(2)}=V_{i}$.\end{thm} \begin{proof} Consider the reflections of the vertex $V_{i}$ in each of the facets of the simplex $V_{i+1}^{(2)}\cdot\cdot\cdot V_{n+i-1}^{(2)}$. Since the facets of this simplex are the mediators $PB(V_{i}V_{j}),\forall j\in\{1,...,n\}\setminus\{i\}$, reflecting $V_{i}$ in them results in the points $V_{j}$, $\forall j\in\{1,...,n\}\setminus\{i\}$. The center of the hypersphere $(V_{i+1}\cdot\cdot\cdot V_{n+i-1})$ is by definition $V_{i}^{(2)}$, so that $Iso_{V_{i+1}^{(2)}\cdot\cdot\cdot V_{n+i-1}^{(2)}}V_{i}=V_{i}^{(2)}$. By lemma \ref{lem:Iso Iso P =00003D P}, $Iso_{V_{i+1}^{(2)}\cdot\cdot\cdot V_{n+i-1}^{(2)}}V_{i}^{(2)}=V_{i}$. \end{proof} We will now shift our attention from the sets $N^{(i)}$ to the polytopes $P^{(i)}$ with vertices $N^{(i)}$. \begin{defn*} Two polytopes $P$ and $P'$ are said to be \textit{combinatorially equivalent }(or of the same \textit{combinatorial type}) provided there exists a bijection $\phi$ between the set $\{F\}$ of all faces of $P$ and the set $\{F'\}$ of all faces of $P'$, such that $F_{1}\subset F_{2}$ if and only if $\phi(F_{1})\subset\phi(F_{2})$ \cite{Grunbaum2}. \end{defn*} We will say that $P^{(i)}\sim P^{(j)}$ when a polytope $P^{(i)}$ with vertices $N^{(i)}$ can be chosen to have the same combinatorial type as a polytope $P^{(j)}$ with vertices $N^{(j)}$, and $P^{(i)}$ and $P^{(j)}$ are similar.
Corollary \ref{thm:odd and even homothetic} then implies that in $(n-2)$-dimensional Euclidean geometry,
\[ P^{(1)}\sim P^{(3)}\sim P^{(5)}\sim... \]
and
\[ P^{(2)}\sim P^{(4)}\sim P^{(6)}\sim... \]
Let $|P^{(i)}|$ denote the volume of $P^{(i)}$. From Corollary \ref{thm:odd and even homothetic} it follows that for all $i,j$ and $k$, \[
\frac{|P^{(i)}|}{|P^{(i+2k)}|}=\frac{|P^{(j)}|}{|P^{(j+2k)}|}. \]
In the case $d=2$, it is also true that $\frac{|P^{(i)}|}{|P^{(i+k)}|}=\frac{|P^{(j)}|}{|P^{(j+k)}|}$ . In fact, it is shown in \cite{RadkoTsukerman} that \[
\frac{|P^{(k+1)}|}{|P^{(k)}|}=\frac{1}{4}(\cot\alpha+\cot\gamma)\cdot(\cot\beta+\cot\delta), \] where $\alpha,\beta,\gamma$ and $\delta$ are the angles of the quadrilateral $P^{(1)}$. However, experiment shows that the ratio of volumes of consecutive polyhedra is not in general only dependent on $P^{(1)}$. Another property that holds for $d=2$, but not generally, is that if $P^{(1)}$ is nondegenerate and noncyclic, then $P^{(2)}$ is never cyclic. An easy way to see this is by applying isogonal conjugation as in Theorem \ref{Thm:Reversing with Isogonal Conjugation}, which shows that $P^{(1)}$ must be at infinity. On the other hand, for $d=3$, we can construct an example where $P^{(1)}$ is nondegenerate and nonconhyperspherical and $P^{(2)}$ is conhyperspherical by using the same Theorem \ref{Thm:Reversing with Isogonal Conjugation} (see figure \ref{fig:cyclic P^1}) because the isogonal conjugate of a point on the circumsphere is not in general at infinity.
\begin{figure}
\caption{The polyhedron $P^{(1)}$ is constructed to be not conhyperspherical. The polyhedron $P^{(2)}$ obtained from $P^{(1)}$ via the Perpendicular Bisector Construction, on the other hand, is inscribed in a sphere. The next generation polyhedron $P^{(3)}$ is the center of the sphere. This phenomenon that $P^{(i)}$ is noncyclic but $P^{(i+1)}$ is cyclic cannot occur in $\mathbb{R}^{2}$.}
\label{fig:cyclic P^1}
\end{figure}
\section{The Isoptic Point in $\mathbb{R}^{d}$}
We now show that any pair of odd and any pair of even generation polytopes are homothetic about the same point: \begin{thm} \label{thm:W is universal in R^n}The center of homothety $W^{(1)}$ of any pair of polytopes $P^{(2i+1)}$, $P^{(2j+1)}$ coincides with the center of homothety $W^{(2)}$ of any pair of polytopes $P^{(2k)}$, $P^{(2l)}$. \end{thm} \begin{proof} Let $M$ be the midpoint of segment $V_{a}V_{b}$ and $M^{(3)}$ that of $V_{a}^{(3)}V_{b}^{(3)}$. For $c\notin\{a,b\}$, $V_{c}^{(2)}$ lies on the perpendicular bisector of $V_{a}V_{b}$, so that $V_{a}MV_{c}^{(2)}$ forms a right triangle. Similarly, $V_{a}^{(3)}M^{(3)}V_{c}^{(4)}$ is a right triangle. Since $P^{(1)}\sim P^{(3)}$, and $V_{c}$ ($V_{c}^{(3)})$ is the center of the hypersphere through all $V_{i}$ ($V_{i}^{(3)})$ , $i\in\{1,...,n\}\setminus\{c\}$, the two triangles are similar, hence homothetic. Therefore $W^{(1)}=V_{b}V_{b}^{(3)}\cap M^{(1)}M^{(3)}\cap V_{c}^{(2)}V_{c}^{(4)}$. Now consider $V_{d}^{(2)}$ and $V_{d}^{(4)}$ in place of $V_{c}^{(2)}$ and $V_{c}^{(4)}$ for some $d\notin\{a,b,c\}$. Then by the same reasoning, $W^{(1)}=V_{b}V_{b}^{(3)}\cap M^{(1)}M^{(3)}\cap V_{d}^{(2)}V_{d}^{(4)}$. But $W^{(2)}=V_{c}^{(2)}V_{c}^{(4)}\cap V_{d}^{(2)}V_{d}^{(4)}$. Therefore $W^{(1)}=W^{(2)}$ . \end{proof} \begin{figure}
\caption{An example illustrating Theorem \ref{thm:W is universal in R^n}: The polyhedra $P^{(1)}$ and $P^{(3)}$ and the polyhedra $P^{(2)}$ and $P^{(4)}$ in $\mathbb{R}^{3}$ are homothetic about the same point $W$.}
\end{figure}
We will call this {}``universal'' center of homothety $W$. This point can be seen as the limit of the construction when $\frac{|P^{(1)}|}{|P^{(3)}|}>1$
and the limit of the reverse construction when $\frac{|P^{(1)}|}{|P^{(3)}|}<1$.
In the case $d=2$, this point is called the \textit{Isoptic point }due to its property of subtending equal angles at each triad circle of the quadrilateral\textit{ }(see Radko and Tsukerman \cite{RadkoTsukerman})\textit{. }In $\mathbb{R}^{2}$, $W$ has many properties that are analogous to those of the circumcenter. More generally, if $N^{(1)}$ is approaching a conhyperspherical configuration, then the limit of $W$ is the circumcenter of $N^{(1)}$.
Finally, we pose the following problem. It is shown in \cite{RadkoTsukerman} that in $\mathbb{R}^{2}$, the ratio of similarity of $P^{(i)}$ to $P^{(i+2)}$ is equal to the following expressions: \[ \frac{1}{4}(\cot\alpha+\cot\gamma)\cdot(\cot\beta+\cot\delta)=\frac{1}{4}(\cot\alpha_{1}-\cot\beta_{2})\cdot(\cot\delta_{2}-\cot\gamma_{1}) \] \[ =\frac{1}{4}(\cot\delta_{1}-\cot\alpha_{2})\cdot(\cot\beta_{1}-\cot\gamma_{2}), \]
where the angles $\alpha_{i},\beta_{i},\gamma_{i},\delta_{i}$, $i=1,2,$ are the angles formed between sides and diagonals of a quadrilateral (see figure \ref{fig:angles between sides diagonals}) and $\alpha=\alpha_{1}+\alpha_{2}$, $\beta=\beta_{1}+\beta_{2}$, etc. Is there a similar expression for the ratio of similarity in $\mathbb{R}^{n}$?
\begin{figure}
\caption{The angles between the sides and diagonals of a quadrilateral.}
\label{fig:angles between sides diagonals}
\end{figure}
$ $
Emmanuel Tsukerman: Stanford University
\textit{E-mail address: emantsuk@stanford.edu}
\end{document} |
\begin{document}
\title[On the Game Semantics of the Probabilistic
$\mu$-Calculus]{On the Equivalence of Game and Denotational
Semantics for the Probabilistic $\mu$-Calculus\rsuper*}
\author{Matteo Mio} \address{LFCS, School of Informatics, University of Edinburgh} \email{M.Mio@sms.ed.ac.uk}
\keywords{Probabilistic Temporal Logic, Game Semantics, Two-player Stochastic Games, Modal $\mu$-calculus} \subjclass{D.2.4, F.3.0, F.4.1}
\titlecomment{{\lsuper*}An extended abstract of this paper appeared in \emph{Proceedings of the 7th Workshop on Fixed Points in Computer Science} (Luigi Santocanale ed.), 2010, pp. 83-87.}
\begin{abstract} The \emph{probabilistic} (or \emph{quantitative}) modal $\mu$-calculus is a fixed-point logic designed for expressing properties of probabilistic labeled transition systems (PLTS). Two semantics have been studied for this logic, both assigning to every process state a value in the interval $[0, 1]$ representing the probability that the property expressed by the formula holds at the state. One semantics is \emph{denotational} and the other is a \emph{game semantics}, specified in terms of two-player stochastic games. The two semantics have been proved to coincide on all finite PLTS's, but the equivalence of the two semantics on arbitrary models has been open in the literature. In this paper we prove that the equivalence indeed holds for arbitrary infinite models, and thus our result strengthens the fruitful connection between denotational and game semantics. Our proof adapts the \emph{unraveling} or \emph{unfolding method}, a general proof technique for proving results about parity games by induction on their complexity. \end{abstract}
\maketitle
\section{Introduction} The modal $\mu$-calculus L${\mu}$ \cite{Kozen83} is a very expressive logic obtained by extending classical propositional modal logic with least and greatest fixed point operators. The logic L${\mu}$ has been extensively studied as it provides a very powerful tool for expressing properties of labeled transition systems \cite{Stirling96}. Encodings of many important temporal logic such as LTL, CTL and CTL${}^{*}$ into L${\mu}$ \cite{Dam94}, provided evidence for the very high expressive power of the calculus. A precise expressivity result was given in \cite{JW96}, where the authors showed that every formula of monadic second order logic over transition systems which does not distinguish between bisimilar models is equivalent to a formula of L${\mu}$. The logic L${\mu}$ has a simple denotational interpretation \cite{Stirling96}. However it is often difficult to intuitively grasp the denotational meaning of a L${\mu}$ formula as the nesting of fixed point operators can induce very complicated properties. To alleviate this problem, another complementary semantics for the logic L${\mu}$, based on two-player (parity) games, has been studied in \cite{EJ91,Stirling96}. The two semantics have been proven to coincide and this allows us to pick the most convenient viewpoint when thinking about the logic L${\mu}$ \cite{EJ91,Stirling96}.
\begin{comment}One of the main properties of the games used to give semantics to L${\mu}$ formulas is the so called \emph{positional determinacy} which asserts that both Players can play optimally in a given game-configuration without knowing the history of the previously played moves. \end{comment}
In the last decade, a lot of research has focused on the study of reactive systems that exhibit some kind of probabilistic behavior, and logics for expressing their properties. Probabilistic labeled transition systems (PLTS's) \cite{S95} are a natural generalization of standard LTS's to the probabilistic scenario, as they allow both non-deterministic and (countable) probabilistic choices. A state $s$ in a PLTS can evolve by non-deterministically choosing one of the \emph{accessible} probability distributions $d$ (over process states) and then continuing its execution from the state $s^{\prime}$ with probability $d(s^{\prime})$. This combination of non-deterministic choices immediately followed by probabilistic ones allows the modeling of concurrency, non-determinism and probabilistic behaviors in a natural way. PLTS's can be visualized using graphs labeled with probabilities in a natural way \cite{HP2000,KNPV2009,Bartels02}. For example the PLTS depicted in Figure \ref{figura_intro_plts_1} models a system with two states $p$ and $q$. At the state $q$ no action can be performed. At the state $p$ the system can evolve non-deterministically either to the state $q$ with probability $1$ (when the transition $p\freccia{a}d_{2}$ is chosen) or to the state $p$ with probability $\frac{1}{3}$ and to the state $q$ with probability $\frac{2}{3}$ (when the transition $p\freccia{a}d_{1}$ is chosen). \begin{figure}
\caption{Example of PLTS}
\label{figura_intro_plts_1}
\end{figure}
The probabilistic modal $\mu$-calculus pL${\mu}$, introduced in \cite{MM97,HM96,AM04}, is a generalization of L${\mu}$ designed for expressing properties of PLTS's. This logic was originally named the \emph{quantitative} $\mu$-calculus, but since other $\mu$-calculus-like logics, designed for expressing properties of non-probabilistic systems, have been given the same name \cite{FGK2010}, we adopt the \emph{probabilistic} adjective which is meant to emphasize that the models considered are PLTS's. The syntax of the logic pL$\mu$ coincides with that of the standard $\mu$-calculus. The denotational semantics of pL${\mu}$ \cite{MM97,AM04} generalizes that of L$\mu$ by interpreting every formula $F$ as a map $\sem{F}\!:\!P\!\rightarrow\! [0,1]$, which assigns to each process state $p$ a \emph{degree of truth}. A key aspect of the denotational semantics of \cite{MM97,AM04} is the interpretation of conjunction, defined as ${\sem{F\wedge G}(p)}\!=\!{\min\{\sem{F}(p),\sem{G}(p)\}}$. This is not the only possible meaningful generalization of standard boolean conjunction to the real interval $[0,1]$. Indeed, different interpretations for the connectives of pL${\mu}$ (including the one of \cite{MM97, AM04}) have been proposed in \cite{HM96}, and there is no \emph{a priori} good reason to prefer one in favour of the others.
In \cite{MM07}, the authors introduce an alternative semantics for the logic pL${\mu}$. This semantics, given in term of two player stochastic (parity) games \cite{zielonka04}, is a natural generalization of the two player (non stochastic) game semantics for the logic L$\mu$ \cite{Stirling96}. As in L$\mu$ games, the two players play a game starting from a
configuration $\langle p, F\rangle$, where the objective for Player $1$ is
to produce a path of configurations along which the outermost
fixed point variable $X$ unfolded infinitely often is bound by a greatest
fixed point in $F$. On a configuration of the form $\langle p,G_{1}\vee G_{2}\rangle$,
Player $1$ chooses one of the disjuncts $G_{i}$, $i\!\in\!\{1,2\}$, by moving to the next
configuration $\langle p, G_{i}\rangle$. On a configuration $\langle p,G_{1}\wedge G_{2}\rangle$,
Player $2$ chooses a conjunct $G_{i}$ and moves to $\langle p, G_{i}\rangle$. On a configuration
$\langle p, \mu X.G\rangle$ or $\langle p, \nu X.G\rangle$ the game evolves to the configuration
$\langle p, G\rangle$, after which, from any subsequent configuration $\langle q, X\rangle$ the game
again evolves to $\langle q, G\rangle$. On configurations $\langle p, \diam{a}G\rangle$ and $\langle p, \quadrato{a}G\rangle$,
Player $1$ and Player $2$ respectively choose a transition ${p}\!\freccia{a}\!{d}$ in
the PLTS and move the game to $\langle d, G\rangle$. Here $d$ is a
probability distribution over process-states (this is the key difference between
pL$\mu$ and L$\mu$ games) and the configuration $\langle d,G\rangle$ belongs to Nature, the probabilistic
agent of the game, who moves on to the next configuration $\langle q,G\rangle$ with
probability $d(q)$. This game semantics offers a clear operational interpretation for the \emph{properties} associated to the formulas, explained in terms of the interactions between the controller (Player $1$) and a hostile environment (Player $2$) in the context of the stochastic choices occurring in the PLTS (Nature). The meaning of a pL$\mu$ formula $F$ at a state $p$ can be interpreted as expressing the (limit) probability for the controller to satisfy the \emph{property} specified by the formula.
In \cite{MM07}, the equivalence of the denotational and game semantics of pL$\mu$ on all \emph{finite} models was proven. The proof, which adapts the standard technique of \cite{Stirling96,EJ91} used to prove the equivalence of game and denotational semantics for L$\mu$, makes essential use of the fact that \emph{memoryless} and \emph{optimal} strategies exist in every \emph{finite} two-player stochastic game with \emph{parity} objectives \cite{zielonka04}. This property however, does not hold, in general, for two-player stochastic (parity) games of infinite size: optimal strategies may not exist, and an \emph{unbounded} amount of memory might be necessary even for playing $\epsilon$-optimally, i.e., for guaranteeing a probability of victory $\epsilon$-close to the optimal one. The general result, i.e., the equivalence of the game and denotational semantics of pL$\mu$ on arbitrary infinite models, is left open in \cite{MM07}.
In this paper we prove that the equivalence indeed holds for arbitrary infinite models, thus strengthening the connection between denotational and game semantics. This result, given that the pL$\mu$ games outlined above are natural generalization of standard L$\mu$ games, provides a justification for the denotational interpretation of the connectives of pL$\mu$ of \cite{MM97,AM04}. The generalization of the result of \cite{MM07} to arbitrary infinite models is of practical interest since infinite state systems often provide natural abstractions for, e.g., infinite memory, infinite data-sets, \emph{etcetera}. Our contribution consists in adapting a proof technique, called the \emph{unfolding method} \cite{Gradel2007,Santocanale2002}, which is adopted in \cite{FGK2010} to prove a similar result for a $\mu$-calculus-like logic designed to express quantitative properties of (non probabilistic) labeled transition systems. While this is not a difficult adaptation, the result is worth noting since the question has been open in literature since \cite{MM07}. Moreover the differences between the games considered in \cite{FGK2010} and pL$\mu$ two-player stochastic games, e.g., the fact that \emph{Markov chains} are the outcomes of the games rather than just infinite paths, make this result not immediate from \cite{FGK2010}.
The rest of the paper is organized as follows. In Section \ref{basic}, we introduce some mathematical definitions. In Section \ref{syntax} we define the syntax and the denotational semantics of the logic pL${\mu}$ as in \cite{MM97,AM04}. In Section \ref{game_definitions}, we define the class of two-player stochastic parity games that are going to be used to give game semantics to the logic. In Section \ref{game_semantics}, we define the game semantics of pL${\mu}$ in terms of two-player stochastic parity games as in \cite{MM07,AM04} and state the main theorem which asserts the equivalence of the denotational and game semantics for pL$\mu$. Lastly, in Section \ref{main_proof}, a detailed proof of the main theorem is given.
\section{Background definitions and notation}\label{basic}
\begin{definition}[Probability distributions] A (discrete) probability distribution $d$ over a set $X$ is a function $d\!:\!X\!\rightarrow\![0,1]$ such that $\sum_{x\in X}d(x)\!=\!1$. The \emph{support} of $d$, denoted by $supp(d)$, is defined as the (necessarily countable) set $\{x\!\in\! X\ | \ d(x)\!>\!0\}$. We denote with $\mathcal{D}(X)$ the set of probability distributions over $X$. We denote with $\delta_{x}$, for $x\!\in\! X$, the distribution over $X$ such that $supp(\delta_{x})\!=\!\{ x\}$, i.e., the unique distribution such that $\delta_{x}(x)=1$ and $\delta_{x}(y)=0$, for all $y\not=x$. \end{definition}
\begin{definition}[PLTS \cite{S95}]\label{PLTS} Given a countable set $L$ of labels, a \emph{probabilistic labeled transition system} is a pair $\langle P, \{ \freccia{a}\}_{a\in L}\rangle$, where $P$ is a set (of arbitrary cardinality) of process-states, and ${\freccia{a}}\! \subseteq\! P\times \mathcal{D}(P)$ for every $a\!\in\! L$. As usual we write $p\freccia{a}d$ if ${(p,d)}\!\in\!{\freccia{a}}$. \end{definition} The transition relation of a PLTS models the dynamics of the processes: $p\freccia{a}d$ means that the process $p$ can perform the atomic action $a\!\in\! L$ and then, with probability $d (q)$, behave like the process $q$. Probabilistic labeled transition systems are a natural generalization of labeled transition systems to the probabilistic scenario: a standard LTS can be modeled as a PLTS in which every reachable distribution is of the form $\delta_{p}$, for some $p\!\in\! P$.
Given a set $X$, we denote with $2^{X}$ the set of all subsets $Y\!\subseteq\! X$. Given a complete lattice $(L,\leq)$, we denote with $\bigsqcup\!:2^{L}\!\rightarrow\! L$ and $\bigsqcap\!:2^{L}\!\rightarrow\! L$ the operations of join and meet respectively. A function $f\!:\!L\!\rightarrow\!L$ is \emph{monotone} if $x\!\leq\! y$ implies $f(x)\!\leq\! f(y)$, for every $x,y\!\in\!L$. The set of fixed points of any monotone function $f\!:\!L\rightarrow\!L$, ordered by $\leq$, is a non-empty complete lattice \cite{Tarski1955}. We denote with $\mathrm{lfp}(f)$ and $\mathrm{gfp}(f)$ the least and the greatest fixed points of $f$, respectively.
\begin{theorem}[Knaster-Tarski \cite{Tarski1955}] Let $(L,\leq)$ be a complete lattice and $f\!:\!L\!\rightarrow\!L$ a monotone function. The following equalities hold: \begin{enumerate}[(1)] \item $\mathrm{lfp}(f) =\bigsqcup_{\alpha} f^{\alpha}$, where $f^{\alpha}=\bigsqcup_{\beta<\alpha}f(f^{\beta})$, \item $\mathrm{gfp}(f) = \bigsqcap_{\alpha} f_{\alpha}$, where $f_{\alpha}=\bigsqcap_{\beta<\alpha}f(f_{\beta})$, \end{enumerate} where the Greek letters $\alpha$ and $\beta$ range over ordinals. \end{theorem}
In the following we assume standard notions of basic topology and basic measure theory which can be found in, e.g., \cite{Kechris,Tao_measuretheory}.
\section{The Probabilistic Modal $\mu$-Calculus} \label{syntax} Given a countable set $\textit{Var}$ of propositional variables ranged over by the letters $X, Y , Z$ and a set of labels $L$ ranged over by the letters $a, b, c$, the formulas of the logic pL$\mu$ (in positive form) are defined by the following grammar: \begin{center}
$F, G ::= X \ | \ \diam{a}F\ | \ \quadrato{a}F \ | \ F\vee G\ | \ F\wedge G\ | \ \mu X.F\ | \ \nu X.F $ \end{center} As usual the operators $\nu X.F$ and $\mu X.F$ bind the variable $X$ in $F$. A formula is \emph{closed} if it has no \emph{free} variables.
\begin{definition}[Subformulas] We define the set $Sub(F)$ of \emph{subformulae} of $F$ by induction on the structure of $F$ as follows: \begin{center} \begin{tabular}{l l l l} $\ \ \ $ & $Sub( X )$ & $\bydef$ & $\{ X \}$\\ $\ \ \ $ & $Sub( F_{1}\wedge F_{2} )$ & $ \bydef$ & $ \{ F_{1}\wedge F_{2} \} \cup Sub(F_{1} ) \cup Sub(F_{2})$\\ $\ \ \ $ & $Sub( F_{1}\vee F_{2} )$ & $ \bydef$ & $ \{ F_{1}\vee F_{2} \} \cup Sub(F_{1} ) \cup Sub(F_{2})$\\ $\ \ \ $ & $Sub( \quadrato{a}F_{1} )$ & $ \bydef$ & $ \{ \quadrato{a}F_{1} \} \cup Sub(F_{1})$\\ $\ \ \ $ & $Sub( \diam{a}F_{1} )$ & $ \bydef$ & $ \{ \diam{a}F_{1} \} \cup Sub(F_{1})$\\ $\ \ \ $ & $Sub( \nu X.F_{1} )$ & $ \bydef$ & $ \{ \nu X.F_{1} \} \cup Sub(F_{1})$\\ $\ \ \ $ & $Sub( \mu X.F_{1} )$ & $ \bydef$ & $ \{ \mu X.F_{1} \} \cup Sub(F_{1})$ \end{tabular} \end{center} We say that $G$ is a subformula of $F$ if $G\!\in\! Sub(F)$. \end{definition} \begin{definition}[Normal Formula] \label{normal_form} We say that a formula $F$ is in \emph{normal form}, if every occurrence of a $\mu$ or $\nu$ binder binds a distinct variable, and no variable appears both free and bound. Every formula can be put in normal form by standard $\alpha$-renaming of the bound variables. \end{definition} For convenience we only consider, from now on, formulas $F$ in normal form. This allows, for instance, the definition below to be given as follows: \begin{definition}[Variable subsumption] Given a formula $F$, we say that $X$ \emph{subsumes} $Y$ in $F$, for $X\!\not =\! Y$, if $X$ and $Y$ are bound in $F$ by the sub-formulas $\star_{1} X.G$ and $\star_{2} Y.H$ respectively, and $\star_{2} Y.H\in Sub(G)$, for $\star_{1},\star_{2}\in\{\mu,\nu\}$.
\end{definition}
Given a PLTS $\langle P, \{ \freccia{a} \}_{a\in L}\rangle$, we denote with $[0,1]^{P}$ or $P\rightarrow[0,1]$ the complete lattice of functions from $P$ to the real interval $[0, 1]$ with the pointwise order. A function $\rho\!:\! \textit{Var}\!\rightarrow\! [0,1]^{P}$ is called a $[0,1]$-valued \emph{interpretation}, or just an \emph{interpretation}, of the variables. Given a function $f\!:\! P\!\rightarrow\! [0,1]$ we denote with $\rho[f /X]$ the interpretation that assigns $f$ to the variable $X$, and $\rho (Y)$ to all other variables $Y$.
\begin{definition}[\cite{MM07}]\label{denotationa_sem_def} The denotational semantics $\sem{F}_{\rho}\!:\! P\! \rightarrow\! [0,1]$ of the pL$\mu$ formula $F$ under the interpretation $\rho$, is defined by structural induction on $F$ as follows: \begin{center} \begin{tabular}{l l l} $\sem{X}_{\rho}(p)$ & $=$ & $\rho(X)(p)$\\ $\sem{G \vee H}_{\rho}(p)$ & $=$ & $\sem{ G}_{\rho}(p) \sqcup \sem{H}_{\rho}(p)$\\ $\sem{G \wedge H}_{\rho}(p)$ & $=$ & $\sem{ G}_{\rho}(p) \sqcap \sem{H}_{\rho}(p)$\\ $\sem{\diam{a}G}_{\rho}(p) $ & $=$ & $ \displaystyle \bigsqcup_{p\freccia{a}d} \big( \sum_{q\in supp(d)} d(q)\! \cdot\! \sem{G}_{\rho}(q) \big)$\\ $\sem{\quadrato{a}G}_{\rho}(p) $ & $=$ & $ \displaystyle \bigsqcap_{p\freccia{a}d} \big( \sum_{q\in supp(d)} d(q)\! \cdot\! \sem{G}_{\rho}(q) \big)$\\ $\sem{\mu X. G}_{\rho}(p) $ & $=$ & $ \mathrm{lfp}\big(\lambda f. ( \sem{G}_{\rho[ f/X]})\big)(p)$ \\ $\sem{\nu X. G}_{\rho}(p) $ & $=$ & $ \mathrm{gfp}\big( \lambda f. ( \sem{G}_{\rho[ f/X]})\big)(p)$ \end{tabular} \end{center} It is easy to verify that the interpretation assigned to every pL$\mu$ operator is monotone. Thus, the existence of the least and greatest fixed points is guaranteed by the Knaster-Tarski theorem. \end{definition}
The main novelty of \cite{MM07,HM96} in the definition of the semantics of pL$\mu$ resides in the interpretation of the modalities $\diam{a}$ and $\quadrato{a}$, for $a\!\in\! L$. The definitions resemble the corresponding ones for L$\mu$ (see, e.g., \cite{Stirling96}) but, crucially, in PLTS's transitions lead to probability distributions over processes, rather than processes. The most natural way to interpret the meaning of a formula $G$ at a probability distribution $d$ is to consider the \emph{expected probability} of the formula $G$ holding at a process $q$, associated by the random choice over processes induced with $d$, and this is formalized by the weighted sums in the definition above.
\begin{remark} As it is common practice when dealing with fixed point logics such as the modal $\mu$-calculus, we presented the syntax of pL$\mu$ in \emph{positive form}, i.e., without including a negation operator. This simplifies the presentation of the denotational semantics because all formulas in positive form are interpreted as monotone functions. A negation operator on (closed) pL$\mu$ formulas can be defined by induction on the structure of the formula, by exploiting the dualities between the connectives of the logic, in such a way that $\sem{\neg F}_{\rho}(p)= 1-\sem{F}_{\rho}(p)$, for all formulas $F$ and process states $p$. We omit the routine details. \end{remark}
\section{Two Player Stochastic Parity Games} \label{game_definitions} In this section we introduce the class of two-player stochastic games used to give game semantics to the logic pL$\mu$. This material is standard, and follows similar presentations, as in, e.g., \cite{zielonka04}.
A two-player turn-based stochastic game (or just a $2\frac{1}{2}$-player game) is played on some \emph{arena} ${\mathcal{A}}\!=\!{\langle (S,E), (S_{1},S_{2},S_{N} ),\pi\rangle}$ where $(S,E)$ is a directed graph with (possibly uncountable) set of states $S$ and transition relation $E\!\subseteq\! S\times S$. The sets $S_{1}$, $S_{2}$, $S_{N}$ form a partition of $S$ and $\pi\!:\!S_{N}\!\rightarrow\!\mathcal{D}(S)$ is called the probabilistic transition function. For every state $s\!\in\! S$, we denote with $E(s)$ the (possibly infinite) set $\{ s^{\prime} \ | \ (s,s^{\prime})\!\in\! E \}$ of successors of $s$. We require that for all $s\!\in\! S_{N}$, the equality $E(s)\!=\!supp(\pi(s))$ holds. This implies that the set of successors of a state $s\!\in\! S_{N}$ is non-empty and at most countable. We denote with $S_{t}$ the set of \emph{terminal states}, i.e., those $s\!\in\! S$ such that $E(s)\!=\!\emptyset$.
The game is played on the arena $\mathcal{A}$ by three players named Player $1$, Player $2$ and Nature, the probabilistic agent of the game. The states in $S_{1}$ are under the control of Player $1$, the states in $S_{2}$ are under the control of Player $2$, and the states in $S_{N}$ are probabilistic, i.e., under the control of Nature. At a state $s \!\in\! S_{1}$, if $s\not \in S_{t}$, Player $1$ chooses a successor from the set $E(s)$; if $s\!\in\! S_{t}$ the game ends. Similarly, at a state $s \!\in\! S_{2}$, if $s\not \in S_{t}$, Player $2$ chooses a successor from the set $E(s)$; if $s\!\in\! S_{t}$ the game ends. At a state $s\!\in\! S_{N}$, a successor state is probabilistically chosen according with the probability distribution $\pi(s)$. The outcome of a \emph{play} of the three players is a path in $\mathcal{A}$, either countably-infinite or finite (ending in a terminal state), which we call a \emph{completed} path.
\begin{definition} We denote with $\mathcal{P}^{\omega}$ and $\mathcal{P}^{<\omega}$ the sets of infinite and finite (non empty) paths in $\mathcal{A}$. Given a finite path $\vec{s}\!\in\!\mathcal{P}^{<\omega}$ we denote with $\mathit{last}(\vec{s})$ the last state $s\!\in\! S$ of $\vec{s}$. We write $\vec{s}\lhd \vec{t}$, with $\vec{s},\vec{t}\!\in\!\mathcal{P}^{<\omega}$, if $\vec{t}\!=\!\vec{s}.s$, for some $s\!\in\! S$, where the \emph{dot} symbol denotes the concatenation operator. We denote with $\mathcal{P}^{t}$ the set of finite paths ending in a terminal state, i.e., the set of paths $\vec{s}$ such that $\mathit{last}(\vec{s})\!\in\! S_{t}$. We denote with $\mathcal{P}_{1}^{<\omega}$, $\mathcal{P}_{2}^{<\omega}$ and $\mathcal{P}_{N}^{<\omega}$ the sets of finite paths $\vec{s}$ such that $last(\vec{s})\!\in \!S_{1}$, $last(\vec{s})\!\in\! S_{2}$ and $\mathit{last}(\vec{s})\!\in\! S_{N}$ respectively. We denote with $\mathcal{P}$ the set $\mathcal{P}^{\omega}\cup \mathcal{P}^{t}$ and we refer to this set as the set of \emph{completed} paths in $\mathcal{A}$. Given a finite path $\vec{s}\!\in\! \mathcal{P}^{<\omega}$, we denote with $O_{\vec{s}}$ the set of all completed paths having $\vec{s}$ as prefix. We consider the standard topology on $\mathcal{P}$ where the basis for the open sets is given by the clopen sets $O_{\vec{s}}$, for $\vec{s}\!\in\! \mathcal{P}^{<\omega}$. This is a $0$-dimensional space and, if $S$ is countable, it is a Polish space. We denote with $(\mathcal{P},\Omega)$ the Borel $\sigma$-algebra induced by the topology on $\mathcal{P}$, i.e., the smallest $\sigma$-algebra on $\mathcal{P}$ containing all the open sets. \end{definition}
To specify the reward assigned to Player $1$ when a given completed path $\vec{s}$ is the outcome of a play, we introduce the notion of payoff function.
\begin{definition} A \emph{(Borel) payoff function} for the arena $\mathcal{A}$ is a Borel-measurable function $\Phi\!:\!\mathcal{P}\!\rightarrow\! [0,1]$. \end{definition} The value $\Phi(\vec{s})$, for a given $\vec{s}\!\in\!\mathcal{P}$, should be understood as the reward assigned to Player $1$ when $\vec{s}$ is the outcome of a play in $\mathcal{A}$.
\begin{definition}[Two player stochastic game] A \emph{two-player turn-based stochastic game} (or just a $2\frac{1}{2}$-player game) is a pair $\langle \mathcal{A},\Phi\rangle$, where $\Phi$ is a payoff function for the arena $\mathcal{A}$. \end{definition} The goal of Player $1$ in the game $\langle \mathcal{A},\Phi\rangle$ is to maximize their payoff, while the \emph{dual} goal of Player $2$ is to minimize the payoff assigned to Player $1$.
When working with stochastic games, it is useful to look at the possible outcomes of a play up to the behavior of Nature. This is done by introducing the notion of Markov chain in $\mathcal{A}$, whose precise formulation is given by the following definitions.
\begin{definition}[Tree in $\mathcal{A}$] A \emph{tree} in the arena $\mathcal{A}$ is a collection $T\subseteq \mathcal{P}^{<\omega}$ of finite paths in $\mathcal{A}$, such that \begin{enumerate}[(1)] \item $T$ is down-closed: if $\vec{s}\!\in\! T$ and $\vec{t}$ is a prefix of $\vec{s}$, then $\vec{t}\!\in\!T$. \item $T$ has a root: there exists exactly one finite path $\vec{s}\!=(s_{0})$ of length one in $T$. The state $s_{0}$, denoted by $root(T)$, is called the root of the tree $T$. \end{enumerate}
The set of \emph{children} of the node $\vec{s}$ in $T$ is the set $\{ \vec{t}\!\in\! T \ | \ \vec{t}=\vec{s}.s^{\prime} \ \wedge \ s^{\prime}\!\in\!S \}$. We consider the nodes $\vec{s}$ of $T$ as labeled by the $\mathit{last}$ function. \end{definition}
\begin{definition}[Uniquely and fully branching nodes of a tree] A node $\vec{s}$ in a tree $T$, is said to be \emph{uniquely branching} in $T$ if either $last(\vec{s})\! \in\! S_{t}$ or $\vec{s}$ has a unique child in $T$. Similarly, $\vec{s}$ is \emph{fully branching} in $T$ if, for every $s\! \in\! E(last(\vec{s}))$, it holds that $\vec{s}.s\!\in\!T$. \end{definition}
\begin{definition}[Markov chain in $\mathcal{A}$] A \emph{Markov chain} in $\mathcal{A}$ is a tree $M$ such that for every node $\vec{s} \!\in\! M$, the following conditions hold: \begin{enumerate}[(1)] \item If $last(\vec{s})\!\in\! S_{1}\cup S_{2}$ then $\vec{s}$ branches uniquely in $M$. \item If $last(\vec{s})\!\in\! S_{N}$ then $\vec{s}$ branches fully in $M$. \end{enumerate} \end{definition} Note that, since the set of $E$-successors of every state $s\!\in\!S_{N}$ is at most countable, every Markov chain in $\mathcal{A}$ is a \emph{countably branching} tree with a countable set of nodes.
\begin{definition}[Probability measure $\mathbb{P}_{M}$] \label{measure_definition} Every Markov chain $M$ determines a probability assignment $\mathbb{P}_{M}(O_{\vec{s}})$ to every basic open set $O_{\vec{s}}\!\subseteq\!\mathcal{P}$, for $\vec{s}$ a finite path $\vec{s}\!=\!(s_{0},s_{1}, ...,s_{n})$ with $n\!\in\!\mathbb{N}$, defined as follows:
\begin{center}
$\mathbb{P}_{M}(O_{\vec{s}}) \bydef \left\{ \begin{array}{l l} \displaystyle \prod \{ \pi (s_{i})(s_{i+1}) \ | \ 0\!\leq\! i \!<\! n \ \wedge \ s_{i}\!\in\! S_{N}\}\ \ & \text{if } \vec{s} \in M\\
0 & \text{otherwise}
\end{array} \right.$
\end{center} In other words, $\mathbb{P}_{M}$ assigns to the basic open set $O_{\vec{s}}\!\subseteq\!\mathcal{P}$, i.e, the set of all completed paths having $\vec{s}$ as prefix, value $0$ if $\vec{s}$ is not a path in $M$, and the product of all probabilities labeling the probabilistic steps in $\vec{s}$, otherwise. Note that if there are no probabilistic steps in $\vec{s}$, then $\mathbb{P}_{M}$ assigns to $O_{\vec{s}}$ probability $1$, which is the value of the empty product. The assignment $\mathbb{P}_{M}$ extends to a unique probability measure on the Borel $\sigma$-algebra ($\mathcal{P},\Omega$) \cite{Tao_measuretheory}, which we also denote with $\mathbb{P}_{M}$. \end{definition}
Given the previous definitions we can define the \emph{expected reward} of Player $1$ when a given Markov chain $M$ is the result (up to the behavior of Nature) of a play in the two-player stochastic game $\langle \mathcal{A},\Phi\rangle$. \begin{definition}[Expected reward of $M$] Let $\langle \mathcal{A},\Phi\rangle$ be a $2\frac{1}{2}$-player game. We define the \emph{expected reward} of a Markov chain $M$ in $\mathcal{A}$, denoted by $E(M)$, as follows: \begin{center} $ E(M)=\displaystyle \int_{\mathcal{P} } \Phi \, \, d\ \mathbb{P}_{M}$. \end{center} This is a good definition because $\Phi$ is assumed to be Borel measurable, thus integrable. \end{definition}
As usual in game theory, players' moves are determined by strategies.
\begin{definition} An \emph{unbounded memory deterministic strategy} (or just a strategy) $\sigma_{1}$ for Player $1$ in $\mathcal{A}$ is defined as a function $\sigma_{1}\!:\!\mathcal{P}_{1}^{<\omega}\!\rightarrow\! S\cup \{ \bullet \}$ such that $\sigma_{1}(\vec{s})\!\in\! E(last(\vec{s}))$ if $E(last(\vec{s}))\!\not =\! \emptyset$ and $\sigma_{1}(\vec{s})\!=\! \bullet$ otherwise. Similarly a strategy $\sigma_{2}$ for Player $2$ is defined as a function $\sigma_{2}\!:\!\mathcal{P}_{2}^{<\omega}\!\rightarrow\! S\cup \{ \bullet \}$. We say that a strategy $\sigma_{1}$ for Player $1$ is \emph{memoryless}, if there exists a function $f \!:\! S_{1}\!\rightarrow\! S\cup \{ \bullet \}$ such that for every $\vec{s}\!\in\! \mathcal{P}_{1}^{<\omega}$, the equality $\sigma_{1}(\vec{s})\!=\! f(last(\vec{s}))$ holds. Similarly, a strategy $\sigma_{2}$ for Player $2$ is memoryless if there exists a function $f \!:\! S_{2}\!\rightarrow\! S\cup \{ \bullet \}$ such that for every $\vec{s}\!\in\! \mathcal{P}_{2}^{<\omega}$, the equality $\sigma_{2}(\vec{s})\!=\! f(last(\vec{s}))$ holds. In other words a strategy is memoryless if its decision on any history $\vec{s}$ only depends on the last state $last(\vec{s})$ of $\vec{s}$. A pair $\langle \sigma_{1},\sigma_{2}\rangle$ of strategies, one for each player, is called a \emph{strategy profile} and determines the behaviors of both players. \end{definition}
\begin{definition}[$M^{s_{0}}_{\sigma_{1},\sigma_{2}}$] Given an initial state $s_{0}\!\in\! S$ and a strategy profile $\langle \sigma_{1},\sigma_{2}\rangle$, a unique Markov chain $M^{s_{0}}_{\sigma_{1},\sigma_{2}}$ is determined: \begin{enumerate}[(1)] \item the root of $M^{s_{0}}_{\sigma_{1},\sigma_{2}}$ is labeled with $s_{0}$, \item for every $\vec{s}\!\in\! M^{s_{0}}_{\sigma_{1},\sigma_{2}}$, if $last(\vec{s})\!=\!s$ with $s\!\in\! S_{1}$ not a terminal state, then the unique child of $\vec{s}$ in $M^{s_{0}}_{\sigma_{1},\sigma_{2}}$ is $\vec{s}.\big(\sigma_{1}(\vec{s})\big)$, \item for every $\vec{s}\!\in\! M^{s_{0}}_{\sigma_{1},\sigma_{2}}$, if $last(\vec{s})\!=\!s$ with $s\!\in\! S_{2}$ not a terminal state, then the unique child of $\vec{s}$ in $M^{s_{0}}_{\sigma_{1},\sigma_{2}}$ is $\vec{s}.\big(\sigma_{2}(\vec{s})\big)$. \end{enumerate} We denote with $\mathbb{P}^{s_{0}}_{\sigma_{1},\sigma_{2}}$ the probability measure $\mathbb{P}_{M^{s_{0}}_{\sigma_{1},\sigma_{2}}}$ over $\langle \mathcal{P},\Omega\rangle$ induced by the Markov chain $M^{s_{0}}_{\sigma_{1},\sigma_{2}}$. \end{definition}
\begin{definition}\label{values_of_the_game} Given a $2\frac{1}{2}$-player game $\mathcal{G}\!=\!\langle \mathcal{A},\Phi \rangle$ and an initial state $s\!\in\!S$, we define the \emph{lower value} and \emph{upper value} of the game $\mathcal{G}$ at $s$, denoted by $Val_{\downarrow}(\mathcal{G})(s)$ and $Val_{\uparrow}(\mathcal{G})(s)$ respectively, as follows: \begin{center} $Val_{\downarrow}(\mathcal{G})(s) = \bigsqcup_{\sigma_{1}} \bigsqcap_{\sigma_{2}} E( M^{s}_{\sigma_{1},\sigma_{2}}) \ \ \ \ \ Val_{\uparrow}(\mathcal{G})(s) = \bigsqcap_{\sigma_{2}} \bigsqcup_{\sigma_{1}}E(M^{s}_{\sigma_{1},\sigma_{2}})$. \end{center} \end{definition}
$Val_{\downarrow}(\mathcal{G})(s)$ represents the (limit) expected reward that Player $1$ can get, when the game begins at $s$, by choosing their strategy $\sigma_{1}$ first and then letting Player $2$ pick an appropriate counter strategy $\sigma_{2}$. Similarly $Val_{\uparrow}(\mathcal{G})(s)$ represents the (limit) expected reward that Player $1$ can get, when the game begins at $s$, by first letting Player $2$ choose a strategy $\sigma_{2}$ and then picking an appropriate counter strategy $\sigma_{1}$. Clearly $Val_{\downarrow}(\mathcal{G})(s)\leq Val_{\uparrow}(\mathcal{G})(s)$ for every $s\in S$.
\begin{definition}[$\epsilon$-optimal strategies]\label{eps_strategy} Given a $2\frac{1}{2}$-player game $\mathcal{G}\!=\!\langle \mathcal{A}, \Phi \rangle$, a strategy $\sigma_{1}$ for Player $1$ is called \emph{$\epsilon$-optimal}, for some $\epsilon\!\in\![0,1]$, if the following inequality holds: \begin{center}$\bigsqcap_{\sigma_{2}} E( M^{s}_{\sigma_{1},\sigma_{2}}) > Val_{\downarrow}(\mathcal{G})(s)- \epsilon$ \end{center} for every game state $s$. Similarly a strategy $\sigma_{2}$ for Player $2$ is called \emph{$\epsilon$-optimal}, if the following inequality holds: \begin{center} $\bigsqcup_{\sigma_{1}} E(M^{s}_{\sigma_{1},\sigma_{2}}) < Val_{\uparrow}(\mathcal{G})(s)+ \epsilon$ \end{center} for every game state $s$. We refer to a strategy as \emph{optimal} if it is $0$-optimal. \end{definition} Clearly, for every $\epsilon\! > \! 0$, there exist $\epsilon$-optimal strategies for Player $1$ and Player $2$. However, in general, there could be no optimal strategies, as stated in Proposition \ref{no-optimal} below.
\begin{definition} Given a $2\frac{1}{2}$-player game $\mathcal{G}\!=\!\langle \mathcal{A}, \Phi \rangle$, and an initial game state $s$, we say that the game $\mathcal{G}$ is \emph{determined at $s$} if $Val_{\downarrow}(\mathcal{G})(s)= Val_{\uparrow}(\mathcal{G})(s)$. We say that the game $\mathcal{G}$ is \emph{determined} if it is determined at every game state $s$. \end{definition}
The following fundamental result is due to Donald\ A.\ Martin \cite{Martin98}.
\begin{theorem}[\cite{Martin98,MS98}]\label{blackwell_determinacy} \label{determinacy} Every $2\frac{1}{2}$-player game $\mathcal{G}\!=\!\langle \mathcal{A},\Phi\rangle$ such that every state $s$ has at most countably many successor states, is determined. \end{theorem}
In this paper we are interested in $2\frac{1}{2}$-player \emph{parity} games, which are $2\frac{1}{2}$-player games $\langle \mathcal{A},\Phi\rangle$ whose payoff function $\Phi$ is induced by a \emph{parity structure}. \begin{definition}
Given a $2\frac{1}{2}$-player arena $\mathcal{A}\!=\! \langle (S,E),(S_{1},S_{2},S_{N}),\pi\rangle$, a \emph{parity structure} for $\mathcal{A}$ is a pair $\langle \Pr,\textnormal{R}\rangle$ where $\Pr$ is called the \emph{priority assignment} and $\textnormal{R}$ is called the \emph{terminal reward assignment}. The priority assignment $\Pr$ is a function $\Pr\!:\! S\!\rightarrow\! \mathbb{N}$, such that the set $\Pr(S)\!=\!\{ n \ | \ \exists s\!\in\! S.\Pr(s)\!=\!n\}$ is finite. In other words $\Pr$ assigns to each state $s\!\in\!S$ a natural number, also referred to as a \emph{priority}, taken from a finite pool of options $\{ n_{0},\dots, n_{k}\}\!=\! \Pr(S)$. We denote with $\max(\Pr)$, $\min(\Pr)$ and $|\Pr|$ the natural numbers $\max\{ n_{0},\dots, n_{k} \}$, $\min\{ n_{0},\dots, n_{k}\}$ and $|\{ n_{0},\dots, n_{k}\} |$ respectively. The terminal reward assignment $\textnormal{R}$ is a function $\textnormal{R}\! :\!S_{t}\!\rightarrow\! [0,1]$ assigning a value in the real interval $[0,1]$ to each terminal state $s\!\in\! S_{t}$. \end{definition}
\begin{definition}\label{WPR} Let $\mathcal{A}\!=\!\langle (S,E),(S_{1},S_{2},S_{N}),\pi\rangle$ be a $2\frac{1}{2}$-player arena and $\langle\Pr,\textnormal{R}\rangle$ a parity structure for it. The payoff function $\Phi_{\langle\Pr,\textnormal{R}\rangle}$ induced by $\langle\Pr,\textnormal{R}\rangle$ is defined on every completed path $\vec{s}\!\in\!\mathcal{P}$ as follows: \begin{enumerate}[(1)] \item if $\vec{s}$ is a finite path, then $\Phi_{\langle\Pr,\textnormal{R}\rangle}(\vec{s})= \textnormal{R}\big(\textit{last}(\vec{s})\big)$, \item if $\vec{s}$ is infinite, i.e., $\vec{s}\!=\! \{s_{i}\}_{i\in\mathbb{N}}$, then $\Phi_{\langle\Pr,\textnormal{R}\rangle}(\vec{s})\!=\!1$ if the greatest priority assigned to infinitely many states $s_{i}$ in $\vec{s}$ is even, and $\Phi_{\langle\Pr,\textnormal{R}\rangle}(\vec{s})=0$ otherwise. \end{enumerate} \end{definition} The payoff $\Phi_{\langle \Pr, \textnormal{R} \rangle}$ is Borel-measurable for every parity structure $\langle \Pr, \textnormal{R} \rangle$ \cite{zielonka04}.
\begin{definition} A $2\frac{1}{2}$-player \emph{parity} game is a $2\frac{1}{2}$-player game $\mathcal{G}\!=\!\langle \mathcal{A},\Phi\rangle$ where $\Phi\!=\!\Phi_{\langle \Pr, \textnormal{R} \rangle}$ for some priority structure $\langle \Pr, \textnormal{R} \rangle$ on $\mathcal{A}$. \end{definition}
\begin{comment} We now state an important property (often called \emph{prefix independence}) of parity payoff functions:
\begin{lemma}\label{prefix_independence} Let $\mathcal{G}=\langle \mathcal{A},\Phi\rangle$ be a $2\frac{1}{2}$-player parity game. For all completed paths $\vec{s}$ and $\vec{t}$ in $\mathcal{A}$, if $\vec{s}\!=\!\vec{r}.\vec{t}$ for some finite path $\vec{r}$ in $\mathcal{A}$, then the equality $\Phi(\vec{s})\!=\!\Phi(\vec{t})$ holds. \end{lemma} \end{comment} The following fact about $2\frac{1}{2}$-player parity games is the main obstacle one encounters when trying to extend the proof technique adopted in \cite{MM07}, for proving the equivalence of the denotational and game semantics of pL$\mu$ under finite models, to arbitrary models.
\begin{proposition}[\cite{zielonka04}]\label{no-optimal} There exists a $2\frac{1}{2}$-player parity game (with countably infinite state space $S$), such that no optimal strategy exists for either player. Moreover, no memoryless $\epsilon$-optimal strategy exists for either player. \end{proposition}
Due to this technical issue, we shall prove the desired equivalence by a different proof technique inspired by the \emph{unfolding technique} of \cite{Gradel2007,Santocanale2002}. The following simple proposition will be used in Section \ref{main_proof}.
\begin{proposition}\label{fixed_point_proposition_1} Let $\mathcal{G}\!=\!\langle \mathcal{A}, \Phi_{\langle \Pr, \textnormal{R} \rangle}\rangle$ be a two player stochastic parity game with arena $\mathcal{A}\!=\! \langle (S,E),(S_{1},S_{2},S_{N}),\pi\rangle$. The functions $Val_{\downarrow}(\mathcal{G})$ and $Val_{\uparrow}(\mathcal{G})$, of type $S\rightarrow[0,1]$, are fixed points of the functional $\mathcal{F}\!:\![0,1]^{S}\!\rightarrow\![0,1]^{S}$ defined as follows: \begin{center} $\displaystyle \mathcal{F}(f)(s)= \left\{ \begin{array}{l l} \textnormal{R}(s) & \text{if } E(s)\!=\!\emptyset\text{, i.e., if } s \text{ is a terminal state} \\
\displaystyle \bigsqcup_{t\in E(s)}f(t) & \text{if } s\!\in\! S_{1}\\
\displaystyle \bigsqcap_{t\in E(s)}f(t) & \text{if } s\!\in\! S_{2}\\
\displaystyle \sum_{t\in E(s)} \pi(s)(t) \cdot f(t) & \text{if } s\!\in\! S_{N}\\
\end{array} \right.$
\end{center} \begin{proof} The result easily follows from the fact, immediate to verify, that given any path $\vec{s}.\vec{t}\!\in\!\mathcal{P}_{\!\mathcal{A}}$, the equality \begin{equation}\label{equation_prefix_independent}
\Phi_{\langle \Pr, \textnormal{R} \rangle}(\vec{s}.\vec{t}) = \Phi_{\langle \Pr, \textnormal{R} \rangle}(\vec{t})
\end{equation} holds, i.e., the payoff assigned by $ \Phi_{\langle \Pr, \textnormal{R} \rangle}$ to a path in $\mathcal{A}$ does not depend on any finite prefix of the path. We just prove that, for every $s\!\in\! S_{N}$, the equality \begin{equation}\label{proposition1_eq1} Val_{\downarrow}(\mathcal{G})(s)=\mathcal{F}\big( Val_{\downarrow}(\mathcal{G})\big)(s) \end{equation} holds. The other cases can be proved in a similar way.
Let $E(s)\!=\!\{t_{i}\}_{i\in I}$, for some (necessarily countable) index set $I$. By definition of $Val_{\downarrow}(\mathcal{G})$, we need to prove that the equality \begin{equation}\label{proposition1_eq2}
\displaystyle \bigsqcup_{\sigma_{1}} \bigsqcap_{\sigma_{2}} E(M^{s}_{\sigma_{1},\sigma_{2}}) = \sum_{i\in I} \pi(s)(t_{i}) \cdot \big( \bigsqcup_{\sigma_{1}} \bigsqcap_{\sigma_{2}} E(M^{t_{i}}_{\sigma_{1},\sigma_{2}}) \big)
\end{equation} holds. This is done by proving the two inequalities ($\leq$) and ($\geq$) of Equation \ref{proposition1_eq2} separately. We just show how to prove the inequality ($\leq$) as the other one can be proved in a similar way. Assume, by contradiction, that the left-hand expression of Equation \ref{proposition1_eq2} is strictly greater than the right-hand expression. This means that there exists a strategy $\sigma_{1}$ for Player $1$ such that \begin{equation}\label{proposition1_eq3}
\displaystyle \bigsqcap_{\sigma_{2}} E(M^{s}_{\sigma_{1},\sigma_{2}}) > \sum_{i\in I} \pi(s)(t_{i})\cdot \big( \bigsqcup_{\sigma^{i}_{1}} \bigsqcap_{\sigma^{i}_{2}} E(M^{t_{i}}_{\sigma^{i}_{1},\sigma^{i}_{2}}) \big)
\end{equation}
holds. Since $s\!\in\! S_{N}$, i.e., the state $s$ is under the control of Nature, no action is made by either Player $1$ or Player $2$ at $s$, because the game progresses to some state $t\!\in\!E(s)$ according to the random choice of Nature. Let us define, for every $i\!\in\! I$, the strategy $\tau^{i}_{1}$ for Player $1$ as follows: $\tau^{i}_{1}(\vec{s})=\sigma_{1}(s.\vec{s})$, for paths $\vec{s}$ starting at $t_{i}$. We do not need to describe the behavior of $\tau^{i}_{1}$ at paths of a different kind. Informally the strategy $\tau^{i}_{1}$, when the game starts at $t_{i}$, acts as the strategy $\sigma_{1}$ when the game starts at $s$ and Nature moves from $s$ to $t_{i}$. The assumption of Equation \ref{proposition1_eq3} clearly implies that the following inequality
\begin{equation}\label{proposition1_eq4}
\displaystyle \bigsqcap_{\sigma_{2}} E(M^{s}_{\sigma_{1},\sigma_{2}}) > \sum_{i\in I} \pi(s)(t_{i})\cdot \big( \bigsqcap_{\sigma^{i}_{2}} E(M^{t_{i}}_{\tau^{i}_{1},\sigma^{i}_{2}}) \big)
\end{equation}
holds. This in turn implies, since the set $I$ is countable, that there exist strategies $\{ \tau^{i}_{2}\}_{i\in I}$, such that the inequality \begin{equation}\label{proposition1_eq5}
\displaystyle \bigsqcap_{\sigma_{2}} E(M^{s}_{\sigma_{1},\sigma_{2}}) > \sum_{i\in I} \pi(s)(t_{i})\cdot E(M^{t_{i}}_{\tau^{i}_{1},\tau^{i}_{2}})
\end{equation}
holds. Let us define the strategy $\sigma_{2}$ for Player $2$ as follows: $\sigma_{2}(s.\vec{t})=\tau^{i}_{2}(\vec{t})$, for paths $\vec{t}$ starting at $t_{i}$, for $i\!\in\! I$. We do not need to describe the behavior of $\sigma_{2}$ at paths of a different kind, i.e., on paths not starting at $s$. Informally the strategy $\sigma_{2}$, when the game starts at $s$ and Nature randomly chooses to move to the state $t_{i}$, for $i\!\in\! I$, plays the rest of the game as the strategy $\tau^{i}_{2}$ would when the game starts at $t_{i}$. It then follows from Equation \ref{proposition1_eq5} that the following inequality
\begin{equation}\label{proposition1_eq6}
\displaystyle E(M^{s}_{\sigma_{1},\sigma_{2}}) > \sum_{i\in I} \pi(s)(t_{i})\cdot E(M^{t_{i}}_{\tau^{i}_{1},\tau^{i}_{2}})
\end{equation}
holds.
We have just proved how the truth of Equation \ref{proposition1_eq6} follows from the assumption of Equation \ref{proposition1_eq3}. We now derive the desired contradiction, by proving that Equation \ref{proposition1_eq6} does not hold because its two expressions are equal.
It follows immediately from the definition of the strategies $\{\tau^{i}_{1}\}_{i\in I}$ and $\sigma_{2}$, that the Markov chain $M^{s}_{\sigma_{1},\sigma_{2}}$ can be depicted as in Figure \ref{fig1_game_section},
\begin{figure}\label{fig1_game_section}
\end{figure} where the letters $i,j$ range over $I$, the value labeling the edge connecting $s$ with $t_{j}$ stands for $\pi(s)(t_{i})$ and highlights the fact that that edge is chosen by Nature with probability $\lambda_{i}$, and the subtree of $M^{s}_{\sigma_{1},\sigma_{2}}$ rooted at the state $t_{i}$ is precisely the Markov chain $M^{t_{i}}_{\tau^{i}_{1},\tau^{i}_{2}}$ induced by the strategies $\tau^{i}_{1}$ and $\tau^{i}_{2}$ at the starting state $t_{i}$.
It follows from Definition \ref{measure_definition} that the probability measure $\mathbb{P}_{M^{s}_{\sigma_{1},\sigma_{2}}}$ induced by $M^{s}_{\sigma_{1},\sigma_{2}}$ assigns probability $0$ to the set of paths not starting at the state $s$. It then follows that the equality \begin{center}
$ E(M^{s}_{\sigma_{1},\sigma_{2}})\bydef\displaystyle \int_{\mathcal{P}_{\!\mathcal{A}} } \Phi_{\langle \Pr, \textnormal{R} \rangle} \, \, d\ \mathbb{P}^{s}_{\sigma_{1},\sigma_{2}} = \displaystyle \sum_{i\in I} \big( \int_{O_{s.t_{i}} }\!\! \Phi_{\langle \Pr, \textnormal{R} \rangle} \, \, d\ \mathbb{P}^{s}_{\sigma_{1},\sigma_{2}}\big). $ \end{center} holds, where $O_{s.t_{i}}$ denotes the open set of paths having $s$ and $t_{i}$ as first and second state respectively. Furthermore, again by Definition \ref{measure_definition}, the probability measure $\mathbb{P}_{M^{s}_{\sigma_{1},\sigma_{2}}}$ assigns probability $\pi(s)(t_{i})$ to the set $O_{s.t_{i}}$. From this observation, the previous considerations on the structure of $M^{s}_{\sigma_{1},\sigma_{2}}$ and its sub-Markov chains $M^{t_{i}}_{\tau^{i}_{1},\tau^{i}_{2}}$ and Equation \ref{equation_prefix_independent}, it follows immediately that the equality \begin{center} $\displaystyle \frac{1}{\pi(s)(t_{i})} \cdot \big( \int_{O_{s.t_{i}} }\!\! \Phi_{\langle \Pr, \textnormal{R} \rangle} \, \, d\ \mathbb{P}^{s}_{\sigma_{1},\sigma_{2}}\big) = \int_{\mathcal{P}} \Phi_{\langle \Pr, \textnormal{R} \rangle} \, \, d\ \mathbb{P}^{t_{i}}_{\tau^{i}_{1},\tau^{i}_{2}} $ \end{center} holds, where $\mathbb{P}^{t_{i}}_{\tau^{i}_{1},\tau^{i}_{2}}$ is the probability measure over paths induced by the Markov chain $M^{t_{i}}_{\tau^{i}_{1},\tau^{i}_{2}}$. This concludes the proof. \end{proof} \end{proposition}
\section{Stochastic parity games for pL$\mu$} \label{game_semantics} In this section we define the \emph{game semantics} of the probabilistic modal $\mu$-calculus, in terms of $2\frac{1}{2}$-player parity games.
Given a PLTS $\langle P, \{ \freccia{a} \}_{a\in L}\rangle$, a pL$\mu$ formula $F$ and an interpretation $\rho$ of the variables, we denote with $\mathcal{G}^{F}_{\rho}$ the parity game $\langle \mathcal{A},\Phi_{\langle\Pr,\textnormal{R}\rangle}\rangle$ formally defined as follows. The state space of the arena $\mathcal{A}\!=\!\langle (S,E), \{ S_{1},S_{2},S_{N} \},\pi\rangle$, is the set $S\!=\! (P\times Sub(F)) \cup (\mathcal{D}(P) \times Sub(F))$ of pairs of states $p\!\in\! P$ or probability distributions $d\!\in\!\mathcal{D}(P)$, and subformulas $G\!\in\! Sub(F)$. The transition relation $E$ is defined as $E (\langle d,G \rangle)\!=\! \{ \langle p, G \rangle \ | \ p\!\in\! supp(d) \}$ for every probability distribution $d\!\in\! \mathcal{D}(P)$ and $E(\langle p, G\rangle)$, for $p\!\in\! P$, is defined by case analysis on the outermost connective of $G$ as follows: \begin{enumerate}[(1)] \item if $G= X$, with $X$ free in $F$, then $E(\langle p, G\rangle)\!=\!\emptyset$. \item if $G=X$, with $X$ bound in $F$ by the subformula $\star X. H$, with $\star\! \in\! \{ \mu , \nu \}$, then $E(\langle p, G\rangle)\!=\! \{\langle p, H\rangle\}$. \item if $G\!=\!\star X.H$, with $\star\! \in\! \{ \mu, \nu \}$, then $E(\langle p,G\rangle)\!=\! \{\langle p, H\rangle\}$.
\item if $G\!=\!\diam{a}H$ or $G\!=\!\quadrato{a}H$ then $E(\langle p, G\rangle) \!=\! \{ \langle d, H \rangle\ |\ p\freccia{a}d \}$. \item if $G\!=\! H\vee H^{\prime}$ or $G\!=\! H\wedge H^{\prime}$ then $E(\langle p, G\rangle) \!=\!\{ \langle p, H \rangle, \langle p,H^{\prime }\rangle \}$ \end{enumerate}
The partition $\{ S_{1},S_{2},S_{N} \}$ is defined as follows: every state $\langle p, G \rangle$ with $G$'s main connective in $\{ \diam{a}, \vee, \mu X\}$ or with $G\!=\!X$ where $X$ is a $\mu$-variable, is in $S_{1}$. Dually every state $\langle p, G\rangle$ with $G$'s main connective in $\{ \quadrato{a}, \wedge, \nu X\}$ or with $G\!=\!X$ where $X$ is a $\nu$-variable, is in $S_{2}$. Every state $\langle d, G \rangle$ is in $S_{N}$. Finally, the terminal states $\langle p, X\rangle$, with $X$ free in $F$, are in $S_{1}$ by convention. The probability transition function $\pi\!:\! S_{N}\!\rightarrow\! \mathcal{D}(S)$ is defined as $\pi (\langle d, G\rangle) (\langle p, G\rangle)\!=\!d(p)$. The priority assignment $\Pr$ is defined as usual in $\mu$-calculus games (see, e.g., \cite{FGK2010}). The priority assigned to the states $\langle p, X \rangle$, with $X$ a $\mu$-variable, is a positive odd number; dually the priority assigned to the states $\langle p, X \rangle$, with $X$ a $\nu$-variable, is a positive even number. Moreover $\Pr(\langle p, X\rangle) > \Pr (\langle p^{\prime}, X^{\prime}\rangle )$ if $X$ subsumes $X^{\prime}$ in $F$. All other states get priority $0$. The terminal reward assignment $\textnormal{R}$ is defined as $\textnormal{R}(\langle p, X\rangle)=\rho(X)(p)$ for every terminal state $\langle p, X\rangle$ with $X$ free in $F$. All other terminal states in $\mathcal{G}^{F}_{\rho}$ are either of the form $\langle p, \diam{a}H\rangle$ or $\langle p, \quadrato{a}H\rangle$, with $\{ d \ | p\freccia{a}d\}\!=\!\emptyset$. The reward assignment $\textnormal{R}$ is defined on these terminal states as follows: $\textnormal{R}(\langle p, \diam{a}H\rangle)\!=\!0$ and $\textnormal{R}(\langle p, \quadrato{a}H\rangle)\!=\!1$. This implements the policy that a player loses if they get stuck at these kind of states.
Observe that from the above definitions, in general, a pL$\mu$ game state $s\!\in\! {S_{1}\!\cup\! S_{2}}$ can have uncountably many $E$-successors. However the set $ supp(\pi(s))$ of $E$-successors of any state $s\!\in\! S_{N}$ is at most countable.
We are now ready to state our main theorem which asserts that every pL$\mu$-game is determined, and that the \emph{value} of the game at each state $\langle p, F\rangle$ coincides with the denotational interpretation $\sem{F}_{\rho}$ at $p$.
\begin{theorem} \label{teorema} Given an arbitrary PLTS $\langle P, \{ \freccia{a} \}_{a\in L}\rangle$, for every pL$\mu$ formula $F$, interpretation $\rho$ and process-state $p\!\in \!P$, the following equalities hold: \begin{center} $\sem{F}_{\rho}(p) = Val_{\downarrow}(\mathcal{G}^{F}_{\rho})(\langle p, F\rangle) = Val_{\uparrow}(\mathcal{G}^{F}_{\rho})(\langle p, F\rangle)$. \end{center} In particular pL$\mu$ games are determined. \end{theorem}
The proof of Theorem~\ref{teorema} is carried out in full detail in Section~\ref{main_proof}.
\section{Proof of Theorem \ref{teorema}}\label{main_proof}
As anticipated in the introduction, the main difficulty in proving Theorem \ref{teorema} is that in general, as stated in Proposition \ref{no-optimal}, optimal strategies, or even memoryless $\epsilon$-optimal strategies may not exist in a given pL$\mu$-game. This compels us to use a different technique than the one adopted in, e.g., \cite{Stirling96,MM07}, which is based on the existence of optimal memoryless strategies. Moreover, as observed earlier, since pL$\mu$-games might have states with uncountably many successors, even the determinacy of pL$\mu$-games does not follow directly from Theorem \ref{blackwell_determinacy}.
The proof technique we adopt is similar to the \emph{unfolding method} of \cite{FGK2010}. The unfolding method can be roughly described as a technique for proving \emph{properties} of (some sort of) two-player parity games by induction on the number of priorities used in the game. Usually, the first step is to prove that the property under consideration holds for all parity games with just one priority. Then the general result for games with $n+1$ priorities follows by some argument making use of the inductive hypothesis. In our setting we are interested in pL$\mu$-games of the form $\mathcal{G}^{F}_{\rho}$, and the property we want to prove is that the lower and upper values of these games coincide with the denotational value of $F$ under the interpretation $\rho$. We prove this by induction on the structure of $F$ rather than on the number of priorities used in the game $\mathcal{G}^{F}_{\rho}$. This allows a more transparent and arguably simpler proof.
\begin{comment} \section{Unfolding of a pL$\mu$ game} In this subsection we describe a procedure, called \emph{unfolding}, which takes a pL$\mu$ game and returns a simplified $2\frac{1}{2}$-player parity game having strictly less number of priorities. The procedure is defined only those pL$\mu$ games $\mathcal{G}^{F}_{\rho}$ whose formula $F$ is of the form $F\!=\!\mu X.G$ or $F\!=\! \nu X. G$.
\begin{definition} Let $\mathcal{L}\!=\!\langle P, \{\freccia{a}\}_{a\in L}\rangle$ a PLTS. For every $[0,1]$-interpretation of the variables, every pL$\mu$ formula $F$ of the form $F\!=\!\mu X.G$ or $F\!=\! \nu X. G$, and function $f\!:\!P\rightarrow[0,1]$, \end{definition} \section{Proof}
\end{comment}
More formally we shall prove, by induction on the structure of the formulas, that the following equations hold for every PLTS $\mathcal{L}\!=\!\langle P, \{ \freccia{a} \}_{a\in L}\rangle$, pL$\mu$ formula $F$ and $[0,1]$-interpretation $\rho$ of the variables: \begin{equation}\label{main_equation_to_prove} \forall G \in Sub(F), \ \ \sem{G}_{\rho}(p)= Val_{\downarrow}( \mathcal{G}^{G}_{\rho})(\langle p, G\rangle) = Val_{\uparrow}( \mathcal{G}^{G}_{\rho})(\langle p, G\rangle). \end{equation}
\textbf{Base case:} $\mathbf{G\!=\!X}$, for some variable $X\!\in\!Var$. \\ For every process state $p\!\in\!P$ and every interpretation $\rho$, the equality $\sem{X}_{\rho}(p)\!=\!\rho(X)(p)$ holds by Definition \ref{denotationa_sem_def}. In the game $\mathcal{G}^{X}_{\rho}$ the state $\langle p, X\rangle$ is \emph{terminal} (and therefore the game immediately terminates when starting at this state) and the terminal reward assignment $\textnormal{R}$ is defined as $\textnormal{R}(\langle p, X\rangle)\!=\!\rho(X)(p)$. The desired result (\ref{main_equation_to_prove}) then follows by application of Proposition \ref{fixed_point_proposition_1}.
\textbf{Inductive case} $\mathbf{G\!=\! G_{1} \vee G_{2}}$. \\ For every process state $p\!\in\!P$ and every interpretation $\rho$, we have by Definition \ref{denotationa_sem_def} that $\sem{G_{1}\vee G_{2}}_{\rho}(p)\!=\! \sem{G_{1}}_{\rho}(p) \sqcup \sem{G_{2}}_{\rho}(p)$ holds. Let us consider the state $\langle p, G_{1}\vee G_{2}\rangle$ of the game $\mathcal{G}^{G_{1}\vee G_{2}}_{\rho}$. This state is in $S_{1}$, i.e., under the control of Player $1$, which can choose to move either to $\langle p, G_{1}\rangle$ or $\langle p, G_{2}\rangle$. Observe that once the state $\langle p, G_{1}\vee G_{2}\rangle$ is left after the initial move, it is not reachable again in the game. Moreover, once the state $\langle p, G_{i}\rangle$ is reached, $i\!\in\!\{1,2\}$, the rest of the game is identical to the game $\mathcal{G}^{G_{i}}_{\rho}$ (starting at $\langle p, G_{i}\rangle$). It follows from these observations that the equality
\begin{equation}\label{vee_step_aux_1} Val_{\star}(\mathcal{G}^{G}_{\rho})(\langle p, G_{i}\rangle)=Val_{\star}(\mathcal{G}^{G_{i}}_{\rho})(\langle p, G_{i}\rangle) \end{equation} holds, for $i\!\in\!\{1,2\}$ and $\star\!\in\!\{\uparrow,\downarrow\}$. By induction hypothesis we know that the equalities \begin{equation}\label{vee_step_aux_2} \sem{G_{i}}_{\rho}(p)=Val_{\downarrow}( \mathcal{G}^{G_{i}}_{\rho})(\langle p, G_{i}\rangle) = Val_{\uparrow}( \mathcal{G}^{G_{i}}_{\rho})(\langle p, G_{i}\rangle) \end{equation} hold, for $i\!\in\!\{1,2\}$. The desired result (\ref{main_equation_to_prove}) then follows immediately from equations \ref{vee_step_aux_1} and \ref{vee_step_aux_2} by application of Proposition \ref{fixed_point_proposition_1}.
\textbf{Inductive case} $\mathbf{G\!=\! G_{1} \wedge G_{2}}$. \\ Similar to the previous one.
\textbf{Inductive case} $\mathbf{G\!=\! \diam{a}G_{1}}$. \\
For every process state $p\!\in\!P$ and interpretation $\rho$, we have that the equality $\sem{\diam{a}G_{1}}_{\rho}(p)\!=\! \displaystyle \bigsqcup_{p\freccia{a}d} \big( \sum_{q\in supp(d)} d(q)\! \cdot\! \sem{G_{1}}_{\rho}(q) \big)$ holds, by Definition \ref{denotationa_sem_def}. Let us consider the state $\langle p, \diam{a}G_{1}\rangle$ of the game $\mathcal{G}^{\diam{a}G_{1}}_{\rho}$. This state is in $S_{1}$, i.e., under the control of Player $1$, which can move to a state in the (possibly empty) set $\{ \langle d, G_{1}\rangle \ | \ p\freccia{a}d \}$. As a first observation, note that if the set of $a$-successors of $p$, i.e., the set $\{ d \ | \ p\freccia{a}d\}$, is empty, then $\langle p, \diam{a}G_{1}\rangle$ is a terminal state and the terminal reward assignment $\textnormal{R}$ is defined as $\textnormal{R}(\langle p, \diam{a}G_{1}\rangle)\!=\!0$. The desired result (\ref{main_equation_to_prove}) then follows by Proposition \ref{fixed_point_proposition_1}. Let us then assume that $\{ d \ | \ p\freccia{a}d\}\!=\!\{ d_{i} \}_{i\in I}$, for some non-empty index-set $I$. Each state $\langle d_{i},G_{1}\rangle$ is under the control of Nature which moves to the state $\langle q, G_{1}\rangle$ with probability $d_{i}(q)$. More formally we have that $\langle d_{i},G_{1}\rangle\!\in\! S_{N}$ and $\pi \big( \langle d_{i},G_{1}\rangle \big)(\langle q, G_{1}\rangle)=d_{i}(q)$. Once the state $\langle d_{i}, G_{1}\rangle$ is left, and the state $\langle q,G_{1}\rangle$ is reached, the rest of the game is identical to the game $\mathcal{G}^{G_{1}}_{\rho}$ (starting at $\langle q, G_{1}\rangle$), by considerations analogous to those discussed for the case $G\!=\!G_{1}\vee G_{2}$ above. It follows from this observation that the equality
\begin{equation}\label{diam_step_aux_1} Val_{\star}(\mathcal{G}^{\diam{a}G_{1}}_{\rho})(\langle q, G_{1}\rangle)=Val_{\star}(\mathcal{G}^{G_{1}}_{\rho})(\langle q, G_{1}\rangle) \end{equation} holds for $\star\!\in\!\{\uparrow,\downarrow\}$. By induction hypothesis we know that the equalities \begin{equation}\label{diam_step_aux_2} \sem{G_{1}}_{\rho}(q)=Val_{\downarrow}( \mathcal{G}^{G_{1}}_{\rho})(\langle q, G_{1}\rangle) = Val_{\uparrow}( \mathcal{G}^{G_{1}}_{\rho})(\langle q, G_{1}\rangle) \end{equation} hold, for every process state $q$. By applying twice the result of Proposition \ref{fixed_point_proposition_1} it then follows that \begin{equation} Val_{\star}(\mathcal{G}^{\diam{a}G_{1}}_{\rho})(\langle d_{i}, G_{1}\rangle) = \displaystyle \sum_{q\in supp(d_{i})} d_{i}(q)\! \cdot\! \sem{G_{1}}_{\rho}(q) \end{equation} for $\star\!\in\!\{\uparrow,\downarrow\}$ and every $i\!\in\! I$, and \begin{equation} Val_{\star}(\mathcal{G}^{\diam{a}G_{1}}_{\rho})(\langle p,\diam{a} G_{1}\rangle)= \displaystyle \bigsqcup_{p\freccia{a}d} \big( \sum_{q\in supp(d)} d(q)\! \cdot\! \sem{G_{1}}_{\rho}(q) \big) \end{equation} hold as desired.
\textbf{Inductive case} $\mathbf{G\!=\! \quadrato{a}G_{1}}$. \\ Similar to the previous one.
\textbf{Inductive case} $\mathbf{G\!=\! \mu X.G_{1}}$. \\ For every process state $p$ and every interpretation $\rho$ we have, by Definition \ref{denotationa_sem_def}, that the following equality holds: \begin{center} $\sem{\mu X.G_{1}}_{\rho}(p)\bydef \mathrm{lfp} \Big( \lambda f\!\in\![0,1]^{P}. \big( \sem{G_{1}}_{\rho[f/X]}\big) \Big)(p)$. \end{center} By the Knaster-Tarski theorem, the previous equation can be rewritten as: \begin{equation}\label{fix_step_aux_1} \sem{\mu X.G_{1}}_{\rho}(p)=\bigsqcup_{\alpha} \sem{G_{1}}^{\alpha}_{\rho}, \end{equation} where $\alpha$ ranges over the ordinals, and $\sem{G_{1}}^{\alpha}_{\rho}$ is defined as $\bigsqcup_{\beta<\alpha}\sem{G_{1}}_{\rho[\sem{G_{1}}^{\beta}_{\rho}/X]}$. Let us denote with $\gamma$ the least ordinal such that $\sem{G_{1}}^{\gamma}_{\rho}=\sem{\mu X.G_{1}}_{\rho}$, and with $\rho_{\gamma}\!\in\![0,1]^{P}$ the interpretation $\rho[\sem{G_{1}}^{\gamma}_{\rho}/X]$. Thus, the following equation holds: \begin{equation} \sem{G_{1}}_{\rho_{\gamma}}=\sem{\mu X.G_{1}}_{\rho}. \end{equation}
Let us now turn our attention to the $2\frac{1}{2}$-player parity game $\mathcal{G}^{\mu X.G_{1}}_{\rho}$. Our goal is to prove Equation \ref{main_equation_to_prove}, i.e., that the following equalities \begin{equation}\label{fix_step_aux_2} \sem{\mu X.G_{1}}_{\rho}(p) = Val_{\downarrow}\big(\mathcal{G}^{\mu X.G_{1}}_{\rho}\big)(\langle p, \mu X.G_{1}\rangle)= Val_{\uparrow}\big(\mathcal{G}^{\mu X.G_{1}}_{\rho}\big)(\langle p, \mu X.G_{1}\rangle) \end{equation} hold, for every $p\!\in\!P $. As a first observation, note that the state $\langle p, \mu X.G_{1}\rangle$ is not reachable by any other game state, and that it has the state $\langle p, G_{1}\rangle$ as its only successor state. It then follows by application of Proposition \ref{fixed_point_proposition_1} that, in order to prove the desired result (\ref{fix_step_aux_2}), we just have to show that the equalities \begin{equation}\label{fix_step_aux_3} \sem{G_{1}}_{\rho_{\gamma}}(p) = Val_{\downarrow}\big(\mathcal{G}^{\mu X.G_{1}}_{\rho}\big)(\langle p,G_{1}\rangle)= Val_{\uparrow}\big(\mathcal{G}^{\mu X.G_{1}}_{\rho}\big)(\langle p, G_{1}\rangle) \end{equation} hold. In order to improve readability, we shall denote with $\gsem{\mu X.G_{1}}^{\star}_{\rho}\!:\!P\!\rightarrow\![0,1]$ the function defined as $\lambda p\!\in\!P.\Big( Val_{\star}\big(\mathcal{G}^{\mu X.G_{1}}_{\rho}\big)(\langle p,G_{1}\rangle) \Big)$, for $\star\!\in\!\{\downarrow,\uparrow\}$. Thus, Equation \ref{fix_step_aux_3} can be rewritten as follows: \begin{equation}\label{fix_step_aux_3_prime} \sem{G_{1}}_{\rho_{\gamma}}(p) = \gsem{\mu X.G_{1}}^{\downarrow}_{\rho}(p)=\gsem{\mu X.G_{1}}^{\uparrow}_{\rho}(p). 
\end{equation} Note that the analogous functions $\gsem{G_{1}}^{\star}_{\rho[f/X]}\!:\!P\!\rightarrow\![0,1]$ specified, for $\star\!\in\!\{\downarrow,\uparrow\}$, as \\ $\lambda p\!\in\!P.\Big( Val_{\star}\big(\mathcal{G}^{G_{1}}_{\rho[f/X]}\big)(\langle p,G_{1}\rangle) \Big)$, satisfy the following equation: \begin{equation}\label{fix_step_aux_3_second} \sem{G_{1}}_{\rho[f/X]}= \gsem{G_{1}}^{\downarrow}_{\rho[f/X]} = \gsem{G_{1}}^{\uparrow}_{\rho[f/X]} \end{equation} for all $f\!\in\![0,1]^{P}$, by induction hypothesis on $G_{1}$.
We prove Equation \ref{fix_step_aux_3_prime} by exploiting the similarities between the game $\mathcal{G}^{\mu X.G_{1}}_{\rho}$ and the game $ \mathcal{G}^{G_{1}}_{\rho[f/X]}$, for every $f\!\in\![0,1]^{P}$. The two games are indeed almost identical and differ only in the following two points: \begin{enumerate}[(1)]
\item the set of game states of $\mathcal{G}^{\mu X.G_{1}}_{\rho}$ consists of the game states of $ \mathcal{G}^{G_{1}}_{\rho[f/X]}$ plus the set $\{ \langle p, \mu X.G_{1}\rangle \ | \ p\!\in\! P\}$, \item the states of the form $\langle p, X\rangle$, for $p\!\in\!P$, are terminal in the game $ \mathcal{G}^{G_{1}}_{\rho[f/X]}$ and, instead, have the state $\langle p, G_{1}\rangle$ as unique successor in $\mathcal{G}^{\mu X.G_{1}}_{\rho}$. \end{enumerate} The first point does not contribute to any significant difference between the two games, because, as already observed earlier, the states of the form $\langle p, \mu X.G_{1}\rangle$, for $p\!\in\!P$, have a unique child and once left are not reachable again in the game, and hence can be ignored completely. Thus, in what follows, we will assume that the two games $\mathcal{G}^{\mu X.G_{1}}_{\rho}$ and $ \mathcal{G}^{G_{1}}_{\rho[f/X]}$ have the same set of states. The second point is, on the other hand, an important one. In the game $ \mathcal{G}^{G_{1}}_{\rho[f/X]}$, when a state of the form $\langle p, X\rangle$ is reached, the play ends with reward $f(p)$ for Player $1$. In the game $\mathcal{G}^{\mu X.G_{1}}_{\rho}$, instead, the game progresses to the state $\langle p, G_{1}\rangle$ and, from there, continues.
Given these observations it is clear that any finite path in $ \mathcal{G}^{G_{1}}_{\rho[f/X]}$ is also a finite path in $\mathcal{G}^{\mu X.G_{1}}_{\rho}$. Moreover we define the functions $count$ and $tail$ from finite paths in $\mathcal{G}^{\mu X.G_{1}}_{\rho}$ to natural numbers and finite paths in $ \mathcal{G}^{G_{1}}_{\rho[f/X]}$ respectively, as follows: \begin{center}
$count(\vec{s})= \left| \{ \langle q, X\rangle.\langle q,G_{1}\rangle \in \vec{s} \ | \ q\!\in\! P\}\right|$ \end{center} and \begin{center} $tail(\vec{s}) = \left\{ \begin{array}{l l} \vec{s} & $ if $ count(\vec{s})\!=\!0 \\ \vec{t} & $ if $\vec{s}\!=\! \vec{s^{\prime}}.\langle q,X\rangle.\vec{t} $ and $count(\vec{t})=0 $, for $q\!\in\!P
\end{array} \right.$
\end{center} In other words $count(\vec{s})$ gives us the number of occurrences of pairs of (adjacent) states of the form $\langle q, X\rangle$ and $\langle q, G_{1}\rangle$ in $\vec{s}$, for $q\!\in\!P$, and the finite path $tail(\vec{s})$ is obtained by removing from $\vec{s}$ the initial prefix up to the last occurrence of a state of the form $\langle q, X\rangle$ (immediately followed by the state $\langle q, G_{1}\rangle$) in $\vec{s}$. Note that $tail(\vec{s})$ is indeed a finite path in $ \mathcal{G}^{G_{1}}_{\rho[f/X]}$. The function $count$ extends to an operation from completed (i.e., either terminated or infinite) paths to $\mathbb{N}\cup\{\infty\}$ as expected. Similarly, we extend the function $tail$ to an operation from infinite paths $\vec{s}$ in $\mathcal{G}^{\mu X.G_{1}}_{\rho}$ (such that $count(\vec{s})\!<\!\infty$) to infinite paths in $ \mathcal{G}^{G_{1}}_{\rho[f/X]}$, in the obvious way.
As a further remark about the similarities between the two games $\mathcal{G}^{\mu X.G_{1}}_{\rho}$ and $ \mathcal{G}^{G_{1}}_{\rho[f/X]}$, observe that the priorities assigned to the states of the two games coincide (or at least they can be made to coincide) except that the states of the form $\langle p, X\rangle$ are assigned priority $0$ in $ \mathcal{G}^{G_{1}}_{\rho[f/X]}$ and maximal priority in $\mathcal{G}^{\mu X.G_{1}}_{\rho}$. Similarly, the terminal reward assignments of the two games coincide on all terminal states except that, on those of the form $\langle p, X\rangle$, the reward assignment of $\mathcal{G}^{\mu X.G_{1}}_{\rho}$ is not defined because, as observed before, $\langle p, X\rangle$ is not a terminal state in $\mathcal{G}^{\mu X.G_{1}}_{\rho}$. It is then simple to verify that the following property holds for every completed path $\vec{s}$ in $\mathcal{G}^{\mu X.G_{1}}_{\rho}$: \begin{equation}\label{fix_step_aux_4} \Phi^{\mu X.G_{1}}_{\rho}(\vec{s})= \left\{ \begin{array}{l l} 0 & $if $count(\vec{s})=\infty \\
\Phi^{G_{1}}_{\rho[f/X]}\big(tail(\vec{s})\big) & $otherwise$
\end{array} \right. \end{equation} where $\Phi^{\mu X.G_{1}}_{\rho}$ and $\Phi^{G_{1}}_{\rho[f/X]}$ denote the payoff functions of the two games $\mathcal{G}^{\mu X.G_{1}}_{\rho}$
and $\mathcal{G}^{G_{1}}_{\rho[f/X]}$
respectively. The first clause of Equation \ref{fix_step_aux_4} holds because the priority assigned to states of the form $\langle p, X\rangle$ is odd and maximal in $\mathcal{G}^{\mu X.G_{1}}_{\rho}$. The second clause follows immediately by previous observations.
One last observation, which follows immediately from previous considerations about the similarities between the two games $\mathcal{G}^{\mu X.G_{1}}_{\rho}$ and $\mathcal{G}^{G_{1}}_{\rho[f/X]}$, is the following: \begin{equation}\label{fix_step_aux_6} \gsem{\mu X.G_{1}}^{\star}_{\rho} = \gsem{G_{1}}^{\star}_{\rho[\gsem{\mu X.G_{1}}^{\star}_{\rho}/X]} \stackrel{\textnormal{Eq. \ref{fix_step_aux_3_second}}}{=} \sem{G_{1}}_{\rho[\gsem{\mu X.G_{1}}^{\star}_{\rho}/X]} \end{equation} for $\star\!\in\!\{\downarrow,\uparrow\}$. By application of Equation \ref{fix_step_aux_3_second}, this implies that both $\gsem{\mu X.G_{1}}^{\uparrow}_{\rho}$ and $\gsem{\mu X.G_{1}}^{\downarrow}_{\rho}$ are fixed points of $\lambda f\!\in\![0,1]^{P}.( \sem{G_{1}}_{\rho[f/X]})$. Note that, for all $p\!\in\!P$, the inequality $\gsem{\mu X.G_{1}}^{\downarrow}_{\rho}(p) \leq \gsem{\mu X.G_{1}}^{\uparrow}_{\rho}(p)$ holds. Moreover the inequality $\sem{\mu X.G_{1}}_{\rho}(p) \leq \gsem{\mu X.G_{1}}^{\downarrow}_{\rho}(p)$ holds, for all $p\!\in\!P$, because $\sem{\mu X.G_{1}}_{\rho}$ (or, equivalently, $\sem{G_{1}}_{\rho_{\gamma}}$) is the least fixed point of $\lambda f\!\in\![0,1]^{P}.( \sem{G_{1}}_{\rho[f/X]})$.
We shall prove the desired result (Equation \ref{fix_step_aux_3_prime}) by showing that, for all $p\!\in\!P$, the inequality \begin{equation} \gsem{\mu X.G_{1}}^{\uparrow}_{\rho}(p)\bydef Val_{\uparrow}\big( \mathcal{G}^{\mu X.G_{1}}_{\rho}\big)(\langle p, G_{1}\rangle) \leq \sem{G_{1}}_{\rho_{\gamma}}(p) \end{equation} holds. We do this by constructing, for every $\epsilon\!>\!0$ and for every $k\!\in\!\mathbb{N}$, a strategy $\sigma^{[k]}_{2}$ for Player $2$ in the game $\mathcal{G}^{\mu X.G_{1}}_{\rho}$, satisfying the following inequality:
\begin{equation}\label{strategy_eq_1}
\bigsqcup_{\sigma_{1}}E(M^{\langle p, G_{1}\rangle}_{\sigma_{1},\sigma^{[k]}_{2}}) < \sem{G_{1}}_{\rho_{\gamma}}(p) + \frac{\epsilon}{2^{k}} .
\end{equation}
Let us fix an arbitrary $\epsilon\!>\!0$. In what follows, we adopt the convention of using $\sigma$ and $\tau$ to range over strategies in the games $\mathcal{G}^{\mu X.G_{1}}_{\rho}$ and $\mathcal{G}^{G_{1}}_{\rho_{\gamma}}$ respectively. The strategy $\sigma^{[k]}_{2}$, for $k\!\in\!\mathbb{N}$, is built using the collection of $\delta$-optimal strategies $\tau^{\delta}_{2}$, with $\delta \! >\!0$, for Player $2$ in the game $\mathcal{G}^{G_{1}}_{\rho_{\gamma}}$, i.e., strategies $\tau_{2}^{\delta}$ such that the inequality \begin{equation} \label{delta_optimal_strat_1}
\bigsqcup_{\tau_{1}}E(M^{\langle q, G_{1}\rangle}_{\tau_{1},\tau^{\delta}_{2}}) \!< \! Val_{\uparrow}(\mathcal{G}^{G_{1}}_{\rho_{\gamma}})(\langle q,G_{1}\rangle)+\delta
\end{equation}
holds, for every $q\!\in\! P$. The strategy $\sigma_{2}^{[k]}$ is defined as follows: \begin{center} $\sigma^{[k]}_{2}(\vec{s}) = \left\{ \begin{array}{l l} \tau^{\frac{\epsilon}{2^{k+1}}}_{2}(\vec{s}) & $ if $ count(\vec{s})\!=\!0 \\ \sigma^{[k+i]}_{2}(\vec{t}) & $ if $ count(\vec{s})\!=\!i\!>\!0 $ and $ \vec{t}\!=\!tail(\vec{s})
\end{array} \right.$ \end{center} where the functions $count$ and $tail$ have been defined earlier. The strategy $\sigma_{2}^{[k]}$ can be informally described as follows: at the beginning of the game, $\sigma^{[k]}_{2}$ initially behaves as the strategy $\tau^{\frac{\epsilon}{2^{k+1}}}_{2}$. If a state of the form $\langle q, X\rangle$, for $q\!\in\! P$, is ever reached, then Player $2$ forgets the previous game-history and improves their strategy behaving, from the subsequent state $\langle q, G_{1}\rangle$, as the strategy $\sigma^{[k+1]}_{2}$. Further changes of strategy, from $\sigma^{[i]}_{2}$ to $\sigma^{[i+1]}_{2}$, for $i\!\in\!\mathbb{N}$, are repeated every time a state of the form $\langle q^{\prime}, X\rangle$ is reached, for $q^{\prime}\!\in\!P$. This means that on a history of the form $\vec{s}\!=\!\vec{s^{\prime}}.\langle q, X\rangle.\vec{t}$, where $\langle q, X\rangle$ is the last occurrence of a state of the form $\langle q^{\prime}, X\rangle$ in $\vec{s}$, the choice of $\sigma_{2}^{[k]}$ at $\vec{s}$ coincides with that of $\sigma_{2}^{[k+i]}$ (or equivalently with $\tau^{\frac{\epsilon}{2^{k+i+1}}}_{2}$) at $\vec{t}$, where $i\!=\! count(\vec{s})$.
In other words Player $2$, using the strategy $\sigma_{2}^{[k]}$, plays in $\mathcal{G}^{\mu X.G_{1}}_{\rho}$ as if they were playing in the game $\mathcal{G}^{G_{1}}_{\rho_{\gamma}}$, and every time a state of the form $\langle q, X\rangle$ is reached, they re-start again (from the unique successor $\langle q,G_{1}\rangle$ of $\langle q, X\rangle$) as if they were in $\mathcal{G}^{G_{1}}_{\rho_{\gamma}}$, but with an improved strategy.
We now prove that, for every $k\!\in\!\mathbb{N}$, the strategy $\sigma^{[k]}_{2}$ satisfies the desired Inequality \ref{strategy_eq_1}. Let us fix an arbitrary strategy $\sigma_{1}$ for Player $1$ in the game $\mathcal{G}^{\mu X.G_{1}}_{\rho}$. We just need to show that the inequality \begin{equation}\label{strategy_eq_2} E(M^{\langle p, G_{1}\rangle}_{\sigma_{1},\sigma^{[k]}_{2}}) \!<\! \sem{G_{1}}_{\rho_{\gamma}}(p) + \frac{\epsilon}{2^{k}} \end{equation}
holds. Let us denote with $\mathcal{X}^{n}$, for $n\!\in\!\mathbb{N}$, the sets of completed paths $\vec{s}$ in $\mathcal{G}^{\mu X.G_{1}}_{\rho}$ such that $count(\vec{s})\!=\!n$. Let $\mathcal{X}^{\prec n}$ be the set $\bigcup_{i\prec n} \mathcal{X}^{i}$, for $\prec\in\{<,\leq\}$. Similarly, let us denote with $\mathcal{X}^{\infty}$ the set of completed paths $\vec{s}$ in $\mathcal{G}^{\mu X.G_{1}}_{\rho}$ such that $count(\vec{s})\!=\!\infty$. The following equalities hold: \begin{center} \begin{tabular}{l l l } $E(M^{\langle p, G_{1}\rangle}_{\sigma_{1},\sigma^{[k]}_{2}})$ & $\bydef$ & $\displaystyle \int_{\mathcal{P}} \Phi^{\mu X.G_{1}}_{\rho}\ d \mathbb{P}^{\langle p, G_{1}\rangle}_{\sigma_{1},\sigma^{[k]}_{2}}$\\ $$ & $=_{A}$ & $\displaystyle \sum_{n\in\mathbb{N}} \int_{\mathcal{X}^{n}} \Phi^{\mu X.G_{1}}_{\rho}\ d \mathbb{P}^{\langle p, G_{1}\rangle}_{\sigma_{1},\sigma^{[k]}_{2}} + \int_{\mathcal{X}^{\infty}} \Phi^{\mu X.G_{1}}_{\rho}\ d \mathbb{P}^{\langle p, G_{1}\rangle}_{\sigma_{1},\sigma^{[k]}_{2}}$ \\ $$ & $=_{B}$ & $\displaystyle \sum_{n\in\mathbb{N}} \int_{\mathcal{X}^{n}} \Phi^{\mu X.G_{1}}_{\rho}\ d \mathbb{P}^{\langle p, G_{1}\rangle}_{\sigma_{1},\sigma^{[k]}_{2}}$ \\ $$ & $=_{C}$ & $\displaystyle\bigsqcup_{n\in\mathbb{N}}\displaystyle \int_{\mathcal{X}^{\leq n}} \Phi^{\mu X.G_{1}}_{\rho}\ d \mathbb{P}^{\langle p, G_{1}\rangle}_{\sigma_{1},\sigma^{[k]}_{2}}$ \\ \end{tabular} \end{center} The validity of equations $A$ and $C$ comes from countable additivity and $\omega$-continuity of the probability measure $ \mathbb{P}^{\langle p, G_{1}\rangle}_{\sigma_{1},\sigma^{[k]}_{2}}$ respectively, and the validity of equation $B$ follows from the fact that $\Phi^{\mu X.G_{1}}_{\rho}(\vec{s})\!=\!0$, for every $\vec{s}\in \mathcal{X}^{\infty}$ (see Equation \ref{fix_step_aux_4}).
We now prove, by induction on the natural numbers, that for every $n\!\in\!\mathbb{N}$ the inequality \begin{equation}\label{strategy_eq_3}
\displaystyle \int_{\mathcal{X}^{\leq n}} \Phi^{\mu X.G_{1}}_{\rho}\ d \mathbb{P}^{\langle p, G_{1}\rangle}_{\sigma_{1},\sigma^{[k]}_{2}} < \sem{G_{1}}_{\rho_{\gamma}}(p) + \displaystyle \sum_{i\leq n} \frac{\epsilon}{2^{k+i+1}} \end{equation} holds. This clearly implies the desired Inequality \ref{strategy_eq_2} because the identity $\displaystyle\bigsqcup_{n\in\mathbb{N}}\displaystyle \sum_{i\leq n} \frac{\epsilon}{2^{k+i+1}}= \frac{\epsilon}{2^{k}}$ holds. Suppose, by inductive hypothesis, that the inequality (\ref{strategy_eq_3}) holds for all $m\!<\!n$, for some $n\!\in\!\mathbb{N}$. The Markov chain $M^{\langle p, G_{1}\rangle}_{\sigma_{1},\sigma^{[k]}_{2}}$ can be depicted as in Figure~\ref{fig4} where the triangle (denoted by $\mathcal{X}^{0}$) represents the set of paths in $M^{\langle p, G_{1}\rangle}_{\sigma_{1},\sigma^{[k]}_{2}}$ never reaching a state of the form $\langle q,X\rangle$, for $q\!\in\!P$, and the finite paths (denoted by $\vec{t}_{i}$) connecting the root $\langle p, G_{1}\rangle$ with the node $\langle q_{i}, X\rangle$, for $i\!\in\! I\subseteq\! \mathbb{N}$, are the prefixes (up to the first occurrence of a state of the form $\langle q_{i},X\rangle$) of all paths $\vec{s}$ in $M^{\langle p, G_{1}\rangle}_{\sigma_{1},\sigma^{[k]}_{2}}$ of the form $\vec{s}\!=\!\vec{t_{i}}.\langle q_{i},G_{1}\rangle.\vec{s^{\prime}}$. The sub-Markov chains rooted at $\vec{t}_{i}$ (having $\langle q_{i},G_{1}\rangle$ as initial state) are denoted by $M_{i}$, for $i\!\in\!I$. \begin{figure}\label{fig4}
\end{figure}
Note that every path $\vec{s}\!\in\!\mathcal{X}^{\leq n}$ is either a path in $\mathcal{X}^{0}$, i.e., does not have any occurrences of states of the form $\langle q, X\rangle$, or is in $\bigcup_{0<j\leq n}\mathcal{X}^{j}$. Moreover observe that any path $\vec{s}\!\in\!\bigcup_{0<j\leq n}\mathcal{X}^{j}$ in $M^{\langle p, G_{1}\rangle}_{\sigma^{\epsilon}_{1},\sigma^{[k]}_{2}}$, i.e., any path in $M^{\langle p, G_{1}\rangle}_{\sigma^{\epsilon}_{1},\sigma^{[k]}_{2}}$ that reaches at least once (and at most $n$ times) a state of the form $\langle q, X\rangle$, can be uniquely written as the concatenation $\vec{s}\!=\!\vec{t_{i}}.\vec{s^{\prime}}$ of some finite path $\vec{t}_{i}$ (ending in the state $\langle q_{i}, X\rangle$, which is the first occurrence of a state of this shape in $\vec{s}$) and some completed path $\vec{s^{\prime}}\!\in\! \mathcal{X}^{< n}$, which is necessarily starting at the state $\langle q_{i}, G_{1}\rangle$. Let us denote with $\vec{t_{i}}.\mathcal{X}^{<n}$, for $i\!\in\!I$, the set of paths $\vec{s}\!\in\! \bigcup_{0<j\leq n}\mathcal{X}^{j}$ of the form $\vec{t_{i}}.\vec{s^{\prime}}$, with $\vec{s^{\prime}}\!\in\! \mathcal{X}^{< n} $. Given the previous observations, and since the set $I$ is countable, the following equality holds: \begin{equation}\label{n1_a} \displaystyle \int_{\mathcal{X}^{\leq n} } \!\!\Phi^{\mu X.G_{1}}_{\rho} \, \, d \mathbb{P}^{\langle p, G_{1}\rangle}_{\sigma_{1},\sigma^{[k]}_{2}}= \displaystyle \int_{\mathcal{X}^{0} } \!\! \Phi^{\mu X.G_{1}}_{\rho} \, \, d \mathbb{P}^{\langle p, G_{1}\rangle}_{\sigma_{1},\sigma^{[k]}_{2}} + \displaystyle \sum_{i\in I} \int_{\vec{t}_{i}.\mathcal{X}^{< n} } \!\! 
\Phi^{\mu X.G_{1}}_{\rho}\, \, d \mathbb{P}^{\langle p, G_{1}\rangle}_{\sigma_{1},\sigma^{[k]}_{2}} \end{equation} Moreover, denoting by $\pi(\vec{t}_{i})$, for $i\!\in\!I$, the multiplication of all probabilities appearing in the probabilistic steps of the path $\vec{t}_{i}$, it is simple to check that the following equality holds: \begin{equation}\label{n2_a}
\displaystyle \int_{\vec{t}_{i}.\mathcal{X}^{< n} } \!\! \Phi^{\mu X.G_{1}}_{\rho} \, \, d \mathbb{P}^{\langle p, G_{1}\rangle}_{\sigma_{1},\sigma^{[k]}_{2}}= \pi(\vec{t}_{i}) \cdot\displaystyle \int_{\mathcal{X}^{< n} } \!\! \Phi^{\mu X.G_{1}}_{\rho} \, \, d \mathbb{P}_{M_{i}} \end{equation} where $\mathbb{P}_{M_{i}}$ denotes the probability measure over completed paths induced by the sub-Markov chain $M_{i}$.
\begin{comment} Therefore, from equations \ref{n1_a} and \ref{n2_a}, we have that the following equality holds: \begin{equation}\label{n3_a} \displaystyle \int_{\mathcal{X}^{\leq n} }\!\! \Phi^{\mu X.G_{1}}_{\rho} \, d \mathbb{P}^{\langle p, G_{1}\rangle}_{\sigma_{1},\sigma^{[k]}_{2}}= \displaystyle \int_{\mathcal{X}^{0} }\!\! \Phi^{\mu X.G_{1}}_{\rho} \, \, d \mathbb{P}^{\langle p, G_{1}\rangle}_{\sigma_{1},\sigma^{[k]}_{2}} + \displaystyle \sum_{i\in I} \pi(\vec{t}_{i}) \cdot \big( \displaystyle \int_{\mathcal{X}^{< n} } \!\!\Phi^{\mu X.G_{1}}_{\rho} \, \, d\mathbb{P}_{M_{i}}\big) \end{equation} \end{comment}
It follows from the definition of the strategy $\sigma^{[k]}_{2}$, that the sub-Markov chain $M_{i}$, for $i\!\in\!I$, is generated by the strategy profile $\langle \sigma^{i}_{1}, \sigma^{[k+1]}_{2}\rangle$, where $\sigma^{i}_{1}(\vec{s})\bydef\sigma_{1}(\vec{t}_{i}.\vec{s})$, for all completed paths $\vec{s}$ having $\langle q_{i},G_{1}\rangle$ as first state. Thus, $M_{i}=M^{q_{i}}_{\sigma^{i}_{1},\sigma^{[k+1]}_{2}}$. It then follows by inductive hypothesis on $n$ (Inequality \ref{strategy_eq_3}), that the inequality \begin{equation}
\displaystyle \int_{\mathcal{X}^{< n} } \!\!\Phi^{\mu X.G_{1}}_{\rho} \, \, d\mathbb{P}_{M_{i}} < \sem{G_{1}}_{\rho_{\gamma}}(q_{i}) + \displaystyle \sum_{j<n} \frac{\epsilon}{2^{(k+1)+j+1}} \end{equation} holds. Hence, by equations \ref{n1_a}-\ref{n2_a}, the inequality \begin{equation}\label{eq_trick} \displaystyle \int_{\mathcal{X}^{\leq n} } \!\!\Phi^{\mu X.G_{1}}_{\rho} \, \, d \mathbb{P}^{\langle p, G_{1}\rangle}_{\sigma_{1},\sigma^{[k]}_{2}}\leq\int_{\mathcal{X}^{0} } \!\! \Phi^{\mu X.G_{1}}_{\rho} \, \, d \mathbb{P}^{\langle p, G_{1}\rangle}_{\sigma_{1},\sigma^{[k]}_{2}} + \Big( \displaystyle \sum_{i\in I} \pi(\vec{t}_{i}) \cdot\displaystyle \big( \sem{G_{1}}_{\rho_{\gamma}}(q_{i}) \big)\Big)+ \displaystyle \sum_{j<n} \frac{\epsilon}{2^{(k+1)+j+1}} \end{equation} holds.
Let us now consider the Markov chain (depicted in Figure \ref{fig4_prime}) in the game $\mathcal{G}^{G_{1}}_{\rho_{\gamma}}$, obtained from $M^{\langle p,G_{1}\rangle}_{\sigma_{1},\sigma_{2}}$ by removing the sub-Markov chains $M_{i}$, for $i\!\in\! I$. It follows from definition of $\sigma^{[k]}_{2}$ that this is precisely the Markov chain induced by the strategy profile $\langle \tau_{1},\tau^{\frac{\epsilon}{2^{k+1}}}_{2}\rangle$, where $\tau_{1}$ is the strategy for Player $1$ in the game $\mathcal{G}^{G_{1}}_{\rho_{\gamma}}$ which behaves as the strategy $\sigma_{1}$ in the game $\mathcal{G}^{G_{1}}_{\rho_{\gamma}}$ until a terminal state of the form $\langle q,X\rangle$ is reached. \begin{figure}\label{fig4_prime}
\end{figure} The following equations are easy to verify: \begin{center} \begin{tabular}{l l l } $E(M^{\langle p, G_{1}\rangle}_{\tau_{1},\tau^{\frac{\epsilon}{2^{k+1}}}_{2}})$ & $\bydef$ & $\displaystyle \int_{\mathcal{P}} \Phi^{ G_{1}}_{\rho_{\gamma}}\ d \mathbb{P}^{\langle p, G_{1}\rangle}_{\tau_{1},\tau^{\frac{\epsilon}{2^{k+1}}}_{2}}$\\ $$ & $=_{A}$ & $\displaystyle \int_{\mathcal{X}^{0}} \Phi^{G_{1}}_{\rho_{\gamma}}\ d \mathbb{P}^{\langle p, G_{1}\rangle}_{\tau_{1},\tau^{\frac{\epsilon}{2^{k+1}}}_{2}} + \sum_{i\in I} \pi(\vec{t}_{i})\cdot \Phi^{G_{1}}_{\rho_{\gamma}} (\vec{t}_{i})$ \\ $$ & $=_{B}$ & $\displaystyle \int_{\mathcal{X}^{0}} \Phi^{G_{1}}_{\rho_{\gamma}}\ d \mathbb{P}^{\langle p, G_{1}\rangle}_{\tau_{1},\tau^{\frac{\epsilon}{2^{k+1}}}_{2}} + \sum_{i\in I} \pi(\vec{t}_{i})\cdot \rho_{\gamma}(X)(q_{i})$\\ $$ & $=_{C}$ & $\displaystyle \int_{\mathcal{X}^{0}} \Phi^{\mu X.G_{1}}_{\rho}\ d \mathbb{P}^{\langle p, G_{1}\rangle}_{\tau_{1},\tau^{\frac{\epsilon}{2^{k+1}}}_{2}} + \sum_{i\in I} \pi(\vec{t}_{i})\cdot \rho_{\gamma}(X)(q_{i})$\\ $$ & $=_{D}$ & $\displaystyle \int_{\mathcal{X}^{0}} \Phi^{\mu X.G_{1}}_{\rho}\ d \mathbb{P}^{\langle p, G_{1}\rangle}_{\tau_{1},\tau^{\frac{\epsilon}{2^{k+1}}}_{2}} + \sum_{i\in I} \pi(\vec{t}_{i})\cdot \sem{G_{1}}_{\rho_{\gamma}}(q_{i})$. \end{tabular} \end{center} Step $A$ is justified by the fact that every path in the set $\mathcal{X}^{0}$ defined earlier is also a path in the game $\mathcal{G}^{G_{1}}_{\rho_{\gamma}}$. Step $B$ follows from the fact that, by definition, $\Phi^{G_{1}}_{\rho_{\gamma}}(\vec{t}_{i})= \rho_{\gamma}(X)(q_{i})$, for every $i\!\in\! I$ and $q_{i}\!=\!last(\vec{t}_{i})$. Equation $C$ follows from Equation \ref{fix_step_aux_4}. Lastly, Equation $D$ follows from definition of $\rho_{\gamma}(X)$.
By definition, the strategy $\tau^{\frac{\epsilon}{2^{k+1}}}_{2}$ is $\frac{\epsilon}{2^{k+1}}$-optimal (see Inequality \ref{delta_optimal_strat_1}). Thus, it follows from Equation \ref{eq_trick} that the inequality \begin{equation} \displaystyle \int_{\mathcal{X}^{\leq n} } \!\!\Phi^{\mu X.G_{1}}_{\rho} \, \, d \mathbb{P}^{\langle p, G_{1}\rangle}_{\sigma_{1},\sigma^{[k]}_{2}} \leq (\sem{G_{1}}_{\rho_{\gamma}}(p) + \frac{\epsilon}{2^{k+1}} )+ \displaystyle \sum_{j<n} \frac{\epsilon}{2^{(k+1)+j+1}} \end{equation} or equivalently, \begin{center} $\displaystyle \int_{\mathcal{X}^{\leq n} } \!\!\Phi^{\mu X.G_{1}}_{\rho} \, \, d \mathbb{P}^{\langle p, G_{1}\rangle}_{\sigma_{1},\sigma^{[k]}_{2}} \leq \sem{G_{1}}_{\rho_{\gamma}}(p) + \displaystyle \sum_{j\leq n} \frac{\epsilon}{2^{k+j+1}}$ \end{center} holds. We have then proved that Equation \ref{strategy_eq_3} holds, as desired. Therefore, following backwards our previous analysis, Equation \ref{strategy_eq_2} and, thus, Equation \ref{fix_step_aux_3_prime} hold, and this concludes the proof.
\textbf{Inductive case} $\mathbf{G\!=\! \nu X.G_{1}}$. \\ Similar to the previous one.
\section{Conclusions and future work} We proved that the denotational and game semantics of \cite{MM07} of the logic pL$\mu$ coincide on all PLTS's. This result, which is yet another example of the application of game theory to logic, strengthens the theory of the logic pL$\mu$, which has recently emerged as an interesting tool for expressing properties and reasoning about PLTS's.
Further recent research \cite{MIO11, MioThesis} explores the extension of the logic obtained by adding two new conjunction/disjunction operators called \emph{product} ($\cdot$) and \emph{coproduct} ($\odot$). The product operator, whose denotational semantics is defined as $\sem{F\cdot G}(p)\!=\!\sem{F}(p)\cdot \sem{G}(p)$, and the coproduct operator (the De Morgan dual of the product with respect to the involution $\neg x \!=\! 1-x$) increase the expressive power of the logic. For instance, it is possible to encode the \emph{qualitative} modality $\mathbb{P}_{>0}F$ whose semantics can be defined as $\sem{\mathbb{P}_{>0}F}(p)\!=\!1$ if $\sem{F}(p)\!>\!0$; $\sem{\mathbb{P}_{>0}F}(p)\!=\!0$ otherwise. This allows the expression of interesting properties, as well as the encoding of important temporal probabilistic logics such as (qualitative) PCTL.
\begin{comment} Even though PLTS's provides a very general framework for describing probabilistic concurrent processes, they can only model \emph{countable} probabilistic choices. A natural generalization of PLTS's allowing arbitrary probabilistic behaviors consists in considering the class of models given by pairs $(P, \langle \freccia{a} \rangle_{a\in L})$ where the set $P$ is endowed with a $\sigma$-algebra $\Omega$, i.e., $(P,\Omega)$ is a measurable space, and $\freccia{a}\subseteq { P\!\times\! \mathbb{P}(P)}$, where $\mathbb{P}(P)$ denotes the set of probability measures $m$ on $(P,\Omega)$. Extending the semantics of the logic pL$\mu$ to this class of models is non trivial. For example the, arguably natural, denotational semantics for the formula $\diam{a}F$, \begin{center} $\sem{\diam{a}F}_{\rho}(p) = \displaystyle \bigsqcup_{p\freccia{a}m}\displaystyle \int_{P} \sem{F} \, \, d m$ \end{center} would require the denotation $\sem{F}_{\rho}:P\!\rightarrow\! [0,1]$ of $F$ to be measurable. However it is not clear if the denotation of pL$\mu$ operators, in particular modalities and fixed points operators, preserves measurability. Despite these issues, the pL$\mu$-games generated by these models still look quite similar to the ones generated by standard PLTS's; the only significant difference being that the Markov chains to be considered are not countably branching trees anymore. An interesting direction of work might consist in seeing if the above mentioned questions about measurability of the denotational semantics, can be addressed by relating the denotational semantics with the game semantic, adapting the purely inductive method adopted in our main proof. \end{comment}
\end{document} |
\begin{document}
\titlerunning{Spectral Representation of Some C.E.\ Sets With an Application} \title{Spectral Representation of Some Computably Enumerable Sets With an Application to Quantum Provability\thanks{Partially supported by JSPS KAKENHI Grant Number 23650001.}}
\author{Cristian S. Calude\inst{1} \thanks{Work done in part during a visit to Research and Development Initiative, Chuo University, Tokyo, Japan, January 2013; partially supported also by Marie Curie FP7-PEOPLE-2010-IRSES Grant RANPHYS.} \and Kohtaro Tadaki\inst{2}\thanks{Corresponding author.}}
\institute{Department of Computer Science, University of Auckland, Auckland, New Zealand \email{cristian@cs.auckland.ac.nz} \and Research and Development Initiative, Chuo University, Tokyo, Japan\\ \email{tadaki@kc.chuo-u.ac.jp} }
\maketitle
\vspace*{-2mm}
\begin{abstract} We propose a new type of quantum computer which is used to prove a spectral representation for a class $\mathcal S$ of computable sets. When $S\in \mathcal S$ codes the theorems of a formal system, the quantum computer produces through measurement all theorems and proofs of the formal system. We conjecture that the spectral representation is valid for all computably enumerable sets. The conjecture implies that the theorems of a general formal system, like Peano Arithmetic or ZFC, can be produced through measurement; however, it is unlikely that the quantum computer can produce the proofs as well, as in the particular case of $\mathcal S$. The analysis suggests that showing the provability of a statement is different from writing up the proof of the statement. \end{abstract}
\section{Introduction}
\vspace*{-1mm}
Mathematical results are accepted only if they have been proved: {\em the proof concludes with the proven statement, the theorem}. The proof comes first and justifies the theorem. Classically, there is no alternative scenario.
The genius mathematician Srinivasa Ramanujan discovered nearly 3900 results~\cite{Berndt}, many without proofs; nearly all his claims have been proven correct. Ramanujan first {\em recognised} a true statement and only later that statement was {\em proven}, hence accepted as a {\em theorem}. While we don't know how Ramanujan's mind was able to ``discover'' mathematical true facts, we can ask whether there is a way to understand, and possibly imitate, his approach.
In this paper a new type of quantum computer is used to prove a spectral representation for a class $\mathcal S$ of computable sets. For every $S\in \mathcal S$ we construct a quantum system in such a way that the elements of $S$ are exactly the eigenvalues of the Hermitian operator representing an observable of the quantum system, i.e.\ the spectrum of the operator. In particular, $S$ can be represented by the energy of the associated quantum system. The operator associated to $S\in\mathcal S$ has a special numerical form which guarantees that by measurement we get both the element and the proof that the element is in $S$.
{\em We conjecture that the spectral representation is valid for all computably enumerable sets.}
When $S\in \mathcal S$ codes the theorems of a formal system, then the associated quantum computer produces through measurement the theorems of the formal system and their proofs.
The conjecture implies that every theorem of a general (recursively axiomatisable) formal system, like Peano Arithmetic or ZFC, can be produced through measurement. However, we argue that in this general case the quantum procedure produces, like Ramanujan, only the true statement, but not its proof.
Of course, the proof can be algorithmically generated by a classical algorithm, albeit in a possibly very long time (such a computation makes sense only for statements recognised as ``interesting''). For example, if the Riemann hypothesis is produced by the quantum procedure we will know that the famous hypothesis is true. However, to have a formal proof---whose existence is guaranteed by the correctness of the quantum procedure---we may need to run a very long classical procedure. The proof obtained in this way could be rather unsatisfactory, as it may not convey the ``understanding'', the reason for which the Riemann hypothesis holds true (see also \cite{CM}). Although such a proof may not make us ``wiser''~\cite{Manin}, it may stimulate the search for better arguments.
The paper is structured as follows. In Section~\ref{QM} we present the basic quantum mechanical facts necessary for describing our quantum systems. In Section~\ref{CS} we describe a class of computable sets for which we can prove in Section~\ref{RT} the representability theorem and its application to quantum provability (in Section~\ref{QP}). In Section~\ref{Cnj} we discuss the generalisation of the quantum procedure to all computably enumerable sets and in Section~\ref{QPP} its application to quantum provability for arbitrary formal systems.
\vspace*{-2.0mm}
\section{Quantum mechanical facts} \label{QM}
\vspace*{-1.5mm}
We start with some basic facts on quantum mechanics needed for this paper. The quantum mechanical arguments are presented at the level of mathematical rigour adopted in quantum mechanics textbooks written by physicists, for example, Dirac~\cite{Dirac58} and Mahan~\cite{Mahan10}.
A state of a quantum system is represented by a vector in a Hilbert space $\mathcal{H}$. The vector and the space are called \emph{state vector} and \emph{state space}, respectively. The \emph{dynamical variables} of a system are quantities such as the coordinates and the components of momentum and angular momentum of particles, and the energy of the system. They play a crucial role not only in classical mechanics but also in quantum mechanics.
Dynamical variables in quantum mechanics are represented by Hermitian operators on the state space $\mathcal{H}$. A dynamical variable of the system is called an \emph{observable} if all eigenvectors of the Hermitian operator representing it form a complete system for $\mathcal{H}$. Normally we assume that a measurement of any observable can be performed upon a quantum system in any state (if we ignore the constructive matter, which is one of the points of this paper).
The set of possible outcomes of a measurement of an observable $\mathcal{O}$ of a system is the eigenvalue spectrum of the Hermitian operator representing $\mathcal{O}$. Let $\{\ket{m,\lambda}\}$ be a complete orthonormal system of eigenvectors of the Hermitian operator $A$ representing an observable $\mathcal{O}$ such that $A\ket{m,\lambda}=m\ket{m,\lambda}$ for all eigenvalues $m$ of $A$ and all $\lambda$, where the parameter $\lambda$ designates the degeneracy of the eigenspace of $A$. Suppose that a measurement of $\mathcal{O}$ is performed upon a quantum system in the state represented by a normalized vector $\ket{\Psi}\in\mathcal{H}$. Then the probability of getting the outcome $m$ is given by $p(m)=\sum_{\lambda}\abs{\braket{m,\lambda}{\Psi}}^2$, where $\braket{m,\lambda}{\Psi}$ denotes the inner product of the vectors $\ket{m,\lambda}$ and $\ket{\Psi}$. Moreover, given that the outcome $m$ occurred, the state of the quantum system immediately after the measurement is represented by the normalized vector
\vspace*{-1mm} \begin{equation*}
\frac{1}{\sqrt{p(m)}}\sum_{\lambda}\braket{m,\lambda}{\Psi}\ket{m,\lambda}. \end{equation*}
\vspace*{-3mm}
The \emph{commutator} between two operators $A$ and $B$ is defined to be $[A,B]:=AB-BA$. Let $\mathcal{O}_1,\dots,\mathcal{O}_k$ be observables of a quantum system and let $A_1,\dots,A_k$ be the Hermitian operators which represent $\mathcal{O}_1,\dots,\mathcal{O}_k$, respectively. If the Hermitian operators commute with each other, i.e., $[A_j,A_{j'}]=0$ for all $j,j'=1,\dots,k$, then we can perform measurements of all $\mathcal{O}_1,\dots,\mathcal{O}_k$ simultaneously upon the quantum system in any state. All dynamical variables which we will consider below are assumed to be observables, and we will identify any observable with the Hermitian operator which represents it.
In this paper we consider quantum systems consisting of vibrating particles. The simplest one is the quantum system of \emph{one-dimensional harmonic oscillator}, which consists only of one particle vibrating in one-dimensional space. The dynamical variables needed to describe the system are just one coordinate $x$ and its conjugate momentum $p$. The \emph{energy} of the system is an observable, called \emph{Hamiltonian}, and is defined in terms of $x$ and $p$ by
\vspace*{-1mm} \begin{equation*}
H=\frac{1}{2m}(p^2+m^2\omega^2 x^2), \end{equation*}
\vspace*{-3mm}\\ where $m$ is the mass of the oscillating particle and $\omega$ is $2\pi$ times the frequency. The oscillation of the particle is quantized by the \emph{fundamental quantum condition}
\vspace*{-2mm} \begin{equation}\label{quantum_condition}
[x,p]=i\hbar, \end{equation}
\vspace*{-3mm}\\ where $\hbar$ is \emph{Planck's constant}. The \emph{annihilation operator} $a$ of the system is defined by
\vspace*{-1mm} \begin{equation*}
a=\sqrt{\frac{m\omega}{2\hbar}}\left(x+\frac{ip}{m\omega}\right). \end{equation*}
\vspace*{-1mm}\\ Its adjoint $a^\dag$ is called a \emph{creation operator}. The fundamental quantum condition \eqref{quantum_condition} is then equivalently rewritten as
\vspace*{-1mm} \begin{equation}\label{acqc}
[a,a^\dag]=1, \end{equation}
\vspace*{-3mm}\\ and the Hamiltonian can be represented in the form
\vspace*{-0mm} \begin{equation}\label{Hada}
H=\hbar\omega\left(a^\dag a+\frac{1}{2}\right) \end{equation}
\vspace*{-2mm}\\ in terms of the creation and annihilation operators. In order to determine the values of energy possible in the system, we must solve the eigenvalue problem of $H$. This problem is reduced to the eigenvalue problem of the observable $N:=a^\dag a$, called a \emph{number operator}. Using the condition \eqref{acqc}, the eigenvalue spectrum of $N$ is shown to equal the set ${\mathbb{N}}$ of all nonnegative integers. Each eigenspace of $N$ is not degenerate, and the normalized eigenvector $\ket{n}$ of $N$ belonging to an arbitrary eigenvalue $n\in{\mathbb{N}}$ is given by
\vspace*{-1mm} \begin{equation}\label{evn}
\ket{n}=\frac{(a^\dag)^n}{\sqrt{n!}}\ket{0}, \end{equation}
\vspace*{-3mm}\\ where $\ket{0}$ is the unique normalized vector up to a phase factor such that $a\ket{0}=0$. Since $N$ is an observable, the eigenvectors $\{\ket{n}\}$ form a complete orthonormal system for the state space. It follows from \eqref{Hada} that the values of energy possible in the system are
\vspace*{-1mm} \begin{equation*}
E_n=\hbar\omega\left(n+\frac{1}{2}\right)\raisebox{.7mm}{,}\qquad(n=0,1,2,\dotsc) \end{equation*}
\vspace*{-3mm}\\ where the eigenvector of $H$ belonging to an energy $E_n$ is given by \eqref{evn}.
Next we consider the quantum system of \emph{$k$-dimensional harmonic oscillators} which consists of $k$ one-dimensional harmonic oscillators vibrating independently without any interaction. The dynamical variables needed to describe the system are $k$ coordinates $x_1,\dots,x_k$ and their conjugate momenta $p_1,\dots,p_k$. The Hamiltonian of the system is
\vspace*{-2mm} \begin{equation}\label{k-H}
H=\sum_{j=1}^k\frac{1}{2m_j}(p_j^2+m_j^2\omega_j^2 x_j^2), \end{equation}
\vspace*{-2mm}\\ where $m_j$ is the mass of the $j$th one-dimensional harmonic oscillator and $\omega_j$ is $2\pi$ times its frequency. The vibrations of $k$ oscillators are quantized by the fundamental quantum conditions \begin{equation}\label{k-quantum_condition}
[x_j,p_{j'}]=i\hbar\delta_{jj'},\qquad [x_j,x_{j'}]=[p_j,p_{j'}]=0. \end{equation} The annihilation operator $a_j$ of the $j$th oscillator is defined by
\vspace*{-1mm} \begin{equation*}
a_j=\sqrt{\frac{m_j\omega_j}{2\hbar}}\left(x_j+\frac{ip_j}{m_j\omega_j}\right). \end{equation*}
\vspace*{-3mm}\\ The adjoint $a_j^\dag$ of $a_j$ is the creation operator of the $j$th oscillator. The fundamental quantum condition \eqref{k-quantum_condition} is then equivalently rewritten as
\vspace*{-1mm} \begin{align}
&[a_j,a_{j'}^\dag]=\delta_{jj'}, \label{k-acqc1}\\
&[a_j,a_{j'}]=[a_j^\dag,a_{j'}^\dag]=0.\label{k-acqc2} \end{align}
\vspace*{-3mm}\\ and the Hamiltonian can be represented in the form
\vspace*{-1mm} \begin{equation}\label{k-Hada}
H=\sum_{j=1}^k\hbar\omega_j\left(N_j+\frac{1}{2}\right) \end{equation}
\vspace*{-3mm}\\ where $N_j:=a_j^\dag a_j$ is the number operator of the $j$th oscillator. In order to determine the values of energy possible in the system, we first solve the eigenvalue problems of the number operators $N_1,\dots,N_k$. We can do this simultaneously for all $N_j$ since the number operators commute with each other, i.e., $[N_j,N_{j'}]=0$ for all $j,j'=1,\dots,k$, due to \eqref{k-acqc1} and \eqref{k-acqc2}. The eigenvalue spectrum of each $N_j$ is shown to equal ${\mathbb{N}}$ using \eqref{k-acqc1}. We define a vector $\ket{n_1,\dots,n_k}$ as the tensor product $\ket{n_1}\otimes\dots\otimes\ket{n_k}$ of $\ket{n_1},\dots,\ket{n_k}$, where each $\ket{n_j}$ is defined by \eqref{evn} using $a_j$ in place of $a$. For each $j$, the vector $\ket{n_1,\dots,n_k}$ is a normalized eigenvector of $N_j$ belonging to an eigenvalue $n_j\in{\mathbb{N}}$, i.e., \begin{equation}\label{kj-evn}
N_j\ket{n_1,\dots,n_k}=n_j\ket{n_1,\dots,n_k}. \end{equation} All the vectors $\{\ket{n_1,\dots,n_k}\}$ form a complete orthonormal system for the state space. It follows from \eqref{k-Hada} that the values of energy possible in the system are \begin{equation*}
E_{n_1,\dots,n_k}=\hbar\sum_{j=1}^k\omega_j\left(n_j+\frac{1}{2}\right)\raisebox{.7mm}{,}\qquad(n_1,\dots,n_k=0,1,2,\dotsc) \end{equation*} The vector $\ket{n_1,\dots,n_k}$ is an eigenvector of $H$ belonging to an energy $E_{n_1,\dots,n_k}$.
The Hamiltonian \eqref{k-H} describes the quantum system of $k$-dimensional harmonic oscillators where each oscillator does not interact with any others and moves independently. In a general quantum system consisting of $k$-dimensional harmonic oscillators, each oscillator strongly interacts with all others. Its Hamiltonian has the general form
\vspace*{-0mm} \begin{equation}\label{gH}
P(a_1,\dots,a_{k},a_1^\dag,\dots,a_{k}^\dag), \end{equation}
\vspace*{-4mm}\\ where $a_1,\dots,a_k$ are creation operators satisfying the quantum conditions \eqref{k-acqc1} and \eqref{k-acqc2}, and $P$ is a polynomial in $2k$ variables with coefficients of complex numbers such that \eqref{gH} is Hermitian. \footnote{In the monomials appearing in $P$, the order of the variables $x_1,\dots,x_{2k}$ does not matter. However, since $a_j$ and $a_j^\dag$ do not commute, in substituting $a_1,\dots,a_{k},a_1^\dag,\dots,a_{k}^\dag$ into the variables of $P$ the order of these operators makes a difference. Thus, the operator \eqref{gH} makes sense only by specifying this order.} For example, we can consider the quantum system of $k$-dimensional harmonic oscillators whose Hamiltonian is
\vspace*{-0mm} \begin{equation*}
H=\sum_{j}\hbar\omega_j\left(a_j^\dag a_j+\frac{1}{2}\right)+\sum_{j\neq j'}g_{jj'}a_j^\dag a_{j'}. \end{equation*}
\vspace*{-3mm}\\ Here the \emph{interaction terms} $g_{jj'}a_j^\dag a_{j'}$ between the $j$th oscillator and the $j'$th oscillator with a real constant $g_{jj'}$ are added to the Hamiltonian \eqref{k-Hada}. Note, however, that solving exactly the eigenvalue problem of an observable in the general form of \eqref{gH} is not an easy task.
\vspace*{-1mm}
\section{A class of unary languages} \label{CS}
\vspace*{-1mm}
In this section we introduce a class of unary languages for which the representability theorem proven in the next section holds true.
Let ${\mathbb{N}}^{*}$ be the set of all finite sequences $(x_{1}, \dots ,x_{m})$ with elements in ${\mathbb{N}}$ ($m \in {\mathbb{N}}$; for $m=0$ we get the empty sequence $\varepsilon$). Let
\vspace*{-1mm} \begin{equation} \label{S} L((x_{1}\dots x_{m}),a)=\left(\prod_{i=1}^{m} \{1^{x_{i}}\}^{*}\right) \{1^{a}\}, \end{equation}
\vspace*{-3mm}\\ for all $(x_{1}, \dots ,x_{m}) \in {\mathbb{N}}^{*}, a \in {\mathbb{N}}.$
\begin{thm} Let $ \mathcal L_{0}$ be the minimal class of languages $\mathcal L$ over $\{1\}$ containing the languages $\{ 1^n \}$ for every $n\in {\mathbb{N}}$, and which is closed under concatenation and the Kleene star operation. \if01 satisfying the following three conditions:\\ \noindent (1) the language $\{ 1^n \} \in \mathcal L$, for every $n\in {\mathbb{N}}$,\\ (2) the class $\mathcal L$ is closed under concatenation,\\ (3) the class $\mathcal L$ is closed under the Kleene star operation.\\ \fi Then, $\mathcal L_{0} = \{ L((x_{1},\dots ,x_{m}),a)\mid (x_{1},\dots ,x_{m}) \in {\mathbb{N}}^{*}, a \in {\mathbb{N}}\}$. \end{thm}
\begin{proof} The class $ \mathcal L_{0}$ has the required properties because $L(\varepsilon, a) = \{1^{a}\}$, the concatenation of $L((x_{1}, \dots ,x_{m}), a)$ and $L((y_{1}, \dots ,y_{l}), b)$ is $L((x_{1}, \dots ,x_{m}), a) L((y_{1}, \dots ,y_{l}), b) =L((x_{1}, \dots ,x_{m},y_{1}, \dots ,y_{l}), a+b)$ and the Kleene star of $L((x_{1}, \dots ,x_{m}), a)$ is $L((x_{1}, \dots ,x_{m}), a)^{*} =L((x_{1},\dots ,x_{m}, a), 0)$. In view of (\ref{S}), $ \mathcal L_{0}$ is included in every class $\mathcal L$ satisfying the properties in the statement of the theorem. \qed \end{proof}
\begin{corollary} The class $\mathcal L_{0}$ coincides with the minimal class of languages $\mathcal L$ over $\{1\}$ which contains the languages $\{ 1^n \}$ and
$\{ 1^n \}^*$, for every $n\in {\mathbb{N}}$ and which is closed under concatenation. \end{corollary}
\begin{comment} i) If $L$ is a finite unary language with more than one element, then $L\not\in \mathcal L_{0}$.\\ ii) The family $\mathcal L_{0}$ is a proper subset of the class of regular (equivalently, context-free) languages.\\ iii) The language $\{1^{p}\mid p \mbox{ is prime}\}$ is not in $\mathcal L_{0}$. \end{comment}
Consider the minimal class $\mathcal D_{0}$ of subsets of $ \mathbb{N}$ containing the sets $\{b\}$, for every $b\in \mathbb{N}$, and which is closed under the sum and the Kleene star operation. Here the sum of the sets
$S, T$ is the set $S+T=\{a+b \mid a\in S, b\in T\}$; the Kleene star of the set $S$ is the set
$S^{*}=\{a_{1}+ a_{2} + \dots + a_{k}\mid k \ge 0, a_{i}\in S, 1\le i \le k\}$.
\begin{thm}The following equality holds true: \label{DLL} $\mathcal L_{0}= \{\{1^{a}\mid a \in S\}\mid S\in \mathcal D_{0}\}.$ \end{thm}
Based on the above theorem, we identify $\mathcal L_{0}$ with $\mathcal D_{0}$ in what follows.
\vspace*{-1mm}
\section{The representation theorem} \label{RT}
\vspace*{-1mm}
Can a set $S\in \mathcal D_{0}$ be represented as the outcomes of a quantum measurement? We answer this question in the affirmative. First we show that the sets in $\mathcal D_{0}$ can be generated by polynomials with nonnegative integer coefficients.
\begin{prop} \label{rep} For every set $S\in \mathcal D_{0}$ there exists a polynomial with nonnegative integer coefficients $F_{S}$ in variables $x_1,\dots,x_k$ such that $S$ can be represented as:
\vspace*{-1mm} \begin{equation}\label{range}
S=\{F_{S}(n_1,\dots,n_k)\mid n_1,\dots,n_k\in{\mathbb{N}}\}. \end{equation}
\vspace*{-5mm} \end{prop}
\begin{proof} Suppose that $S\in \mathcal D_{0}$. It follows from Theorem~\ref{DLL} and \eqref{S} that there exist $a_1,\dots,a_k,a\in{\mathbb{N}}$ such that $S=\{a_1 n_1 + \dots + a_k n_k+a\mid n_1,\dots,n_k\in{\mathbb{N}}\}$. Thus, \eqref{range} holds for the polynomial $F_S(x_1,\dots,x_k)=a_1 x_1 + \dots + a_k x_k+a$. \qed \end{proof}
\begin{comment} There exist infinitely many sets not in $\mathcal D_{0}$ which are representable in the form (\ref{range}). \end{comment}
Motivated by Proposition~\ref{rep}, we show that every set
\vspace*{-1mm} \begin{equation}\label{rangepol}
S=\{F(n_1,\dots,n_k)\mid n_1,\dots,n_k\in{\mathbb{N}}\}, \end{equation}
\vspace*{-4mm}\\ where $F$ is a polynomial in $k$ variables with nonnegative integer coefficients, can be represented by the set of outcomes of a \emph{constructive} quantum measurement. For this purpose, we focus on a quantum system consisting of $k$-dimensional harmonic oscillators whose Hamiltonian has the form
\vspace*{-0mm} \begin{equation}\label{FH}
H=F(N_1,\dots,N_k), \end{equation}
\vspace*{-5mm}\\ where $N_1,\dots,N_k$ are the number operators defined by $N_j=a_j^\dag a_j$ with the annihilation operator $a_j$ of the $j$th oscillator. Note that the substitution of $N_1,\dots,N_k$ into the variables of $F$ is unambiguously defined since the number operators $N_1,\dots,N_k$ commute with each other. This type of Hamiltonian is a special case of \eqref{gH}.
We say an observable of the form \eqref{gH} is \emph{constructive} if all coefficients of $P$ are in the form of $p+qi$ with $p,q\in{\mathbb{Q}}$. Thus, the Hamiltonian \eqref{FH} is constructive by definition. Actually, a measurement of the Hamiltonian \eqref{FH} can be performed \emph{constructively} in an intuitive sense. The constructive measurement consists of the following two steps: First, the simultaneous measurements of the number operators $N_1,\dots,N_k$ are performed upon the quantum system to produce the outcomes $n_1,\dots,n_k\in{\mathbb{N}}$ for $N_1,\dots,N_k$, respectively. This is possible since the number operators commute with each other. Secondly, $F(n_1,\dots,n_k)$ is calculated and is regarded as the outcome of the measurement of the Hamiltonian \eqref{FH} itself. This is constructively possible since $F$ is a polynomial with integer coefficients. Thus, the whole measurement process is constructive in an intuitive sense too.
\begin{thm}\label{representability} For every set $S$ of the form \eqref{rangepol} there exists a constructive Hamiltonian $H$ such that the set of all possible outcomes of a measurement of $H$ is $S$. \end{thm} \begin{proof} Consider the Hamiltonian $H$ of the form \eqref{FH}. It is constructive, as we saw above. We show that the eigenvalue spectrum of $H$ equals $S$.
First, using \eqref{kj-evn} we get
\vspace*{-1mm} \begin{equation}\label{F-evn}
F(N_1,\dots,N_k)\ket{n_1,\dots,n_k}=F(n_1,\dots,n_k)\ket{n_1,\dots,n_k} \end{equation}
\vspace*{-3mm}\\ for every $n_1,\dots,n_k\in {\mathbb{N}}$. Thus, every element of $S$ is an eigenvalue of $H$. Conversely, suppose that $E$ is an arbitrary eigenvalue of $H$. Then there exists a nonzero vector $\ket{\Psi}$ such that $H\ket{\Psi}=E\ket{\Psi}$. Since all vectors $\{\ket{n_1,\dots,n_k}\}$ form a complete orthonormal system for the state space, there exist complex numbers $\{c_{n_1,\dots,n_k}\}$ such that $\ket{\Psi}=\sum_{n_1,\dots,n_k}c_{n_1,\dots,n_k}\ket{n_1,\dots,n_k}$. It follows from \eqref{F-evn} that
\vspace*{-2mm} $$\sum_{n_1,\dots,n_k}c_{n_1,\dots,n_k}F(n_1,\dots,n_k)\ket{n_1,\dots,n_k}=\sum_{n_1,\dots,n_k}c_{n_1,\dots,n_k}E\ket{n_1,\dots,n_k}.$$
\vspace*{-3mm}\\ Since the vectors $\{\ket{n_1,\dots,n_k}\}$ are independent, we have
\vspace*{-0mm} \begin{equation}\label{cind}
c_{n_1,\dots,n_k}(E-F(n_1,\dots,n_k))=0, \end{equation}
\vspace*{-4mm}\\ for all $n_1,\dots,n_k\in{\mathbb{N}}$. Since $\ket{\Psi}$ is nonzero, $c_{\bar{n}_1,\dots,\bar{n}_k}$ is also nonzero for some $\bar{n}_1,\dots,\bar{n}_k\in{\mathbb{N}}$. It follows from \eqref{cind} that $E=F(\bar{n}_1,\dots,\bar{n}_k)$. \qed \end{proof}
\vspace*{-2.5mm}
\section{An application to quantum provability} \label{QP}
\vspace*{-0.5mm}
Let $S$ be a set of the form \eqref{rangepol}. In the proof of Theorem~\ref{representability}, we consider the measurement of the Hamiltonian of the form \eqref{FH}. In the case where the state $\ket{\Psi}$ over which the measurement of the Hamiltonian is performed is chosen randomly, an element of $S$ is generated randomly as the measurement outcome. In this manner, by infinitely many repeated measurements we get exactly the set $S$.
If the set $S$ codes the ``theorems'' of a formal system $\mathcal S$---which is possible as $S$ is computable---then $F(n_1,\dots,n_k)\in S$ is a \emph{theorem} of $\mathcal S$ and the numbers $n_1,\dots,n_k$ play the role of the \emph{proof} which certifies it.
Suppose that a \emph{single} measurement of the Hamiltonian of the form \eqref{FH} was performed upon a quantum system in a state
represented by a normalized vector $\ket{\Psi}$ to produce an outcome $m\in S$, i.e., a theorem. Then, by the definition of theorems, there exists a proof $n_1,\dots,n_k$ which makes $m$ a theorem, i.e., which satisfies $m=F(n_1,\dots,n_k)$. Can we extract the proof $n_1,\dots,n_k$ after the measurement? This can be possible in the following manner: Immediately after the measurement, the system is in the state represented by the normalized vector $\ket{\Phi}$ given by
\vspace*{-2mm} \begin{equation*}
\ket{\Phi}=\frac{1}{\sqrt{C}}\sum_{m=F(n_1,\dots,n_k)}\braket{n_1,\dots,n_k}{\Psi}\ket{n_1,\dots,n_k}, \end{equation*}
\vspace*{-3mm}\\ where $C$ is the probability of getting the outcome $m$ in the measurement, given by:
\vspace*{-0mm} \begin{equation*}
C=\sum_{m=F(n_1,\dots,n_k)}\abs{\braket{n_1,\dots,n_k}{\Psi}}^2. \end{equation*}
\vspace*{-3mm}\\ Since the number operators $N_1,\dots,N_k$ commute with each other, we can perform the simultaneous measurements of $N_1,\dots,N_k$ upon the system in the state $\ket{\Phi}$. Hence, by performing the measurements of $N_1,\dots,N_k$, we obtain any particular outcome $n_1,\dots,n_k$ with probability $\abs{\braket{n_1,\dots,n_k}{\Phi}}^2$. Note that
\vspace*{-0mm} \begin{equation*}
\sum_{m=F(n_1,\dots,n_k)}\abs{\braket{n_1,\dots,n_k}{\Phi}}^2=\sum_{m=F(n_1,\dots,n_k)}\abs{\braket{n_1,\dots,n_k}{\Psi}}^2/C=1. \end{equation*}
\vspace*{-3mm}\\ Thus, with probability one we obtain some outcome $n_1,\dots,n_k$ such that $m=F(n_1,\dots,n_k)$. In this manner we can immediately extract the proof $n_1,\dots,n_k$ of the theorem $m\in S$ obtained as a measurement outcome.
\vspace*{-2mm}
\section{A conjecture} \label{Cnj}
\vspace*{-1mm}
In the early 1970s, Matijasevi\v{c}, Robinson, Davis, and Putnam solved negatively Hilbert's tenth problem by proving the MRDP theorem (see Matijasevi\v{c} \cite{Matijasevic93} for details) which states that every computably enumerable subset of ${\mathbb{N}}$ is Diophantine. A subset $S$ of ${\mathbb{N}}$ is called \textit{computably enumerable} if there exists a (classical) Turing machine that, when given $n\in{\mathbb{N}}$ as an input, eventually halts if $n\in S$ and otherwise runs forever. A subset $S$ of ${\mathbb{N}}$ is \textit{Diophantine} if there exists a polynomial $P(x,y_1,\dots,y_k)$ in variables $x,y_1,\dots,y_k$ with integer coefficients such that, for every $n\in{\mathbb{N}}$, $n\in S$ if and only if there exist $m_1,\dots,m_k\in{\mathbb{N}}$ for which $P(n,m_1,\dots,m_k)=0$.\\[-1ex]
Inspired by the MRDP theorem, we conjecture the following:
\vspace*{-0mm}
\begin{conject}\label{T} For every computably enumerable subset $S$ of ${\mathbb{N}}$, there exists a constructive observable $A$ of the form of \eqref{gH} whose eigenvalue spectrum equals $S$. \end{conject}
\vspace*{-1mm}
Conjecture~\ref{T} implies that when we perform a measurement of the observable $A$,
a member of the computably enumerable set $S$ is stochastically obtained as a measurement outcome. As we indefinitely repeat measurements of $A$, members of $S$ are being enumerated, just like a Turing machine enumerates $S$.
In this way a new type of quantum mechanical computer is postulated to exist. How can we construct it? Below we discuss some properties of this hypothetical quantum computer.
As in the proof of the MRDP theorem---in which a whole computation history of a Turing machine is encoded in (the base-two expansions of) the values of variables of a Diophantine equation---{\em a whole computation history of a Turing machine is encoded in a single quantum state which does not make time-evolution (in the Schr\"odinger picture).} Namely, a whole computation history of the Turing machine $M$ which recognises $S$ is encoded in an eigenstate of the observable $A$ which is designed appropriately using the creation and annihilation operators. To be precise, let $\ket{\Psi}=\sum_{n_1,\dots,n_k}c_{n_1,\dots,n_k}\ket{n_1,\dots,n_k}$ be an eigenvector of $A$ belonging to an eigenvalue $n\in S$ such that each coefficient $c_{n_1,\dots,n_k}$ is drawn from a certain finite set of complex numbers including $0$ and the set $\{(n_1,\dots,n_k)\mid c_{n_1,\dots,n_k}\neq 0\}$ is finite. The whole computation history of $M$ with the input $n$ is encoded in the coefficients $\{c_{n_1,\dots,n_k}\}$ of $\ket{\Psi}$ such that each finite subset obtained by dividing appropriately $\{c_{n_1,\dots,n_k}\}$ represents the configuration (i.e., the triple of the state, the tape contents, and the head location) of the Turing machine $M$ at the corresponding time step. The observable $A$ is constructed such that its eigenvector encodes the whole computation history of $M$, using the properties of the creation and annihilation operators such as \begin{equation*}
a_j^\dag\ket{n_1,\dots,n_{j-1},n_j,n_{j+1},\dots,n_k}=\sqrt{n_j+1}\ket{n_1,\dots,n_{j-1},n_j+1,n_{j+1},\dots,n_k}, \end{equation*} by which the different time steps are connected in the manner corresponding to the Turing machine computation of $M$. In the case of $n\notin S$, the machine $M$ with the input $n$ does not halt. This implies that the length of the whole computation history is infinite and therefore the set $\{(n_1,\dots,n_k)\mid c_{n_1,\dots,n_k}\neq 0\}$ is infinite, which results in the norm of $\ket{\Psi}$ being indefinite and hence $\ket{\Psi}$ not being an eigenvector of $A$. In this manner, any eigenvalue of $A$ is limited to a
member~of~$S$.
Note that there are many computation histories of a Turing machine depending on its input. In the proposed quantum mechanical computer, the measurement of $A$ chooses one of the computation histories stochastically and the input corresponding to the computation history is obtained as a measurement outcome. The above analysis shows that Conjecture~\ref{T} is likely to be true.
The main feature of the proposed quantum mechanical computer is that {\em the evolution of computation does not correspond to the time-evolution of the underlying quantum system}. Hence, in contrast with a conventional quantum computer, the evolution of computation does not have to form a unitary time-evolution, so it is not negatively influenced by \textit{decoherence}\footnote{Decoherence, which is induced by the interaction of quantum registers with the external environment, destroys the superposition of states of the quantum registers, which plays an essential role in a conventional quantum computation.}, a serious obstacle to the physical realisation of a conventional quantum computer.
Again, in contrast with a conventional quantum computer, this proposed quantum mechanical computer can be physically realisable even as a solid-state device at room temperature (the lattice vibration of solid crystal, i.e., \emph{phonons}), which strongly interacts with the external environment. A member of $S$ is obtained as a measurement outcome in an instant by measuring the observable $A$. For example, in the case when the observable $A$ is the Hamiltonian of a quantum system, the measurement outcome corresponds to the energy of the system. In this case, we can probabilistically decide---with sufficiently small error probability---whether a given $n\in{\mathbb{N}}$ is in $S$: the quantum system is first prepared in a state $\ket{\Psi}$ such that the expectation value $\braopket{\Psi}{A}{\Psi}$ of the measurement of the energy over $\ket{\Psi}$ is approximately $n$, and then the measurement is actually performed. This computation deciding the membership of $n$ to $S$ terminates in an instant if sufficiently high amount of energy (i.e., around $n$) is pumped.
\vspace*{-2mm}
\section{Quantum proving without giving the proof} \label{QPP}
\vspace*{-2mm}
In Section~\ref{QP} we discussed the quantum provability for a formal system whose theorems can be coded by a set $S$ defined as in \eqref{rangepol}. When an element $m$ is obtained as an outcome of the measurement, we can extract the proof $n_1,\dots,n_k$ which certifies that $m$ is a theorem of the formal system $\mathcal S$, i.e., it satisfies $m=F(n_1,\dots,n_k)$, by performing the second measurement over the state immediately after the first measurement.
Actually, the proof $n_1,\dots,n_k$ may be generated slightly before the theorem $F(n_1,\dots,n_k)$ is obtained, like in the classical scenario. As we saw in Section~\ref{RT}, the measurement of $F(N_1,\dots,N_k)$ can first be performed by simultaneous measurements of the number operators $N_1,\dots,N_k$ to produce the outcomes $n_1,\dots,n_k\in{\mathbb{N}}$; then, the theorem $m=F(n_1,\dots,n_k)$, classically calculated from $n_1,\dots,n_k$, can be regarded as the outcome of the measurement of $F(N_1,\dots,N_k)$ itself.
In general, the set of all theorems of a (recursively axiomatisable) formal system, such as Peano Arithmetic or ZFC, forms a computably enumerable set and not a computable set of the form \eqref{rangepol}. In what follows, we argue the plausibility that, for general formal systems, the proof cannot be obtained immediately after the theorem was obtained via the quantum procedure proposed in the previous section.
Fix a formal system whose theorems form a computably enumerable set. As before we identify a formula with a natural number. Let $M$ be a Turing machine such that, given a formula $F$ as an input, $M$ searches all proofs one by one and halts if $M$ finds the proof of $F$. Assume that Conjecture~\ref{T} holds. Then there exists an observable $A$ of an infinite dimensional quantum system such that $A$ is constructive and the eigenvalue spectrum of $A$ is exactly the set of all provable formulae. Thus, we obtain a provable formula as a measurement outcome each time we perform a measurement of $A$; it is stochastically determined which provable formula is obtained. The probability of getting a specific provable formula $F$ as a measurement outcome depends on the choice of the state $\ket{\Psi}$ on which we perform the measurement of $A$. In some cases the probability can be
very low, and therefore we may be able to get the provable formula $F$ as a measurement outcome only once, even if we repeat the measurement of $A$ on $\ket{\Psi}$ many times.
Suppose that, in this manner, we have performed the measurement of $A$ once and then we have obtained a specific provable formula $F$ as a measurement outcome. Then, where is the proof of $F$? In the quantum mechanical computer discussed in Section~\ref{Cnj}, the computation history of the Turing machine $M$ is encoded in an eigenstate of the observable $A$, hence the proof of $F$ is encoded in the eigenstate of $A$, which is the state of the underlying quantum system immediately after the measurement.
Is it possible to extract the proof of $F$ from this eigenstate? In order to extract the proof of $F$ from this eigenstate, it is necessary to perform an additional measurement on this eigenstate. However, it is impossible to determine the eigenstate in terms of the basis $\{\ket{n_1,\dots,n_k}\}$ completely by a \emph{single} measurement due to the principles of quantum mechanics. In other words, there does not exist a POVM measurement which can determine all the expansion coefficients $\{c_{n_1,\dots,n_k}\}$ of the eigenstate with respect to the basis $\{\ket{n_1,\dots,n_k}\}$ up to a global factor with nonzero probability. This eigenstate is destroyed after the additional measurement and therefore we cannot perform any measurement on it any more. We cannot copy the eigenstate prior to the additional measurement due to the no-cloning theorem (see \cite{BH}); and even if we start again from the measurement of $A$, we may have little chance of getting the same provable formula $F$ as a measurement outcome.
The above analysis suggests that even if we get a certain provable formula $F$ as a measurement outcome through the measurement of $A$ it is very difficult or unlikely to simultaneously obtain the proof of $F$. \footnote{For the formal system $\mathcal{S}$ in Section~\ref{QP}, we can obtain a theorem and its proof simultaneously via measurements since the observable $F(N_1,\dots,N_k)$ whose measurements produce ``theorems'' is a function of the commuting observables $N_1,\dots,N_k$ whose measurements produce ``proofs''. However, this is unlikely to be true for general formal systems.} This argument
suggests that {\em for a general formal system proving that a formula is a theorem
is different from writing up the proof of the formula.} Of course, since
$F$ is provable, there is a proof of $F$, hence the Turing machine $M$ with the input $F$ will eventually produce that proof. However, this classical computation may take a long time in contrast with the fact that---via the measurement of $A$---it took only a moment to know that the formula $F$ is provable.
As mathematicians guess true facts for no apparent reason, we can speculate that human intuition might work as in the above described quantum scenario. As the proposed quantum mechanical computer can operate at room temperature, it may be even possible that a similar quantum mechanical process works in the human brain, thus offering an argument in favour of the quantum mind hypothesis \cite{RPSH}. The argument against this proposition according to which quantum systems in the brain decohere quickly and cannot control brain function (see \cite{MT}) could be less relevant as decoherence plays no role in the quantum computation discussed here.
{\bf Acknowledgement.} We thank Professor K.~Svozil for useful comments.
\vspace*{-2.5mm}
\end{document}
\begin{document}
\title{Bounding the separable rank via polynomial optimization}
\begin{abstract} We investigate questions related to the set $\mathcal{SEP}_d$ consisting of the linear maps $\rho$ acting on $\mathbb C^d\otimes \mathbb C^d$ that can be written as a convex combination of rank one matrices of the form $xx^*\otimes yy^*$. Such maps are known in quantum information theory as the separable bipartite states, while nonseparable states are called entangled. In particular we introduce bounds for the separable rank $\mathrm{rank_{sep}}(\rho)$, defined as the smallest number of rank one states $xx^*\otimes yy^*$ entering the decomposition of a separable state $\rho$. Our approach relies on the moment method and yields a hierarchy of semidefinite-based lower bounds, that converges to a parameter $\tau_{\mathrm{sep}}(\rho)$, a natural convexification of the combinatorial parameter $\mathrm{rank_{sep}}(\rho)$. A distinguishing feature is exploiting the positivity constraint $\rho-xx^*\otimes yy^* \succeq 0$ to impose positivity of a polynomial matrix localizing map, the dual notion of the notion of sum-of-squares polynomial matrices. Our approach extends naturally to the multipartite setting and to the real separable rank, and it permits strengthening some known bounds for the completely positive rank. In addition, we indicate how the moment approach also applies to define hierarchies of semidefinite relaxations for the set $\mathcal{SEP}_d$ and permits to give new proofs, using only tools from moment theory, for convergence results on the DPS hierarchy from (A.C. Doherty, P.A. Parrilo and F.M. Spedalieri. Distinguishing separable and entangled states. Phys. Rev. Lett. 88(18):187904, 2002). \end{abstract}
\section{Introduction}\label{Intro}
The main object of study in this paper is the following matrix cone \begin{equation}\label{SEP}
\mathcal{SEP}_d := \mathrm{cone}\{xx^* \otimes yy^* \colon x\in \mathbb C^d,~y\in \mathbb C^d,~\|x\| = \|y\| = 1 \} \subseteq \mathcal H^d \otimes \mathcal H^d\simeq \mathcal H^{d^2}, \end{equation} sometimes also denoted as $\mathcal{SEP}$ when the dimension $d$ is not important. Throughout $\mathcal H^d$ denotes the cone of complex Hermitian $d\times d$ matrices and $\mathcal H^d_+$ is the subcone of Hermitian positive semidefinite matrices. Matrices in $\mathcal H^d_+$ are also known as {\em unnormalized states} and matrices in $\mathcal H^d_+$ with trace~1 are called {\em normalized states}. The cone $\mathcal{SEP}_d$ is of particular interest in the area of quantum information theory: its elements are known as the {\em (unnormalized, bipartite) separable states} on $\mathcal H^d \otimes \mathcal H^d$ and a positive semidefinite matrix $\rho \in \mathcal H^d \otimes \mathcal H^d$ that does not belong to $\mathcal{SEP}_d$ is said to be \emph{entangled}. Entangled states can be used to observe quantum, non-classical behaviors that may be displayed by two physically separated quantum systems, as already pointed out in the early work \cite{Einstein}. Entanglement is now recognized as an additional important resource that can be used in quantum information processing to carry out a great variety of tasks such as quantum computation, quantum communication, quantum cryptography and teleportation (see, e.g., \cite{Nielsen-Chuang,Watrous} and references therein). Therefore, deciding whether a state is separable or entangled is a question of fundamental interest in quantum information theory. Gurvits~\cite{Gurvits} has shown that the (weak) membership problem for the set $\mathcal{SEP}_d\cap\{\rho: \mathrm{Tr}(\rho)=1\}$ is an NP-hard problem. In addition, the problem was shown to be strongly NP-hard in~\cite{Gharibian10}. Hence it is important to have tractable criteria for separability or entanglement of quantum states. 
Throughout, we restrict for simplicity to the case of bipartite states, acting on two copies of $\mathbb C^d$, but the treatment extends naturally to the case of $m$-partite states that act on $\mathbb C^{d_1}\otimes \ldots \otimes \mathbb C^{d_m}$ with $m\ge 2$ and $d_1,\ldots,d_m$ possibly distinct. We will return below to the question of testing separability, but first we introduce the relevant notion of separable rank, which plays a central role in this paper.
\subsubsection*{The separable rank} In this work we consider the following problem: given a state $\rho \in \mathcal{SEP}_d$, what is the smallest integer $r \in \mathbb{N}$ such that there exist vectors $a_1,\ldots,a_r, b_1,\ldots,b_r \in \mathbb C^d$ for which \begin{equation}\label{eqrho} \rho = \sum_{\ell=1}^r a_\ell a_\ell^* \otimes b_\ell b_\ell^*. \end{equation} This smallest integer $r$ is called the \emph{separable rank} of $\rho$ and denoted as $\mathrm{rank_{sep}}(\rho)$. One sets $\mathrm{rank_{sep}}(\rho)=\infty$ when $\rho$ is entangled. The separable rank has been previously studied, e.g., in~\cite{Uhlmann98,DTT00,Chen_2013a} (where it is called the \emph{optimal ensemble cardinality} or the \emph{length} of $\rho$) and it can be seen as a `complexity measure' of the state (with an infinite rank for entangled states). Easy bounds on the separable rank are $\mathrm{rank}(\rho)\le \mathrm{rank_{sep}}(\rho)\le \mathrm{rank}(\rho)^2$, where the left most inequality can be strict (see \cite{DTT00}) and the right most one follows using Caratheodory's theorem \cite{Uhlmann98}. We approach the problem of determining the separable rank from the moment perspective. We use the observation that, if $\mathrm{rank_{sep}}(\rho)=r$ and $\rho$ admits the decomposition (\ref{eqrho}), then
the sum of the $r$ atomic measures at the vectors $(a_\ell, b_\ell)\in \mathbb C^d\times \mathbb C^d$ is a measure $\mu$ whose expectation $\int 1d\mu$ is equal to $r$ and whose fourth-degree moments correspond to the entries of $\rho$. Moreover, as we will see later, this measure may be assumed to be supported on the semi-algebraic set \begin{equation}\label{eqscaling}
{\mathcal V}_\rho= \{(x,y) \in \mathbb C^d\times \mathbb C^d : \|x\|_\infty^2,\|y\|_\infty^2\leq \sqrt{\rho_{\max}},\ xx^* \otimes yy^* \preceq \rho\}, \end{equation} where $\rho_{\max}$ denotes the largest diagonal entry of $\rho$. Then we obtain a lower bound on the separable rank of $\rho$, denoted $\tau_\mathrm{sep}(\rho)$, by minimizing the expectation $\int 1 \, d\mu$ over all measures $\mu$ that are supported on ${\mathcal V}_\rho$ and have fourth-degree moments corresponding to entries of $\rho$ (see~\cref{eq:tausep}). Hence, here we view the separable rank as a moment problem over the product of two balls. This view will enable us to design a hierarchy of tractable semidefinite based parameters, denoted $\xidsep{t}$. These parameters provide lower bounds on the separable rank and converge to $\tau_\mathrm{sep}(\rho)$ (see Section~\ref{sec: poly sep}).
In view of the definition of $\mathcal{SEP}_d$ in \cref{SEP}, one may also view separability of a state $\rho$ as a moment problem on the bi-sphere ${\mathbb S}^{d-1}\times {\mathbb S}^{d-1}$, where ${\mathbb S}^{d-1}=\{x\in\mathbb C^d: \|x\|=1\}$ denotes the (complex) unit sphere. However, this approach does {\em not} (straightforwardly) lead to bounds on the separable rank. Indeed, for a measure $\mu$ on the bi-sphere whose fourth-degree moments correspond to entries of $\rho$, we necessarily have~$\int 1 \, d\mu = \mathrm{Tr}(\rho)$. To get bounds on the separable rank, it is thus {crucial} to use another scaling for the points $(a_\ell,b_\ell)$ entering a separable decomposition of $\rho$ as, for instance, the scaling used in \cref{eqscaling}, but other scalings are possible as indicated in Section \ref{sec:tausep}.
Our approach extends to several other settings, in particular, to the case of multipartite separable states (when $\rho$ acts on the tensor product of more than two spaces) and to the case of real states (instead of complex valued ones). It can also be adapted to the notion of \emph{mixed separable rank}, where one tries to find factorizations of the form $\rho = \sum_{\ell=1}^r A_\ell \otimes B_\ell$ with $A_\ell, B_\ell$ Hermitian positive semidefinite matrices and $r$ as small as possible. In~\cite{de_las_Cuevas_2020} it was shown that, if $\rho$ is a diagonal matrix, then its mixed separable rank is equal to the nonnegative rank of the associated $d \times d$ matrix consisting of the diagonal entries of $\rho$. Vavasis \cite{Vav09} has shown that computing the nonnegative rank of a matrix is an NP-hard problem and, more recently, Shitov~\cite{Shi16b} showed $\exists\mathbb{R}$-hardness of this problem. Hence computing the mixed separable rank has the same hardness complexity status as the nonnegative rank. Determining the complexity status of the separable rank remains open, but there is no reason to expect that it should be any easier than the mixed separable rank.
When using moment methods one typically works with measures supported on semi-algebraic sets, i.e., sets described by polynomial inequalities on the variables. In our approach this is also the case. Indeed the set ${\mathcal V}_\rho$ in \cref{eqscaling} is semi-algebraic since one can encode the condition $xx^* \otimes yy^* \preceq \rho$ by requiring all principal minors of $\rho-xx^* \otimes yy^*$ to be nonnegative. This would however lead to a description of the set ${\mathcal V}_\rho$ with a number of polynomial constraints that is exponential in $d$. Instead, we will directly exploit the constraint $\rho- xx^* \otimes yy^*\succeq 0$, which is of the form $G(x) \succeq 0$ for some polynomial matrix $G(x)$ (i.e., with entries polynomials in $x,\overline x$). This constraint enables us to impose positivity constraints on polynomial matrix localizing maps, a matrix analog of the usual scalar localizing maps used in the moment method (see Section \ref{sec:matrix-setting}). Such polynomial matrix localizing constraints can also be used to bound the completely positive rank of a completely positive matrix, and we will show that this permits to strengthen some known bounds on the completely positive rank from~\cite{GdLL17a} (see \cref{sec:CP}).
Our hierarchy of bounds $\xidsep{t}$ on the separable rank can also be used to detect entanglement. Indeed, as mentioned above, by Caratheodory's theorem, the separable rank of a state $\rho \in \mathcal{SEP}_d$ can be upper bounded, e.g., by $\mathrm{rank}(\rho)^2\le d^4$. We can leverage this fact and the asymptotic convergence of our hierarchy of lower bounds to detect entanglement: a state $\rho\in\mathcal H^d\otimes \mathcal H^d$ is entangled if and only if $\xidsep{t}>\mathrm{rank}(\rho)^2$ for some integer $t\ge 1$, i.e., there is a level of our hierarchy which is infeasible or provides a lower bound on $\mathrm{rank_{sep}}(\rho)$ which is strictly larger than Caratheodory's bound. In addition, a certificate of entanglement is then provided by the dual semidefinite program. Hence our hierarchy of semidefinite parameters $\xidsep{t}$ can also be used to provide a type of entanglement witnesses (see \cref{sec:mem}).
\subsubsection*{The Doherty-Parrilo-Spedalieri (DPS) hierarchy for $\mathcal{SEP}$} As mentioned above a fundamental problem in quantum information theory is to have efficient criteria for checking separability or entanglement of quantum states. A second main contribution of our work concerns a hierarchy of outer approximations to the set $\mathcal{SEP}$ that we describe now. Doherty, Parrilo, and Spedalieri~\cite{DPS04} designed what is now known as the \emph{DPS hierarchy}, a hierarchy of outer approximations $\mathcal{DPS}_{1,t}$ ($t \geq 1$) for the set $\mathcal{SEP}$. It is based on the principle of {\em state extension}: if $\rho:=\sum_\ell \lambda_\ell x_\ell x_\ell^* \otimes y_\ell y^*_\ell\in\mathcal{SEP}_d$ with $\lambda_\ell\ge 0$ then, for any integer $t\ge 1$, $\rho$ admits an extension $\rho_{1,t} := \sum_\ell \lambda_\ell x_\ell x_\ell^* \otimes (y_\ell y^*_\ell)^{\otimes t}$ acting on $\mathbb C^d\otimes (\mathbb C^d)^{\otimes t}$. The state $\rho$ can be recovered from its extension $\rho_{1,t}$ by tracing out $t-1$ of the copies of the second space and the extension $\rho_{1,t}$ satisfies several natural conditions such as symmetry (under permuting the $t$ copies of the second register) and the so-called {\em positive partial transpose (PPT)} criterion from \cite{Horodecki_1996} (which states that taking the transpose of some of the copies preserves positive semidefiniteness). The relaxation $\mathcal{DPS}_{1,t}$ consists of those $\rho$ for which a state $\rho_{1,t}$ exists satisfying these necessary conditions. Here the state extension is one-sided (since one extends only in the $y$-direction); the two-sided analog (in both $x$- and $y$-directions) has also been considered, leading to the hierarchy $\mathcal{DPS}_{t,t}\subseteq \mathcal{DPS}_{1,t}$ (see Section~\ref{sec: state extension perspective} for details). 
For fixed $t$, deciding membership in $\mathcal{DPS}_{1,t}$ (or $\mathcal{DPS}_{t,t}$) boils down to testing feasibility of a semidefinite program of size polynomial in $d$. The DPS hierarchy is {\em complete}, in the sense that we have equality: $\bigcap_{t\ge 1}\mathcal{DPS}_{1,t}=\mathcal{SEP}_d$ \cite{DPS04}.
One can also interpret the set $\mathcal{SEP}$ in the language of moments of distributions on the bi-sphere: $\rho$ is separable if there exists an atomic measure on the bi-sphere whose fourth-degree moments agree with $\rho$ (see, e.g., \cite{dressler2020separability,HNW17,Li_2020}). Another main contribution in this paper will be to make the links between this moment approach and the DPS hierarchy more apparent. These links enable us to give an alternative proof of completeness for the DPS hierarchy that is based on the theory of positive-operator valued measures. In contrast, existing proofs rely on other tools such as quantum de Finetti theorems or sums of squares. Indeed one can also design approximation hierarchies for $\mathcal{SEP}_d$, starting from its definition in \cref{SEP} and applying the moment approach to the bi-sphere ${\mathbb S}^{d-1}\times {\mathbb S}^{d-1}$. Depending on the degrees that are allowed in the $x,\overline x$ variables and the $y,\overline y$ variables, this leads to several possible variants of relaxations for $\mathcal{SEP}_d$ that we explore in Section~\ref{sec:momentDPS}, denoted there as ${\mathcal{R}}_{t}$ (when the full degree is at most $2t$), ${\mathcal{R}}_{t,t}$ (when the degree in $x,\overline x$ is at most $2t$ and the same for the degree in $y,\overline y$) and ${\mathcal{R}}_{1,t}$ (when the degree in $x,\overline x$ is at most 2 and the degree in $y,\overline y$ is at most $2t$). We provide a convergence proof for each of these hierarchies (i.e., show their completeness) using tools from the moment method (i.e., existence of an atomic representing measure under certain positivity conditions), which we apply to the setting of matrix polynomials for the hierarchy ${\mathcal{R}}_{1,t}$ (see \cref{sec:convergenceR1t}). In addition we show that the hierarchy ${\mathcal{R}}_{1,t}$ (resp., ${\mathcal{R}}_{t,t}$) coincides with the DPS hierarchy $\mathcal{DPS}_{1,t}$ (resp., $\mathcal{DPS}_{t,t}$). 
Therefore we offer a new convergence proof for the DPS hierarchy that is based on the moment method.
\subsubsection*{Related literature on approximation hierarchies for $\mathcal{SEP}$}
There is a vast literature about the set $\mathcal{SEP}$ of separable states and approximations thereof (such as the DPS hierarchy), so we only mention here some of the results that are most relevant to this paper. The PPT criterion, introduced in \cite{Peres1996,Horodecki_1996}, is a necessary condition for separability. While it was shown to be sufficient to ensure separability of bipartite states acting on $\mathbb C^2\otimes \mathbb C^3$ \cite{WORONOWICZ1976165}, it is in general not sufficient for separability of states acting on larger dimensional spaces (see, e.g., \cite{Horodecki_1997,WORONOWICZ1976165}). In fact it has been shown that no semidefinite representation exists for $\mathcal{SEP}_d$ when $d\ge 3$ \cite{Fawzi2021}. As mentioned above, the authors in \cite{DPS04} use symmetric state extensions and the PPT conditions to define the hierarchy $\mathcal{DPS}_{1,t}$ ($t \geq 1$). They show it to be complete (i.e., $\cap_{t \geq 1} \mathcal{DPS}_{1,t} = \mathcal{SEP}$) using the quantum de Finetti theorem from \cite{Caves-Fuchs-Schack} (note that this completeness proof in fact does not use the PPT conditions).
Navascues, Owari and Plenio \cite{NOP09} show a quantitative result on the convergence of the sets $\mathcal{DPS}_{1,t}$ to $\mathcal{SEP}_d$. Consider $\rho\in \mathcal{DPS}_{1,t}$, whose membership is certified by the extended state $\rho_{1,t}$ acting on $\mathbb C^d\otimes (\mathbb C^d)^{\otimes t}$, and let $\rho_1\in \mathcal H^d$ be obtained by tracing out the part of $\rho_{1,t}$ that acts on $(\mathbb C^d)^{\otimes t}$; then $\rho_1\otimes I_d$ is clearly separable. In \cite{NOP09} it is shown that \begin{equation}\label{eqrhot} \tilde \rho:= (1-\epsilon)\rho+ \epsilon \big(\rho_1\otimes {I_d\over d}\big)\in \mathcal{SEP}_d \ \ \text{ where } \epsilon =O\big(\big({d\over t}\big)^2\big); \end{equation} that is, by moving $\rho$ in the direction of $\rho_1\otimes I_d/d$ by $\epsilon= O\big(\big({d\over t}\big)^2\big)$, one finds a separable state.
{An \emph{entanglement witness} for a state $\rho$ is any certificate that certifies $\rho \not \in \mathcal{SEP}_d$.
One way to obtain such an entanglement witness is to exhibit one of the constraints defining a relaxation of $\mathcal{SEP}_d$ (such as $\mathcal{DPS}_{1,t}$) that is violated by $\rho$, like for example, one of the PPT conditions. More generally one can obtain an entanglement witness for $\rho\not\in\mathcal{SEP}_d$ by finding a hyperplane separating $\rho$ and $\mathcal{SEP}_d$, i.e., a matrix $W \in \mathcal H^d \otimes \mathcal H^d$ such that \begin{equation}\label{eqhsep} \mathrm{Tr}(W\rho) > h_{\mathcal{SEP}}(W):=\max \{ \mathrm{Tr}(W \sigma)\colon \sigma \in \mathcal{SEP}_d\}, \end{equation}
which shows again the importance of linear optimization over the set $\mathcal{SEP}_d$ and of designing tractable relaxations for $\mathcal{SEP}_d$.} The function $h_{\mathcal{SEP}}(W)$ in \cref{eqhsep} is known as the {\em support function} of $\mathcal{SEP}_d$ in the direction $W$. Analogously define the support function of $\mathcal{DPS}_{1,t}$ as $$h_{\mathcal{DPS}_{1,t}}(W):=\max \{\mathrm{Tr}(W\rho): \rho\in\mathcal{DPS}_{1,t}\}. $$ As an application of the quantitative result in \cref{eqrhot} the following is shown in \cite{NOP09}: \begin{equation}\label{eqhsepdps} h_{\mathcal{SEP}}(W)\le h_{\mathcal{DPS}_{1,t}}(W) \le \big(1+O\big(\Big({d\over t}\Big)^2\big)\big) h_{\mathcal{SEP}}(W). \end{equation} Clearly, either \cref{eqrhot} or \cref{eqhsepdps} implies equality $\bigcap_{t\ge 1}\mathcal{DPS}_{1,t}= \mathcal{SEP}_d$, i.e., completeness of the DPS hierarchy.
Fang and Fawzi \cite{FangFawzi} investigate the DPS hierarchy from the dual sum-of-squares perspective. In particular, they show a representation result for matrix polynomials that are nonnegative on the sphere, which they use to give an alternative proof for \cref{eqhsepdps}. Namely, they show that, if $F$ is a polynomial matrix in $d$ variables and degree $2k$ such that $0\preceq F(x)\preceq I$ on ${\mathbb S}^{d-1}$ then, for all $t\ge C_k d$,
$F(x)+ C'_k\big({d\over t}\big)^2 I$ is a Hermitian sum-of-squares matrix polynomial of degree $2t$ on ${\mathbb S}^{d-1}$, where $C_k,C'_k$ are constants depending only on $k$. In addition, detailed proofs are given in \cite{FangFawzi} for the description of the dual cones of the cones $\mathcal{DPS}_{1,t}$:
while the dual cone of $\mathcal{SEP}_d$ consists of the matrices $W$ for which the polynomial $p_W:=\langle W, xx^*\otimes yy^*\rangle$ is nonnegative on the bi-sphere ${\mathbb S}^{d-1}\times {\mathbb S}^{d-1}$, the dual cone of $\mathcal{DPS}_{1,t}$ consists of the $W$'s for which the polynomial $\|y\|^{2(t-1)}p_W$ is a sum of Hermitian squares.
For the problem of approximating the support function $h_{\mathcal{SEP}}(W)$, Harrow, Natarajan and Wu \cite{HNW17} propose to strengthen the set $\mathcal{DPS}_{1,t}$ by adding equality constraints arising from the classical optimality conditions. In this way they obtain a hierarchy of bounds for $h_{\mathcal{SEP}}(W)$, stronger than $h_{\mathcal{DPS}_{1,t}}(W)$, that converges in finitely many steps to $h_{\mathcal{SEP}}(W)$.
Li and Ni \cite{Li_2020} use the moment approach on the bi-sphere for testing separability of a state $\rho$ (in the general multipartite setting). For this, given a generic sum-of-squares polynomial $F$, they consider the problem of minimizing the expectation $\int Fd\mu$ over the probability measures $\mu$ on the bi-sphere whose degree-4 moments correspond to the entries of $\rho$, and the corresponding moment relaxations (whose constraints are essentially those in the program defining the set ${\mathcal{R}}_{t}$ in \cref{eq: dpst}). Then a separability certificate can be obtained at a finite relaxation level when the optimal solution satisfies the so-called flatness condition. Note that the separability problem only asks for the existence of such a measure $\mu$, thus, it is a feasibility problem. The optimization approach in \cite{Li_2020}, based on optimizing a generic polynomial $F$, relies on the fact that this `encourages' flatness of an optimal solution (which then permits to get a separable decomposition and thus a certificate of separability). Indeed Nie~\cite{Nie14} shows that if both the objective and constraints of a polynomial optimization problem are generic, then flatness occurs at some finite relaxation level. Dressler, Nie, and Yang~\cite{dressler2020separability} strengthen the approach in \cite{Li_2020}: they use a symmetry argument which permits to replace the bi-sphere by its subset consisting of the points $(x,y)\in \mathbb C^d\times \mathbb C^d$ that have $x_1,y_1$ real and nonnegative. This provides a formulation that uses less real variables ($2(2d-1)$ instead of $4d$) and leads to stronger and more economical moment relaxations. Separability of real states is considered in \cite{NieZhang2016}, where a similar reduction is applied, namely by restricting to the vectors $(x,y)$ in the (real) bi-sphere satisfying $\sum_{i=1}^d x_i\ge 0$ and $\sum_{i=1}^d y_i\ge 0$.
\subsubsection*{Related literature on factorization ranks} Various notions of ``factorization ranks'' have been studied extensively in the literature such as (versions of) tensor ranks \cite{Kolda}, nonnegative matrix factorization (NMF) rank \cite{Gillis}, positive semidefinite matrix factorization rank \cite{FGPRT}, completely positive matrix factorization rank \cite{BSM03}; we refer to these references and further references therein for details. Given the importance of factorizations for applications, designing algorithmic methods for finding a factorization of a given type (when it exists) is a topic of ongoing research (see, e.g., \cite{Gillis,DSSV,Sponsel-Dur} and references therein). The above mentioned factorization ranks are often hard to compute (see \cite{Vav09,Shi16,Shi16b} for nonnegative rank, \cite{Shi17} for positive semidefinite rank, \cite{Hastad90} for tensor rank), which motivates the search for good bounds for a given factorization rank. Such bounds can be obtained using a variety of techniques. For example, using dedicated combinatorial methods (see, e.g., \cite{FGPRT} and references therein), optimization methods (see, e.g., \cite{FP16}), or using a moment-based approach as we do here. A moment-based approach has previously been used to derive hierarchies of bounds for the rank of tensors~\cite{TS15}, for the symmetric nuclear norm of tensors~\cite{Nie17}, for the nonnegative rank, the completely positive rank, the positive semidefinite rank, and the completely positive semidefinite rank of matrices~\cite{GdLL17a}. In this paper, we consider the separable rank, a notion which has been present in the (quantum information theory) literature, although no systematic study of bounds for it has been carried out so far to the best of our knowledge.
\subsubsection*{Contents of the paper} The paper is organized as follows. In Section \ref{sec:preliminaries} we introduce the preliminaries on polynomial optimization that we will need in the rest of the paper. In particular, in Section \ref{sec:matrix-setting}, we introduce some of the main notions in the general setting of sum-of-squares matrix polynomials and matrix-valued linear maps. In Section \ref{sec:moment} we recall the moment method and present the main underlying results from real algebraic geometry and moment theory. Since some of these results are presented in the literature in the real setting while we need the complex setting, we give arguments on how to extend the results from real to complex in Appendix \ref{sec:AppA}. Section~\ref{sec: LB} is devoted to the new hierarchy of bounds for the separable rank. In Section \ref{sec:extension} we indicate several extensions of our approach, in particular for the real separable rank of real states and for getting improved bounds on the completely positive rank. We also present numerical results on examples to illustrate the behavior of the bounds in Section \ref{secnumerics}. Finally, in Section \ref{secDPS} we revisit the Doherty-Parrilo-Spedalieri hierarchy of relaxations for the set $\mathcal{SEP}$ of separable states. In particular, we provide a new, alternative proof for their completeness, that uses the tools from the moment approach previously developed.
\section{Preliminaries on polynomial optimization}\label{sec:preliminaries}
In this section, we group some preliminaries about polynomial optimization that we need in the rest of the paper; for a general reference we refer, e.g., to \cite{Las2001,Las2009,Laurent2009} and further references therein. We will deal with polynomial optimization in real and complex variables, which is the setting needed for the application to the set of separable states and the separable rank treated in this paper, and we will also need to deal with polynomial matrices and matrix-valued linear maps.
\subsection{Polynomials, linear functionals and moment matrices}\label{secNotation} We first fix some notation that we use throughout the paper.
$\mathbb{N}$ denotes the set of nonnegative integers. We set $[n]=\{1,2,...,n\}$ for an integer $n\ge 1$, $[k,n]=\{k,k+1,\ldots,n-1,n\}$ for integers $k\le n$, and $|\alpha| = \sum_{i = 1}^n \alpha_i$ for $\alpha\in \mathbb{N}^n$.
For a complex matrix $X$ we denote its transpose by $X^T$ and its conjugate transpose by $X^*$. For a scalar $a \in \mathbb C$ its conjugate is $a^*=\overline a$ and its modulus is $|a| = \sqrt{a^*a}$. The vector space $\mathbb C^n$ is equipped with the scalar product $\inip{x,y} = x^*y=\sum_{i = 1}^{n} x_i^*y_i$ for $x,y\in \mathbb C^n$
and the Euclidean norm of $x\in\mathbb C^n$ is $\|x\| = \sqrt{x^*x}$. Analogously, $\mathbb C^{n\times n}$ is equipped with the trace inner product
$\langle X,Y\rangle=\text{Tr}(X^*Y)=\sum_{i,j=1}^n \overline X_{ij}Y_{ij}$ and $\|X\|=\sqrt{\langle X,X\rangle}$ for $X\in \mathbb C^{n\times n}$. A matrix $X\in \mathbb C^{n\times n}$ is called Hermitian if $X^*=X$ and we let $\mathcal{H}^{n}$ denote the space of complex Hermitian $n \times n$ matrices. A matrix $X \in \mathcal{H}^{n} $ is positive semidefinite (denoted $X \succeq 0$) if $v^* X v \geq 0$ for all $v \in \mathbb C^n$. We let $\mathcal{H}^n_+$ denote the cone of Hermitian positive semidefinite matrices.
For a set $S$ in a vector space, we let $\mathrm{cone}(S)$ and $\mathrm{conv}(S)$ denote, respectively, its conic hull and its convex hull.
\paragraph{Polynomials.} We consider polynomials in $n$ complex variables $x_1,\ldots, x_n$ and their conjugates $\overline{x_1},\ldots,\overline{x_n}$. For $\alpha,\beta \in \mathbb{N}^n$ we use the short-hand $\mathbf{x}^\alpha \qo{\mathbf{x}}^\beta$ to denote the monomial \[ \mathbf{x}^\alpha \qo{\mathbf{x}}^\beta = \prod_{i=1}^n x_i^{\alpha_i} \prod_{j=1}^n \overline{x_j}^{\beta_j}. \]
The degree of this monomial, denoted by $\deg(\mathbf{x}^\alpha \qo{\mathbf{x}}^\beta)$, is equal to $|\alpha|+|\beta| = \sum_{i=1}^n \alpha_i + \beta_i$. We collect the set of all monomials of degree at most $t \in \mathbb{N}\cup \{\infty\}$ in the vector $[\bx,\bbx]_t$ (using some given ordering of the monomials) and also set $[\mathbf{x},\qo{\mathbf{x}}]=[\mathbf{x},\qo{\mathbf{x}}]_\infty$. We interpret $[\bx,\bbx]_t$ as a set when we write $\mathbf{x}^\alpha \qo{\mathbf{x}}^\beta \in [\bx,\bbx]_t$. Taking the complex linear span of all monomials in $[\bx,\bbx]_t$ gives the space of polynomials with complex coefficients and degree at most $t$: \[
\C\cxbx_t := \Span{m ~|~ m \in [\bx,\bbx]_t}=\Big\{\sum_{m\in [\mathbf{x},\qo{\mathbf{x}}]_t}a_m m: a_m\in \mathbb C\Big\}. \] For $t=\infty$ we obtain the full polynomial ring in $\mathbf{x},\qo{\mathbf{x}}$ over $\mathbb C$, also denoted as $\mathbb C[\mathbf{x},\qo{\mathbf{x}}]$. So any polynomial $p\in \mathbb C[\mathbf{x},\qo{\mathbf{x}}]$ is of the form $p=\sum_{\alpha,\beta}p_{\alpha,\beta} \mathbf{x}^\alpha\qo{\mathbf{x}}^\beta$, where only finitely many coefficients $p_{\alpha,\beta}$ are nonzero; its {\em degree}
is the maximum degree of the monomials occurring in $p$ with a nonzero coefficient, i.e., $\deg(p) = \max_{p_{\alpha,\beta} \neq 0} \deg(\mathbf{x}^\alpha \qo{\mathbf{x}}^\beta)$. For convenience let $\mathbb C^{\mathbb{N}^n\times \mathbb{N}^n}_0$ denote the set of vectors $\mathbf{a}=(a_{\alpha,\beta})_{(\alpha,\beta)\in\mathbb{N}^n\times \mathbb{N}^n} $ that have only finitely many nonzero entries. Then any polynomial $p$ can be written as $p=\mathbf{a}^*[\mathbf{x},\qo{\mathbf{x}}]$, where we set $\mathbf{a} =( \overline p_{\alpha,\beta})\in \mathbb C^{\mathbb{N}^n\times \mathbb{N}^n}_0$ (the conjugate of the vector of coefficients of $p$).
Conjugation on complex variables extends linearly to polynomials: for $p = \sum_{\alpha,\beta} p_{\alpha,\beta} \mathbf{x}^\alpha \qo{\mathbf{x}}^\beta$ we define its conjugate polynomial $\overline p = \sum_{\alpha,\beta} \overline{p}_{\alpha,\beta} \qo{\mathbf{x}}^\alpha \mathbf{x}^\beta$. Then, $p$ is called {\em Hermitian} if $p = \overline p$. Hermitian polynomials only take real values: $p(x)\in \mathbb{R}$ for all $x\in \mathbb C^n$. We denote the space of Hermitian polynomials by $\Ccxbx^h$. For instance, the polynomial $p=x+\overline{x}$ is Hermitian as well as $p=\mathbf{i} x-\mathbf{i} \overline{x}$, but $q=x-\overline{x}$ is not Hermitian (note $q(\mathbf{i})=2\mathbf{i}\not\in \mathbb{R}$), where $\mathbf{i}=\sqrt{-1}\in\mathbb C$.
To capture positivity on the ring of polynomials, we work with the cone of Hermitian sums of squares.
Any polynomial of the form $q \overline q$ (for some $q\in \mathbb C[\mathbf{x},\qo{\mathbf{x}}]$) is called a {\em Hermitian square} and $\Sigma[\mathbf{x},\qo{\mathbf{x}}]$ (or simply $\Sigma$) denotes the conic hull of Hermitian squares. For any integer $t\in\mathbb{N}$ we let $\Sigma[\mathbf{x},\qo{\mathbf{x}}]_{2t} = \mathrm{cone}\{p\qo{p} ~|~ p \in [\bx,\bbx]_t \}=\Sigma[\mathbf{x},\qo{\mathbf{x}}]\cap \mathbb C[\mathbf{x},\qo{\mathbf{x}}]_{2t}$ (or simply $\Sigma_{2t}$) denote the cone of Hermitian sums of squares with degree at most $2t$.
\paragraph{The dual space of polynomials.} The algebraic dual of the ring of polynomials $\C\cxbx$ is the vector space of all linear functionals on $\C\cxbx$. To clarify, a linear functional $L$ on $\C\cxbx$ is a linear map from $\C\cxbx$ to $\mathbb C$. For every $t\in \mathbb{N} \cup \{\infty \}$ we denote the dual space of $\C\cxbx_t$ by $\C\cxbxs_t$, defined as \[ \C\cxbxs_t = \{ L:\C\cxbx_t \rightarrow \mathbb C: L~\text{is~linear}\}. \] We again abbreviate $\C\cxbxs_{\infty}$ by $\C\cxbxs$. A linear functional $L \in \C\cxbxs_t$ is called \emph{Hermitian} if $L(\overline p) = \overline{L(p)}$ for all $p \in \C\cxbx_t$. A (Hermitian) linear functional $L\in \mathbb C[\mathbf{x},\qo{\mathbf{x}}]_{2t}^*$ is called \emph{positive} if it maps Hermitian squares to nonnegative real numbers, i.e., if $L(p\overline p) \geq 0$ for all $p \in \C\cxbx_t$.
\paragraph{Example of linear functionals.} For any $a \in \mathbb C^{n}$ we can define the \emph{evaluation functional at $a$}, denoted $L_{a} \in \C\cxbxs$, by \[ L_{a}(p) = p(a) ~\text{for~every}~p \in \C\cxbx. \] It is easy to see that $L_a$ is Hermitian and positive.
\paragraph{Linear functionals applied to polynomial matrices.} It will also be useful to apply linear functionals to polynomial matrices, i.e., matrices whose entries are polynomials, by considering an entrywise action. That is, for a polynomial matrix $G=(G_{ij})_{i,j=1}^m \in \C\cxbx^{m \times m}$ and a linear functional $L \in \C\cxbxs$ we define \[ L(G) := \Big( L(G_{ij}) \Big)_{i,j \in [m]}\in \mathbb C^{m\times m}. \]
\paragraph{Moment matrices.}\label{MomentMat} As an example, applying a linear functional to the (infinite) matrix $[\bx,\bbx] [\bx,\bbx]^*$ leads to the notion of moment matrix. Given $L \in \C\cxbxs_{2t}$, where $t \in \mathbb{N} \cup \{\infty\}$, we define the {\em moment matrix} of $L$ by \begin{equation}\label{eqMtL}
M_t(L) := L( [\bx,\bbx]_t [\bx,\bbx]^*_t) = \big( L(m \overline{m'}) \big)_{m,m' \in [\bx,\bbx]_t } . \end{equation} If $t$ is finite then the moment matrix is said to be {\em truncated} at {\em order} $t$. Note that $L$ is Hermitian if and only if its moment matrix $M_t(L)$ is Hermitian. Similarly, $L$ is positive if and only if its moment matrix $M_t(L)$ is positive semidefinite: \begin{equation}\label{eq_pos_L_ML} L(p\overline p)\ge 0 \ \forall p\in \mathbb C[\mathbf{x},\qo{\mathbf{x}}]_t\ \Longleftrightarrow \ M_t(L)\succeq 0. \end{equation}
Indeed, for any $p\in \mathbb C[\mathbf{x},\qo{\mathbf{x}}]_t$, written as $p = \mathbf{a}^* [\bx,\bbx]_t \in \C\cxbx$ with $\mathbf{a}\in \mathbb C^{\mathbb{N}^n\times \mathbb{N}^n}_0$, we have $\overline p=[\mathbf{x},\qo{\mathbf{x}}]_t^*\mathbf{a}$ and thus \begin{equation}\label{eq_pos_herm}
L( p \overline p) = L(\mathbf{a}^*[\bx,\bbx]_t [\bx,\bbx]_t^* \mathbf{a}) = \mathbf{a}^*L([\bx,\bbx]_t [\bx,\bbx]^*_t)\mathbf{a} = \mathbf{a}^*M_t(L)\mathbf{a}. \end{equation} More generally, if $p=\mathbf{a}^*[\mathbf{x},\qo{\mathbf{x}}]_t$ and $q=\mathbf{b}^*[\mathbf{x},\qo{\mathbf{x}}]_t$ with $\mathbf{a},\mathbf{b}\in \mathbb C^{\mathbb{N}^n\times \mathbb{N}^n}_0$, then $L(p\overline q)=\mathbf{a}^*M_t(L)\mathbf{b}.$
If $t = \infty$ we write $M(L)$ instead of $ M_{\infty}(L)$.
Observe that the moment matrix of an evaluation functional $L_a$ at $a\in \mathbb C^n$ satisfies $M_t(L_a)=[a,\overline a]_t[a,\overline a]_t^*$ for all $t$ and thus it has rank 1. Hence, if $L$ is a linear combination of evaluation functionals, then its moment matrix has finite rank.
\paragraph{Polynomial localizing maps $gL$.}
Given a polynomial $g \in \C\cxbx$ and a linear functional $L \in \C\cxbxs$ we can define a new linear functional $gL \in \C\cxbxs$ by \begin{align*} gL: \C\cxbx &\to \mathbb C \\ p &\mapsto L(gp). \end{align*} In this way, we can say that $g$ acts on $\C\cxbxs$ by mapping $L$ to $gL$. Constraints are often phrased in terms of the positivity of $gL$. As stated before, positivity of $gL$ can be characterized by positive semidefiniteness of its moment matrix: \begin{equation}\label{eqgLpos} gL \ \text{ is positive } \Longleftrightarrow \ L(g \cdot [\bx,\bbx] [\bx,\bbx]^* ) = M(gL) \succeq 0. \end{equation}
If both $g$ and $L$ are Hermitian then $gL$ is Hermitian and hence $M(gL)$ is Hermitian. If $L$ is an evaluation map at a point $a\in\mathbb C^n$ for which $g(a) \geq 0$, then $gL$ is a positive map since we have $(gL)(p\overline p) = g(a)|p(a)|^2\ge 0$. In the literature $M(gL)$ is often called a {\em localizing moment matrix}.
\subsection{SoS-polynomial matrices and matrix-valued linear maps}\label{sec:matrix-setting}
There is a natural extension of the previously defined concepts to the matrix-valued setting. This extension will be useful, in particular, to define a matrix analog of localizing moment constraints and to provide a moment approach to the hierarchy by Doherty, Parrilo and Spedalieri \cite{DPS04}.
\paragraph{SoS-polynomial matrices.} A polynomial matrix $S\in \mathbb C[\mathbf{x},\qo{\mathbf{x}}]^{m\times m}$ is called an {\em SoS-polynomial matrix} if $S=U U^*$ for some polynomial matrix $U\in \mathbb C[\mathbf{x},\qo{\mathbf{x}}]^{m\times k}$ and some integer $k\in \mathbb{N}$, or, equivalently, if $S\in \mathrm{cone}\{ \avec{p}\avec{p}^*: \avec{p} = (p_1,\ldots, p_m) \in \mathbb C[\bx,\bbx]^m\}$.
\paragraph{Matrix-valued linear functionals.} Consider a matrix-valued linear functional \begin{align*}
\mathcal L: \C\cxbx &\to \mathbb C^{m \times m} \\ p &\mapsto \mathcal L(p)=\big( L_{ij}(p) \big)_{i,j \in [m]}, \end{align*} where $\mathcal L=(L_{ij})_{i,j=1}^m$ and each $L_{ij}\in \mathbb C[\mathbf{x},\qo{\mathbf{x}}]^*$ is a scalar-valued linear functional. Then $\mathcal L$ is {\em Hermitian} if $\mathcal L(\overline p)=\mathcal L(p)^*$, i.e., $L_{ij}(\overline p) =\overline {L_{ji}(p)}$ for all $i,j\in [m]$, for all $p\in \mathbb C[\mathbf{x},\qo{\mathbf{x}}]$. In addition $\mathcal L$ is said to be {\em positive} if it maps positive elements (i.e., Hermitian squares $p\overline p $) to positive elements (i.e., Hermitian positive semidefinite $m\times m$ matrices), i.e., if the following holds: \begin{equation}\label{eqGLpositive} \mathcal L(p\overline p) = (L_{ij}(p\overline p))_{i,j=1}^m \succeq 0 \text{ for all } p \in \C\cxbx. \end{equation} In analogy to \cref{eqMtL} it is natural to define the {\em moment matrix} $M(\mathcal L)$ as \begin{equation}\label{eqMML} M( \mathcal L) := \mathcal L( [\bx,\bbx] [\bx,\bbx]^*)= (L_{ij}( [\bx,\bbx] [\bx,\bbx]^*))_{i,j=1}^m=(M(L_{ij}))_{i,j=1}^m, \end{equation} which thus acts on $\mathbb C^m\otimes \mathbb C[\mathbf{x},\qo{\mathbf{x}}]$. Clearly, $M(\mathcal L)$ is a Hermitian matrix if $\mathcal L$ is Hermitian. Note that $M(\mathcal L)$ can be viewed as an $m\times m$ block-matrix whose $(i,j)$th block is the moment matrix $M(L_{ij})$.
When $\mathcal L$ acts on a truncated polynomial space $\mathbb C[\mathbf{x},\qo{\mathbf{x}}]_{2t}$ its moment matrix $M_t(\mathcal L)$, {\em truncated at order $t$}, is defined in the obvious way by $$M_t( \mathcal L) := \mathcal L( [\bx,\bbx]_t [\bx,\bbx]_t^*)=(M_t(L_{ij}))_{i,j=1}^m,$$ with $M_t(\mathcal L)=M(\mathcal L)$ if $t=\infty$.
One may also define the action of $\mathcal L$ on a polynomial matrix $S=(S_{ij})_{i,j=1}^m \in \mathbb C[\mathbf{x},\qo{\mathbf{x}}]^{m\times m}$ by \begin{equation}\label{eq:LS} \langle \mathcal L, S\rangle:= \sum_{i,j=1}^m L_{ij}(S_{ij}). \end{equation} If $\mathcal L$ and $S$ are both Hermitian then $\langle \mathcal L,S\rangle\in \mathbb{R}$. As before, given $g\in \mathbb C[\mathbf{x},\qo{\mathbf{x}}]$ we may define a new (localizing) matrix-valued linear map $g\mathcal L$ by: \begin{align*}
g\mathcal L: \C\cxbx &\to \mathbb C^{m \times m} \\ p &\mapsto (g\mathcal L)(p)=\mathcal L(gp)=\big( L_{ij}(gp) \big)_{i,j \in [m]}. \end{align*}
\paragraph{Positivity of $\mathcal L$ and its moment matrix $M(\mathcal L)$.} The analog of \cref{eqgLpos} does not extend to the matrix-valued case: If $M(\mathcal L)$ is positive semidefinite, then $\mathcal L$ is positive, but the reverse implication may not hold in general. In the next two lemmas, we present alternative characterizations for positivity of a matrix-valued map $\mathcal L$ and positivity of its moment matrix $M(\mathcal L)$ that make this more apparent.
\begin{lemma}\label{lemGLpos} $\mathcal L$ is positive, i.e., \cref{eqGLpositive} holds, if and only if any of the following equivalent conditions holds: \begin{align} v^* \mathcal L(p\overline p)v= \big(\sum_{i,j=1}^m \overline {v_i} v_j L_{ij}\big)(p\overline p) =(v^*\mathcal L v)(p\overline p) \geq 0 \text{ for all } v \in \mathbb C^m \text{ and } p \in \C\cxbx, \label{eq: positivity of GL as lin func} \\ M(v^*\mathcal L v)\succeq 0 \ \text{ for all } v\in \mathbb C^m, \label{eqGL3}\\ (v \otimes \mathbf{a})^* \, M(\mathcal L) \, (v \otimes \mathbf{a}) \geq 0 \text{ for all } v \in \mathbb C^m \text{ and } \mathbf{a} \in\mathbb C^{\mathbb{N}^n\times \mathbb{N}^n}_0. \label{eq: positivity of ML} \end{align} \end{lemma}
\begin{proof} The equivalence of \cref{eqGLpositive} and \cref{eq: positivity of GL as lin func} is clear. The equivalence of \cref{eq: positivity of GL as lin func} and \cref{eqGL3} follows using \cref{eq_pos_L_ML} applied to each (scalar-valued) map $v^*\mathcal L v$. To see the equivalence of \cref{eq: positivity of GL as lin func} and \cref{eq: positivity of ML}, write a polynomial $p\in \mathbb C[\mathbf{x},\qo{\mathbf{x}}]$ as $p=\mathbf{a}^*[\mathbf{x},\qo{\mathbf{x}}]$ with $\mathbf{a}=(a_{\alpha,\beta})\in\mathbb C^{\mathbb{N}^n\times \mathbb{N}^n}_0$. Then, for any $v\in \mathbb C^m$, following \cref{eq_pos_herm}, we have: $$v^*\mathcal L (p\overline p)v=v^*( L_{ij}(p\overline p))_{i,j=1}^m v= v^*( \mathbf{a}^* M(L_{ij}) \mathbf{a})_{i,j=1}^m v=
(v\otimes \mathbf{a})^* M(\mathcal L) (v\otimes \mathbf{a}),$$ using the definition of $M(\mathcal L)$ from \cref{eqMML}. \end{proof}
\begin{lemma}\label{lemMGLpos} $M(\mathcal L)\succeq 0$ if and only if any of the following equivalent conditions holds: \begin{align} w^* M(\mathcal L) w \geq 0 \text{ for all } w \in \mathbb C^m \otimes \mathbb C^{\mathbb{N}^n\times \mathbb{N}^n}_0, \label{eqMGpsd}\\ \langle \mathcal L, \avec{p}\avec{p}^*\rangle= \sum_{i,j=1}^m L_{ij}(p_i \overline{p}_j)
\geq 0 \text{ for all } \avec{p} = (p_1,\ldots, p_m) \in \mathbb C[\bx,\bbx]^m,
\label{eq: M(ML) psd} \\
\langle \mathcal L, S\rangle \ge 0 \text{ for all SoS-polynomial matrices } S\in\mathbb C[\mathbf{x},\qo{\mathbf{x}}]^{m\times m}.
\label{eq: MMLSoS}
\end{align}
\end{lemma}
\begin{proof} \cref{eqMGpsd} is clear. To see the equivalence with \cref{eq: M(ML) psd} consider a vector $w=(w_{i, (\alpha,\beta)})_{i, (\alpha,\beta)}$ in $ \mathbb C^m\otimes \mathbb C^{\mathbb{N}^n\times \mathbb{N}^n}_0$ and, for each $i\in [m]$, define the vector $\mathbf{a}_i=(w_{i, (\alpha,\beta)})_{(\alpha,\beta)}\in \mathbb C^{\mathbb{N}^n\times \mathbb{N}^n}_0$, the corresponding polynomial $p_i= \mathbf{a}_i^* [\mathbf{x},\qo{\mathbf{x}}]$, and define the polynomial vector $\avec{p}=(p_1,\ldots,p_m)\in \mathbb C[\mathbf{x},\qo{\mathbf{x}}]^m.$ Then $$ w^* M(\mathcal L) w=w^*(M(L_{ij}))_{i,j=1}^m w= \sum_{i,j=1}^m \mathbf{a}_i^* (L_{ij}([\mathbf{x},\qo{\mathbf{x}}][\mathbf{x},\qo{\mathbf{x}}]^*))_{i,j=1}^m \mathbf{a}_j = \sum_{i,j=1}^m L_{ij}(p_i \overline{p}_j), $$ implying the equivalence of \cref{eqMGpsd} and \cref{eq: M(ML) psd}. The equivalence with \cref{eq: MMLSoS} follows since SoS-polynomial matrices are conic combinations of terms of the form $ \avec{p}\avec{p}^*$. \end{proof}
\noindent Note that \cref{eq: positivity of ML} is the restriction of \cref{eqMGpsd}, where we restrict to vectors $w$ in tensor product form $w=v\otimes \mathbf{a}$. In addition, we recover \cref{eq: positivity of GL as lin func} if, in \cref{eq: M(ML) psd}, we restrict to polynomials $p_1,\ldots,p_m$ of the form $p_i = v_i p$ (for $i \in [m]$) for some $p\in\mathbb C[\mathbf{x},\qo{\mathbf{x}}]$ and $v =(v_1,\ldots,v_m)\in \mathbb C^m$. This shows again that \cref{eq: positivity of GL as lin func} is more restrictive than \cref{eq: M(ML) psd}. Summarizing, we have the following implication.
\begin{lemma}\label{lemMGLtoGL} If $M(\mathcal L)\succeq 0$ then $\mathcal L$ is positive. \end{lemma}
\begin{remark}\label{remcomplexity} Note that requiring positivity of the moment matrix $M(\mathcal L)$ not only provides a stronger condition than requiring positivity of $\mathcal L$, but it is also a condition that is computationally easier to check. To make this concrete we consider the truncated case when $\mathcal L$ is restricted to the subspace $ \mathbb C[\mathbf{x},\qo{\mathbf{x}}]_{2t}$. Then, the condition $M_t(\mathcal L)\succeq 0$ asks whether a single matrix is positive semidefinite, which can be efficiently done. On the other hand, asking whether $\mathcal L$ is positive on sums of squares of degree at most $2t$ amounts to checking whether $M_t(v^*\mathcal L v) \succeq 0$ for all $v\in \mathbb C^m$, i.e., positive semidefiniteness of infinitely many matrices.
Note also that \cref{eq: MMLSoS} highlights the duality relationship which exists between $m\times m$ SoS-polynomial matrices and matrix-valued linear maps $\mathcal L$ with $M(\mathcal L)\succeq 0$. \end{remark}
\paragraph{Link to complete positivity of $\mathcal L$.} We now point out a link to the notion of complete positivity. Given a linear map $\mathcal L:\mathbb C[\mathbf{x},\qo{\mathbf{x}}]\to \mathbb C^{m\times m}$ and an integer $k\in \mathbb{N}$ one can define a new linear map \begin{align*} I_k\otimes \mathcal L: \mathbb C[\mathbf{x},\qo{\mathbf{x}}]^{k\times k} & \to \mathbb C^{k\times k}\otimes \mathbb C^{m\times m} \\
(p_{i' j'})_{i',j'=1}^k &\mapsto (\mathcal L(p_{i'j'}))_{i',j'=1}^k. \end{align*} Then $\mathcal L$ is said to be {\em completely positive} if $I_k\otimes \mathcal L$ is positive for all $k\in \mathbb{N}$. (See, e.g., \cite{paulsen_2003} for a general reference about completely positive maps.)
\begin{lemma} \label{lemCPL} $\mathcal L$ completely positive $\Longrightarrow$ $I_m\otimes \mathcal L$ positive $\Longrightarrow$ $M(\mathcal L)\succeq 0$. \end{lemma}
\begin{proof} The first implication is obvious. Assume $I_m\otimes \mathcal L$ is positive, we show that $M(\mathcal L)\succeq 0$. In view of \cref{eq: M(ML) psd} it suffices to show that $\sum_{i,j=1}^m L_{ij}(p_i\overline {p}_j)\ge 0$ for all $\avec{p}=(p_1,\ldots,p_m)\in \mathbb C[\mathbf{x},\qo{\mathbf{x}}]^m$. As $\avec{p}\avec{p}^*$ is a SoS-polynomial matrix (and thus a positive element), it follows that $(I_m\otimes \mathcal L)(\avec{p}\avec{p}^*)= (\mathcal L(p_{i'}\overline{p}_{j'}))_{i',j'=1}^m \succeq 0$. Consider the vector $w=(w_{ii'})_{i,i'\in [m]}$ with entries $w_{ii'}=1$ if $i=i'$ and $w_{ii'}=0$ otherwise. Then, $ \sum_{i,j=1}^m L_{ij}(p_i \overline{p}_j) =w^* (\mathcal L(p_{i'}\overline{p}_{j'}))_{i',j'=1}^m w \ge 0$, as desired. \end{proof}
\paragraph{Polynomial matrix localizing maps $G\otimes L$.} Given a (scalar-valued) linear map $L\in \mathbb C[\mathbf{x},\qo{\mathbf{x}}]^*$ there is a natural generalization of the above notion of localizing map $gL$, where, instead of considering a scalar polynomial $g$, we consider a polynomial matrix $G=(G_{ij})_{i,j=1}^m \in \mathbb C[\mathbf{x},\qo{\mathbf{x}}]^{m\times m}$. Then, we can define the matrix-valued linear map $\mathcal L:= (G_{ij}L)_{i,j=1}^m$, that we denote by $G\otimes L$, by \begin{align*} G\otimes L: \mathbb C[\mathbf{x},\qo{\mathbf{x}}] & \to \mathbb C^{m\times m} \\ p &\mapsto (G\otimes L)(p):=\big( (G_{ij} L)(p) \big)_{i,j=1}^m= \big(L(G_{ij}p)\big)_{i,j=1}^m= L(Gp). \end{align*} Following \cref{eqMML} the {moment matrix} of $G \otimes L$ is \begin{equation}\label{eqMGL} M(G\otimes L) = (G\otimes L) ([\bx,\bbx] [\bx,\bbx]^*)=((G_{ij}L)([\bx,\bbx] [\bx,\bbx]^*))_{i,j=1}^m= L( G \otimes [\bx,\bbx] [\bx,\bbx]^*). \end{equation}
\begin{remark} \label{rem: M(GL) product and psd}
When $L=L_a$ is the (scalar-valued) evaluation map at a vector $a \in \mathbb C^n$ the moment matrix $M(G\otimes L_a)$ has indeed a tensor product structure, since we have
\[
M(G \otimes L_a) = L_a(G \otimes [\bx,\bbx] [\bx,\bbx]^*) = G(a) \otimes [a,\overline a][a,\overline a]^* = L_a(G) \otimes L_a([\bx,\bbx] [\bx,\bbx]^*).
\]
In particular, if $G(a) \succeq 0$ then we have $M(G \otimes L_a) \succeq 0$. Therefore, $M(G \otimes L) \succeq 0$ when $L$ is a conic combination of evaluation maps at points at which $G$ is positive semidefinite. This property motivates using such a positivity constraint in defining our bounds for the separable rank and the completely positive rank. \end{remark}
As observed above, $M(G\otimes L)\succeq 0$ implies that $G\otimes L$ is positive. Note that, by \cref{eqGL3}, $G\otimes L$ is positive if and only if $M((v^*Gv) L)\succeq 0$ for all $v\in\mathbb C^m$, while, by \cref{eq: M(ML) psd}, $M(G\otimes L)\succeq 0$ if and only if $L(\avec{p}^*G \avec{p})\ge 0$ for all $\avec{p}\in \mathbb C[\mathbf{x},\qo{\mathbf{x}}]^m$. In particular, for a truncated linear map $L\in \mathbb C[\mathbf{x},\qo{\mathbf{x}}]^*_{2t}$, the condition $M_t(G\otimes L)\succeq 0$ implies any of the following two equivalent conditions (the truncated analogs of \eqref{eq: positivity of GL as lin func} and \eqref{eqGL3}), which characterize positivity of $G\otimes L$ on $\Sigma_{2t}$: \begin{align} L(v^*Gv \cdot p\overline p)\ge 0 \ \text{ for all } v\in \mathbb C^m \text{ and } p\in \mathbb C[\mathbf{x},\qo{\mathbf{x}}]_t,\label{eqMtGLa}\\ M_t((v^*Gv) L)\succeq 0\ \text{ for all } v\in \mathbb C^m. \label{eqMtGL} \end{align}
While it is computationally easy to check whether $M_t(G\otimes L)\succeq 0$, it is not clear how to check the above conditions efficiently.
For this reason, we will select the stronger moment matrix positivity condition when defining our new hierarchy of bounds for the separable rank. However, we note that the weaker positivity condition of the localizing map will be sufficient to establish convergence properties of the bounds.
\subsection{The moment method}\label{sec:moment} We now state several widely used definitions and results from polynomial optimization that we will need to design our hierarchy of bounds on the separable rank and for the moment approach to the DPS approximation hierarchy of the set $\mathcal{SEP}$ of separable states.
Given a set of Hermitian polynomials $S \subseteq \Ccxbx^h$ we define the \emph{positivity domain} of $S$ as
\begin{equation}\label{poddom}
\mathscr{D}(S) := \{u \in \mathbb C^{n} ~|~ g(u) \geq 0 ~\text{for~every}~ g \in S\}.
\end{equation} Given a Hermitian polynomial matrix $G\in \mathbb C[\mathbf{x},\qo{\mathbf{x}}]^{m\times m}$ we define
the polynomial set
\begin{equation}\label{eqSG}
S_G:=\{v^* Gv: v\in \mathbb C^m, \|v\|=1\}\subseteq \mathbb C[\mathbf{x},\qo{\mathbf{x}}]^h,
\end{equation} so that the set \begin{equation}\label{posdomG}
\mathscr{D}(S_G) = \{u \in \mathbb C^{n} ~|~ G(u) \succeq 0\}
\end{equation}
corresponds to the positivity domain of $G$.
For $t \in \mathbb{N} \cup \{\infty\}$ and $S \subseteq \Ccxbx^h$ the set
$$
\mathcal {M}(S)_{2t} := \mathrm{cone}\{gp\overline p ~|~ p \in \C\cxbx,~ g \in S \cup \{1\},~\deg(gp\overline p) \leq 2t\}
$$
denotes the \emph{quadratic module} generated by $S$, {\em truncated at order $2t$} when $t\in \mathbb{N}$. If $t = \infty$ we simply write $\mathcal {M} (S)$. The quadratic module $\mathcal {M}(S)$ is said to be {\em Archimedean} if, for some scalar $R > 0$,
\begin{equation}\label{ArchimedianCond}
R - \sum_{i = 1}^{n}x_i \overline{x_i} \in \mathcal {M}(S).
\end{equation}
Hence a quadratic module is Archimedean if it contains an algebraic certificate of boundedness of the associated positivity domain. The next lemma shows that, in the case when the algebraic certificate in (\ref{ArchimedianCond}) belongs to the quadratic module $\mathcal {M}(S)_{2}$,
the linear functionals that are nonnegative on $\mathcal {M}(S)$ are bounded. Its proof is standard (and easy) and thus omitted.
\begin{lemma}\label{pointconv}
Let $S \subseteq \Ccxbx^h $ be such that $ R - \sum_{i = 1}^{n} x_i \overline{x_i} \in \mathcal {M}(S)_2$ for some $R> 0$. For any $t \in \mathbb{N}$ assume $L_t \in \C\cxbx_{2t}^*$ is nonnegative on $\mathcal {M}(S)_{2t}$. Then we have
$$
|L_t(w)| \leq R^{|w|/2}L_t(1)~\text{for~all}~ w \in [\bx,\bbx]_{2t}.
$$
Moreover, if
\begin{equation}\label{Ltsup}
\sup_{t\in \mathbb{N}} L_t(1) < \infty,
\end{equation}
then $\{L_t\}_{t \in \mathbb{N}}$ has a point-wise converging subsequence in $ \C\cxbxs$. \end{lemma}
\paragraph{Linear functionals and measures.}
The following result is central to our approach for approximating matrix factorization ranks. It is a complex analog of results by Putinar \cite{Pu93} and Tchakaloff~\cite{Tchakaloff}. For completeness, we will indicate in \cref{AppProofPutinar} how to derive from these results the following complex analog. \begin{theorem} \label{theomainTchakaloff}
Let $S \subseteq \Ccxbx^h$ be a set of Hermitian polynomials such that the quadratic module $\mathcal {M}(S)$ is Archimedean and consider a Hermitian linear map $L: \mathbb C[\mathbf{x},\qo{\mathbf{x}}]\to \mathbb C$. Assume that $L$
is nonnegative on $\mathcal {M}(S)$. Then the following holds. \begin{itemize} \item[(i)] (based on \cite{Pu93}) $L$ has a representing measure $\mu$ that is supported by $\mathscr{D}(S)$, i.e., we have $L(p)=\int_{\mathscr{D}(S)}p d\mu$ for all $p\in\mathbb C[\mathbf{x},\qo{\mathbf{x}}]$. \item[(ii)] (based on \cite{Tchakaloff}) For any integer $k \in \mathbb{N}$, there exists a linear functional $\widehat{L}: \mathbb C[\mathbf{x},\qo{\mathbf{x}}]\to \mathbb C$ which coincides with $L$ on $\mathbb C[\mathbf{x},\qo{\mathbf{x}}]_k$ and has a finite atomic representing measure supported by $\mathscr{D}(S)$, i.e.,
we have
\begin{align}
\widehat{L}(p) = L(p) \text{ for every } p \in \C\cxbx_k, \label{eqext}\\
\widehat{L} = \sum_{\ell=1}^{K} \lambda_\ell L_{v_\ell},\label{eqatom}
\end{align}
for some integer $K\ge 1$, scalars $\lambda_1,\lambda_2,...,\lambda_K>0$ and vectors $v_1,v_2,...,v_K \in \mathscr{D}(S)$. \end{itemize}
\end{theorem} We will often apply the above theorem to a linear functional $L \in \mathbb C[\mathbf{x},\qo{\mathbf{x}}]^*$ that additionally satisfies the positivity condition: $(G \otimes L) (p\overline p) \succeq 0$ for all $p \in \mathbb C[\mathbf{x},\qo{\mathbf{x}}]$, for some Hermitian polynomial matrix $G \in \mathbb C[\mathbf{x},\qo{\mathbf{x}}]^{m \times m}$. Then, in view of \cref{lemGLpos} (combined with \cref{eqMtGLa} and \cref{eqMtGL}), one may still apply Theorem \ref{theomainTchakaloff} after replacing the set $S$ by the set $S \cup S_{G}$ so that the resulting measure $\mu$ will be supported by $\mathscr{D}(S\cup S_G)\subseteq \{x: G(x)\succeq 0\}$, thus within the positivity domain of $G$.
\paragraph{Matrix-valued linear functionals and matrix-valued measures.}
We now mention extensions of the previous results in Theorem \ref{theomainTchakaloff} from the scalar-valued case to the matrix-valued case, that we will use for the moment approach to the DPS hierarchy.
For the next result we use (a specification of) a result of Cimpric and Zalar \cite[Theorem~5]{CimpricZalar}, which shows an operator-valued version of Theorem \ref{theomainTchakaloff} (i). Since the latter is stated in the real case we indicate in Appendix \ref{AppProofCimpric} how to derive from it its complex analog that we need for the implication (ii) $\Rightarrow$ (i) in Theorem \ref{theomainmatrix} below. In a nutshell, this implication relies on a version of Riesz' representation theorem for positive operator valued linear maps (see, e.g., \cite{hadwin81}) combined with a density argument (for going from polynomials to continuous functions) and Putinar's Positivstellensatz.
\begin{theorem}[based on \cite{CimpricZalar}]\label{theomainmatrix} Let $S\subseteq \mathbb C[\mathbf{x},\qo{\mathbf{x}}]^h$ be a set of Hermitian polynomials such that the quadratic module $\mathcal {M}(S)$ is Archimedean and let $\mathcal L:\mathbb C[\mathbf{x},\qo{\mathbf{x}}]\to \mathcal H^m$ be a Hermitian matrix-valued linear map. The following assertions are equivalent. \begin{itemize} \item[(i)] $\mathcal L$ has a representing measure $\mu$ that is supported by $\mathscr{D}(S)$ and takes its values in the cone $\mathcal H^m_+$ of $m\times m$ Hermitian positive semidefinite matrices. \item[(ii)] $\mathcal L$ is nonnegative on $\mathcal {M}(S)$, i.e., $\mathcal L(gp\overline p)\succeq 0$ for all $g\in S\cup\{1\}$ and $p\in \mathbb C[\mathbf{x},\qo{\mathbf{x}}]$. \item[(iii)] $M(g\mathcal L)\succeq 0$ for all $g\in S\cup\{1\}.$ \item[(iv)] $g\mathcal L$ is completely positive for all $g\in S\cup\{1\}.$ \end{itemize} \end{theorem}
\begin{proof} First we show that (i) implies (iv). Let $k\in \mathbb{N}$, let $P=(p_{i'j'})_{i',j'=1}^k\in \mathbb C[\mathbf{x},\qo{\mathbf{x}}]^{k\times k}$ be a polynomial matrix such that $P(x)\succeq 0$ for all $x\in \mathbb C^n$, and let $g\in S\cup \{1\}$; we show that $(I_k\otimes g\mathcal L)(P)\succeq 0$. For this note that $$(I_k\otimes g\mathcal L)(P)= ((g\mathcal L)(p_{i'j'}))_{i',j'=1}^k = (\mathcal L(gp_{i'j'}))_{i',j'=1}^k = \Big(\int_{\mathscr{D}(S)} g p_{i'j'} d\mu\Big)_{i',j'=1}^k \succeq 0. $$ Here, the last inequality follows (for example) from \cref{theoTchakmatrix} below, using the fact that $g(x)\ge 0$ on $\mathscr{D}(S)$, $P(x)=(p_{i'j'}(x))_{i',j'=1}^k \succeq 0$ for all $x$, and $\mu$ takes its values in $\mathcal H^m_+$. Indeed, say $D$ is an upper bound on the degree of $g(x)P(x)$. Then, by Theorem \ref{theoTchakmatrix} applied to $\mathcal L$ restricted to $\mathbb C[\mathbf{x},\qo{\mathbf{x}}]_D$, there exist an integer $K\in \mathbb{N}$, matrices $\Lambda_\ell\succeq 0$ and vectors $v_\ell\in \mathscr{D}(S)$ (for $\ell\in [K]$) such that $ (\mathcal L(gp_{i'j'}))_{i',j'=1}^k=\sum_{\ell=1}^K g(v_\ell) \Lambda_\ell \otimes P(v_\ell),$ which proves it is a positive semidefinite matrix. The implication (iv) $\Longrightarrow$ (iii) follows from Lemma \ref{lemCPL} and (iii) $\Longrightarrow$ (ii) follows from Lemma \ref{lemMGLtoGL}.
Finally, for the implication (ii) $\Longrightarrow$ (i) we refer to the arguments in Appendix \ref{AppProofCimpric}. \end{proof}
What the above result shows is that, while in general the notions of complete positivity, positivity and having a positive semidefinite moment matrix are not equivalent, these properties become equivalent when considering a linear map $\mathcal L$ acting on an Archimedean quadratic module. We will apply these results to the case of the quadratic module of the unit sphere (with $S=\{1-\sum_i x_i\overline{x_i}\}$) for the moment approach to the DPS hierarchy in Section \ref{sec:momentDPS}.
Finally, there is also an analog of Theorem \ref{theomainTchakaloff} (ii) for the matrix-valued case.
\begin{theorem}[Kimsey \cite{Kimsey}] \label{theoTchakmatrix} Let $S\subseteq \mathbb C[\mathbf{x},\qo{\mathbf{x}}]^h$ be a set of Hermitian polynomials and let $\mathcal L:\mathbb C[\mathbf{x},\qo{\mathbf{x}}]\to \mathcal H^m$ be a Hermitian matrix-valued linear map. Assume $\mathcal L$ has a representing measure supported by $\mathscr{D}(S)$ and taking values in the cone $\mathcal H^m_+$. Then, for any integer $k\in \mathbb{N}$, the restriction of $\mathcal L$ to $\mathbb C[\mathbf{x},\qo{\mathbf{x}}]_k$ has another representing measure that is finitely atomic; that is, there exists $K\in \mathbb{N}$, matrices $\Lambda_1,\ldots,\Lambda_K\in \mathcal H^m_+$ and vectors $v_1,\ldots,v_K\in \mathscr{D}(S)$ such that $\mathcal L(p)=\sum_{\ell=1}^K \Lambda_\ell p(v_\ell)$ for all polynomials $p\in \mathbb C[\mathbf{x},\qo{\mathbf{x}}]_k$. \end{theorem}
\section{A hierarchy of lower bounds on the separable rank} \label{sec: LB}
In this section, we show how to use the polynomial optimization techniques developed in the previous section in order to obtain a hierarchy of lower bounds on the separable rank.
\subsection{The parameter $\tau_\mathrm{sep}$}\label{sec:tausep}
Consider a separable state $\rho \in \mathcal{SEP}_d$. As defined earlier, its separable rank is the smallest integer $r\in\mathbb{N}$ for which there exist (nonzero) vectors $a_1,\ldots, a_r, b_1,\ldots,b_r \in \mathbb C^d$ such that \begin{equation}\label{eqseprank} \rho = \sum_{\ell=1}^r a_\ell a_\ell^* \otimes b_\ell b_\ell^*. \end{equation} We mention several properties that are satisfied by the vectors $a_\ell,b_\ell$ entering such a decomposition. First of all, the vectors $a_\ell,b_\ell$ clearly satisfy the positivity condition \begin{equation}\label{eqabpsd} \rho- a_\ell a_\ell^*\otimes b_\ell b_\ell^*\succeq 0 \quad \text{ for all }\ell\in [r]. \end{equation} Let $$\rho_{\max}:=\max_{i,j\in [d]} \rho_{ij,ij}$$ denote the maximum diagonal entry of $\rho$.
Then, in view of (\ref{eqabpsd}), the vectors $a_\ell,b_\ell$ also satisfy $|(a_\ell)_i|^2 |(b_\ell)_j|^2 \le \rho_{ij,ij}$ for all $i,j\in [d]$, which implies the following boundedness conditions \begin{equation}\label{eqbound}
\|a_\ell \|_\infty^2 \cdot \|b_\ell\|^2_\infty \le \rho_{\max} \quad \text{ and } \quad
\|a_\ell \|_2^2 \cdot \|b_\ell \|^2_2\le \mathrm{Tr}(\rho)\quad \text{ for all } \ell\in [r]. \end{equation}
Note that we may rescale the vectors $a_\ell,b_\ell$ so that additional properties can be assumed. For instance we may rescale them so that $\|a_\ell\|_\infty=\|b_\ell\|_\infty$, in which case we may assume without loss of generality that \begin{equation}\label{eqabmax}
\|a_\ell\|^2_\infty, \|b_\ell\|^2_\infty \le \sqrt{\rho_{\max}} \quad \text{ for all } \ell\in [r]. \end{equation}
Another possibility is rescaling so that $\|a_\ell\|_2=\|b_\ell\|_2$, in which case we could instead assume that \begin{equation}\label{eqab2}
\|a_\ell\|_2^2 = \|b_\ell\|_2^2 \le \sqrt {\mathrm{Tr}(\rho)} \quad \text{ for all } \ell\in [r]. \end{equation}
Yet another possibility would be to rescale so that $\|b_\ell\|_2=\sqrt{\mathrm{Tr}(\rho)}$ for all $\ell$, in which case we would have \begin{equation}\label{eqbnorm1a}
\|a_\ell\|_2^2 \le \sqrt{\mathrm{Tr}(\rho)}, \ \|b_\ell\|_2=\sqrt{\mathrm{Tr}(\rho)}\ \text{ for all } \ell\in [r] \end{equation} or, equivalently (up to rescaling), we may assume that \begin{equation}\label{eqbnorm1}
\|a_\ell\|_2^2 \le \mathrm{Tr}(\rho), \ \|b_\ell\|_2=1\ \ \text{ for all } \ell\in [r]. \end{equation} To fix ideas we will now apply the first rescaling (\ref{eqabmax}), so that each $(a_\ell,b_\ell)$ belongs to the set \begin{equation} \label{eq:set}
{\mathcal V}_\rho:= \Big\{ (x,y) \in \mathbb C^d \times \mathbb C^d ~|~ xx^* \otimes yy^* \preceq \rho, \ \|x\|_\infty, \|y\|_\infty \leq \rho_{\max}^{1/4} \Big\}. \end{equation} We will consider the impact of doing other rescalings as in \cref{eqab2}, \cref{eqbnorm1a} or \cref{eqbnorm1} later on in the paper in numerical examples. However, as will be noted in Remark \ref{rembound}, the localizing constraints corresponding to the scaling (\ref{eqabmax}) already imply the localizing constraints corresponding to the inequalities in~(\ref{eqbound}).
From \cref{eqseprank} we have $${1\over r}\rho ={1\over r}\sum_{\ell=1}^r a_\ell a_\ell^*\otimes b_\ell b_\ell^* \in \mathrm{conv}\{xx^*\otimes yy^*: (x,y)\in {\mathcal V}_\rho\}, $$ which motivates defining the following parameter \begin{equation} \label{eq:tausep} \tau_{\mathrm{sep}}(\rho) := \inf\Big\{\lambda: \lambda>0, {1\over \lambda} \rho \in \mathrm{conv} \{xx^*\otimes yy^*: (x,y)\in {\mathcal V}_\rho\}\Big \}. \end{equation} From the above discussion, this parameter gives a lower bound on the separable rank.
\begin{lemma}\label{tauleqsep}
For any $\rho \in \mathcal{SEP}_d$, we have $\tau_\mathrm{sep}(\rho) \leq \mathrm{rank_{sep}}(\rho) $. Moreover, if $\rho\not\in\mathcal{SEP}_d$ then $\tau_\mathrm{sep}(\rho)=\mathrm{rank_{sep}}(\rho)=\infty$. \end{lemma} The parameter $\tau_\mathrm{sep}(\rho)$ does not seem any easier to compute than the separable rank. It, however, enjoys an additional convexity property that the combinatorial parameter $\mathrm{rank_{sep}}(\rho)$ does not have. In the next section, we will present a hierarchy of lower bounds on $\mathrm{rank_{sep}}(\rho)$, constructed using tools from polynomial optimization. These bounds arise from convex (semidefinite) programs, they in fact also lower bound the (weaker) parameter $\tau_\mathrm{sep}(\rho)$ and will be shown to asymptotically converge to it.
\subsection{Polynomial optimization approach for $\tau_{\mathrm{sep}}$ and $\mathrm{rank_{sep}}$} \label{sec: poly sep}
As above, let $\rho\in \mathcal{SEP}_d$ be given, together with a decomposition (\ref{eqseprank}) with $r=\mathrm{rank_{sep}}(\rho)$, where we assume that the points $(a_\ell,b_\ell)$ belong to the set ${\mathcal V}_\rho$ in (\ref{eq:set}). We explain how to define bounds for $\mathrm{rank_{sep}}(\rho)$ by using the moment method from Section \ref{sec:moment}.
For this let us consider the linear functional \begin{equation} \label{eq:defL} L = \sum_{\ell=1}^r L_{(a_\ell,b_\ell)}, \end{equation} the sum of the evaluation functionals at the points entering the decomposition (\ref{eqseprank}). Then $L$ acts on the polynomial space $\mathbb C[\mathbf{x},\mathbf{y},\qo{\mathbf{x}},\qo{\mathbf{y}}]$, where it is now convenient to denote the $2d$ variables as $\mathbf{x}=(x_1,\ldots,x_d)$ and $\mathbf{y}=(y_1,\ldots,y_d)$, corresponding to the `bipartite' structure in \cref{eqseprank}. By construction, $L$ corresponds to a finite atomic measure supported on the set ${\mathcal V}_\rho$. Moreover we have $$L(1) = \sum_{\ell=1}^r L_{(a_\ell,b_\ell)}(1) = \sum_{\ell=1}^r 1 = r=\mathrm{rank_{sep}}(\rho)$$ and the fourth-degree moments are given by the entries of $\rho$: $$L(\mathbf{x}\mathbf{x}^* \otimes \mathbf{y}\mathbf{y}^*)=\rho. $$ In addition, since each $(a_\ell,b_\ell)$ belongs to the set ${\mathcal V}_\rho$, it follows that $$M(G_\rho \otimes L)=L(G_\rho\otimes [\mathbf{x},\mathbf{y},\qo{\mathbf{x}},\qo{\mathbf{y}}] [\mathbf{x},\mathbf{y},\qo{\mathbf{x}},\qo{\mathbf{y}}]^*) \succeq 0 \quad \text{ and } \quad L \ge 0 \text{ on } \mathcal {M}(S_\rho),$$ after defining the Hermitian polynomial matrix \begin{equation}\label{eqGrho} {G_{\rho}}(\mathbf{x},\mathbf{y}) := \rho - \mathbf{x} \mathbf{x}^* \otimes \mathbf{y} \mathbf{y}^* \in \mathbb C[\mathbf{x},\mathbf{y},\qo{\mathbf{x}},\qo{\mathbf{y}}]^{d^2\times d^2}_4 \end{equation} and the localizing set of Hermitian polynomials \begin{equation}\label{eqSrho} S_\rho=\Big\{ \sqrt{\rho_{\max}}-x_i\overline x_i, \sqrt{\rho_{\max}}-y_i\overline y_i : i\in [d]\Big\}\subseteq \mathbb C[\mathbf{x},\mathbf{y},\qo{\mathbf{x}},\qo{\mathbf{y}}]^h_2. \end{equation} To see that $M(G_\rho\otimes L)\succeq 0$ we use Remark \ref{rem: M(GL) product and psd}. 
Recall also the definition of the set $S_{G_\rho}$ of localizing polynomials corresponding to the polynomial matrix $G_\rho$ in (\ref{eqGrho}):
$$S_{G_{\rho}} = \{v^* {G_{\rho}} v: v \in \mathbb C^d \otimes \mathbb C^d\}\subseteq \mathbb C[\mathbf{x},\mathbf{y},\qo{\mathbf{x}},\qo{\mathbf{y}}]^h_4.$$ Then, by construction, the combined positivity domains of the sets $S_\rho$ and $ S_{{G_{\rho}}}$ recover ${\mathcal V}_\rho$: \[
\mathscr{D}(S_{G_{\rho}} \cup S_\rho) = {\mathcal V}_\rho=\Big\{ (x,y) \in \mathbb C^d \times \mathbb C^d ~|~ xx^* \otimes yy^* \preceq \rho, \ \|x\|_\infty, \|y\|_\infty \leq \rho_{\max}^{1/4} \Big\}. \] \begin{remark}\label{rembound} Note that the localizing constraints for the inequalities in (\ref{eqbound}) are implied by the localizing constraints for $S_\rho \cup S_{G_\rho}$. This follows from the following two identities: $$\rho_{\max}-x_i\overline x_i y_j\overline y_j= (\sqrt{ \rho_{\max}} -x_i\overline x_i)y_j\overline y_j +\sqrt{\rho_{\max}}(\sqrt{\rho_{\max}}-y_j\overline y_j) \in \mathcal {M}(S_\rho)_4,$$ $$\mathrm{Tr}(\rho)-(\sum_i x_i\overline x_i)(\sum_j y_j\overline y_j)= \sum_{i,j} (\rho_{ij,ij} -x_i\overline x_i y_j\overline y_j ) \in \mathcal {M}(S_{G_{\rho}})_4.$$ \end{remark}
Moreover, let us recall for future reference that \begin{equation}\label{eqMGLtoGL} M(G_\rho\otimes L)\succeq 0\ \Longrightarrow\ L\ge 0 \text{ on } \mathcal {M}(S_{G_\rho}), \end{equation} which follows from \cref{lemGLpos} and the characterization of positivity of $G_\rho\otimes L$ from \cref{eqMtGL}. The above observations motivate introducing the following parameters. For $t \in \mathbb{N} \cup \{\infty \}$ with $t\ge 2$, define the parameter
\begin{equation} \label{eqxit}
\begin{split}
\xidsep{t} := \inf \Big\{ L(1) ~|~ &L: \C[\mathbf{x},\mathbf{y},\qo{\mathbf{x}},\qo{\mathbf{y}}]_{2t}\to\mathbb C \ \text{ Hermitian\ \ s.t. } \\
& L(\mathbf{x}\mathbf{x}^*\otimes \mathbf{y}\mathbf{y}^*) = \rho, \\
& L \geq 0 \text{ on } \mathcal {M}(S_\rho)_{2t}, \\
& M_{t-2}(G_\rho\otimes L)=L({G_{\rho}} \otimes [\mathbf{x},\mathbf{y},\qo{\mathbf{x}},\qo{\mathbf{y}}]_{t-2} [\mathbf{x},\mathbf{y},\qo{\mathbf{x}},\qo{\mathbf{y}}]_{t-2}^*) \succeq 0\Big\}.
\end{split}
\end{equation} For $t=\infty$ the parameter $\xidsep{\infty}$ involves linear functionals acting on the full polynomial space $\C[\mathbf{x},\mathbf{y},\qo{\mathbf{x}},\qo{\mathbf{y}}]$. In addition, we let $\xidsep{*}$ denote the parameter obtained by adding the constraint $\mathrm{rank}(M(L)) < \infty$ to the definition of $\xidsep{\infty}$. One can show that the function $\rho \mapsto \xidsep{t}$ is lower semicontinuous, the proof is analogous to that of \cite[Lemma~7]{GdLL17a} and thus omitted. {In addition, as we will see in \cref{rem: level 2 implies ppt}, if the program defining $\xidsep{t}$ ($t\ge 2$) is feasible then $\rho$ satisfies the PPT criterion, i.e., $\rho^{T_B}\succeq 0$, where $\rho^{T_B}$ is obtained by taking the partial transpose of $\rho$ on the second register (see (\ref{eqPPTB})). }
As is well-known, for finite $t \in \mathbb{N}$, the bound $\xidsep{t}$ can be expressed as a semidefinite program since nonnegativity of $L$ on the truncated quadratic module $\mathcal {M}(S_\rho)_{2t}$ can be encoded through positive semidefiniteness of the moment matrix $M_t(L) = L([\mathbf{x},\mathbf{y},\qo{\mathbf{x}},\qo{\mathbf{y}}]_{t}[\mathbf{x},\mathbf{y},\qo{\mathbf{x}},\qo{\mathbf{y}}]_{t}^*)$ and of the localizing moment matrices $M_{t-1}(gL)=L(g[\mathbf{x},\mathbf{y},\qo{\mathbf{x}},\qo{\mathbf{y}}]_{t-1}[\mathbf{x},\mathbf{y},\qo{\mathbf{x}},\qo{\mathbf{y}}]_{t-1}^*)$ for all $g \in S_\rho$.
By the above discussion, for any $\rho\in \mathcal{SEP}_d$ we have the following chain of inequalities: \begin{equation}\label{eqchain} \xidsep{2} \leq \xidsep{3} \leq \cdots \leq \xidsep{\infty} \leq \xidsep{*} \leq \mathrm{rank_{sep}}(\rho) < \infty. \end{equation} We will now show that the bounds $\xidsep{t}$ in fact converge to the parameter $\tau_{\mathrm{sep}}(\rho)$. In a first step we observe that the parameters $\xidsep{t}$ converge to $\xidsep{\infty}$ and after that we show that $\xidsep{\infty}=\xidsep{*}=\tau_\mathrm{sep}(\rho)$.
\begin{lemma}\label{lemconv} Let $\rho\in\mathcal{SEP}_d$. The infimum is attained in problem \eqref{eqxit} for any integer $t\ge 2$ or $t=\infty$, and we have $\lim_{t\to\infty}\xidsep{t}=\xidsep{\infty}$. \end{lemma}
\begin{proof} First we show that problem \eqref{eqxit} attains its optimum. For this note that, in view of \cref{eqchain}, we may restrict the optimization to linear functionals $L$ satisfying $L(1) \le \mathrm{rank_{sep}}(\rho)$. By the definition of $S_\rho$ in \eqref{eqSrho}, the quadratic module $\mathcal {M}(S_\rho)$ is Archimedean since, with $R=2d\sqrt{\rho_{\max}}$, $R-\sum_{i=1}^d (x_i\overline x_i + y_i\overline y_i )\in \mathcal {M}(S_\rho)_2$. As $L$ is nonnegative on $\mathcal {M}(S_\rho)_{2t}$, we can apply \cref{pointconv}
and conclude that $$
|L(w)| \leq R^{|w|/2} L(1) ~~\text{for any}~~ w \in [\mathbf{x},\mathbf{y},\qo{\mathbf{x}},\qo{\mathbf{y}}]_{2t}.
$$ Hence we are optimizing a linear objective function over a compact set, and thus the optimum is attained. So, for each integer $t\ge 2$, let $L_t$ be an optimum solution of problem \eqref{eqxit}. As $\sup_t L_t(1)\le \mathrm{rank_{sep}}(\rho)<\infty$, we can conclude from \cref{pointconv} that there exists a linear functional $L\in \mathbb C[\mathbf{x},\mathbf{y},\qo{\mathbf{x}},\qo{\mathbf{y}}]^*$ which is the limit of a subsequence of the sequence $(L_t)_t$. Then $L$ is feasible for $\xidsep{\infty}$, which implies $\xidsep{\infty}\le L(1) =\lim_{t\to\infty}L_t(1)=\lim_t\xidsep{t}$. Note that this $L$ is optimal for $\xidsep{\infty}$. \end{proof}
\begin{lemma}\label{lemallequal} For any $\rho\in \mathcal{H}^d\otimes \mathcal{H}^d$ we have $\xidsep{\infty} = \xidsep{*}= \tau_{\mathrm{sep}}(\rho)$. \end{lemma}
\begin{proof} As $\xidsep{\infty}\le \xidsep{*}$ it suffices to show that $\xidsep{*}\le \tau_\mathrm{sep}(\rho)$ and $\tau_\mathrm{sep}(\rho)\le \xidsep{\infty}$.
First we show $\xidsep{*}\le \tau_\mathrm{sep}(\rho)$. If $\tau_\mathrm{sep}(\rho)=\infty$ there is nothing to prove. So assume we have a feasible solution: $\rho=\lambda \sum_{\ell=1}^K \mu_\ell a_\ell a_\ell^*\otimes b_\ell b_\ell^*$, where $\lambda>0$, $(a_\ell,b_\ell)\in {\mathcal V}_\rho$, $\mu_\ell>0$ and $\sum_\ell \mu_\ell=1$. Define the linear functional $L=\lambda\sum_{\ell=1}^K\mu_\ell L_{(a_\ell,b_\ell)}$. Then $L$ is feasible for $\xidsep{*}$ with $L(1)=\lambda$. Hence, $\xidsep{*}\le L(1)=\lambda$, which shows $\xidsep{*}\le \tau_\mathrm{sep}(\rho)$.
Now we show $\tau_{\mathrm{sep}}(\rho) \leq \xidsep{\infty}$. If $\xidsep{\infty}=\infty$ there is nothing to prove. So assume $L$ is a feasible solution to $\xidsep{\infty}$. Then, in view of \cref{eqMGLtoGL}, $L\ge 0$ on $\mathcal {M}(S_\rho \cup S_{G_\rho})$. As $\mathcal {M}(S_\rho)$ is Archimedean we can apply \cref{theomainTchakaloff} (with $k=4$) and conclude that the restriction of $L$ to $\mathbb C[\mathbf{x},\mathbf{y},\qo{\mathbf{x}},\qo{\mathbf{y}}]_4$ is a conic combination of evaluations at points in $\mathscr{D}(S_\rho\cup S_{G_\rho})={\mathcal V}_\rho$. In other words, there exist $(a_\ell,b_\ell)\in {\mathcal V}_\rho$ and scalars $\mu_\ell>0$ such that $L(p)=\sum_{\ell=1}^K\mu_\ell p(a_\ell,b_\ell)$ for any $p\in \mathbb C[\mathbf{x},\mathbf{y},\qo{\mathbf{x}},\qo{\mathbf{y}}]_4$. In particular, we have $L(1)=\sum_{\ell=1}^K\mu_\ell$ and $\rho=L(\mathbf{x}\mathbf{x}^*\otimes \mathbf{y}\mathbf{y}^*)=\sum_{\ell=1}^K \mu_\ell \ a_\ell a_{\ell}^*\otimes b_\ell b_\ell^*$. This implies that ${1\over L(1)}\rho$ belongs to $\mathrm{conv}\{xx^*\otimes yy^*: (x,y)\in {\mathcal V}_\rho\}$ and thus $\tau_\mathrm{sep}(\rho)\le L(1)$, showing $\tau_\mathrm{sep}(\rho)\le \xidsep{\infty}$. \end{proof}
As observed earlier, since $\mathcal{SEP}_d$ is a $d^4$-dimensional cone, by Carath\'eodory's theorem we have $\mathrm{rank_{sep}}(\rho)\le d^4$ for any $\rho\in \mathcal{SEP}_d$ (or, even stronger, $ \mathrm{rank_{sep}}(\rho)\le \mathrm{rank}(\rho)^2$). Based on this one can also use the bounds $\xidsep{t}$ to test (non-)membership in $\mathcal{SEP}_d$. The bound $\mathrm{rank}(\rho)^2$ in \cref{lemmembership} below can of course be replaced by any other valid upper bound on the separable rank. Such a valid bound can be obtained, e.g., using the {\em birank} of $\rho$, defined as the pair $(\mathrm{rank}(\rho),\mathrm{rank}(\rho^{T_B}))$. Indeed, as
$\mathrm{rank_{sep}}(\rho)=\mathrm{rank_{sep}}(\rho^{T_B})$, we have \begin{equation}\label{eqbirank}\max\{\mathrm{rank}(\rho),\mathrm{rank}(\rho^{T_B})\}\le \mathrm{rank_{sep}}(\rho)\le (\min \{\mathrm{rank}(\rho),\mathrm{rank}(\rho^{T_B})\})^2. \end{equation}
\begin{lemma}\label{lemmembership} Let $\rho\in \mathcal{H}^d\otimes \mathcal{H}^d$. Then, $\rho \in \mathcal{SEP}_d$ if and only if $\xidsep{t}\le \mathrm{rank}(\rho)^2$ for all integers $t\ge 2$. \end{lemma}
\begin{proof} The `only if' part follows from $\xidsep{t}\le \mathrm{rank_{sep}}(\rho)\le \mathrm{rank}(\rho)^2$ when $\rho \in\mathcal{SEP}_d$. Conversely, assume $\xidsep{t}\le \mathrm{rank}(\rho)^2$ for all integers $t\ge 2$. Then, we can use the same argument as in the proof of Lemma \ref{lemconv} and conclude the existence of $L\in\mathbb C[\mathbf{x},\mathbf{y},\qo{\mathbf{x}},\qo{\mathbf{y}}]^*$ feasible for $\xidsep{\infty}$, so that $\xidsep{\infty}\le L(1)<\infty$. Then, by Lemma \ref{lemallequal}, we have $\tau_\mathrm{sep}(\rho)<\infty$, which shows $\rho$ is separable. \end{proof}
\begin{remark}\label{remweakbound} Note that all the results in this section remain valid if, in the definition (\ref{eqxit}) of the parameter $\xidsep{t}$, we omit the `tensor-type' constraint $M_{t-2}(G_\rho\otimes L)\succeq 0.$ Using this additional constraint permits however to define stronger bounds on the separable rank. The results also remain valid if, instead of the polynomials in the set $S_\rho$, we use either of the following sets of polynomials:
$\{\pm(\|x\|^2-\|y\|^2), \sqrt{\mathrm{Tr}(\rho)}- \|y\|^2\}$ corresponding to (\ref{eqab2}), or $\{\sqrt{\mathrm{Tr}(\rho)}-\|x\|^2, \pm (\sqrt{\mathrm{Tr}(\rho)}-\|y\|^2)\}$ corresponding to (\ref{eqbnorm1a}) (or, equivalently, $\{\mathrm{Tr}(\rho)-\|x\|^2, \pm (1-\|y\|^2)\}$ corresponding to (\ref{eqbnorm1})). \end{remark}
\subsection{Block-diagonal reduction for the parameter $\xib{sep}{t}(\cdot)$} \label{sec: block diagonalization}
In this section we indicate how to rewrite the program (\ref{eqxit}) defining $\xib{sep}{t}(\rho)$ in a more economical way. Observe that all the terms of each of the localizing polynomials $g\in S_\rho$ and the matrix $G_\rho$ have the same degree in $\mathbf{x}$ and in $\qo{\mathbf{x}}$, and also the same degree in $\mathbf{y}$ and in $\qo{\mathbf{y}}$. This enables us to show (see \cref{lemL0}) that we may restrict the optimization in (\ref{eqxit}) to linear functionals $L$ that satisfy the condition \begin{equation}\label{eqL0}
L(\mathbf{x}^\alpha\qo{\mathbf{x}}^{\alpha'}\mathbf{y}^\beta\qo{\mathbf{y}}^{\beta'})=0 \ \text{ if } |\alpha|\ne| \alpha' |\text{ or } |\beta| \ne |\beta'|. \end{equation}
Note that this implies in particular that $L(\mathbf{x}^\alpha\qo{\mathbf{x}}^{\alpha'}\mathbf{y}^\beta\qo{\mathbf{y}}^{\beta'})=0$ if $|\alpha+\alpha'|$ or $|\beta+\beta'|$ is odd.
The computational advantage is that, if $L$ satisfies (\ref{eqL0}), then the moment matrix $M_t(L)$ and the localizing moment matrices $M_{t-1}(gL)$ and $M_{t-2}(G_\rho\otimes L) $ have a block-diagonal form. To see this consider first the matrix $M_t(L)$, which is indexed by the set \begin{equation} \label{eq: index set}
I^t:=\{(\alpha,\alpha',\beta,\beta')\in ( \mathbb{N}^d)^4 : |\alpha+\beta+\alpha'+\beta'|\le t\} \end{equation}
(where the tuple $(\alpha,\alpha',\beta,\beta')$ corresponds to the monomial $\mathbf{x}^\alpha \qo{\mathbf{x}}^{\alpha'}\mathbf{y}^\beta\qo{\mathbf{y}}^{\beta'}$). Let us partition $I^t$ into sets depending on two integers $r = |\alpha|-|\alpha'|$ and $s = |\beta|-|\beta'|$. For $r,s \in\{-t,-t+1,\ldots, t\}$ let \begin{equation} \label{eq: partition}
I^t_{r,s} := \{(\alpha,\alpha',\beta,\beta') \in I^t: |\alpha|- |\alpha'| =r, \ |\beta|-|\beta'| = s\}, \end{equation} then we have \[ I^t = \bigcup_{r,s=-t}^t I^t_{r,s}. \] Then, with respect to this partition of its index set, the matrix $M_t(L)$ is block-diagonal and thus $M_t(L)\succeq 0$ if and only if its principal submatrices indexed by the sets $I^t_{r,s}$ are positive semidefinite. The analogous reasoning applies to each localizing moment matrix $M_{t-1}(gL)$ for $g\in S_\rho$ (indexed by $I^{t-1}$) and to $M_{t-2}(G_\rho\otimes L)$ (indexed by $I^{t-2}$).
\begin{lemma}\label{lemL0} In the definition of the parameter $\xidsep{t}$ we may restrict the optimization to linear functionals satisfying the additional condition (\ref{eqL0}). \end{lemma} \begin{proof}
Assume $L$ is feasible for $\xidsep{t}$; we construct another feasible solution $\tilde L$ with the same objective value: $\tilde L(1)=L(1)$, and satisfying (\ref{eqL0}). For this define $\tilde L(\mathbf{x}^\alpha \qo{\mathbf{x}}^{\alpha'} \mathbf{y} ^\beta\qo{\mathbf{y}}^{\beta'})=L(\mathbf{x}^\alpha \qo{\mathbf{x}}^{\alpha'} \mathbf{y} ^\beta\qo{\mathbf{y}}^{\beta'})$ if $|\alpha|=|\alpha'|$ and $|\beta|=|\beta'|$, and $\tilde L(\mathbf{x}^\alpha\qo{\mathbf{x}}^{\alpha'}\mathbf{y}^\beta\qo{\mathbf{y}}^{\beta'})=0$ otherwise. Then, $\tilde L(1)=L(1)$ and, by construction, $\tilde L$ satisfies (\ref{eqL0}). We claim that $\tilde L$ is feasible for program (\ref{eqxit}). Clearly, we have $\tilde L(\mathbf{x}\mathbf{x}^*\otimes \mathbf{y}\mathbf{y}^*)=\rho$. We now show that $M_t(\tilde L)\succeq 0$, $M_{t-1}(g\tilde L)\succeq 0$ for $g\in S_\rho$, and $M_{t-2}(G_\rho\otimes \tilde L)\succeq 0$.
We first show that $M_t(\tilde L)\succeq 0$. We use the partitioning $I^t = \cup_{r,s=-t}^t I^t_{r,s}$ of the row/column indices.
As the principal submatrix of $M_t(\tilde L)$ indexed by $I^t_{r,s}$ only involves evaluations of $L$ at monomials of the form $\mathbf{x}^\gamma\qo{\mathbf{x}}^{\gamma'}\mathbf{y}^\delta\qo{\mathbf{y}}^{\delta'}$ with $|\gamma|=|\gamma'|$ and $|\delta|=|\delta'|$, it coincides with the principal submatrix of $M_t(L)$ indexed by $I^t_{r,s}$ and thus it is positive semidefinite. Hence, by construction, the matrix $M_t(\tilde L)$ is block-diagonal with respect to the partition $I^t=\cup_{r,s=-t}^t I^t_{r,s}$ of its index set, with positive semidefinite diagonal blocks, which implies $M_t(\tilde L)\succeq 0$.
Consider now a localizing polynomial $g \in S_\rho$. Note that all its terms have the same degree in $\mathbf{x}$ and $\qo{\mathbf{x}}$ and also the same degree in $\mathbf{y}$ and $\qo{\mathbf{y}}$ (equal to 0 or 1). We consider the partition of the index set of $M_{t-1}(gL)$ as $I^{t-1}=\cup_{r,s=-t+1}^{t-1} I^{t-1}_{r,s}.$ Again, the principal submatrix of $M_{t-1}(g\tilde L)$ indexed by $I^{t-1}_{r,s}$ involves only values $\tilde L(\mathbf{x}^\gamma \qo{\mathbf{x}}^{\gamma'}\mathbf{y}^\delta \qo{\mathbf{y}}^{\delta'})$ with $|\gamma|=|\gamma'|$ and $|\delta|=|\delta'|$ and thus it coincides with the principal submatrix of $M_{t-1}(gL)$ indexed by $I^{t-1}_{r,s}$. Hence, the matrix $M_{t-1}(g\tilde L)$ is block-diagonal with respect to the partition $I^{t-1}=\cup_{r,s=-t+1}^{t-1} I^{t-1}_{r,s}$ of its index set, with positive semidefinite diagonal blocks, which implies $M_{t-1}(g\tilde L)\succeq 0$.
The analogous reasoning applies to showing that $M_{t-2}(G_\rho\otimes \tilde L)\succeq 0$. For this we consider the partition of its index set $[d]^2\times I^{t-2}$ into $\cup_{r,s=-t+2}^{t-2} ([d]^2\times I^{t-2}_{r,s})$ and observe that $M_{t-2}(G_\rho\otimes \tilde L)$ is block-diagonal with respect to this partition, with positive semidefinite diagonal blocks. \end{proof}
\paragraph{An alternative way to arrive at \cref{eqL0} by exploiting sign symmetries.} Let $\mathbb T$ be the \emph{circle group}, the multiplicative group of all complex numbers of modulus $1$: \[ \mathbb T = \{z \in \mathbb C \colon \abs{z} = 1\}. \] The set $\mathcal{SEP}_d$ is naturally invariant under the action of $(w_x, w_y) \in \mathbb T \times \mathbb T$ on vectors $(x,y) \in \mathbb C^d \times \mathbb C^d$ given by $(w_x,w_y) (x,y) = (w_x x, w_y y)$ (and its extension to states). Indeed, we have \[ (w_x,w_y) \cdot (xx^* \otimes yy^*) = (w_x x)(w_x x)^* \otimes (w_y y) (w_y y)^* = xx^* \otimes yy^*. \] Likewise, the localizing constraints are invariant under this group action, and this group action extends to the linear functionals $L$ used as variables in the definition of $\xidsep{t}$. Since $\mathbb T \times \mathbb T$ admits a Haar measure, in the derivation of $\xidsep{t}$ we may therefore restrict to linear functionals that are invariant under this group action. That is, we may assume that \[ L(\mathbf{x}^\alpha \qo{\mathbf{x}}^{\alpha'} \mathbf{y}^\beta \qo{\mathbf{y}}^{\beta'}) = w_x^{\abs{\alpha} - \abs{\alpha'}} w_y^{\abs{\beta} - \abs{\beta'}} L(\mathbf{x}^\alpha \qo{\mathbf{x}}^{\alpha'} \mathbf{y}^\beta \qo{\mathbf{y}}^{\beta'})\quad \text{ for all } (w_x,w_y)\in \mathbb T\times\mathbb T. \] This implies that \begin{equation*}
L(\mathbf{x}^\alpha\qo{\mathbf{x}}^{\alpha'}\mathbf{y}^\beta\qo{\mathbf{y}}^{\beta'})=0 \ \text{ if } |\alpha|\ne| \alpha' |\text{ or } |\beta| \ne |\beta'|. \end{equation*} Indeed, suppose for example that $\abs{\alpha} - \abs{\alpha'} =: r \neq 0$. Then using the above with $w_x = e^{\mathbf{i}\, \pi /r}$ shows that $L(\mathbf{x}^\alpha \qo{\mathbf{x}}^{\alpha'} \mathbf{y}^\beta \qo{\mathbf{y}}^{\beta'}) = - L(\mathbf{x}^\alpha \qo{\mathbf{x}}^{\alpha'} \mathbf{y}^\beta \qo{\mathbf{y}}^{\beta'})$ and hence $L(\mathbf{x}^\alpha \qo{\mathbf{x}}^{\alpha'} \mathbf{y}^\beta \qo{\mathbf{y}}^{\beta'})=0$.
Note that Dressler, Nie and Yang \cite{dressler2020separability} used this same group action to argue that, alternatively, one may restrict to $(x,y)\in \mathbb C^d\times \mathbb C^d$ having leading coordinates that are real nonnegative: $x_1,y_1\ge 0$. While this permits to eliminate variables (and work with $2(2d-1)$ instead of $4d$ real variables), this reduction does not permit to block-diagonalize the moment matrices as indicated above. We also refer to \cite{GP} and the recent paper \cite{TSOS} for more details about exploiting sign symmetries.
\paragraph{Block-diagonal reduction example.} To illustrate the effect of the block-diagonalization we consider an example with $\rho\in \mathcal H^3\otimes \mathcal H^3\simeq \mathcal H^9$ (i.e., $d_1=d_2=3$) and relaxation order $t=3$. In Table \ref{tableblockdiag} we indicate the respective sizes of the matrices involved in the program for $\xisep{3}$ with and without block-diagonalization (in columns `block' and `non-block', respectively). There, `\# entries' stands for $\sum_i m_i^2$, where $m_i$ are the sizes of the matrices involved in the program, and `\# variables' indicates the total number of variables in each case. The last line indicates the typical run time for such an instance; we collect the computational details later in \cref{secnumerics}. Note that the full program cannot be solved and thus block-diagonalization is crucial to enable computation. For the next case $(d_1,d_2)=(2,6)$ or $(4,4)$ one can compute the bound of order $t=2$ but not the bound of order $t=3$ even after block-diagonalization.
\begin{table}[!htbp]
\centering
\caption{Matrix sizes block- vs. non-block-diagonalized.}\label{tableblockdiag}
\begin{tabular}{| c | c | c |}
\toprule
Matrix & block & non-block \\
\hline \hline
$M_3(L)$ & $25 \times (12 \times 12 \text{ to } 96 \times 96)$ & $455 \times 455$\\
$M_2(gL)$ & $78 \times (6 \times 6 \text{ to } 38 \times 38)$ & $6 \times (91 \times 91)$ \\
$M_1(G_{\rho} \otimes L)$ & $5 \times (36 \times 36 \text{ to } 108\times 108)$ & $234\times 234$ \\
\hline
\# entries & $110480$ & $286624$ \\
\# variables & 6952 & 18564 \\
\hline
run time & {4.6 min} & memory error \\
\bottomrule
\end{tabular} \end{table}
\begin{remark} {As observed above, using the block-diagonalized version of the program for $\xisep{3}$ is crucial to be able to compute the bounds for some larger matrix sizes. We note however that the optimal solution to this program will not satisfy the flatness condition $\mathrm{rank}\, M_t(L)=\mathrm{rank}\, M_{t-1}(L)$ (with $t=2,3$). Indeed one can check that this flatness condition can hold only in the trivial case $\rho=0$. Intuitively this can be (roughly) explained by noting that, due to its symmetric structure,~$L$ tends to lie within the interior of the feasible region. Hence our approach, which produces lower bounds on $\mathrm{rank_{sep}}(\rho)$, can be viewed as being complementary to the approach in, e.g., \cite{dressler2020separability,Li_2020,NieZhang2016}, which uses flatness to produce separable decompositions of $\rho$ and thus upper bounds on $\mathrm{rank_{sep}}(\rho)$.} \end{remark}
\section{Extensions and connections to other matrix factorization ranks}\label{sec:extension}
Here we explain some simple extensions of the approach given in the previous section to related notions of factorization ranks.
Without going into details let us mention that the approach generalizes in a straightforward way to the separable rank of \emph{multipartite} separable quantum states. In that case we have an $n$-partite quantum state $\rho$ acting on $(\mathbb C^{d})^{\otimes n}$, and separability means that $\rho$ belongs to the set
\[\mathrm{cone}\{x_1 x_1^* \otimes x_2 x_2^* \otimes \ldots \otimes x_n x_n^*: x_1, \ldots, x_n \in \mathbb C^d, \|x_i\| = 1 \ (i \in [n])\}.\] In addition, one can use a different local dimension $d_i$ for each part (i.e., $x_i \in \mathbb C^{d_i}$).
The approach also extends to an alternative (but equivalent) definition of separability, which uses mixed states instead of pure states, i.e., where one requires $\rho$ to be of the form $\rho = \sum_{\ell=1}^r A_\ell \otimes B_\ell$ with $A_\ell, B_\ell \in \mathcal H_+^d$. Analogously, the smallest such integer $r$ is called the \emph{mixed separable rank} of $\rho$.
This notion has been considered, e.g., in~\cite{CDN19,de_las_Cuevas_2020,dressler2020separability} and mixed separable decompositions are called $S$-decompositions in \cite{NieZhang2016} (which deals with real states). To define bounds on the mixed separable rank one can follow the same approach as in \cref{sec: LB} but one here has to introduce more variables. Indeed, we now need variables $\mathbf{x}=(x_{ij})_{1\le i\le j\le d}$ and $\mathbf{y}=(y_{ij})_{1\le i\le j\le d}$ to model the entries of the matrices $A_\ell\in \mathcal H^d_+$ and $B_\ell\in\mathcal H^d_+$ (while we previously only needed variables $(x_i)_{i\in [d]}$ and $(y_i)_{i\in [d]}$ to model the vectors $a_\ell\in\mathbb C^d$ and $b_\ell\in \mathbb C^d$) and one should assume that the corresponding Hermitian matrices $X=(x_{ij})$ and $Y=(y_{ij})$ are positive semidefinite. One may again scale the variables so that they satisfy a boundedness condition $|x_{ij}|,|y_{ij}|\le \sqrt {\rho_{\max}}$. This enables to design hierarchies of lower bounds that converge to the mixed separable analog of the parameter $\tau_{\text{sep}}(\rho)$. The details are analogous and thus omitted.
In what follows we mention two other possible extensions, for the real separable rank and for the completely positive rank, where we give some more details as well as some numerical results.
\subsection{Specialization to bipartite real states} \label{sec: real separable rank} The treatment in Section \ref{sec: LB} for the separable rank can be adapted in an obvious manner to the case of the {\em real} separable rank. Here we are given a real symmetric bipartite state $\rho\in \mathcal {S}^d\otimes \mathcal {S}^d$, where $\mathcal {S}^d$ is the set of real symmetric $d\times d$ matrices. Then $\rho$ is called {\em real separable} if it admits a decomposition (\ref{eqseprank}) with all vectors $a_\ell,b_\ell\in \mathbb{R}^d$ real valued, and the smallest $r$ for which such a decomposition exists is the {\em real separable rank}, denoted $\mathrm{rank}_{\mathrm{sep}}^\R(\rho)$. {Note that it can be that a real state is separable but not real separable; this is the case for the state Sep3 discussed in Section \ref{secnumerics}.} One can define in an analogous manner the corresponding parameter $\tau_{\mathrm sep}^\mathbb{R}(\rho)$ and the hierarchy of bounds $\xidsepR{t}$ that converge asymptotically to $\tau_{\mathrm sep}^\mathbb{R}(\rho)$. The difference in the formulation of these parameters is that we now replace the complex conjugate by the real transpose operation and work with linear functionals $L$ acting on the real polynomial space $\mathbb{R}[\mathbf{x},\mathbf{y}]_{2t}$. So the parameter $\xidsepR{t}$ reads
\begin{equation} \label{eqxitR}
\begin{split}
\xidsepR{t} := \inf \Big\{ L(1) ~|~ &L: \mathbb{R}[\mathbf{x},\mathbf{y}]_{2t}\to\mathbb{R} \ \text{ s.t. } \\
& L(\mathbf{x}\mathbf{x}^T\otimes \mathbf{y}\mathbf{y}^T) = \rho, \\
& L \geq 0 \text{ on } \mathcal {M}(S_\rho)_{2t}, \\
& M_{t-2}(G_\rho\otimes L)=L({G_{\rho}} \otimes [\mathbf{x},\mathbf{y}]_{t-2}[\mathbf{x},\mathbf{y}]_{t-2}^T) \succeq 0\Big\}.
\end{split}
\end{equation} Again we may impose an additional block-diagonal structure on the positive semidefinite matrices entering this program. Indeed, since the polynomials involved in the constraints leading to the above program have the property that all their terms have an even degree in $\mathbf{x}$ and an even degree in $\mathbf{y}$, we may assume that the variable $L$ satisfies the condition \begin{equation}\label{eqL0R}
L(\mathbf{x}^\alpha\mathbf{y}^\beta)=0 \ \text{ if } |\alpha| \text{ or } |\beta| \text{ is odd.} \end{equation}
Note that this is the real analog of condition (\ref{eqL0}) in the complex case. The additional constraint (\ref{eqL0R}) permits to replace each of the positive semidefinite constraints for the matrices $M_t(L)$, $M_{t-1}(gL)$ for $g\in S_\rho$, and $M_{t-2}(G_\rho\otimes L)$ by four smaller positive semidefinite constraints, each of size roughly 1/4 of the original size. For this let $I^t$ denote the index set of the matrix $M_t(L)$ which we partition into $I^t=\cup_{a,b\in \{0,1\}}I^t_{a,b}$, where $I^t_{a,b}$ consists of the pairs $(\alpha,\beta)\in I^t$ with given parity $|\alpha|\equiv a$, $|\beta|\equiv b$ modulo 2. Then, with respect to this partition of its index set, the matrix $M_t(L)$ is block-diagonal and thus $M_t(L)\succeq 0$ if and only if $M_t(L)[I^t_{a,b}]\succeq 0$ for $a,b\in \{0,1\}$. The same block-diagonalization applies to the matrices $M_{t-1}(gL)$ for $g\in S_\rho$. For the matrix $M_{t-2}(G_\rho\otimes L)$ we consider the block-diagonalization obtained by partitioning its index set as $\cup_{a,b\in \{0,1\}} ([d^2] \times I^{t-2}_{a,b})$.
Some numerical results on the behaviour of the bounds will be given in the next section.
\subsection{Numerical results for bipartite complex and real states}\label{secnumerics}
Here we collect some numerical results that illustrate the behaviour of the bounds $\xidsep{t}$ and $\xidsepR{t}$ for different choices of localizing constraints, see \cref{table2,table3,table4} for examples at order $t =2,3,4$ respectively. Computations were made in Windows using Julia \cite{bezanson2017julia}, JuMP \cite{DunningHuchetteLubin2017} and MOSEK \cite{Mosek} with hardware specifications: i7-8750 CPU with 32 Gb Memory.\footnote{The code is available at: \url{https://github.com/JAndriesJ/sep-rank}} For our examples we will use the separable states Sep1, Sep2, and Sep3, and the entangled state Ent1 that we describe now. For numerical stability we do the computations with a scaling of these states so that they have trace equal to 1. We present the examples in matrix form with lines drawn to indicate the block structure $\rho=\big(\big(\rho_{ij,i'j'} \big)_{j,j'\in [d_2]}\big)_{i,i' \in [d_1]}$. Zero-valued entries are left blank. \begin{equation*}
\mathrm{Sep1} :=
\left[\begin{array}{cc|cc}
1 & & & \\
& & & \\
\hline
& & & \ \\
& & & 1 \\
\end{array}\right]
~;~
\mathrm{Sep2} :=
\left[\begin{array}{cc|cc}
2 & 1 & 1 & 1 \\
1 & 1 & 1 & 1 \\
\hline
1 & 1 & 1 & 1 \\
1 & 1 & 1 & 2 \\
\end{array}\right] \end{equation*}
\begin{equation*}
\mathrm{Sep3} :=
\left[\begin{array}{ccc|ccc}
4 & & & & & \\
& 4 & 2 & & & 2 \\
& 2 & 2 &1 & -1 & \\
\hline
& & 1 & 2 & 1 & -1 \\
& & -1 & 1 & 5 & 1 \\
& 2 & & -1 & 1 & 2 \\
\end{array}\right]
~;~
\mathrm{Ent1} =
\left[\begin{array}{ccc|ccc|ccc}
1& & & &1& & & &1\\
&2& &1& & & & & \\
& &\frac{1}{2}& & & &1& & \\
\hline
&1& &\frac{1}{2}& & & & & \\
1& & & &1& & & &1\\
& & & & &2& &1& \\
\hline
& &1& & & &2& & \\
& & & & &1& &\frac{1}{2}& \\
1& & & &1& & & &1
\end{array}\right]. \end{equation*} The separable states Sep1, Sep2, and Sep3 were previously studied for example in \cite{Chen_2012}, where it was moreover shown that for a separable state $\rho$ with local dimensions $(d_1,d_2)=(2,3)$ and birank $(r,s)$ one has $\mathrm{rank_{sep}}(\rho)=\max\{r,s\}$. The entangled state Ent1 was constructed by Choi in~\cite{Choi82} as the first example in dimension $(d_1,d_2) = (3,3)$ of an entangled state $\rho$ that satisfies the PPT condition.
In \cref{sec:tausep} we provided three different choices of localizing constraints in (\ref{eqabmax}), (\ref{eqab2}) and (\ref{eqbnorm1}), that we denote here as S1, S2 and S3, respectively. The examples show that the different choices lead to incomparable bounds. Indeed, let us use the notation S1~$<$~S2 as shorthand for ``there exists a $\rho$ such that $\xidsep{t}$ (using scaling $S1$) $< \xidsep{t}$ (using scaling $S2$)''. Then at level $t=2$ the state Sep1 demonstrates both S3~$<$~S1 and S2~$<$~S1, and at level $t=3$ Sep2 demonstrates both S2~$<$~S3 and S1~$<$~S3 and Sep3 demonstrates both S1~$<$~S2 and S3~$<$~S2. A case where the various constraints differ in ability to detect entanglement is provided by the state Ent1 at order $ t=2$.
As mentioned in \cref{sec: real separable rank}, there exist real states $\rho \in \mathcal {S}^d\otimes \mathcal {S}^d$ that are separable but do not admit a decomposition using real vectors $a_\ell, b_\ell \in \mathbb{R}^d$. Our bound $\xidsepR{2}$ provides a proof of the latter for the state Sep3: its real separable rank is infinity since our lower bound is infeasible (i.e., there exists a dual certificate that proves $\mathrm{rank}_{\mathrm{sep}}^\R(\text{Sep3}) = \infty$).
Finally we note that one sometimes needs to go beyond level $t=2$ (and thus beyond the PPT criterion) to reveal entanglement: with the localizing constraints S3 the bound for Ent1 is feasible at $t=2$, but infeasible at $t=3$.
In addition, we show in \cref{scat} a scatter plot of the bound $\xi_{3}^{\mathrm{sep}}(\rho)$ vs.~its computation time in seconds for 100 random complex matrices $\rho$ grouped and colored by the respective scalings S1, S2 and S3. These matrices are defined by $\rho = \sum_{j=1}^{5} a^{(j)} {({a}^{(j)})}^* \otimes b^{(j)} {({b}^{(j)})}^*, $ where $a^{(j)},b^{(j)} \in \mathbb C^3$ are random vectors whose entries are of the form $ x + \mathbf{i}\, y$ with $x,y \in \mathcal{N}(0,1)$. (We also normalize the trace here for numerical stability.) This construction guarantees separability and provides the upper bound $\mathrm{rank_{sep}}(\rho)\le 5$. Such states also satisfy the reverse inequality $\mathrm{rank_{sep}}(\rho) \geq 5$ almost surely (since $\mathrm{rank}(\rho)=5$ almost surely). We use this class of examples merely to test the quality of the bounds. From the figure we can draw the following observations. First, the bounds are concentrated around the means 2.7, 3.4 and 3.3 for the scalings S1, S2 and S3, respectively. Second, in this class of examples the S1 rescaling yields inferior bounds as compared to S2 and S3. Third, out of the hundred examples and for the three different scalings considered, no bound exceeded the value 4.
\begin{table}[!htbp]
\centering
\caption{Examples and numerical bounds level $t=2$}\label{table2}
\begin{tabular}{|c c c | c c c | c c c |c|c|}
\toprule
$\rho$ & $(d_1,d_2)$ & $\mathrm{birank}(\rho)$ & \multicolumn{3}{c}{$\xidsep{2}$} & \multicolumn{3}{c}{$\xidsepR{2}$} & $\mathrm{rank_{sep}}(\rho)$ & time \\
\hline\hline
{}&{}&{}&S1&S2&S3&S1&S2&S3&&\\
\hline
Sep1\cite{Chen_2012}&(2, 2)&(2, 2)&\textbf{2.0}&1.0&1.0&\textbf{2.0}&1.0&1.0&2&$<1$\\
Sep2\cite{Chen_2012} &(2, 2)&(3, 3)&1.421&1.0&1.0&1.421&1.0&1.0&3&$<1$ \\
Sep3\cite{Chen_2012} &(2, 3)&(4, 6)&1.333&1.0&1.0&*&*&*&6&$<1$ \\
Ent1\cite{Choi82}&(3, 3)&(4, 4)&2.069&\textbf{*}&1.525&2.069&\textbf{*}&1.525&$\infty$&$<1$ \\
\bottomrule
\end{tabular}
\caption{Examples and numerical bounds level $t=3$}\label{table3}
\begin{tabular}{|c c c | c c c | c c c |c|c|}
\toprule
$\rho$ & $(d_1,d_2)$ & $\mathrm{birank}(\rho)$ & \multicolumn{3}{c}{$\xidsep{3}$} & \multicolumn{3}{c}{$\xidsepR{3}$} & $\mathrm{rank_{sep}}(\rho)$ & time \\
\hline \hline
{}&{}&{}&S1&S2&S3&S1&S2&S3&&\\
\hline
Sep1&(2, 2)&(2, 2)&\textbf{2.0}&\textbf{2.0}&\textbf{2.0}&\textbf{2.0}&\textbf{2.0}&\textbf{2.0}&2&$<1$ \\
Sep2&(2, 2)&(3, 3)&1.909&2.0&\textbf{2.178}&1.909&2.0&2.178&3&$2$ \\
Sep3&(2, 3)&(4, 6)&2.423&3.0&2.790&*&*&*&6&25 \\
Ent1&(3, 3)&(4, 4)&-&-&\textbf{*}&-&\textbf{*}&\textbf{*}&$\infty$&267 \\
\bottomrule
\end{tabular}
\caption{Examples and numerical bounds level $t=4$}\label{table4}
\begin{tabular}{|c c c | c c c | c c c |c|c|}
\toprule
$\rho$ & $(d_1,d_2)$ & $\mathrm{birank}(\rho)$ & \multicolumn{3}{c}{$\xidsep{4}$} & \multicolumn{3}{c}{$\xidsepR{4}$} & $\mathrm{rank_{sep}}(\rho)$ & time \\
\hline \hline
{}&{}&{}&S1&S2&S3&S1&S2&S3&&\\
\hline
Sep1&(2, 2)&(2, 2)&\textbf{2.0}&\textbf{2.0}&\textbf{2.0}&\textbf{2.0}&\textbf{2.0}&\textbf{2.0}&2&105 \\
Sep2&(2, 2)&(3, 3)&\textbf{3.0}&\textbf{3.0}&\textbf{3.0}&\textbf{3.0}&\textbf{3.0}&\textbf{3.0}&3& 332 \\
\bottomrule
\multicolumn{11}{@{}p{5in}}{\footnotesize Run time given in seconds}\\
\multicolumn{11}{@{}p{5in}}{\footnotesize * : Infeasibility certificate returned}\\
\multicolumn{11}{@{}p{5in}}{\footnotesize - : Solver could not reach a conclusion (not a memory error)} \\
\end{tabular}
\end{table}
\begin{figure}\label{scat}
\end{figure}
\subsection{Stronger bounds for the completely positive rank} \label{sec:CP}
For a given integer $d \in \mathbb{N}$, the cone of completely positive $d \times d$ matrices is defined as \[ \mathcal{CP}_d := \mathrm{cone}\{ xx^T : x \in \mathbb{R}^d_{+} \}. \] The cone of completely positive matrices and its dual, the cone of copositive matrices, are well known for their expressive power. For example, many NP-hard problems can be formulated as linear optimization problems over these cones~\cite{dKP02,Bur09}. We refer to~\cite{BSM03} for many structural properties about the cone $\mathcal{CP}_d$. As in the case of separable states, given a completely positive matrix $A$ one can ask what is the smallest integer $r \in \mathbb{N}$ such that $A$ admits a decomposition of the form \begin{equation}\label{eqAcp} A = \sum_{\ell=1}^r a_\ell a_\ell^T \end{equation} for entrywise nonnegative vectors $a_\ell \in \mathbb{R}^d_{+}$ ($\ell \in [r]$). The smallest such $r$ is called the \emph{completely positive rank} of $A$ and denoted as $\mathrm{rank_{cp}}(A)$. In~\cite{FP16} the authors defined the parameter $\tau_{\mathrm{cp}}(A)$ as \begin{equation} \label{eq:taucp} \tau_{\mathrm{cp}}(A) := \inf\Big\{\lambda: \lambda>0, \frac{1}{\lambda} A \in \mathrm{conv} \{xx^T: x \in \mathbb{R}^d_{+},\ xx^T \leq A,\ xx^T \preceq A\} \Big \} \end{equation} to lower bound the completely positive rank (as well as an SDP-based bound $\tau_{\mathrm{cp}}^{\mathrm{sos}}(A)$). 
In~\cite{GdLL17a} the authors studied (among others) the completely positive rank from the polynomial optimization perspective and derived a hierarchy of semidefinite programming bounds, denoted here as {$\xib{cp}{t,(2019)}(A)$.} There the fact was used that, if $xx^T \preceq A$, then also $(xx^T)^{\otimes \ell} \preceq A^{\otimes \ell}$ for all $\ell \in \mathbb{N}$ and therefore the following constraints are valid \begin{equation} \label{eq: tensor} L((\mathbf{x}\mathbf{x}^T)^{\otimes \ell}) \preceq A^{\otimes \ell} \qquad \text{for all } \ell \in \mathbb{N} \end{equation} for the linear functional arising from the atomic decomposition (\ref{eqAcp}). Based on this the following bounds are defined in~\cite{GdLL17a} and shown to converge to $\tau_{\mathrm{cp}}(A)$ as $t \to \infty$:
\begin{equation} \label{eqxitcp}
\begin{split}
{\xib{cp}{t,(2019)}(A) }:= \inf \Big\{ L(1): &\ L \in [\mathbf{x}]_{2t}^*, \\
& L(\mathbf{x}\mathbf{x}^T) = A, \\
& L \geq 0 \text{ on } \mathcal {M}(\{\sqrt{A_{ii}} x_i - x_i^2 : i \in [d]\})_{2t}, \\
& L \geq 0 \text{ on } \mathcal {M}(\{A_{ij} - x_i x_j : i,j \in [d], i \neq j\})_{2t}, \\
& L((\mathbf{x}\mathbf{x}^T)^{\otimes \ell}) \preceq A^{\otimes \ell} \text{ for all } \ell \in [t] \Big\}.
\end{split}
\end{equation} The same convergence result holds if we replace the last constraint in (\ref{eqxitcp}) with the constraint \begin{equation} \label{eq: v CP} L \geq 0 \qquad \text{ on } \mathcal {M}(\{v^T (A-\mathbf{x}\mathbf{x}^T) v: v \in \mathbb{R}^d\})_{2t}. \end{equation} Using the same reasoning as in \cref{sec: poly sep}, we see that we can strengthen the parameter $\xib{cp}{t,(2019)}(A)$ by adding the constraint \begin{equation} \label{eq: GL cp} M_{t-1}((A-\mathbf{x}\mathbf{x}^T)\otimes L) = L((A-\mathbf{x}\mathbf{x}^T) \otimes [\mathbf{x}]_{t-1} [\mathbf{x}]_{t-1}^T) \succeq 0. \end{equation} {Let $\xib{cp}{t}(A)$ denote the parameter defined in this way, so that $\xib{cp}{t,(2019)}(A)\le \xib{cp}{t}(A)$.} Note that \cref{lemGLpos,lemMGLtoGL} show that \cref{eq: GL cp} implies \cref{eq: v CP}. We now show that \cref{eq: GL cp} in fact implies \cref{eq: tensor}, which means that adding \cref{eq: GL cp} strengthens both approaches provided in \cite{GdLL17a}; we present below numerical examples that illustrate this. To do so, we introduce the following notation. Let $\langle x \rangle$ denote the vector of noncommutative monomials in the variables $x_1,\ldots, x_d$. Then we can define the \emph{noncommutative localizing matrix} \begin{equation} \label{eq: Mnc} M^{\mathrm{nc}}((A-xx^T)\otimes L) := L( (A-xx^T) \otimes \langle x\rangle \langle x\rangle^T). \end{equation} Note that $M((A-xx^*)\otimes L) \succeq 0$ if and only if $M^{\mathrm{nc}}((A-xx^*)\otimes L) \succeq 0$ (since the latter is obtained by duplicating rows/columns of the former).
\begin{lemma}
Consider $A \in \mathbb{R}^{d \times d}$ and $L \in \mathbb{R}[\mathbf{x}]^*$. If $L(\mathbf{x}\mathbf{x}^T) = A$ and $M((A-\mathbf{x}\mathbf{x}^T)\otimes L) \succeq 0$, then \cref{eq: tensor} holds, i.e.,
\[
L((\mathbf{x}\mathbf{x}^T)^{\otimes \ell}) \preceq A^{\otimes \ell} \text{ for all } \ell \in \mathbb{N}.
\] \end{lemma}
\begin{proof}
As observed above, $M((A-\mathbf{x}\mathbf{x}^T)\otimes L) \succeq 0$ if and only if $M^{\mathrm{nc}}((A-\mathbf{x}\mathbf{x}^T)\otimes L) \succeq 0$. Note that for each $\ell \in \mathbb{N}$, the matrix $M^{\mathrm{nc}}((A-\mathbf{x}\mathbf{x}^T)\otimes L)$ contains $L( (A-\mathbf{x}\mathbf{x}^T) \otimes (\mathbf{x}\mathbf{x}^T)^{\otimes \ell-1})$ as a principal submatrix. To see this write the vector $\langle x\rangle$ of noncommutative monomials as
$
1 \oplus_{\ell \in \mathbb{N}} x^{\otimes \ell}
$
by grouping the monomials according to their degree.
With respect to this partition of its index set
the matrix $M^{\mathrm{nc}}((A-\mathbf{x}\mathbf{x}^T)\otimes L)$
has the matrices $L( (A-\mathbf{x}\mathbf{x}^T) \otimes (\mathbf{x}\mathbf{x}^T)^{\otimes \ell-1})$ as its diagonal blocks. Since $M^{\mathrm{nc}}((A-\mathbf{x}\mathbf{x}^T)\otimes L) \succeq 0$, we obtain
\[
A \otimes L((\mathbf{x}\mathbf{x}^T)^{\otimes \ell-1}) \succeq L((\mathbf{x}\mathbf{x}^T)^{\otimes \ell}) \qquad \text{ for all } \ell \in \mathbb{N}.
\]
Combined with $L(\mathbf{x}\mathbf{x}^T) = A$ this permits to show \cref{eq: tensor}:
\[
L((\mathbf{x}\mathbf{x}^T)^{\otimes \ell}) \preceq A \otimes L((\mathbf{x}\mathbf{x}^T)^{\otimes (\ell-1)}) \preceq A^{\otimes 2} \otimes L((\mathbf{x}\mathbf{x}^T)^{\otimes (\ell-2)}) \preceq \cdots \preceq A^{\otimes (\ell-1)} \otimes L(\mathbf{x}\mathbf{x}^T) = A^{\otimes \ell}. \qedhere
\] \end{proof}
We conclude this section with some numerical results. To demonstrate the impact of the constraints (\ref{eq: GL cp}) we compare our bounds $\xib{cp}{3}(A)$ to the bounds $\xib{cp}{3,(2019)}(A)$ from \cite{GdLL17a} on the cp-rank of some matrices $A$ known to have a high cp-rank, taken from \cite{BOMZE2014208}. The boldface entries in Table \ref{CP_bounds} show a strict improvement in the bounds. For these computations we used the high precision solver SDPA-GMP \cite{5612693} because MOSEK \cite{Mosek} and SDPA \cite{Yamashita10ahigh-performance, Yamashita03implementationand} could not certify solutions.\footnote{The code is available at: \url{https://github.com/JAndriesJ/ju-cp-rank}}
\begin{table}[!htbp]\label{CP_bounds}
\centering
\caption{Bounds for completely positive rank at level t=3.}
\begin{tabular}{|c c c c c c c |}
\toprule
$A$ & $\mathrm{rank}(A)$ &$n$& $\lfloor \frac{n^2}{4}\rfloor$ & $\xib{cp}{3,(2019)}(A)$ & $\xib{cp}{3}(A)$ & $\mathrm{rank_{cp}}(A)$ \\ [0.5ex]
\hline\hline
$M_7$ & 7&7&12&10.5&\textbf{11.4} & 14 \\
\hline
$\widetilde M_7$ &7&7&12&10.5&10.5 & 14 \\
\hline
$\widetilde M_8$ &8&8&16&13.82 &\textbf{14.5} & 18 \\
\hline
$\widetilde M_9$ &9&9&20&17.74&\textbf{18.4}& 26 \\
\bottomrule
\end{tabular} \end{table}
\section{Entanglement witnesses}\label{secDPS}
The moment approach we have developed in the previous section for bounding the separable rank of a state $\rho$ can be viewed as searching for a (non-normalized) measure on the product of two balls, with the additional property that, for any point $(x,y)$ in its support, we have $\rho - xx^* \otimes yy^* \succeq 0$. We will first observe in Section \ref{sec:mem} how this approach can also be used to detect entanglement, i.e., non-membership in the set $\mathcal{SEP}$.
As mentioned earlier one can also capture the set $\mathcal{SEP}$ by viewing it as a moment problem on the bi-sphere (the product of two unit spheres). In the rest of this section we will show that this second moment approach corresponds exactly to the well-known state extension perspective that leads to the Doherty-Parrilo-Spedalieri hierarchy of approximations of $\mathcal{SEP}$ from \cite{DPS04}.
\subsection{Entanglement witnesses based on the hierarchy of parameters $\xidsep{t}$}\label{sec:mem} Our approach to design lower bounds on the separable rank also directly leads to a way to detect non-membership in the set $\mathcal{SEP}$ or, in other words, to a way to witness entanglement of a state. Indeed, as shown in \cref{lemmembership}, a state $\rho$ is separable if and only if $\xidsep{t} \leq \mathrm{rank}(\rho)^2$ for all $t \ge 2$. In other words, $\rho$ is entangled if and only if $\xidsep{t}>\mathrm{rank}(\rho)^2$ for some integer $t\ge 2$ (which includes $\xidsep{t}=\infty$ in case the program defining $\xidsep{t}$ is infeasible).
In order to get a certificate of entanglement it is therefore convenient to consider the dual semidefinite program to the program (\ref{eqxit}) defining the parameter $\xidsep{t}$, which reads:
\begin{equation} \label{eqxit dual}
\begin{split}
\sup \Big\{ \langle \rho, \Lambda\rangle ~|~ \Lambda \in \mathbb C^{d \times d} \otimes \mathbb C^{d \times d} & \text{ Hermitian s.t. } \\
1-\langle \Lambda, xx^*\otimes yy^*\rangle \in
&\ \mathcal {M}(S_\rho)_{2t}
+ \mathrm{cone}\{ \langle G_\rho, \vec{p} \vec{p}^*\rangle: \vec p \in (\mathbb C[\mathbf{x},\qo{\mathbf{x}},\mathbf{y},\qo{\mathbf{y}}]_{t-2})^{d^2} \} \Big\}.
\end{split}
\end{equation}
\begin{lemma}
For any integer $t \geq 2$, the matrix $\Lambda = 0$ is a strictly feasible solution for \eqref{eqxit dual}.
\end{lemma}
\begin{proof}
First we observe that, for small $\lambda>0$, the matrix $\Lambda = \lambda \cdot I_{d^2}$ is a feasible solution for (\ref{eqxit dual}).
For this we show that the polynomial $1-\lambda \langle I, xx^*\otimes yy^*\rangle=1-\lambda \sum_i\sum_j x_i\overline x_iy_j\overline y_j$ lies in the quadratic module $\mathcal {M}(S_\rho)_{2t}$ for small $\lambda>0$. We know that there exists a scalar $R>0$ such that $R - \sum_{i} x_i \overline x_i - \sum_j y_j \overline y_j\in \cal M(S_\rho)$. Then also $R-\sum_{i} x_i \overline x_i$ and
$R- \sum_iy_i \overline y_i$ lie in $\mathcal {M}(S_\rho)_{2t}$, as well as
$(R-\sum_{i} x_i \overline x_i)(R+ \sum_iy_i \overline y_i)$ and
$(R+\sum_{i} x_i \overline x_i)(R- \sum_iy_i \overline y_i)$. Adding up the latter two polynomials we obtain that the polynomial
$R^2- \sum_i\sum_j x_i\overline x_i y_j\overline y_j$ belongs to $\mathcal {M}(S_\rho)_{2t}$, which shows that
$\Lambda = \lambda \cdot I_{d^2}$ is feasible for all $0<\lambda\le R^{-2}$.
We now show that any $\Lambda$ satisfying $\|\Lambda\|\leq R^{-2}$ is feasible (which shows there is a ball contained in the feasible region of~\eqref{eqxit dual}).
For this write
\[
1- \langle \Lambda,xx^*\otimes yy^*\rangle = \underbrace{1- \langle \|\Lambda\| I_{d^2}, xx^*\otimes yy^*\rangle}_{(a)} +
\underbrace{\langle \|\Lambda\| I_{d^2} - \Lambda, xx^*\otimes yy^* \rangle}_{(b)}.
\]
In the first part of the proof we have shown that term $(a)$ belongs to $\mathcal {M}(S_\rho)_{2t}$ if $\|\Lambda\| \leq R^{-2}$. In addition, term $(b)$ is a sum of squares since $\|\Lambda\| I_{d^2} - \Lambda$ is positive semidefinite. Together, this shows $1-\langle \Lambda, xx^*\otimes yy^*\rangle \in \mathcal {M}(S_\rho)_{2t}$ and therefore $\Lambda$ is feasible.
\end{proof}
As a consequence, strong duality holds between the program (\ref{eqxit}) defining $\xidsep{t}$ and its dual~\eqref{eqxit dual}. That is, if the program (\ref{eqxit dual}) is bounded then its optimal value is finite and equal to $\xidsep{t}$ and, otherwise, its optimal value is equal to $\infty$ and thus $ \xidsep{t}$ is infeasible. Therefore, we obtain that $\rho$ is entangled if and only if, for some integer $t\ge 2$,
there exists a matrix $\Lambda \in \mathbb C^{d \times d} \otimes \mathbb C^{d \times d}$ which is feasible for \eqref{eqxit dual} and satisfies $\langle \rho, \Lambda\rangle>\mathrm{rank}(\rho)^2$. In that case such matrix $\Lambda$ provides a certificate that the state $\rho$ is entangled.
\subsection{The Doherty-Parrilo-Spedalieri hierarchy: moment perspective}\label{sec:momentDPS}
Recall definition (\ref{SEP}) of the set of separable states $\mathcal{SEP}_d$, so $\rho \in\mathcal{SEP}_d$ if and only if it is of the form \begin{equation}\label{decrho} \rho=\sum_{\ell=1}^r \lambda_\ell \ a_\ell a_\ell^*\otimes b_\ell b_\ell^*, \end{equation}
where $\lambda_\ell>0$, $a_\ell,b_\ell\in \mathbb C^d$ with $\|a_\ell \|=1=\|b_\ell\|$. To this decomposition we can associate a linear functional on $\mathbb C[\mathbf{x},\qo{\mathbf{x}},\mathbf{y},\qo{\mathbf{y}}]$ that is a conic combination of evaluation functionals at points on the bi-sphere: $L = \sum_{\ell=1}^r \lambda_\ell L_{(a_\ell,b_\ell)}$. By construction, this linear functional is positive on Hermitian squares, it vanishes on the ideal generated by $1-\|\mathbf{x}\|^2$ and $1-\|\mathbf{y}\|^2$ (called the {\em bi-sphere ideal} for short) and it satisfies $L(xx^* \otimes yy^*) = \rho$. This naturally suggests a hierarchy of outer approximations to the set $\mathcal{SEP}$: a state $\rho$ belongs to the $t$-th level of this hierarchy if there exists an $L$ that satisfies these constraints for polynomials of degree at most~$2t$. Formally, we consider the set \begin{equation} \label{eq: dpst} \begin{split}
{\mathcal{R}}_{t} := \{ \rho \in \mathcal{H}^d\otimes \mathcal{H}^d :\ &\exists L:\mathbb C[\mathbf{x}, \qo \mathbf{x}, \mathbf{y}, \qo \mathbf{y}]_{2t} \to \mathbb C \text{ Hermitian s.t. }\\
& L(\mathbf{x}\mathbf{x}^*\otimes \mathbf{y}\mathbf{y}^*)=\rho,\\
& L = 0 \text{ on } \mathcal I(1-\|\mathbf{x}\|^2,1-\|\mathbf{y}\|^2 )_{2t},\\
& M_t(L)\succeq 0
\}. \end{split} \end{equation} We will show in Section \ref{sec: state extension perspective} that this set is in fact closely related to the DPS hierarchy of outer approximations to the set $\mathcal{SEP}_d$: if we introduce separate degree-bounds on the $\mathbf{x},\qo{\mathbf{x}}$ variables and the $\mathbf{y},\qo{\mathbf{y}}$ variables, then we recover the original formulation from \cite{DPS04}.
First we note that we can easily show that the sets ${\mathcal{R}}_{t}$ converge to $\mathcal{SEP}$, i.e., $\mathcal{SEP}_d=\bigcap_{t\ge 2}{\mathcal{R}}_{t}$, using the tools from polynomial optimization (\cref{theomainTchakaloff}).
\begin{proposition} \label{lem: convergence set t} We have: $\mathcal{SEP}_d=\bigcap_{t\ge 2} {\mathcal{R}}_{t}.$ \end{proposition}
\begin{proof}
Assume $\rho\in \bigcap_{t\ge 2}{\mathcal{R}}_{t}$; we show $\rho\in \mathcal{SEP}_d$. For any $t\ge 2$ let $L_t$ be an associated certificate of membership in ${\mathcal{R}}_{t}$. Then we have $L_t(1)=L_t(\|\mathbf{x}\|^2\|\mathbf{y}\|^2)= \mathrm{Tr}(\rho)$. Hence it follows from \cref{pointconv} that the sequence $(L_t)_t$ has a pointwise converging subsequence, with limit $L\in \mathbb C[\mathbf{x},\qo{\mathbf{x}},\mathbf{y},\qo{\mathbf{y}}]^*$.
Then $L\ge 0$ on $\Sigma$ and $L=0$ on $\mathcal I(1-\|\mathbf{x}\|^2,1-\|\mathbf{y}\|^2)$. Using \cref{theomainTchakaloff} we can conclude that there exist scalars $\mu_\ell>0$ and points $(a_\ell,b_\ell)\in \mathbb C^d\times \mathbb C^d$ with $\|a_\ell\| = \|b_\ell\|=1$ such that $L(p)=\sum_{\ell=1}^K \mu_\ell p(a_\ell,b_\ell)$ when $p$ has degree at most 4. In particular, we obtain $$\rho=L(\mathbf{x}\mathbf{x}^*\otimes \mathbf{y}\mathbf{y}^*) =\sum_{\ell=1}^K \mu_\ell \ a_\ell a_\ell^*\otimes b_\ell b_\ell^*,$$ which shows that $\rho\in \mathcal{SEP}_d$. \end{proof}
Next, we reformulate the positivity condition $M_t(L)\succeq 0$ in a way that will be useful for making the link to the DPS hierarchy. As observed in \cref{sec: block diagonalization}, we may additionally require the linear functionals in \cref{eq: dpst} to satisfy the constraint \cref{eqL0}, which we repeat here for convenience: \begin{equation}\label{eqL02}
L(\mathbf{x}^\alpha\qo{\mathbf{x}}^{\alpha'}\mathbf{y}^\beta\qo{\mathbf{y}}^{\beta'})=0 \ \text{ if } |\alpha|\ne |\alpha'| \text{ or } |\beta| \ne |\beta'|. \end{equation} This permits us to block-diagonalize the associated moment matrix $M_t(L)$ according to the partition given in \cref{eq: index set,eq: partition}, thus permitting us to replace the constraint $M_t(L)\succeq 0$ by $M_t(L)[I^t_{r,s}]\succeq 0$ for $r,s\in [-t,t]$.
In fact, using the bi-sphere ideal constraint, one can reduce the size of these matrices
even further and replace the matrices $M_t(L)[I^t_{r,s}]$ by their submatrices $M_t(L)[I^{=t}_{r,s}]$, where the sets $I^{=t}_{r,s}\subseteq I^t_{r,s}$ are defined by \begin{equation} \label{eq: block diag hom t}
I^{=t}_{r,s} :=\left\{(\alpha,\alpha',\beta,\beta') \in (\mathbb{N}^d)^4: |\alpha+\alpha'+\beta+\beta'|=t, \ |\alpha|-|\alpha'| = r, \ |\beta|-|\beta'|=s\right\}. \end{equation} In other words we can show the following reformulation of the set ${\mathcal{R}}_t$: \begin{equation} \label{eq: dpst SDP} \begin{split}
{\mathcal{R}}_{t} = \big\{ \rho \in \mathcal{H}^d\otimes \mathcal{H}^d :\ &\exists L:\mathbb C[\mathbf{x}, \qo \mathbf{x}, \mathbf{y}, \qo \mathbf{y}]_{2t} \to \mathbb C \text{ Hermitian s.t. }\\
& L(\mathbf{x}\mathbf{x}^*\otimes \mathbf{y}\mathbf{y}^*)=\rho,\\
& L = 0 \text{ on } \mathcal I(1-\|\mathbf{x}\|^2,1-\|\mathbf{y}\|^2 )_{2t},\\
& M_{t}(L)[I^{=t}_{r,s}] \succeq 0 \text{ for all } r,s \in \{-t,-t+1,\ldots,t\} \big\}. \end{split} \end{equation} We will show this result in a slightly different setting (closer to that of the original formulation of the DPS hierarchy). Similar arguments as those used in the proof of Lemma \ref{prop: restrict to hom} below can be used to show the equivalence between \eqref{eq: dpst} and \eqref{eq: dpst SDP}.
In order to connect the moment approach on the bi-sphere to the original formulation of the DPS hierarchy we need to introduce a separate degree bound on the $\mathbf{x},\qo{\mathbf{x}}$ variables and the $\mathbf{y},\qo{\mathbf{y}}$ variables. For integers $k,t \ge 1$ we let $\mathbb C[\mathbf{x},\qo{\mathbf{x}},\mathbf{y}, \qo{\mathbf{y}}]_{k,t}$ (resp., $\mathbb C[\mathbf{x},\qo{\mathbf{x}},\mathbf{y},\qo{\mathbf{y}}]_{=k,=t}$) denote the set of polynomials that have degree at most $k$ (resp., equal to $k$) in $\mathbf{x},\qo{\mathbf{x}}$ and degree at most $t$ (resp., equal to $t$) in $\mathbf{y},\qo{\mathbf{y}}$, and we set \[ \Sigma_{2k,2t}= \mathrm{cone}\{ p \overline p: p\in \mathbb C[\mathbf{x},\qo{\mathbf{x}},\mathbf{y},\qo{\mathbf{y}}]_{k,t}\}=\Sigma \cap \mathbb C[\mathbf{x},\qo{\mathbf{x}},\mathbf{y}, \qo{\mathbf{y}}]_{2k,2t},\quad \Sigma_{=2k,=2t}=\Sigma\cap \mathbb C[\mathbf{x},\qo{\mathbf{x}},\mathbf{y},\qo{\mathbf{y}}]_{=2k,=2t}. \] We define the sets \begin{equation} \label{eq: dps kl} \begin{split}
{\mathcal{R}}_{k, t} := \{ \rho \in \mathcal{H}^d\otimes \mathcal{H}^d :\ &\exists L:\mathbb C[\mathbf{x}, \qo \mathbf{x}, \mathbf{y}, \qo \mathbf{y}]_{2k,2t} \to \mathbb C \text{ Hermitian s.t. }\\
& L(\mathbf{x}\mathbf{x}^*\otimes \mathbf{y}\mathbf{y}^*)=\rho,\\
& L = 0 \text{ on } \mathcal I(1-\|\mathbf{x}\|^2,1-\|\mathbf{y}\|^2 )_{2k,2t},\\
& L \geq 0 \text{ on } \Sigma_{2k,2t} \}. \end{split} \end{equation} Note the inclusion ${\mathcal{R}}_{k,t}\subseteq {\mathcal{R}}_{k+t}$. The two regimes that we will be interested in are $k=1$ and $k=t$ since, as we will show in Section \ref{sec: state extension perspective}, the sets ${\mathcal{R}}_{1,t}$ and ${\mathcal{R}}_{t,t}$ (for $t\in \mathbb{N}$) coincide with the approximation hierarchies $\mathcal{DPS}_{1,t}$ and $\mathcal{DPS}_{t,t}$ from \cite{DPS04}.
We will give a more economical reformulation for the positivity condition on $L$ in \cref{propRkl}. For this we first show that for linear functionals $L$ that vanish on the bi-sphere ideal and satisfy \eqref{eqL02} the following two positivity conditions are equivalent: $L \geq 0$ on $\Sigma_{2k,2t}$ and $L \geq 0$ on $\Sigma_{=2k,=2t}$. That is, we only need to require positivity on \emph{homogeneous} polynomials.
\begin{lemma} \label{prop: restrict to hom}
Let $L \in \mathbb C[\mathbf{x},\qo{\mathbf{x}},\mathbf{y},\qo{\mathbf{y}}]_{2k,2t}^*$ be such that $L =0$ on $\mathcal I(1-\|\mathbf{x}\|^2,1-\|\mathbf{y}\|^2 )_{2k,2t}$ and $L$ satisfies \cref{eqL02}. Then we have $L \geq 0$ on $\Sigma_{2k,2t}$ if and only if $L \geq 0$ on $\Sigma_{=2k,=2t}$. \end{lemma}
\begin{proof} Assume $L \geq 0$ on $\Sigma_{=2k,=2t}$. We show that $L$ is positive on Hermitian squares in $\Sigma_{2k,2t}$. Let $p \in \mathbb C[\mathbf{x},\qo{\mathbf{x}},\mathbf{y},\qo{\mathbf{y}}]_{k,t}$, we want to show that $L(p \overline p)\ge 0$. For this we decompose $p$ as $p=p_{00}+p_{01}+p_{10}+p_{11}$, where, for $a,b\in \{0,1\}$, we group in $p_{ab}$ the terms of $p$ that involve a monomial $\mathbf{x}^\alpha\qo{\mathbf{x}}^{\alpha'}\mathbf{y}^\beta\qo{\mathbf{y}}^{\beta'}$ with
$|\alpha+\alpha'| \equiv k-a$ modulo 2 and $|\beta+\beta'| \equiv t-b$ modulo 2.
Then we have
\[
L(p \overline p) = \sum_{a,b \in \{0,1\}} L(p_{ab}\overline p_{ab}),
\]
where we use \cref{eqL02} to see that $L(p_{ab}\overline p_{a'b'})=0$ if $(a,b)\ne (a',b')$. Hence it remains to show that $L(p_{ab}\overline p_{ab})\ge 0$ for $a,b\in\{0,1\}$. Write $p_{ab}=\sum c_{\alpha\alpha'\beta\beta'} \mathbf{x}^\alpha\qo{\mathbf{x}}^{\alpha'}\mathbf{y}^\beta\qo{\mathbf{y}}^{\beta'}$. We define the polynomial \[
q_{ab}= \sum c_{\alpha\alpha'\beta\beta'} \mathbf{x}^\alpha\qo{\mathbf{x}}^{\alpha'}\mathbf{y}^\beta\qo{\mathbf{y}}^{\beta'} \|\mathbf{x}\|^{k - a - |\alpha + \alpha'|}\|\mathbf{y}\|^{t-b-|\beta+\beta'|}. \]
Note that in each term the powers of $\|\mathbf{x}\|$ and $\|\mathbf{y}\|$ are by construction both even and nonnegative and therefore the polynomial $q_{ab}$ is homogeneous of degree $k-a$ in $\mathbf{x},\qo{\mathbf{x}}$ and of degree $t-b$ in $\mathbf{y},\qo{\mathbf{y}}$. Since $L$ vanishes on the truncated ideal generated by $1-\|\mathbf{x}\|^2$ and $1-\|\mathbf{y}\|^2$ we have \[
L(p_{ab}\overline p_{ab})= L(q_{ab} \overline q_{ab})= L(q_{ab} \overline q_{ab}\|\mathbf{x}\|^{2a}\|\mathbf{y}\|^{2b})\ge 0, \]
where the last inequality follows from the fact that $q_{ab} \overline q_{ab}\|\mathbf{x}\|^{2a}\|\mathbf{y}\|^{2b} \in \Sigma_{=2k,=2t}$. \end{proof}
We now proceed to define the analog of \cref{eq: block diag hom t} for the $(k,t)$-setting. Given two integers $r \in \{-k,-k+2,-k+4,\ldots,k\}$ and $s \in \{-t, -t+2,-t+4,\ldots,t\}$ define the set of (exponents of) monomials \begin{equation} \label{eq: partition kl}
I^{=k,=t}_{r,s} :=\left\{(\alpha,\alpha',\beta,\beta') \in (\mathbb{N}^d)^4: |\alpha+\alpha'|=k, \ |\alpha|-|\alpha'|=r, \ |\beta+\beta'| = t, \ |\beta|-|\beta'|=s \right\}. \end{equation} Note that we restrict our attention to $r \equiv k \bmod 2$ and $s \equiv t \bmod 2$. If $r,s$ do not satisfy these conditions then $I^{=k,=t}_{r,s}=\emptyset$.
We then have the following semidefinite representation of ${\mathcal{R}}_{k,t}$.
\begin{proposition}\label{propRkl} For $k,t \in \mathbb{N}$ we have \begin{equation} \label{eq: dpskl SDP} \begin{split}
{\mathcal{R}}_{k,t} = \big\{ \rho \in \mathcal{H}^d\otimes \mathcal{H}^d :\ &\exists L:\mathbb C[\mathbf{x}, \qo \mathbf{x}, \mathbf{y}, \qo \mathbf{y}]_{2k,2t} \to \mathbb C \text{ Hermitian s.t. }\\
& L(\mathbf{x}\mathbf{x}^*\otimes \mathbf{y}\mathbf{y}^*)=\rho,\\
& L = 0 \text{ on } \mathcal I(1-\|\mathbf{x}\|^2,1-\|\mathbf{y}\|^2 )_{2k,2t},\\
& M_{k,t}(L)[I^{=k,=t}_{r,s}] \succeq 0 \text{ for all } r \in [-k,k], s\in [-t,t]
\big\}. \end{split} \end{equation} \end{proposition} \begin{proof} As mentioned above, we may add the constraint \eqref{eqL02} to the program \eqref{eq: dps kl}. It then follows from \cref{prop: restrict to hom} that we may replace the condition $L \geq 0$ on $\Sigma_{2k,2t}$ with $L \geq 0$ on $\Sigma_{=2k,=2t}$. Finally we observe that the index sets $I^{=k,=t}_{r,s}$ block-diagonalize $M_{=k,=t}(L)$. Indeed, let $p \in \mathbb C[\mathbf{x},\qo{\mathbf{x}},\mathbf{y},\qo{\mathbf{y}}]_{=k,=t}$ and write $p = \sum_{r,s} p_{r,s}$ where $p_{r,s}$ is the polynomial corresponding to the terms of $p$ with exponents in $I^{=k,=t}_{r,s}$. Then $p_{r,s} \overline p_{r',s'}$ is a linear combination of monomials of the form \[ \mathbf{x}^{\alpha}\qo{\mathbf{x}}^{\alpha'} \mathbf{y}^{\beta} \qo{\mathbf{y}}^{\beta'} \qo{\mathbf{x}}^{\gamma}\mathbf{x}^{\gamma'} \qo{\mathbf{y}}^{\delta} \mathbf{y}^{\delta'}, \]
where we have the following for the degrees in $\mathbf{x},\qo{\mathbf{x}}$. By assumption $|\alpha|-|\alpha'|=r$ and $|\gamma|-|\gamma'|=r'$, and therefore the degree in $\mathbf{x}$ minus the degree in $\qo{\mathbf{x}}$ is $(|\alpha| +|\gamma'|) - (|\alpha'|+|\gamma|) = r-r'$. Similarly, the degree in $\mathbf{y}$ minus the degree in $\qo{\mathbf{y}}$ equals $s-s'$. Hence, if $(r,s) \neq (r',s')$, then \cref{eqL02} shows that $L(p_{r,s} \overline p_{r',s'})=0$. \end{proof}
Finally, we observe the following alternative formulation of the positivity conditions in \cref{eq: dpskl SDP} in terms of the noncommutative moment matrices (cf.~\cref{eq: Mnc}): \begin{equation} \label{eq: ppt on L} M_{k,t}(L)[I^{=k,=t}_{r,s}] \succeq 0 \Longleftrightarrow L\big( (\mathbf{x}\mathbf{x}^*)^{\otimes (k+r)/2} \otimes (\qo{\mathbf{x}}\, \qo{\mathbf{x}}^*)^{\otimes (k-r)/2} \otimes (\mathbf{y}\mathbf{y}^*)^{\otimes (t+s)/2} \otimes (\qo{\mathbf{y}} \, \qo{\mathbf{y}}^*)^{\otimes (t-s)/2}\big) \succeq 0. \end{equation} Although less efficient, this reformulation will permit to connect the program \eqref{eq: dpskl SDP} to the original formulation of the DPS hierarchy $\mathcal{DPS}_{1,t}$ (see the proof of Proposition \ref{prop: equality}).
The analog of \cref{lem: convergence set t} holds for the sets ${\mathcal{R}}_{k,t}$: $$\bigcap_{k,t \ge 1}{\mathcal{R}}_{k,t}= \bigcap_{t\ge 2} {\mathcal{R}}_{t,t} =\mathcal{SEP}_d;$$ the argument is similar, based on standard tools from polynomial optimization (\cref{theomainTchakaloff}). In fact, even the (weaker) sets ${\mathcal{R}}_{1,t}$ already converge to $\mathcal{SEP}$, i.e., we have \begin{equation}\label{eqR1t} \bigcap_{t\ge 2} {\mathcal{R}}_{1,t}=\mathcal{SEP}_d; \end{equation}
in other words, in the moment approach it is sufficient to let only the degree in $\mathbf{y},\qo{\mathbf{y}}$ grow. We will show this in \cref{theoR1t} below, using
the tools for matrix-valued polynomial optimization (\cref{theomainmatrix}).
\subsection{Convergence of the sets ${\mathcal{R}}_{1,t}$ to $\mathcal{SEP}$}\label{sec:convergenceR1t}
We first reformulate the set ${\mathcal{R}}_{1,t}$ from \cref{eq: dps kl,eq: dpskl SDP} (case $k=1$) in terms of matrix-valued linear functionals $\mathcal L$ on the polynomial space $\mathbb C[\mathbf{y},\qo{\mathbf{y}}]$.
\begin{lemma} For $t \in \mathbb{N}$ we have \begin{equation} \label{eq: dps1l SDP} \begin{split}
{\mathcal{R}}_{1,t} = \big\{ \rho \in \mathcal{H}^d\otimes \mathcal{H}^d :\ &\exists \mathcal L:\mathbb C[\mathbf{y}, \qo{\mathbf{y}}]_{2t} \to \mathcal H^d \text{ Hermitian s.t. }\\
& \mathcal L(\mathbf{y}\mathbf{y}^*)=\rho,\\
& \mathcal L = 0 \text{ on } \mathcal I(1-\|\mathbf{y}\|^2 )_{2t},\\
& M_{t}(\mathcal L) \succeq 0 \big\}. \end{split} \end{equation} \end{lemma}
\begin{proof} Let us use $\widehat {\mathcal{R}}_{1,t}$ to denote the set defined in \eqref{eq: dps1l SDP}. We show that ${\mathcal{R}}_{1,t} = \widehat {\mathcal{R}}_{1,t}$ using the formulation of ${\mathcal{R}}_{1,t}$ given in \cref{eq: dps kl}.
First consider $\rho \in {\mathcal{R}}_{1,t}$ and let $L:\mathbb C[\mathbf{x}, \qo{\mathbf{x}}, \mathbf{y}, \qo{\mathbf{y}}]_{2,2t} \to \mathbb C$ be an associated certificate. Define $\mathcal L:\mathbb C[\mathbf{y},\qo{\mathbf{y}}]_{2t} \to \mathcal H^d$ by \[ \mathcal L(p) = L(\mathbf{x} \mathbf{x}^* p) = \big(L(x_i \overline x_j p)\big)_{i,j=1}^d \qquad \text{ for all } p \in \mathbb C[\mathbf{y},\qo{\mathbf{y}}]_{2t}. \]
So $\mathcal L=(L_{ij})_{i,j=1}^d$ with $L_{ij}(p)=L(x_i\overline x_j p)$. By construction $\mathcal L(\mathbf{y}\mathbf{y}^*)=\rho$. To see that $\mathcal L = 0$ on $\mathcal I(1-\|\mathbf{y}\|^2)_{2t}$, it suffices to observe that, for any $p \in \mathcal I(1-\|\mathbf{y}\|^2)_{2t}$ and any $i,j \in [d]$, the polynomial $x_i \overline x_j p$ lies in $\mathcal I(1-\|\mathbf{x}\|^2,1-\|\mathbf{y}\|^2)_{2,2t}$. To show that $M_t(\mathcal L) \succeq 0$ we use (the degree truncated version of) \cref{lemMGLpos}. That is, we use that $M_t(\mathcal L) \succeq 0$ is equivalent to $\sum_{i,j \in [d]} L_{ij}(p_i \overline p_j) \geq 0$ for all $ (p_1,\ldots, p_d) \in (\mathbb C[\mathbf{y},\qo{\mathbf{y}}]_t)^d$. We have $\sum_{i,j \in [d]} L_{ij}(p_i \overline p_j) = L((\sum_i x_i p_i)(\sum_i x_i p_i)^*) \geq 0$, where the last inequality follows from the fact that $(\sum_i x_i p_i)(\sum_i x_i p_i)^* \in \Sigma_{2,2t}$. This shows that if $\rho \in {\mathcal{R}}_{1,t}$, then $\rho \in \widehat {\mathcal{R}}_{1,t}$.
Conversely, let $\rho \in \widehat {\mathcal{R}}_{1,t}$ and let $\mathcal L:\mathbb C[\mathbf{y},\qo{\mathbf{y}}]_{2t} \to \mathcal H^d$ be an associated certificate. We write $\mathcal L(p) = \big(L_{ij}(p)\big)_{i,j \in [d]}$ with $L_{ij} \in \mathbb C[\mathbf{y},\qo{\mathbf{y}}]_{2t}^*$ for all $i,j \in [d]$. We define a linear functional $L$ on $\mathbb C[\mathbf{x},\qo{\mathbf{x}},\mathbf{y},\qo{\mathbf{y}}]_{2,2t}$ as follows. For a polynomial $p \in \mathbb C[\mathbf{y},\qo{\mathbf{y}}]_{2t}$ we set $L(p) = \sum_{i\in[d]} L_{ii}(p)$ and, for each $i,j \in [d]$, we set $L(x_i \overline x_j p) = L_{ij}(p)$. We extend $L$ to $\mathbb C[\mathbf{x},\qo{\mathbf{x}},\mathbf{y},\qo{\mathbf{y}}]_{2,2t}$ by setting $L(x^{\alpha} \overline x^{\beta} p) = 0$ for all $\alpha, \beta \in \mathbb{N}^d$ with $(|\alpha|,|\beta|) \not \in \{(0,0), (1,1)\}$, and then extending by linearity. We show that $L$ is a certificate for $\rho \in {\mathcal{R}}_{1,t}$. First observe that $L(\mathbf{x} \mathbf{x}^* \otimes \mathbf{y} \mathbf{y}^*) = \big(L(x_i \overline x_j \mathbf{y} \mathbf{y}^*)\big)_{i,j\in [d]} = \mathcal L(\mathbf{y} \mathbf{y}^*) = \rho$. By construction we have $L((1-\sum_{i} x_i \overline x_i) p)=0$ for all $p \in \mathbb C[\mathbf{y},\qo{\mathbf{y}}]_{2t}$. Moreover, by assumption, $\mathcal L((1-\sum_i y_i \overline y_i) p)=0$ for all $p \in \mathbb C[\mathbf{y},\qo{\mathbf{y}}]_{2t-2}$. Using the construction of $L$, this implies that $L((1-\sum_i y_i \overline y_i) p) = 0$ for all $p \in \mathbb C[\mathbf{x},\qo{\mathbf{x}},\mathbf{y},\qo{\mathbf{y}}]_{2,2t-2}$. Together, this shows that $L=0$ on $\mathcal I(1-\|\mathbf{x}\|^2,1-\|\mathbf{y}\|^2)_{2,2t}$.
It remains to show that $L \geq 0$ on $\Sigma_{2,2t}$. Let $p \in \mathbb C[\mathbf{x},\qo{\mathbf{x}},\mathbf{y},\qo{\mathbf{y}}]_{1,t}$, we show that $L(p\overline p)\ge 0$.
For this, write $p = p_0 + p_1 + p_2$, where $p_0$ has degree $0$ in $\mathbf{x},\qo{\mathbf{x}}$, $p_1$ has degree $(1,0)$ in $(\mathbf{x},\qo{\mathbf{x}})$, and $p_2$ has degree $(0,1)$ in $(\mathbf{x},\qo{\mathbf{x}})$. By definition, $L(p_a \overline p_b)=0$ if $a \neq b$ and thus $L(p\overline p)=L(p_0\overline p_0)+L(p_1\overline p_1)+L(p_2\overline p_2)$. We have $L(p_0 \overline p_0) = \sum_{i=1}^d L_{ii}(p_0\overline p_0)\ge 0$, since $M_t(L_{ii})\succeq 0$ for each $i\in [d]$ as $M_t(\mathcal L)\succeq 0$. Next we show that $L(p_1 \overline p_1) \geq 0$. To do so, write $p_1 = \sum_{i=1}^d x_i q_i$ where $q_i \in \mathbb C[\mathbf{y},\qo{\mathbf{y}}]_{t}$ for $i \in [d]$. It then follows that $L(p_1 \overline p_1) = \sum_{i,j=1} ^d L(x_i \overline x_j q_i \overline q_j) = \sum_{i,j=1}^d L_{ij}(q_i\overline q_j)=\langle \mathcal L, \vec {q} \vec{q}^*\rangle \geq 0$ where $\vec{q} = (q_1,\ldots, q_d) \in (\mathbb C[\mathbf{y},\qo{\mathbf{y}}]_{t})^d$ and the last inequality follows from $M_{t}(\mathcal L) \succeq 0$ (using~\cref{lemMGLpos}). This also directly implies that $L(p_2\overline p_2)\ge 0$. It follows that $L \geq 0$ on $\Sigma_{2,2t}$ and thus $\rho \in {\mathcal{R}}_{1,t}$. \end{proof}
We can now show the convergence of the sets ${\mathcal R}_{1,t}$ to $\mathcal{SEP}$. The proof is analogous to that of \cref{lem: convergence set t}, except that it now relies on the results for matrix-valued polynomial optimization (\cref{theomainmatrix,theoTchakmatrix}).
\begin{theorem}\label{theoR1t} We have $\mathcal{SEP}_d = \bigcap_{t \geq 2} {\mathcal{R}}_{1,t}$. \end{theorem}
\begin{proof}
Assume $\rho \in \bigcap_{t \geq 2} {\mathcal{R}}_{1,t}$ and for each $t \geq 2$ let $\mathcal L_t$ be a corresponding certificate for membership in ${\mathcal R}_{1,t}$. Using \cref{pointconv} one can show that the sequence $(\mathcal L_t)_t$ has a pointwise converging subsequence. Let $\mathcal L$ be its limit. It then follows from \cref{theomainmatrix,theoTchakmatrix} that there exist $K \in \mathbb{N}$, matrices $\Lambda_1,\ldots, \Lambda_K \in \mathcal S_+^d$, and vectors $v_1,\ldots, v_K \in \mathbb C^d$ with $\|v_i\|=1$ such that \[ \mathcal L(p) = \sum_{k=1}^K \Lambda_k p(v_k) \qquad \text{ for all } p \in \mathbb C[\mathbf{y},\qo{\mathbf{y}}]_2. \] In particular, \[ \rho = \mathcal L(\mathbf{y} \mathbf{y}^*) = \sum_{k=1}^K \Lambda_k \otimes v_k v_k^*, \] which shows that $\rho \in \mathcal{SEP}_d$. \end{proof}
\subsection{The Doherty-Parrilo-Spedalieri hierarchy: state extension perspective} \label{sec: state extension perspective}
In the previous section we introduced the sets ${\mathcal{R}}_{k,t}$ for integers $k,t \ge 1$ and we mentioned that there are two regimes of interest: $k=1$ and $k=t$, leading to the two hierarchies ${\mathcal{R}}_{1,t}$ and ${\mathcal{R}}_{t,t}$. We now show that these hierarchies in fact coincide with the DPS hierarchies, denoted here as $\mathcal{DPS}_{1,t}$ and $\mathcal{DPS}_{t,t}$, that are defined in terms of (one-sided and two-sided) state extensions \cite{DPS04}.
For ease of notation, we will mostly focus on the first regime and show the equality ${\mathcal{R}}_{1,t}=\mathcal{DPS}_{1,t}$; the arguments naturally adapt to the second regime to show ${\mathcal{R}}_{t,t}=\mathcal{DPS}_{t,t}$. This permits to recover the convergence of the DPS hierarchy to $\mathcal{SEP}$ from the corresponding convergence result for the sets ${\mathcal{R}}_{1,t}$ (Theorem \ref{theoR1t}) obtained via the moment approach.
We begin with giving the original formulation of the DPS hierarchy $\mathcal{DPS}_{1,t}$ in terms of (one-sided) state extensions. To do so, we require a few definitions.
Given a bipartite state $\rho\in \mathcal{H}^d\otimes \mathcal{H}^d$ it is convenient to denote the two vector spaces (aka registers) composing the tensor product space on which $\rho$ acts as $A$ and $B$. Then we may also denote $\rho$ as $\rho_{AB}$. The \emph{partial trace} of $\rho_{AB}$ with respect to the second register is the operator $\rho_A=\mathrm{Tr}_B(\rho)$ that acts on the first register and is defined by tracing out the second register. In the same way, $\rho_B=\mathrm{Tr}_A(\rho)$ is the second partial trace, which acts on the second register and is obtained by tracing out the first one. Concretely, say $\rho=(\rho_{ij,i'j'})_{i,j,i',j'\in [d]}$ after fixing a basis of $\mathbb C^d\otimes\mathbb C^d$. Then we have $$\rho_A=\mathrm{Tr}_B(\rho)=\Big(\sum_{j=1}^d \rho_{ij,i'j}\Big)_{i,i'=1}^d\quad \text{ and } \quad \rho_B=\mathrm{Tr}_A(\rho)=\Big(\sum_{i=1}^d\rho_{ij,ij'}\Big)_{j,j'=1}^d. $$ {The {\em partial transpose} $\rho^{T_B}$ of $\rho$ with respect to the second register $B$ is defined by \begin{equation}\label{eqPPTB} (\rho^{T_B})_{ij,i'j'}= \rho_{ij',i'j} \ \text{ for all } i,i'\in [d],\ j,j'\in [d] \end{equation} and $\mathsf{T_B}$ denotes the corresponding transpose operator that acts on $\mathcal H^d\otimes \mathcal H^d$ by taking the partial transpose on the second register, so that $\mathsf{T_B}(\rho)=\rho^{T_B}$.
The partial transpose $\rho^{T_A}$ with respect to the first register is defined analogously by $(\rho^{T_A})_{ij,i'j'}=\rho_{i'j,ij'}$ for all $i,i',j,j'\in [d]$. Note that $\rho^{T_A}= (\rho^{T_B})^T= \overline {\rho^{T_B}}$ if $\rho$ is Hermitian, and thus $\rho^{T_B}\succeq 0$ implies $\rho^{T_A}\succeq 0$.}
Given an integer $t\ge 2$ the construction of the relaxation $\mathcal{DPS}_{1,t}$ relies on the following observation: If $\rho_{AB}$ has a decomposition as in (\ref{decrho}) then one may introduce $t$ copies of the second register and define the following extended state $\rho_{A B_{[t]} }$ acting on $\mathbb C^d\otimes (\mathbb C^d)^{\otimes t}$: \begin{equation}\label{decrhot} \rho_{A B_{[t]} }:= \sum_{\ell=1}^r \lambda_\ell \ x_\ell x_\ell^*\otimes ( y_\ell y_\ell^*)^{\otimes t}. \end{equation} There is a natural action of the symmetric group ${\text{\rm Sym}}(t)$ on $(\mathbb C^d)^{\otimes t}$, defined by $\sigma(v_1\otimes \ldots \otimes v_t)=v_{\sigma(1)}\otimes \ldots \otimes v_{\sigma(t)}$ for $v_1,\ldots,v_t\in\mathbb C^d$ and $\sigma\in {\text{\rm Sym}}(t)$, and extended to the space $(\mathbb C^d)^{\otimes t}$ by linearity. Let ${\text{\rm Sym}}((\mathbb C^d)^{\otimes t})$ denote the invariant subspace of $(\mathbb C^d)^{\otimes t}$ under this action and let $\Pi_t$ denote the projection from $ (\mathbb C^d)^{\otimes t}$ onto its invariant subspace ${\text{\rm Sym}}((\mathbb C^d)^{\otimes t})$, defined by $$\Pi_t(w) =\frac{1}{t!}\sum_{\sigma\in {\text{\rm Sym}}(t)} \sigma(w) \quad \text{ for } w\in (\mathbb C^d)^{\otimes t}.$$ Then, $I_A\otimes \Pi_t$ acts on $\mathbb C^d\otimes (\mathbb C^d)^{\otimes t}$.
We now present some natural properties that the extended state $\rho_{A B_{[t]}}$ from \eqref{decrhot} satisfies: \begin{description} \item[(1)] $\rho_{A B_{[t]}}$ is positive semidefinite. \item[(2)] $\rho_{AB} =\mathrm{Tr}_{B_{[2:t]}}(\rho_{A B_{[t]}})$, where, in $\mathrm{Tr}_{B_{[2:t]}}(\rho_{A B_{[t]}})$, we trace out the last $t-1$ copies of the second register $B$. \item[(3)] $(I_A\otimes \Pi_t)\rho_{A B_{[t]}} (I_A\otimes \Pi_t)=\rho_{AB_{[t]}}$, i.e., $\rho_{A B_{[t]}}$ is symmetric in the last $t$ registers. \item[(4)] $I_A\otimes \mathsf{T}_B^{\otimes s} \otimes I_B^{\otimes (t-s)} (\rho_{A B_{[t]}}) \succeq 0$ for any $1\le s\le t$. \end{description} For property (2) we use the fact that each vector $y_\ell$ lies on the unit sphere and the
last property (4) follows from the fact that $\mathsf{T}_B(yy^*)=(yy^*)^T=\overline y\, \overline y^*$ and thus $$I_A\otimes \mathsf{T}_B^{\otimes s} \otimes I_B^{\otimes (t-s)} (\rho_{A B_{[t]}}) = \sum_{\ell=1}^r \lambda_\ell x_\ell x_\ell^* \otimes (\overline y_\ell \ \overline y_\ell^*)^{\otimes s}\otimes (y_\ell y_\ell^*)^{\otimes (t-s)}\succeq 0 $$ if $\rho_{A B_{[t]}}$ satisfies (\ref{decrhot}). Property (4) is known as the {\em positive partial transpose} (PPT) criterion or as the Peres-Horodecki criterion \cite{Horodecki_1996}. {Clearly, in view of the symmetry property (3), taking the partial transpose of {\em any} $s$ copies (thus not only the first $s$ ones) among the $t$ copies of the second register preserves positivity.} The above properties are used to define the hierarchy $\mathcal{DPS}_{1,t}$.
\begin{definition}\label{defDPS} For an integer $t\ge 2$ the DPS relaxation of order $t$ is defined as \begin{align} \mathcal{DPS}_{1,t} := \big\{ \rho_{AB}\in\mathcal{H}^d\otimes \mathcal{H}^d:\ &\exists \rho_{1,t} \text{ Hermitian linear map acting on } \mathbb C^d\otimes (\mathbb C^d)^{\otimes t} \text{ s.t. } \label{eq:positive} \\ &\mathrm{Tr}_{B_{[2:t]}}(\rho_{1,t}) = \rho_{AB}, \label{eq:partialtrace} \\ &(I_A \otimes \Pi_t) \rho_{1,t} (I_A \otimes \Pi_t) = \rho_{1,t}, \label{eq:symmetry}\\ &I_A \otimes \mathsf T_B^{\otimes s} \otimes I_B^{\otimes (t-s)}(\rho_{1,t}) \succeq 0 \text{ for all } s \in \{0\} \cup [t] \label{eq:PPT} \big\}. \end{align} \end{definition}
\begin{remark} In the definition of the set $\mathcal{DPS}_{1,t}$ only one part of the system is extended, which is why we refer to this as a {\em one-sided} state extension. One can define a stronger relaxation of $\mathcal{SEP}$ by considering a {\em two-sided} state extension. Given two integers $k,t \ge 2$ one can define $\mathcal{DPS}_{k,t}$ as the set of states $\rho_{AB}$ that have an extension $\rho_{k,t}$ acting on $(\mathbb C^d)^{\otimes k}\otimes (\mathbb C^d)^{\otimes t}$, which satisfies the appropriate analogs of the above properties (1)-(4). One may consider in particular the case $k=t$, leading to the sets $\mathcal{DPS}_{t,t}$ that satisfy $$\mathcal{SEP}_d\subseteq \mathcal{DPS}_{t,t}\subseteq \mathcal{DPS}_{1,t}.$$ \end{remark}
Doherty, Parrilo and Spedalieri \cite{DPS04} show that the relaxations $\mathcal{DPS}_{1,t}$ converge to $\mathcal{SEP}$.
\begin{theorem}[{\cite{DPS04}}]\label{theoDPS} We have $\mathcal{SEP}_d\subseteq \mathcal{DPS}_{1,t+1}\subseteq \mathcal{DPS}_{1,t}$ and $\mathcal{SEP}_d=\bigcap_{t\ge 1} \mathcal{DPS}_{1,t}$. As a consequence, we also have $\mathcal{SEP}_d=\bigcap_{t\ge 1}\mathcal{DPS}_{t,t}$. \end{theorem}
We now show that equality ${\mathcal{R}}_{1,t} = \mathcal{DPS}_{1,t}$ holds for all $t \in \mathbb{N}$. Therefore, Theorem \ref{theoDPS} follows directly from Theorem \ref{theoR1t}. Using similar arguments one can also show that $\mathcal{DPS}_{k,t}={\mathcal{R}}_{k,t}$ and thus $\mathcal{DPS}_{t,t}={\mathcal{R}}_{t,t}$.
\begin{proposition} \label{prop: equality} For any integer $t \geq 2$ we have ${\mathcal{R}}_{1,t} = \mathcal{DPS}_{1,t}$. \end{proposition}
\begin{proof}
Assume first $\rho_{AB}\in {\mathcal{R}}_{1,t}$, with certificate $L$ satisfying \cref{eq: dps kl} (with $k=1$).
We claim that $\rho_{1,t}:= L(\mathbf{x}\mathbf{x}^* \otimes (\mathbf{y}\mathbf{y}^*)^{\otimes t})$ is a certificate for membership of $\rho_{AB}$ in $ \mathcal{DPS}_{1,t}$. Indeed, \cref{eq:partialtrace} holds since $\mathrm{Tr}_{B_{[2:t]}}(\rho_{1,t}) =L(\mathbf{x}\mathbf{x}^*\otimes \mathbf{y}\mathbf{y}^*)= \rho_{AB}$ follows using the bi-sphere ideal condition on $L$. The symmetry condition in \cref{eq:symmetry} holds for $\rho_{1,t}$ since $L$ acts on commutative polynomials, and the PPT condition in \cref{eq:PPT}
holds for $\rho_{1,t}$ as a consequence of the positivity condition: $L\ge 0$ on $\Sigma_{2,2t}$.
Conversely, assume that $\rho_{AB}\in \mathcal{DPS}_{1,t}$, with state $\rho_{1,t}$ as certificate satisfying (\ref{eq:positive})-(\ref{eq:PPT}). We construct a linear functional $L$ acting on $\mathbb C[\mathbf{x},\qo{\mathbf{x}},\mathbf{y},\qo{\mathbf{y}}]_{2,2t}$ that certifies membership of $\rho_{AB}$ in ${\mathcal{R}}_{1,t}$, i.e., satisfies the program (\ref{eq: dps kl}) (with $k=1$). In a first step we set
\begin{equation}\label{eqrho1t}
L(\mathbf{x}\mathbf{x}^* \otimes (\mathbf{y}\mathbf{y}^*)^{\otimes t}) := \rho_{1,t}.
\end{equation}
In other words we set
$$L(x_i\overline x_{i'} y_{j_1}\overline y_{j'_1} \cdots y_{j_t}\overline y_{j'_t}):= (\rho_{1,t})_{i \avec{j}, i'\avec{j'}}$$
for any $i,i'\in [d]$ and
$\avec{j}=(j_1,\ldots,j_t), \avec{j'}=(j'_1,\ldots,j'_t)\in [d]^t$. Using the symmetry condition (\ref{eq:symmetry}), it follows that this definition does not depend on the order of the variables $y_j$ (or $\overline y_j$).
Indeed, by \cref{eq:symmetry} we know that
\begin{equation*} \label{eq: perm invariance}
(\rho_{1,t})_{i\avec{j}, i' \avec{j'}} = (\rho_{1,t})_{i\sigma(\avec{j}), i' \tau(\avec{j'})} \text{ for all } i,i' \in [d], \avec{j},\avec{j'} \in [d]^t
\end{equation*}
for all permutations $\sigma, \tau\in{\text{\rm Sym}}(t)$, where $\sigma(\avec{j}) = (j_{\sigma(1)}, j_{\sigma(2)},\ldots, j_{\sigma(t)})$ for $\avec{j}=(j_1,\ldots,j_t)$,
which shows that
$$
L(x_i\overline x_{i'} y_{j_1}\overline y_{j'_1} \cdots y_{j_t}\overline y_{j'_t})
= L(x_i\overline x_{i'} y_{j_{\sigma(1)}}\overline y_{j'_{\tau(1)}} \cdots y_{j_{\sigma(t)}}\overline y_{j'_{\tau(t)}}).
$$
This shows that $\rho_{1,t}$ defines a linear functional $L$ acting on polynomials with degree $1$ in $\mathbf{x}$, degree $1$ in $\qo{\mathbf{x}}$, degree $t$ in $\mathbf{y}$, and degree $t$ in $\qo{\mathbf{y}}$. We now show how to extend this linear functional $L$ to $\mathbb C[\mathbf{x},\qo{\mathbf{x}},\mathbf{y},\qo{\mathbf{y}}]_{2,2t}$ in such a way that it becomes a certificate for $\rho_{AB} \in {\mathcal{R}}_{1,t}$.
First we extend $L$ to all monomials $\mathbf{x}^\alpha\qo{\mathbf{x}}^{\alpha'}\mathbf{y}^\beta\qo{\mathbf{y}}^{\beta'}$ with degree at most 2 in $\mathbf{x},\qo{\mathbf{x}}$ and degree at most $2t$ in $\mathbf{y},\qo{\mathbf{y}}$. For this we set
\begin{equation}\label{eq:extend L}
L(\mathbf{x}^\alpha\qo{\mathbf{x}}^{\alpha'}\mathbf{y}^\beta\qo{\mathbf{y}}^{\beta'}):= 0 \ \text{ if } |\alpha|\ne |\alpha'| \text{ or } |\beta|\ne |\beta'|.
\end{equation}
Otherwise, $|\alpha+\alpha'|,|\beta+\beta'|$ are even and we set \[
L(\mathbf{x}^\alpha \qo{\mathbf{x}}^{\alpha'} \mathbf{y}^{\beta} \qo{\mathbf{y}}^{\beta'}) := L(\|\mathbf{x}\|^{2-|\alpha+\alpha'|} \|\mathbf{y}\|^{2t-|\beta+\beta'|} \mathbf{x}^\alpha \qo{\mathbf{x}}^{\alpha'} \mathbf{y}^{\beta} \qo{\mathbf{y}}^{\beta'}). \]
By construction, $L$ is Hermitian (since $\rho_{1,t}$ is Hermitian) and $L$ vanishes on $\mathcal I(1-\|\mathbf{x}\|^2,1-\|\mathbf{y}\|^2)_{2,2t}$.
It remains to show that $L\ge 0$ on $\Sigma_{2,2t}$. In view of Lemma \ref{prop: restrict to hom}, it suffices to show that $L \geq 0$ on $\Sigma_{=2,=2t}$, or, equivalently, that the moment matrix $M_{=1,=t}(L)$, indexed by monomials $\mathbf{x}^\alpha\qo{\mathbf{x}}^{\alpha'}\mathbf{y}^\beta\qo{\mathbf{y}}^{\beta'}$ with $|\alpha+\alpha'|=1$ and $|\beta+\beta'|=t$, is positive semidefinite.
In view of \eqref{eq:extend L} the matrix $M_{=1,=t}(L)$ is block-diagonal with respect to the partition of its index set according to the value of $(|\alpha|, |\beta|)$, i.e., according to the partition of $I^{=1,=t}=\bigcup_{r,s} I^{=1,=t}_{r,s}$ defined in \cref{eq: partition kl} with $-1\le r\le 1, -t\le s\le t$ and $r\equiv 1, s\equiv t$ modulo 2.
So we are left with the task of showing that all diagonal blocks $M_{=1,=t}(L)[I^{=1,=t}_{r,s}]$ are positive semidefinite. For this we use \cref{eq: ppt on L} to obtain that $$ M_{=1,=t}(L)[I^{=1,=t}_{r,s}]\succeq 0\Longleftrightarrow L({\mathbf{x}\mathbf{x}^*}^{\otimes ({r+1\over 2}) } \otimes {\qo{\mathbf{x}}\qo{\mathbf{x}}^*}^{\otimes ({1-r\over 2})} \otimes {\mathbf{y}\mathbf{y}^*}^{\otimes ({t+s\over 2})} \otimes {\qo{\mathbf{y}}\qo{\mathbf{y}}^*}^{\otimes ({t-s\over 2})}) \succeq 0. $$ This holds for all $r,s$ such that $-1\le r\le 1, -t\le s\le t, r\equiv 1, s\equiv t$ modulo 2 if and only if $$ L(\mathbf{x}\mathbf{x}^* \otimes {\mathbf{y}\mathbf{y}^*}^{\otimes (t-s')} \otimes {\qo{\mathbf{y}}\qo{\mathbf{y}}^*}^{\otimes s'}) \succeq 0,\ \ L(\qo{\mathbf{x}}\qo{\mathbf{x}}^* \otimes {\mathbf{y}\mathbf{y}^*}^{\otimes (t-s')} \otimes {\qo{\mathbf{y}}\qo{\mathbf{y}}^*}^{\otimes s'} )\succeq 0$$ for all $s'\in \{0\}\cup [t]$ (setting $s'={t-s\over 2}$). In view of \cref{eqrho1t} we obtain that \begin{align} \label{eq: PPT on L} L(\mathbf{x}\mathbf{x}^* \otimes {\mathbf{y}\mathbf{y}^*}^{\otimes (t-s')} \otimes {\qo{\mathbf{y}}\qo{\mathbf{y}}^*}^{\otimes s'}) =I_A\otimes I_B^{\otimes (t-s')} \otimes \mathsf T_B^{\otimes s'}(\rho_{1,t}) \end{align} {and, since $L$ is Hermitian,} \begin{align*} L(\qo{\mathbf{x}}\qo{\mathbf{x}}^* \otimes {\mathbf{y}\mathbf{y}^*}^{\otimes (t-s')} \otimes {\qo{\mathbf{y}}\qo{\mathbf{y}}^*}^{\otimes s'} ) = \overline{L(\mathbf{x}\mathbf{x}^* \otimes {\qo{\mathbf{y}}\qo{\mathbf{y}}^*}^{\otimes (t-s')} \otimes {\mathbf{y}\mathbf{y}^*}^{\otimes s'}) } =\overline{ I_A\otimes \mathsf T_B^{\otimes (t-s')} \otimes I_B^{\otimes s'} (\rho_{1,t})}. 
\end{align*} Therefore, the positive semidefiniteness of all the diagonal blocks composing the matrix $M_{=1,=t}(L)$ follows from the PPT condition (\ref{eq:PPT}) combined with the symmetry condition (\ref{eq:symmetry}) and the fact that the conjugate of a Hermitian positive semidefinite matrix remains positive semidefinite. \end{proof}
\begin{remark}\label{rem: level 2 implies ppt} Note that it follows from relation \eqref{eq: PPT on L} in the above proof that,
if $\rho = L(\mathbf{x} \mathbf{x}^* \otimes \mathbf{y} \mathbf{y}^*)$ where the linear functional $L$ satisfies $L \geq 0$ on $\Sigma_{2,2}$, then $\rho$ satisfies the PPT condition \eqref{eq:PPT}. In particular this implies that the PPT condition is contained in the definition of the parameter $\xidsep{t}$: if the program (\ref{eqxit}) defining $\xidsep{t}$ is feasible (for $t \geq 2$), then $\rho$ satisfies the PPT condition. \end{remark}
\appendix
\section{Deriving the complex results from their real analogs} \label{sec:AppA} In this appendix we show how the proofs of \cref{theomainmatrix,theomainTchakaloff} can be obtained from their real versions in \cite{Pu93,Tchakaloff,CimpricZalar}. We begin by recalling in Section \ref{AppPrel} the links between the main properties of the complex objects introduced in the paper and their real analogs. Then we give the proof of Theorem \ref{theomainTchakaloff} in Section \ref{AppProofPutinar} and of Theorem \ref{theomainmatrix} in Section \ref{AppProofCimpric}.
\subsection{Preliminaries on changing variables from complex to real}\label{AppPrel} \paragraph{Vectors and matrices.} Throughout we set $\mathbf{i}\,=\sqrt{-1}\in \mathbb C$. Then any complex scalar $x \in \mathbb C$ can be written (uniquely) as $x = x_\mathrm{Re} + \mathbf{i}\, x_\mathrm{Im}$, where $x_\mathrm{Re}:=\mathrm{Re}(x)$ and $x_\mathrm{Im}:=\mathrm{Im}(x)$ denote, respectively, the real and imaginary parts of $x$. This notation extends to vectors and matrices by letting the maps $\mathrm{Re}(\cdot)$ and $\mathrm{Im}(\cdot)$ act entrywise. Any vector $x \in \mathbb C^n$ can be written $x = x_\mathrm{Re}+ \mathbf{i}\, x_\mathrm{Im}$ with $x_\mathrm{Re}:=\mathrm{Re}(x) , x_\mathrm{Im} := \mathrm{Im}(x)\in \mathbb{R}^n$. This gives a bijection \begin{equation} \label{CRbij}
\phi:\mathbb C^n \to \mathbb{R}^n\times \mathbb{R}^n ~;~ \mathbf{x} \mapsto (\mathbf{x}_{\mathrm{Re}},\mathbf{x}_{\mathrm{Im}}). \end{equation} Similarly, for a complex matrix $G \in \mathbb C^{m \times m'}$ set $G_\mathrm{Re} := \mathrm{Re}(G), G_\mathrm{Im} := \mathrm{Im}(G)\in \mathbb{R}^{m\times m'}$ and define the $2m\times 2m'$ real matrix \begin{equation}\label{RCMats} G^\mathbb{R}:= \begin{bmatrix}
G_\mathrm{Re}& -G_\mathrm{Im}\\
G_\mathrm{Im}& G_\mathrm{Re} \end{bmatrix}. \end{equation} Then $G\in \mathbb C^{m\times m}$ is Hermitian, i.e., $G^* = G$, if and only if $G_\mathrm{Re} = G_\mathrm{Re}^T$ and $G_\mathrm{Im}^T = -G_\mathrm{Im}$. Moreover, for $G\in \mathbb C^{m\times m}$ Hermitian and $w\in \mathbb C^m$ we have the identity \begin{equation}\label{RCMatsw} w^*G w = (w_\mathrm{Re} - \mathbf{i}\, w_\mathrm{Im} )^T(G_\mathrm{Re} + \mathbf{i}\, G_\mathrm{Im})(w_\mathrm{Re} + \mathbf{i}\, w_\mathrm{Im} ) = \begin{bmatrix}
w_\mathrm{Re}^T & w_\mathrm{Im} ^T \end{bmatrix} \begin{bmatrix}
G_\mathrm{Re} & -G_\mathrm{Im}\\
G_\mathrm{Im} & G_\mathrm{Re} \end{bmatrix} \begin{bmatrix}
w_\mathrm{Re} \\
w_\mathrm{Im} \end{bmatrix}, \end{equation} which implies the well-known equivalence \begin{equation*} \label{RCMatsequiv} G \succeq 0 \iff G^\mathbb{R}= \begin{bmatrix}
G_\mathrm{Re}& -G_\mathrm{Im}\\
G_\mathrm{Im}& G_\mathrm{Re} \end{bmatrix} \succeq 0. \end{equation*}
\paragraph{Polynomials.} Polynomials in $\C[\mathbf{x},\qo{\mathbf{x}}]$ with complex variables $\mathbf{x} \in \mathbb C^n$ can be transformed into polynomials in $\R\mxrxi$ with real variables $\mathbf{x}_{\mathrm{Re}},\mathbf{x}_{\mathrm{Im}}\in \mathbb{R}^n$, via the change of variables $\mathbf{x} = \mathbf{x}_{\mathrm{Re}} + \mathbf{i}\,\mathbf{x}_{\mathrm{Im}}$. In this way, any $p \in \C[\mathbf{x},\qo{\mathbf{x}}]$ corresponds to a unique pair of real polynomials \begin{align*} p_{\mathrm{Re}}(\mathbf{x}_{\mathrm{Re}},\mathbf{x}_{\mathrm{Im}}) :=& \mathrm{Re}(p(\mathbf{x}_{\mathrm{Re}} + \mathbf{i}\, \mathbf{x}_{\mathrm{Im}},\mathbf{x}_{\mathrm{Re}} - \mathbf{i}\, \mathbf{x}_{\mathrm{Im}})) \in \R\mxrxi,\\ p_{\mathrm{Im}}(\mathbf{x}_{\mathrm{Re}},\mathbf{x}_{\mathrm{Im}}) :=& \mathrm{Im}(p(\mathbf{x}_{\mathrm{Re}} + \mathbf{i}\, \mathbf{x}_{\mathrm{Im}},\mathbf{x}_{\mathrm{Re}} - \mathbf{i}\, \mathbf{x}_{\mathrm{Im}})) \in \R\mxrxi \end{align*} satisfying the identity \begin{equation}\label{pCR} p(\mathbf{x},\qo{\mathbf{x}})=p(\mathbf{x}_{\mathrm{Re}}+\mathbf{i}\, \mathbf{x}_{\mathrm{Im}}, \mathbf{x}_{\mathrm{Re}}-\mathbf{i}\, \mathbf{x}_{\mathrm{Im}})= p_\mathrm{Re}(\mathbf{x}_{\mathrm{Re}},\mathbf{x}_{\mathrm{Im}})+ \mathbf{i}\, p_\mathrm{Im}(\mathbf{x}_{\mathrm{Re}}, \mathbf{x}_{\mathrm{Im}}). \end{equation}
Note that the degrees are preserved: $\deg_{\mathbf{x},\qo{\mathbf{x}}}(p) = \max\{\deg_{\mathbf{x}_{\mathrm{Re}},\mathbf{x}_{\mathrm{Im}}}(p_\mathrm{Re}), \deg_{\mathbf{x}_{\mathrm{Re}},\mathbf{x}_{\mathrm{Im}}} (p_\mathrm{Im}) \}$. A polynomial $p$ is Hermitian, i.e., $\qo p = p$, if and only if $p_{\mathrm{Im}} = 0$. Hence, the map \begin{align} \mathrm{Re} : & \ \C\mxox^h \to \R\mxrxi ~;~ p(\mathbf{x},\qo{\mathbf{x}}) \mapsto p_{\mathrm{Re}}(\mathbf{x}_{\mathrm{Re}},\mathbf{x}_{\mathrm{Im}}) \label{RCHpolybij} \end{align} is injective. This map is also surjective: take any $f \in \R\mxrxi$ and define the polynomial $p(\mathbf{x},\qo{\mathbf{x}}):= f(\frac{\mathbf{x} +\qo{\mathbf{x}}}{2},\frac{\mathbf{x} -\qo{\mathbf{x}}}{2\mathbf{i}\,})\in \mathbb C[\mathbf{x},\qo{\mathbf{x}}]$, then $p$ is Hermitian and satisfies $f = p_\mathrm{Re}$. Finally, since any $p\qo p$ is Hermitian we have \begin{equation*}\label{eqsos} \mathrm{Re}(p\qo p) = p_\mathrm{Re}^2 + p_\mathrm{Im}^2. \end{equation*} Hence sums of Hermitian squares in $\C[\mathbf{x},\qo{\mathbf{x}}]$ are mapped to real sums of squares in $\R\mxrxi$ and vice versa.
\paragraph{Polynomial matrices.} For vectors and matrices with polynomial entries in $\C[\mathbf{x},\qo{\mathbf{x}}]$, the maps $\mathrm{Re}(\cdot)$ and $\mathrm{Im}(\cdot)$ act entrywise. Additionally, for a polynomial matrix $G \in \C[\mathbf{x},\qo{\mathbf{x}}]^{m\times m'}$, we can define the real polynomial matrix $G^{\mathbb{R}} \in \R\mxrxi^{2m\times 2m'}$ using relation (\ref{RCMats}), where $G_\mathrm{Re},G_\mathrm{Im}$ are defined entrywise: if $G=(G_{ij}) $ then $G_\mathrm{Re}=((G_{ij})_{\mathrm{Re}})$ and $G_\mathrm{Im}=((G_{ij})_\mathrm{Im})$. Then $G$ is Hermitian if and only if $G^{\mathbb{R}}$ is symmetric and, as we next observe, this correspondence extends to sums of squares.
\begin{lemma}\label{RCGpsd}
Let $G \in \C[\mathbf{x},\qo{\mathbf{x}}]^{m\times m}$ be a polynomial matrix and let $G^\mathbb{R}\in \mathbb{R}[\mathbf{x}_{\mathrm{Re}},\mathbf{x}_{\mathrm{Im}}]^{2m\times 2m}$ be the corresponding real polynomial matrix defined via (\ref{RCMats}). Then $G$ is a Hermitian SoS-polynomial matrix if and only if $G^{\mathbb{R}}$ is a (real) SoS-polynomial matrix. \end{lemma} \begin{proof}
Assume $G$ is a Hermitian SoS-polynomial matrix. Let $G = UU^*$ with $U \in \C[\mathbf{x},\qo{\mathbf{x}}]^{m\times k}$.
Applying the change of variables from complex to real we get
$$
G(\mathbf{x},\qo{\mathbf{x}}) = G_\mathrm{Re}(\mathbf{x}_{\mathrm{Re}},\mathbf{x}_{\mathrm{Im}}) + \mathbf{i}\, G_\mathrm{Im}(\mathbf{x}_{\mathrm{Re}},\mathbf{x}_{\mathrm{Im}})
= U(\mathbf{x}_{\mathrm{Re}} + \mathbf{i}\, \mathbf{x}_{\mathrm{Im}},\mathbf{x}_{\mathrm{Re}} - \mathbf{i}\, \mathbf{x}_{\mathrm{Im}})U^*(\mathbf{x}_{\mathrm{Re}} + \mathbf{i}\, \mathbf{x}_{\mathrm{Im}},\mathbf{x}_{\mathrm{Re}} - \mathbf{i}\, \mathbf{x}_{\mathrm{Im}})
$$
$$
= (U_{\mathrm{Re}} + \mathbf{i}\, U_{\mathrm{Im}})(U_{\mathrm{Re}}^T - \mathbf{i}\, U_{\mathrm{Im}}^T)
= U_{\mathrm{Re}} U_{\mathrm{Re}}^T + U_{\mathrm{Im}}U_{\mathrm{Im}}^T + \mathbf{i}\, \big(U_{\mathrm{Im}}U_{\mathrm{Re}}^T - U_{\mathrm{Re}}U_{\mathrm{Im}}^T \big).
$$
This implies $G_\mathrm{Re} = U_{\mathrm{Re}} U_{\mathrm{Re}}^T + U_{\mathrm{Im}}U_{\mathrm{Im}}^T$ and $G_\mathrm{Im} = U_{\mathrm{Im}}U_{\mathrm{Re}}^T - U_{\mathrm{Re}}U_{\mathrm{Im}}^T$,
and thus
\begin{align*}
G^\mathbb{R} :=&
\begin{bmatrix}
G_{\mathrm{Re}} & -G_{\mathrm{Im}} \\
G_{\mathrm{Im}} & G_{\mathrm{Re}}
\end{bmatrix}
=
\begin{bmatrix}
U_{\mathrm{Re}} U_{\mathrm{Re}}^T + U_{\mathrm{Im}}U_{\mathrm{Im}}^T & -(U_{\mathrm{Im}}U_{\mathrm{Re}}^T - U_{\mathrm{Re}}U_{\mathrm{Im}}^T)\\
U_{\mathrm{Im}}U_{\mathrm{Re}}^T - U_{\mathrm{Re}}U_{\mathrm{Im}}^T & U_{\mathrm{Re}} U_{\mathrm{Re}}^T + U_{\mathrm{Im}}U_{\mathrm{Im}}^T
\end{bmatrix}\\
=&
\begin{bmatrix}
U_{\mathrm{Re}} & -U_{\mathrm{Im}} \\
U_{\mathrm{Im}} & U_{\mathrm{Re}}
\end{bmatrix}
\begin{bmatrix}
U^T_{\mathrm{Re}} & U^T_{\mathrm{Im}} \\
-U^T_{\mathrm{Im}} & U^T_{\mathrm{Re}}
\end{bmatrix}
=: U^\mathbb{R}(U^\mathbb{R})^T,
\end{align*}
which shows $G^\mathbb{R}$ is an SoS-polynomial matrix.
The converse result follows from retracing the above steps. \end{proof}
\paragraph{Quadratic modules.} Given a set $S \subseteq \C\mxox^h$ of Hermitian polynomials we define its real analog by applying the map $\mathrm{Re}(\cdot)$ from (\ref{RCHpolybij}) elementwise to the set $S$ and set \begin{equation}\label{eqSRe} S_{\mathrm{Re}} := \mathrm{Re}(S) = \{p_{\mathrm{Re}}: p \in S \} \subseteq \R\mxrxi. \end{equation} Given a Hermitian polynomial matrix $G \in \C[\mathbf{x},\qo{\mathbf{x}}]^{m \times m}$ we define the set of Hermitian polynomials \begin{equation*} S^{G} := \{ w^* G w: w \in \mathbb C^m \} \subseteq \C\mxox^h \end{equation*} and, for the corresponding real symmetric matrix $G^\mathbb{R}\in \mathbb{R}[\mathbf{x}_{\mathrm{Re}},\mathbf{x}_{\mathrm{Im}}]^{2m\times 2m}$ defined via (\ref{RCMats}), we define the set of real polynomials \begin{equation*} S^{G^\mathbb{R}} := \{ (w_\mathrm{Re}, w_\mathrm{Im})^T G^{\mathbb{R}} (w_\mathrm{Re}, w_\mathrm{Im}): w \in \mathbb C^{m} \} \subseteq \R\mxrxi. \end{equation*} These two sets satisfy the expected correspondence:
$$
S^{G^\mathbb{R}}= \mathrm{Re}(S^{G}),$$ since, in view of relation (\ref{RCMatsw}), we have
$\mathrm{Re}(w^*Gw) = (w_\mathrm{Re}, w_\mathrm{Im})^T G^{\mathbb{R}} (w_\mathrm{Re}, w_\mathrm{Im})$ for all $w\in\mathbb C^m$.
This correspondence extends to the (real part of the) truncated complex quadratic module $\mathcal M(S)_{2t}$ generated by $S\subseteq \mathbb C[\mathbf{x},\qo{\mathbf{x}}]^h$ and the truncated real quadratic module generated by the corresponding set $S_\mathrm{Re}\subseteq \mathbb{R}[\mathbf{x}_{\mathrm{Re}},\mathbf{x}_{\mathrm{Im}}]$ via (\ref{eqSRe}), which is denoted here as $\mathcal M^\mathbb{R}(S_\mathrm{Re})_{2t}$ and defined by $$ \mathcal {M}^\mathbb{R}(S_{\mathrm{Re}})_{2t} := \text{cone}\{g_{\mathrm{Re}}f^2: f \in \R\mxrxi ~,~ g \in S,~~\deg(g_{\mathrm{Re}}f^2) \leq 2t \}. $$ Namely, we have $$ \mathrm{Re}(\mathcal {M}(S)_{2t})= \mathcal {M}^\mathbb{R}(S_{\mathrm{Re}})_{2t}.$$ Indeed, we have $\mathrm{Re}(g p \qo p) = g_{\mathrm{Re}}(p_\mathrm{Re}^2 + p_\mathrm{Im}^2)$ and the next relation, collected for further reference: \begin{equation} \label{RCHquadbij} g p \qo p \in \mathcal {M}(S)_{2t} \iff \mathrm{Re}(g p \qo p) =g_\mathrm{Re}(p_\mathrm{Re}^2+p_\mathrm{Im}^2) \in \calMr[2t](S_{\mathrm{Re}}) \text{ for all } p\in \C[\mathbf{x},\qo{\mathbf{x}}],~g \in S. \end{equation}
\begin{lemma} \label{RCArch}
For any $S \subseteq \C\mxox^h$, the (complex) quadratic module $\mathcal {M}(S)$ is Archimedean if and only if the real quadratic module $\calMr[](S_{\mathrm{Re}})$ is Archimedean. \end{lemma} \begin{proof}
Directly from \cref{RCHquadbij} since, for any scalar $R\in\mathbb{R}$,
$R^2 -\mathbf{x}^*\mathbf{x} \in \mathcal {M}(S)$ if and only if $\mathrm{Re}(R^2-\mathbf{x}^* \mathbf{x})=R^2 - \mathbf{x}_{\mathrm{Re}}^T\mathbf{x}_{\mathrm{Re}} +\mathbf{x}_{\mathrm{Im}}^T\mathbf{x}_{\mathrm{Im}} \in \calMr[](S_{\mathrm{Re}})$. \end{proof}
\paragraph{Positivity domains and measures.} There is a natural correspondence between the complex positivity domain $\mathscr{D}(S)$ of a set $S \subseteq \C\mxox^h$ and the real positivity domain of the corresponding set $S_\mathrm{Re}\subseteq \mathbb{R}[\mathbf{x}_{\mathrm{Re}},\mathbf{x}_{\mathrm{Im}}]$, which is denoted $ \mathscr{D}^{\R}(S_{\mathrm{Re}})$ and defined by $$ \mathscr{D}^{\R}(S_{\mathrm{Re}}) := \{ (w_\mathrm{Re},w_\mathrm{Im}) \in \mathbb{R}^{2n}: g_{\mathrm{Re}}(w_\mathrm{Re},w_\mathrm{Im}) \geq 0 ~\forall~ g\in S \}. $$
Indeed, in view of \cref{RCHpolybij} and using the complex/real bijection map $\phi$ from (\ref{CRbij}), we have $$\mathscr{D}^{\R}(S_{\mathrm{Re}}) = \phi(\mathscr{D}(S)).$$ {Given a measure $\mu^{\mathbb{R}}$ on $\mathbb{R}^{2n}$ we define the complex measure $\mu$ on $\mathbb C^n$ as $\mu=\mu^{\mathbb{R}}\circ \phi$, the push-forward of $\mu^{\mathbb{R}}$ by the map $\phi^{-1}$, so that \begin{equation}\label{eqmu} \int_{\mathbb C^n} p(\mathbf{x})d\mu =\int_{\mathbb{R}^{2n}} p\circ \phi^{-1}(\mathbf{x}_{\mathrm{Re}},\mathbf{x}_{\mathrm{Im}}) d\mu^{\mathbb{R}} = \int_{\mathbb{R}^{2n}} p_\mathrm{Re}(\mathbf{x}_{\mathrm{Re}},\mathbf{x}_{\mathrm{Im}})d\mu^\mathbb{R} + \mathbf{i}\, \int_{\mathbb{R}^{2n}}p_\mathrm{Im}(\mathbf{x}_{\mathrm{Re}}, \mathbf{x}_{\mathrm{Im}})d\mu^\mathbb{R} \end{equation} for any $p\in\mathbb C[\mathbf{x},\qo{\mathbf{x}}]$ (using (\ref{pCR})).}
If $\mu^{\mathbb{R}}$ is supported by $\mathscr{D}^{\R}(S_{\mathrm{Re}})$ (i.e., $\mu^\mathbb{R}(\mathbb{R}^{2n} \setminus \mathscr{D}^{\R}(S_{\mathrm{Re}})) = 0$), then $\mu$ is supported by $\mathscr{D}(S)$ (i.e., $\mu(\mathbb C^n \setminus \mathscr{D}(S)) = 0$). This follows from the fact that $\phi(\mathbb C^n \setminus \mathscr{D}(S)) = \mathbb{R}^{2n} \setminus \mathscr{D}^{\R}(S_{\mathrm{Re}})$.
\paragraph{Linear functionals.} For a linear functional $L: \C[\mathbf{x},\qo{\mathbf{x}}] \to \mathbb C$ we have $L(p) = \mathrm{Re}(L(p)) + \mathbf{i}\, \mathrm{Im}(L(p))$ for all $p\in \C[\mathbf{x},\qo{\mathbf{x}}]$. Recall that $L$ is Hermitian if $\qo{L(p)} = L(\qo p)$. For any Hermitian $L$, we can define a real linear functional $L^\R : \R\mxrxi \to \mathbb{R}$ by \begin{equation}\label{llrdef}
L^\R (f) := L\big(f\big(\frac{\mathbf{x} + \qo{\mathbf{x}}}{2},\frac{\mathbf{x} - \qo{\mathbf{x}}}{2\mathbf{i}\,}\big)\big) ~\text{ for any }~ f\in \R\mxrxi. \end{equation} For a Hermitian polynomial $p\in \C\mxox^h$, by \cref{RCHpolybij} we have $p_\mathrm{Re}(\frac{\mathbf{x} + \qo{\mathbf{x}}}{2},\frac{\mathbf{x} - \qo{\mathbf{x}}}{2\mathbf{i}\,}) = p(\mathbf{x},\qo{\mathbf{x}})$ and thus \begin{equation}\label{llr}
L(p) = L^\R (p_{\mathrm{Re}})\quad \text{ for any } p\in \mathbb C[\mathbf{x},\qo{\mathbf{x}}]^h. \end{equation} Then for any $p\in \C[\mathbf{x},\qo{\mathbf{x}}]$ we have \begin{equation}\label{eqLLRe} L(p) = L\big(p_\mathrm{Re}\big(\frac{\mathbf{x} + \qo{\mathbf{x}}}{2},\frac{\mathbf{x} - \qo{\mathbf{x}}}{2\mathbf{i}\,}\big)\big) + \mathbf{i}\, L\big(p_\mathrm{Im}\big(\frac{\mathbf{x} + \qo{\mathbf{x}}}{2},\frac{\mathbf{x} - \qo{\mathbf{x}}}{2\mathbf{i}\,}\big)\big) = L^\R (p_{\mathrm{Re}}) + \mathbf{i}\, L^\R (p_{\mathrm{Im}}). \end{equation}
In particular, we have $L(p \qo p) = L^\R (p_{\mathrm{Re}}^2 + p_{\mathrm{Im}}^2)$ for any $p\in\mathbb C[\mathbf{x},\qo{\mathbf{x}}]$. This implies that $L$ is positive (on Hermitian sums of squares) if and only if $L^\R$ is positive (on real sums of squares). Since $\mathrm{Re}(\cdot)$ preserves degrees, the restriction of $L^\R$ to $\R\mxrxi_t$ corresponds to the restriction of $L$ to $ \C[\mathbf{x},\qo{\mathbf{x}}]_t$.
This gives the following correspondence for truncated quadratic modules.
\begin{lemma}\label{RCPosQuad}
Given $S \subseteq \C\mxox^h$, a Hermitian linear map $L\colon\C[\mathbf{x},\qo{\mathbf{x}}] \to \mathbb C$, the corresponding set $S_\mathrm{Re}\subseteq \mathbb{R}[\mathbf{x}_{\mathrm{Re}},\mathbf{x}_{\mathrm{Im}}]$ and the corresponding real linear map $L^\mathbb{R}\colon \mathbb{R}[\mathbf{x}_{\mathrm{Re}},\mathbf{x}_{\mathrm{Im}}]\to\mathbb{R}$, we have
$$L \geq 0 ~\text{on}~ \mathcal {M}(S)_{2t} \iff L^\R \geq 0 ~\text{on}~ \calMr[](S_{\mathrm{Re}})_{2t}\quad \text{ for any } t\in \mathbb{N}\cup\{\infty\}.$$ \end{lemma} \begin{proof} This follows from the linearity of $L$ and $L^\R$ since, by \cref{RCHquadbij}, $g p \qo p \in \mathcal {M}(S)$ if and only if $ \mathrm{Re}(gp \qo p)=g_{\mathrm{Re}}(p_\mathrm{Re}^2 + p_\mathrm{Im}^2) \in \calMr[](S_{\mathrm{Re}})$ and, by \cref{llr}, $L(g p \qo p) = L^\R(g_{\mathrm{Re}}(p_\mathrm{Re}^2 + p_\mathrm{Im}^2))$. \end{proof}
Finally note that an evaluation functional $L_{w}$ at a point $w\in \mathbb C^n$ corresponds to the evaluation functional $L_{(w_\mathrm{Re},w_\mathrm{Im})}$ at the point $(w_\mathrm{Re},w_\mathrm{Im})\in \mathbb{R}^{2n}$ since, for every $p\in \C[\mathbf{x},\qo{\mathbf{x}}]$, we have
\begin{equation*}\label{eqLLw}
L_{w}(p) = p(w,\qo w) = p_\mathrm{Re}(w_\mathrm{Re},w_\mathrm{Im}) + \mathbf{i}\, p_\mathrm{Im}(w_\mathrm{Re},w_\mathrm{Im}) = L^\R_{(w_\mathrm{Re},w_\mathrm{Im})}(p_\mathrm{Re}) + \mathbf{i}\, L^\R_{(w_\mathrm{Re},w_\mathrm{Im})}(p_\mathrm{Im}).
\end{equation*}
\paragraph{Matrix-valued linear functionals.} Consider a complex matrix-valued linear map $$ \mathcal L:\C[\mathbf{x},\qo{\mathbf{x}}] \to \mathbb C^{m\times m}, \quad p\mapsto \mathcal L(p) := \big( L_{ij}(p) \big)_{i,j\in[m]}, $$ where each $L_{ij}:\C[\mathbf{x},\qo{\mathbf{x}}] \to \mathbb C$ is scalar-valued. Then $\mathcal L$ is Hermitian if and only if, for all $p\in \mathbb C[\mathbf{x},\qo{\mathbf{x}}]$, we have $\mathcal L(\qo p) = \mathcal L(p)^*$, i.e., $$ \Big( \mathrm{Re}(L_{ij}(\qo p)) + \mathbf{i}\, \mathrm{Im}(L_{ij}(\qo p)) \Big)_{i,j=1}^m = \Big( \mathrm{Re}(L_{ji}(p)) - \mathbf{i}\, \mathrm{Im}(L_{ji}( p)) \Big)_{i,j=1}^m $$ or, equivalently, $\mathrm{Re}(L_{i,j}(\qo p)) = \mathrm{Re}(L_{j,i}(p))$ and $\mathrm{Im}(L_{i,j}(\qo p)) = -\mathrm{Im}(L_{j,i}( p))$ for all $i,j \in [m]$. This implies that if $\mathcal L$ is Hermitian and $p$ is Hermitian then the complex matrix $\mathcal L(p)$ is Hermitian.
Assume $\mathcal L$ is Hermitian. Then we define the real matrix-valued linear functional $$ \calL^\R: \R\mxrxi \to \mathbb{R}^{2m \times 2m}, \quad f\in \mathbb{R}[\mathbf{x}_{\mathrm{Re}},\mathbf{x}_{\mathrm{Im}}]\mapsto \mathcal L^\mathbb{R}(f) $$ \begin{equation} \label{calLrdef} \mathcal L^\mathbb{R}(f):=\Big( \mathcal L\big(f\big(\frac{\mathbf{x} +\qo{\mathbf{x}}}{2},\frac{\mathbf{x} -\qo{\mathbf{x}}}{2i}\big)\big) \Big)^\mathbb{R} = \begin{bmatrix}
\mathrm{Re}( \mathcal L(f(\frac{\mathbf{x} +\qo{\mathbf{x}}}{2},\frac{\mathbf{x} -\qo{\mathbf{x}}}{2i})) ) & -\mathrm{Im}( \mathcal L(f(\frac{\mathbf{x} +\qo{\mathbf{x}}}{2},\frac{\mathbf{x} -\qo{\mathbf{x}}}{2\mathbf{i}\,})) ) \\
\mathrm{Im}( \mathcal L(f(\frac{\mathbf{x} +\qo{\mathbf{x}}}{2},\frac{\mathbf{x} -\qo{\mathbf{x}}}{2\mathbf{i}\,})) ) & \mathrm{Re}( \mathcal L(f(\frac{\mathbf{x} +\qo{\mathbf{x}}}{2},\frac{\mathbf{x} -\qo{\mathbf{x}}}{2\mathbf{i}\,})) ) \end{bmatrix}. \end{equation} Since $f(\frac{\mathbf{x} +\qo{\mathbf{x}}}{2},\frac{\mathbf{x} -\qo{\mathbf{x}}}{2\mathbf{i}\,})$ is Hermitian it follows that $-\mathrm{Im}( \mathcal L(f(\frac{\mathbf{x} +\qo{\mathbf{x}}}{2},\frac{\mathbf{x} -\qo{\mathbf{x}}}{2\mathbf{i}\,})) )
= \mathrm{Im}( \mathcal L(f(\frac{\mathbf{x} +\qo{\mathbf{x}}}{2},\frac{\mathbf{x} -\qo{\mathbf{x}}}{2\mathbf{i}\,})) )^T$. Hence $\calL^\R$ takes its values in the cone $\mathcal {S}^{2m}$ of symmetric matrices.
\begin{lemma} \label{calLgpp} Given a Hermitian linear map $\mathcal L:\C[\mathbf{x},\qo{\mathbf{x}}] \to \mathbb C^{m\times m}$ and the corresponding map $\mathcal L^\mathbb{R}$ from (\ref{calLrdef}), $g \in \C\mxox^h$ and $p\in \C[\mathbf{x},\qo{\mathbf{x}}]$ we have the following equivalence
$$
\mathcal L(g p \qo p) \succeq 0 \iff \calL^\R(g_\mathrm{Re}(p_\mathrm{Re}^2 +p_\mathrm{Im}^2)) \succeq 0.
$$ \end{lemma}
\begin{proof} From \cref{RCMats,RCHpolybij,calLrdef} we obtain that $$ 0 \preceq \mathcal L(g p \qo p) \iff 0 \preceq \begin{bmatrix}
\mathrm{Re}(\mathcal L(g p \qo p)) & -\mathrm{Im}(\mathcal L(g p \qo p))\\
\mathrm{Im}(\mathcal L(g p \qo p)) & \mathrm{Re}(\mathcal L(g p \qo p)) \end{bmatrix} = \calL^\R(g_\mathrm{Re} (p_\mathrm{Re}^2 +p_\mathrm{Im}^2)) , $$ because $g p \qo p$ is Hermitian. \end{proof}
\begin{corollary} \label{calLgppcor} Given $S\subseteq \mathbb C[\mathbf{x},\qo{\mathbf{x}}]^h$, a Hermitian linear map $\mathcal L:\C[\mathbf{x},\qo{\mathbf{x}}] \to \mathbb C^{m\times m}$ is positive on $\mathcal {M}(S)$ if and only if the corresponding real linear map $\calL^\R$ from (\ref{calLrdef}) is positive on $\calMr[](S_{\mathrm{Re}})$. \end{corollary}
\subsection{Deriving Theorem \ref{theomainTchakaloff} from its real analog} \label{AppProofPutinar} We can now derive Theorem \ref{theomainTchakaloff}, which we stated for complex polynomials, from the following well-known results for real polynomials from \cite{Pu93} and \cite{Tchakaloff}.
\begin{theorem} \label{theomainTchakaloffREAL}
Let $S \subseteq \mathbb{R}[\mathbf{x}]$ such that
$\calMr[](S)$ is Archimedean and
let $L : \mathbb{R}[\mathbf{x}] \to \mathbb{R}$ be a linear map that is nonnegative on $\calMr[](S)$.
Then the following holds.
\begin{itemize}
\item[(i)] (Putinar \cite{Pu93}) There exists a measure $\mu^\mathbb{R}$ that is supported on $\mathscr{D}^\mathbb{R}(S)$, the positivity domain of $S$ defined by
$$
\mathscr{D}^\mathbb{R}(S) = \{a \in \mathbb{R}^n: g(a) \geq 0 \text{ for all } g \in S \},
$$
such that $L(f)=\int f \,d\mu^\mathbb{R}$ for all $f \in \mathbb{R}[\mathbf{x}]$.
\item[(ii)] (Tchakaloff~\cite{Tchakaloff})
For any integer $k \in \mathbb{N}$ there exists a linear map $ \widehat L: \mathbb{R}[\mathbf{x}] \to \mathbb{R}$
such that
$$ \widehat L (f)= L(f)\ \forall f\in \mathbb{R}[\mathbf{x}]_k \quad \text{ and } \quad \widehat L = \sum_{\ell=1}^{K} \lambda_\ell L_{a^\ell}$$
for some integer $K\ge 1$, scalars $\lambda_1,\ldots, \lambda_K>0$ and vectors $a^1,\ldots,a^K\in\mathscr{D}^\mathbb{R}(S)$.
\end{itemize}
\end{theorem}
We now indicate how to derive Theorem \ref{theomainTchakaloff} from Theorem \ref{theomainTchakaloffREAL}. For this consider $S\subseteq \mathbb C[\mathbf{x},\qo{\mathbf{x}}]^h$ and a linear map $L:\mathbb C[\mathbf{x},\qo{\mathbf{x}}]\to\mathbb C$. Assume $\mathcal {M}(S)$ is Archimedean and $L\ge 0$ on $\mathcal {M}(S)$. We consider the set $S_\mathrm{Re}\subseteq \mathbb{R}[\mathbf{x}_{\mathrm{Re}},\mathbf{x}_{\mathrm{Im}}]$ of real polynomials defined via (\ref{eqSRe}) and the associated linear map $L^\mathbb{R}:\mathbb{R}[\mathbf{x}_{\mathrm{Re}},\mathbf{x}_{\mathrm{Im}}]\to\mathbb{R}$ defined via (\ref{llrdef}). By Lemma \ref{RCArch} the quadratic module $\mathcal {M}^\mathbb{R}(S_\mathrm{Re})$ is Archimedean and, by Lemma \ref{RCPosQuad}, $L^\mathbb{R}\ge 0$ on $\mathcal {M}^\mathbb{R}(S_\mathrm{Re})$. Hence we can apply Theorem \ref{theomainTchakaloffREAL} to $S_\mathrm{Re}$ and $L^\mathbb{R}$.
By Theorem \ref{theomainTchakaloffREAL} (i), there exists a (real) measure $\mu^\mathbb{R}$ that is supported by $\mathscr{D}^\mathbb{R}(S_\mathrm{Re})$ and satisfies $L^\mathbb{R}(f)=\int fd\mu^\mathbb{R}$ for all $f\in\mathbb{R}[\mathbf{x}_{\mathrm{Re}},\mathbf{x}_{\mathrm{Im}}]$. Consider the (complex) measure $\mu$ defined by relation (\ref{eqmu}), which is therefore supported by the set $\mathscr{D}(S)$. We claim that $\mu$ is a representing measure for $L$. Indeed, for $p\in\mathbb C[\mathbf{x},\qo{\mathbf{x}}]$, using (\ref{eqLLRe}) we have $$L(p) = L^\mathbb{R}(p_\mathrm{Re})+\mathbf{i}\, L^\mathbb{R}(p_\mathrm{Im}) = \int p_\mathrm{Re} d\mu^\mathbb{R} +\mathbf{i}\, \int p_\mathrm{Im} d\mu^\mathbb{R} = \int pd\mu.$$ This completes the proof of Theorem \ref{theomainTchakaloff} (i). We now derive its part (ii).
Fix an integer $k\in\mathbb{N}$. By Theorem \ref{theomainTchakaloffREAL} (ii), there exists $\widehat L:\mathbb{R}[\mathbf{x}_{\mathrm{Re}},\mathbf{x}_{\mathrm{Im}}]\to\mathbb{R}$ such that $\widehat L(f)=L^\mathbb{R}(f)$ for all $f\in \mathbb{R}[\mathbf{x}_{\mathrm{Re}},\mathbf{x}_{\mathrm{Im}}]_k$ and $\widehat L=\sum_{\ell=1}^K\lambda_\ell L_{a^\ell}$ for some $K\in\mathbb{N}$, $\lambda_\ell>0$ and $a^\ell \in\mathscr{D}^\mathbb{R}(S_\mathrm{Re})$. Define the complex linear map $\widetilde L:\mathbb C[\mathbf{x},\qo{\mathbf{x}}]\to\mathbb C$ by $\widetilde L(p) :=\widehat L(p_\mathrm{Re})+\mathbf{i}\, \widehat L(p_\mathrm{Im})$ for any $p\in\mathbb C[\mathbf{x},\qo{\mathbf{x}}]$. Then, in view of (\ref{eqLLRe}), we have $\widetilde L(p)=L(p)$ for any $p\in\mathbb C[\mathbf{x},\qo{\mathbf{x}}]_k$. For each $\ell\in [K]$ let $w^\ell$ be the complex vector such that $(w^\ell_\mathrm{Re},w^\ell_\mathrm{Im})=a^\ell$. Then each $w^\ell$ belongs to $\mathscr{D}(S)$ and we have $$\widetilde L(p) = \widehat L(p_\mathrm{Re})+\mathbf{i}\, \widehat L(p_\mathrm{Im}) = \sum_\ell \lambda_\ell (p_\mathrm{Re}(a^\ell) +\mathbf{i}\, p_\mathrm{Im}(a^\ell)) =\sum_\ell \lambda_\ell p(w^\ell),$$ which shows $\widetilde L=\sum_\ell\lambda_\ell L_{w^\ell},$ and thus concludes the proof of Theorem \ref{theomainTchakaloff} (ii).
\subsection{Deriving Theorem \ref{theomainmatrix} from its real analog }\label{AppProofCimpric} In this section we prove the implication (ii) $\Longrightarrow$ (i) in Theorem \ref{theomainmatrix} from its real analog in \cite{CimpricZalar}, which we restate below for convenience.
\begin{theorem}\cite{CimpricZalar} \label{theomainmatrixREAL}
Let $S\subseteq \mathbb{R}[\mathbf{x}]$ be a set of polynomials such that the quadratic module $\calMr[](S)$ is Archimedean.
Let $\mathcal L: \mathbb{R}[\mathbf{x}] \to \mathcal S^m$ be a matrix-valued linear functional that is positive on $\mathcal {M}^\mathbb{R}(S)$, i.e.,
$\mathcal L(g f^2) \succeq 0$ for all $g \in S \cup \{1\} $ and $ f \in \mathbb{R}[\mathbf{x}]$.
Then
there exists a matrix-valued measure $\mu$ that is supported on $\mathscr{D}^{\R}(S)$ and takes values in the cone $\mathcal {S}^{m}_+$ of $m \times m$ positive semidefinite matrices such that $\mathcal L(f) =\int fd\mu$ for all $f\in \mathbb{R}[\mathbf{x}].$ \end{theorem}
We now indicate how to derive the implication (ii) $\Longrightarrow$ (i) in Theorem \ref{theomainmatrix} from Theorem \ref{theomainmatrixREAL}. For this let $S\subseteq \mathbb C[\mathbf{x},\qo{\mathbf{x}}]^h$ be such that $\mathcal {M}(S)$ is Archimedean and let $\mathcal L:\mathbb C[\mathbf{x},\qo{\mathbf{x}}]\to\mathcal{H}^m$ be a Hermitian linear map that satisfies $\mathcal L(gp\overline p)\succeq 0$ for all $g\in S$ and $p\in\mathbb C[\mathbf{x},\qo{\mathbf{x}}]$. Then the set $S_\mathrm{Re}\subseteq \mathbb{R}[\mathbf{x}_{\mathrm{Re}},\mathbf{x}_{\mathrm{Im}}]$ from (\ref{eqSRe}) has an Archimedean quadratic module $\mathcal {M}^\mathbb{R}(S_\mathrm{Re})$ by \cref{RCArch}. Consider the linear map $\mathcal L^\mathbb{R}:\mathbb{R}[\mathbf{x}_{\mathrm{Re}},\mathbf{x}_{\mathrm{Im}}]\to\mathcal S^{2m}$ defined via (\ref{calLrdef}).
Then, by
\cref{calLgpp}, $\calL^\R$ is positive on $\calMr[](S_{\mathrm{Re}})$. Hence we can apply \cref{theomainmatrixREAL} and conclude that $\calL^\R$ has a representing measure $\mu^\mathbb{R}$, which is supported on $\mathscr{D}^{\R}(S_{\mathrm{Re}})$ and takes values in the cone $\mathcal {S}^{2m}_+$.
We will now construct a (complex) measure $\mu$, which represents $\mathcal L$ and is supported on the set $\mathscr{D}(S)$, using the following two claims.
\begin{claim} \label{calLrincalW}
The map $\calL^\R$ takes values in the set
$
\mathcal{W} :=
\Big{\{}
\begin{bmatrix}
A & B^T \\
B & C
\end{bmatrix}
\in \mathcal {S}^{2m}: \ A = C,\ B^T = -B
\Big{\}}.
$
\end{claim}
\begin{proof}
Since $\calL^\R$ takes values in $\mathcal {S}^{2m}$ it has the following block-form
$$
\mathcal L^\mathbb{R}
=
\begin{bmatrix}
\mathcal L^\mathbb{R}_{11} & (\mathcal L^\mathbb{R}_{21})^T \\
\mathcal L^\mathbb{R}_{21} & \mathcal L^\mathbb{R}_{22}
\end{bmatrix},
$$
where $\mathcal L^\mathbb{R}_{11}$ and $\mathcal L^\mathbb{R}_{22}$ take values in $\mathcal {S}^m$, $\mathcal L^\mathbb{R}_{21}$ takes values in $\mathbb{R}^{m\times m}$ and, by construction,
\begin{equation}\label{eqLa}
\mathcal L^\mathbb{R}_{11}(f) = \mathcal L^\mathbb{R}_{22}(f) = \mathrm{Re}\big(\mathcal L\big(f\big(\frac{\mathbf{x} +\qo{\mathbf{x}}}{2},\frac{\mathbf{x} -\qo{\mathbf{x}}}{2\mathbf{i}\,}\big)\big)\big),\ \ \
\mathcal L^\mathbb{R}_{21}(f) = \mathrm{Im}\big(\mathcal L\big(f\big(\frac{\mathbf{x} +\qo{\mathbf{x}}}{2},\frac{\mathbf{x} -\qo{\mathbf{x}}}{2\mathbf{i}\,}\big)\big)\big)
\end{equation}
and $(\mathcal L^\mathbb{R}_{21}(f))^T = -\mathrm{Im}(\mathcal L(f(\frac{\mathbf{x} +\qo{\mathbf{x}}}{2},\frac{\mathbf{x} -\qo{\mathbf{x}}}{2\mathbf{i}\,})))$
for any $f \in \R\mxrxi$. Hence $(\calL^\R_{21})^T = -\calL^\R_{21}$ and thus $\mathcal L^\mathbb{R}$ takes values in $\mathcal{W}$ as desired.
\end{proof}
\begin{claim} \label{muRTakesW}
Without loss of generality we may assume the measure $\mu^\mathbb{R}$ takes values in
$\mathcal{W} \cap \mathcal S^{2m}_+$.
\end{claim}
\begin{proof}
We can write the measure $\mu^\mathbb{R}$ in block-form as
$$
\mu^\mathbb{R}=\begin{bmatrix}
\mu^\mathbb{R}_{11} & (\mu^\mathbb{R}_{21})^T \\
\mu^\mathbb{R}_{21} & \mu^\mathbb{R}_{22}
\end{bmatrix},
$$
where each block is a measure taking its values in $\mathbb{R}^{m\times m}$.
Then we can define the following new matrix-valued measure
$$
\mu' :=
\frac{1}{2}
\begin{bmatrix}
\mu^\mathbb{R}_{11} + \mu^\mathbb{R}_{22} & -(\mu^\mathbb{R}_{21}-(\mu^\mathbb{R}_{21})^T) \\
(\mu^\mathbb{R}_{21}-(\mu^\mathbb{R}_{21})^T) & \mu^\mathbb{R}_{11} + \mu^\mathbb{R}_{22}
\end{bmatrix}
=:
\begin{bmatrix} \mu'_{11} & -(\mu'_{21})^T \\ \mu'_{21} & \mu'_{11}\end{bmatrix}.
$$
First, by construction, $\mu'$ takes its values in the set $\mathcal{W}$. Second, $\mu'$ takes its values in $\mathcal {S}_+^{2m}$. Indeed, by Theorem \ref{theomainmatrixREAL}, $\mu^\mathbb{R}$ takes values in $\mathcal {S}_+^{2m}$ and we have
$$
\mu' =
\frac{1}{2}
\begin{bmatrix}
0 & -I_m \\
I_m & 0
\end{bmatrix}
\mu^\mathbb{R}
\begin{bmatrix}
0 & I_m \\
-I_m & 0
\end{bmatrix}
+
\frac{1}{2}\mu^\mathbb{R}.
$$
Finally, $\mu'$ also represents $\calL^\R$. Indeed, for any $f \in \R\mxrxi$ we have $\calL^\R_{11}(f) = \calL^\R_{22}(f)$ and $-\calL^\R_{21}(f)= (\calL^\R_{21}(f))^T$ by \cref{calLrincalW}. This implies
\begin{align*}
\calL^\R_{11}(f) &= \frac{1}{2}(\calL^\R_{11}(f) + \calL^\R_{22}(f)) = {1\over 2} \int f (d\mu^\mathbb{R}_{11} + d\mu^\mathbb{R}_{22}) =\int fd\mu'_{11},\\
\calL^\R_{21}(f) &= \frac{1}{2}(\calL^\R_{21}(f) - (\calL^\R_{21}(f))^T) ={1\over 2} \int f (d\mu^\mathbb{R}_{21} - d(\mu^\mathbb{R}_{21})^T)= \int f d\mu'_{21},
\end{align*} and thus $\calL^\R(f) = \int f d\mu'.$ Therefore we may replace the measure $\mu^\mathbb{R}$ by $\mu'$, which shows the claim.
\end{proof}
We now define the complex measure $\mu$ by setting
\begin{equation} \label{mucomp}
d\mu := d\mu^\mathbb{R}_{11} \circ\phi + \mathbf{i}\, d\mu^\mathbb{R}_{21}\circ\phi,
\end{equation}
where $\phi$ is the complex/real bijection in \cref{CRbij}. So $\mu$ takes values in $\mathbb C^{m\times m}$.
As shown above in Claim \ref{muRTakesW}, $\mu^\mathbb{R}$ takes values in the set $\mathcal{W} \cap \mathcal {S}^{2m}_+$. Hence, in view of \cref{RCMats}, we can conclude that $\mu$ takes its values in $\mathcal{H}^m_+$.
In addition, as $\mu^\mathbb{R}$ is supported by $\mathscr{D}^{\R}(S_{\mathrm{Re}})$, it follows that $\mu$ is supported by $\mathscr{D}(S)$.
Finally, we verify that $\mu$ represents $\mathcal L$. Indeed, for any $p\in \C[\mathbf{x},\qo{\mathbf{x}}]$, using (\ref{eqLa}) we obtain
\begin{align*}
\mathcal L(p) &= \mathcal L\big(p_\mathrm{Re}\big(\frac{\mathbf{x} +\qo{\mathbf{x}}}{2},\frac{\mathbf{x} -\qo{\mathbf{x}}}{2i}\big)\big) + \mathbf{i}\, \mathcal L\big(p_\mathrm{Im}\big(\frac{\mathbf{x} +\qo{\mathbf{x}}}{2},\frac{\mathbf{x} -\qo{\mathbf{x}}}{2i}\big)\big) \\
&= ( \calL^\R_{11}(p_\mathrm{Re}(\mathbf{x}_{\mathrm{Re}},\mathbf{x}_{\mathrm{Im}})) + \mathbf{i}\, \calL^\R_{21}(p_\mathrm{Re}(\mathbf{x}_{\mathrm{Re}},\mathbf{x}_{\mathrm{Im}})) )
+ \mathbf{i}\, \big( \calL^\R_{11}(p_\mathrm{Im}(\mathbf{x}_{\mathrm{Re}},\mathbf{x}_{\mathrm{Im}}) ) + \mathbf{i}\,\calL^\R_{21}(p_\mathrm{Im}(\mathbf{x}_{\mathrm{Re}},\mathbf{x}_{\mathrm{Im}})) \big) \\
&=
\int p_\mathrm{Re} d\mu^\mathbb{R}_{11}
+ \mathbf{i}\, \int p_\mathrm{Re} d\mu^\mathbb{R}_{21}
+ \mathbf{i}\, \int p_\mathrm{Im} d\mu^\mathbb{R}_{11}
- \int p_\mathrm{Im} d\mu^\mathbb{R}_{21} \\
&=
\int (p_\mathrm{Re} + \mathbf{i}\, p_\mathrm{Im}) d\mu^\mathbb{R}_{11}
+\mathbf{i}\, \int (p_\mathrm{Re}+ \mathbf{i}\, p_\mathrm{Im}) d\mu^\mathbb{R}_{21}\\
&=
\int (p_\mathrm{Re} + \mathbf{i}\, p_\mathrm{Im}) (d\mu^\mathbb{R}_{11} + \mathbf{i}\, d\mu^\mathbb{R}_{21})\\
&= \int p d\mu.
\end{align*} This concludes the proof of the implication (ii) $\Longrightarrow$ (i) in Theorem \ref{theomainmatrix}.
\end{document}
\begin{document}
\title{A Framework of Sparse Online Learning\\ and Its Applications}
\author{Dayong~Wang,
Pengcheng~Wu,
Peilin~Zhao,
and~Steven~C.H.~Hoi, \IEEEcompsocitemizethanks{\IEEEcompsocthanksitem Dayong Wang is with Department of Computer Science and Engineering, Michigan State University, USA 48824.\protect\\ E-mail: dywang@msu.edu \IEEEcompsocthanksitem Pengcheng Wu and Steven C.H. Hoi are with School of Information Systems, Singapore Management University, Singapore 178902.\protect\\ E-mail: \{pcwu, chhoi\}@smu.edu.sg \IEEEcompsocthanksitem $^\ast$Peilin Zhao, the corresponding author, is with Institute for Infocomm Research, A*STAR, Singapore 138632.\protect\\ E-mail: zhaop@i2r.a-star.edu.sg} \thanks{}}
\newtheorem{thm}{Theorem} \newtheorem{prop}{Proposition} \newtheorem{lemma}{Lemma} \newtheorem{cor}[thm]{Corollary} \newtheorem{definition}[thm]{Definition} \newcommand{\includeMyGraphicA}[1]{\includegraphics[width=2.2in, height=4in]{#1}} \newcommand{\includeMyGraphicB}[1]{\includegraphics[width=3.2in, height=2.6in]{#1}} \newcommand{\makeMyboxA}[1]{\makebox[2.2in]{#1}} \newcommand{\makeMyboxB}[1]{\makebox[3.2in]{#1}} \def \g {\mathbf{g}} \def \x {\mathbf{x}} \def \z {\mathbf{z}} \def \hy {\hat{y}} \def \H {\mathcal{H}_{\kappa}} \def \R {\mathbb{R}} \def \w {\mathbf{w}} \def \u {\mathbf{u}} \def \E {\mathbb{E}} \def \M {\mathcal{M}} \def \L {\mathcal{L}} \def \I {\mathbb{I}} \def \sign {\mathrm{sign}} \def \det {\mathrm{det}} \def \diag {\mathrm{diag}} \newcommand{\marginpar{FIX}}{\marginpar{FIX}} \newcommand{\marginpar{NEW}}{\marginpar{NEW}}
\IEEEcompsoctitleabstractindextext{ \begin{abstract} The amount of data in our society has been exploding in the era of big data today. In this paper, we address several open challenges of big data stream classification, including high volume, high velocity, high dimensionality, high sparsity, and high class-imbalance. Many existing studies in data mining literature solve data stream classification tasks in a batch learning setting, which suffers from poor efficiency and scalability when dealing with big data. To overcome the limitations, this paper investigates an online learning framework for big data stream classification tasks. Unlike some existing online data stream classification techniques that are often based on first-order online learning, we propose a framework of Sparse Online Classification (SOC) for data stream classification, which includes some state-of-the-art first-order sparse online learning algorithms as special cases and allows us to derive a new effective second-order online learning algorithm for data stream classification. In addition, we also propose a new cost-sensitive sparse online learning algorithm by extending the framework with application to tackle online anomaly detection tasks where class distribution of data could be very imbalanced. We also analyze the theoretical bounds of the proposed method, and finally conduct an extensive set of experiments, in which encouraging results validate the efficacy of the proposed algorithms in comparison to a family of state-of-the-art techniques on a variety of data stream classification tasks. \end{abstract}
\begin{keywords} online learning; sparse learning; classification; cost-sensitive learning. \end{keywords}}
\maketitle \IEEEdisplaynotcompsoctitleabstractindextext \IEEEpeerreviewmaketitle
\section{Introduction} In the era of big data today, the amount of data in our society has been exploding, which has raised many opportunities and challenges for data analytic research in data mining community. In this work, we aim to address the challenging real-world big data stream classification task, such as web-scale spam email classification. In general, big data stream classification has several characteristics:
\begin{compactitem} \item{\bf high volume}: one has to deal with a huge amount of existing training data, in million or even billion scale; \item{\bf high velocity}: new data often arrives sequentially and very rapidly, e.g., about $182.9$ billion emails are sent/received worldwide every day according to an email statistic report by the Radicati Group~\cite{Radicati2013}; \item{\bf high dimensionality}: there are a large number of features, e.g., for some spam email classification tasks, the length of the vocabulary list can go up from $10,000$ to $50,000$ or even to million scale; \item{\bf high sparsity}: many feature elements are zero, and the fraction of active features is often small, e.g., the spam email classification study in \cite{youn2007spam} showed that accuracy saturates with dozens of features out of tens of thousands of features; and \item{\bf high class-imbalance}: some class considerably dominates the others, e.g., for spam email classification tasks, the number of non-spam (ham) emails is often much larger than the number of spam emails. \end{compactitem}
The above characteristics present huge challenges for big data stream classification tasks when using conventional data stream classification techniques that are often restricted to a batch learning setting and thus suffer from several critical drawbacks: (i) it requires a large memory capacity for caching arrived examples; (ii) it is expensive to collect and train on the entire data set; (iii) it suffers from expensive re-training cost whenever new training data arrives; and (iv) their assumption that all training data must be available a priori does not hold for real-world data stream applications where data arrives rapidly in a sequential manner.
To tackle the above challenges, a promising approach is to explore online learning methodology that performs incremental training over streaming data in a sequential manner. Typically, an online learning algorithm processes one instance at a time and makes very simple updates with each arriving example repeatedly. In contrast to batch learning algorithms, online algorithms are not only more efficient and scalable, but also able to avoid expensive re-training cost when handling new training data, making them more favorable choices for solving large-scale machine learning tasks towards big data stream applications. In the literature, a large variety of algorithms have been proposed, including a number of first-order algorithms~\cite{rosenblatt1958perceptron, DBLP:journals/jmlr/CrammerDKSS06} and second-order algorithms~\cite{DBLP:journals/siamcomp/Cesa-BianchiCG05, DBLP:journals/jmlr/SchraudolphYG07, DBLP:journals/jmlr/BordesBG09}. Despite being studied extensively, traditional online-learning algorithms suffer from a critical limitation for high-dimensional data. This is because they assume at least one weight for every feature and most of the learned weights are often nonzero, making them of low efficiency not only in computational time but also in memory cost for both training and test phases. Sparse online learning~\cite{langford-2009-sparse} aims to overcome this limitation by inducing sparsity in the weights learned by an online-learning algorithm.
In this paper, we introduce a framework of Sparse Online Learning for solving large-scale high-dimensional data stream classification tasks. We show that the proposed framework covers some existing first-order sparse online classification algorithm, and is able to further derive new algorithms by exploiting the second order information. The proposed sparse online classification scheme is far more efficient and scalable than the traditional batch learning algorithms for data stream classification tasks. We further give theoretical analysis of the proposed algorithm and conduct an extensive set of experiments. The empirical evaluation shows that the proposed algorithm could achieve state-of-the-art performance. The rest of this paper is organized as follows. Section 2 reviews related work. Section 3 presents our problem formulation. Section 4 proposes our novel framework. Section 5 discusses our experimental results, and section 6 concludes this work.
As a summary, our main contributions include: \begin{itemize} \item We propose a general online learning framework, from which first order and second order algorithms can easily be derived. \item We provide general theoretical analysis including general regret and mistake bounds for the proposed algorithms. \item The proposed algorithms are evaluated on several high-dimensional large-scale benchmark databases, where state-of-the-art performances are achieved. \end{itemize}
\section{Related Work} Our work is closely related to the studies of online learning in machine learning and data mining. Below we briefly review some important related works.
\subsection{Online Learning}
Online learning represents a family of efficient and scalable machine learning algorithms~\cite{hoi2014libol}, which optimize some performance measure in an online fashion, including accuracy~\cite{DBLP:journals/jmlr/CrammerDKSS06}, AUC~\cite{DBLP:conf/icml/ZhaoHJY11}, cost-sensitive metrics~\cite{DBLP:journals/tkde/WangZH14}, etc. Unlike batch learning methods that suffer from expensive re-training cost, online learning works sequentially by performing highly efficient (typically constant) updates for each new training data, making it highly scalable for data stream classification. In literature, various techniques~\cite{bianchi-2006-prediction, rosenblatt1958perceptron, DBLP:journals/jmlr/CrammerDKSS06, DBLP:conf/icml/DredzeCP08, DBLP:conf/nips/CrammerKD09, DBLP:journals/jmlr/ZhaoHJ11, wang2012exact} have been proposed for online learning. The well-known first-order online learning algorithms include Perceptron~\cite{rosenblatt1958perceptron,LMP99}, Passive-Aggressive (PA) algorithms~\cite{DBLP:journals/jmlr/CrammerDKSS06}, etc.
The most well-known method is the Perceptron algorithm~\cite{rosenblatt1958perceptron,LMP99}, which updates the model by adding a new example as a support vector with some constant weight. Recently, a series of sophisticated online learning algorithms have been proposed by following the criterion of maximum margin learning principle~\cite{DBLP:journals/jmlr/Gentile01, KivinenSW01, DBLP:journals/jmlr/CrammerDKSS06}. One famous algorithm is the Passive-Aggressive (PA) algorithm~\cite{DBLP:journals/jmlr/CrammerDKSS06}, which evolves a classifier by suffering less loss on the current instance without moving far from the previous function.
In recent years, the design of many efficient online learning algorithms has been influenced by convex optimization tools. Furthermore, it was observed that most previously proposed efficient online algorithms can be jointly analyzed based on the following elegant model~\cite{Shalev-Shwartz:2012:OLO:2185819.2185820}: \begin{algorithm}[htpb] \caption{Online Convex Optimization Scheme}\label{alg:online_convex_optimiaztion_scheme} \begin{algorithmic} \STATE\textbf{INPUT : A convex set $\R^d$} \FOR{$t=1,\ldots, T$} \STATE predict a vector $\w_t \in \R^d$; \STATE receive a convex loss function $\ell_t : S \rightarrow \mathbb{R}$; \STATE suffer loss $\ell_t(\w_t)$; \ENDFOR \end{algorithmic} \end{algorithm}
Based on the previous framework, we can consider online learning as an algorithmic framework for the convex online learning problem: $$\min_{\w}{f(\w)} = \min_{\w}{\sum_{t}{\ell_t(\w)}},$$ where $f(\w)$ is a convex empirical loss function for the sum of losses over a sequence of observations. The regret of the algorithm is defined as follows: $$R_{T} = \sum_{t=1}^{T}\ell_t(\w_t) - \min_{\w} \sum_{t=1}^{T}\ell_t(\w),$$ where $\w$ is any vector in the convex space $\R^d$. The goal of an online learning algorithm is to find a low regret scheme, in which the regret $R_T$ grows sub-linearly with the number of iterations $T$. As a result, when the round number $T$ goes to infinity, the difference between the \emph{average} loss of the learner and the \emph{average} loss of the best learner tends to zero.
Although the general online learning algorithms (e.g., Perceptron and PA) have solid theoretical guarantees and perform well on many applications, generally they are limited in several aspects. First, the general online learning algorithms exploit the full features, which is not suitable for large-scale high-dimensional problems. To tackle this limitation, \emph{sparse online learning} has been extensively studied recently. Second, the general online learning algorithms only exploit the first order information, and the same learning rate is adopted for all features. This problem can be addressed by \emph{second order online learning} algorithms. Last but not least, the general online learning algorithms are not suitable for imbalanced input data streams, which can be efficiently handled by \emph{cost-sensitive online learning} algorithms. In the following parts, we will briefly introduce several representative algorithms for the previous three aspects.
\subsection{Sparse Online Learning} \emph{Sparse online learning}~\cite{duchi-2009-sparse,langford-2009-sparse} aims to learn a sparse linear classifier, which only contains a limited number of active features. It has been actively studied~\cite{duchi-2009-sparse,xiao2010dual,Shalev-Shwartz2011,wang2013online}. There are two groups of solutions for \emph{sparse online learning}. The first group of studies on sparse online learning follows the general idea of subgradient descent with truncation. For example, Duchi and Singer propose the FOBOS algorithm~\cite{duchi-2009-sparse}, which extends the \emph{Forward-Backward Splitting} method to solve the sparse online learning problem in two phases: (i) an unconstrained subgradient descent step with respect to the loss function, and (ii) an instantaneous optimization for a trade-off between minimizing the regularization term and keeping close to the result obtained in the first phase. The optimization problem in the second phase can be efficiently solved by adopting simple \emph{soft-thresholding} operations that perform some truncation on the weight vectors. Following a similar scheme, Langford et al.~\cite{langford-2009-sparse} argue that truncation on every iteration is too aggressive as each step modifies the coefficients by only a small amount, and propose the \emph{Truncated Gradient} (TG) method which truncates coefficients every $K$ steps when they are less than a predefined threshold $\theta$. The second group of studies on sparse online learning mainly follows the dual averaging method of~\cite{nesterov2009primal}, which can explicitly exploit the regularization structure in an online setting. One representative work is \emph{Regularized Dual Averaging} (RDA)~\cite{xiao2010dual}, which learns the variables by solving a simple optimization problem that involves the running average of all past subgradients of the loss functions, not just the subgradient in each iteration. 
Lee et al.~\cite{lee2012manifold} further extend the RDA algorithm by using a more aggressive truncation threshold and generate significantly sparser solutions.
\subsection{Second-order Online Learning} \emph{Second Order Online Learning} aims to dynamically incorporate knowledge of observed data in earlier iterations to perform more informative gradient-based learning. Unlike first order algorithms that often adopt the same learning rate for all coordinates, the second order online learning algorithms adapt the step size employed for each coordinate individually. A variety of second order online learning algorithms have been proposed recently. Some techniques attempt to incorporate knowledge of the geometry of the data observed in earlier iterations to perform more effective online updates. For example, Balakrishnan et al.~\cite{balakrishnan2008algorithms} propose algorithms for sparse linear classifiers in the massive data setting, which require $O(d^2)$ time and $O(d^2)$ space in the worst case. Another state-of-the-art technique for second order online learning is the family of confidence-weighted (CW) learning algorithms~\cite{DBLP:conf/icml/DredzeCP08, DBLP:conf/nips/CrammerDP08, crammer2009adaptive, ma2010exploiting, wang2012exact}, which exploit confidence of weights when making updates in online learning processes. In general, the second order algorithms are more accurate and converge faster, but fall short in two aspects: (i) they incur higher computational cost especially when dealing with high-dimensional data; and (ii) the weight vectors learned are often not sparse, making them unsuitable for high-dimensional data. Recently, Duchi et al. addressed the sparsity and second order update in the same framework, and proposed the Adaptive Subgradient method~\cite{duchi2011adaptive} (Ada-RDA), which adaptively modifies the proximal function at each iteration to incorporate knowledge about the geometry of the data.
\subsection{Cost-Sensitive Online Learning} Cost-sensitive classification has been extensively studied in data mining and machine learning. In the past decade, a variety of cost-sensitive metrics have been proposed to tackle this problem. For example, the weighted sum of \emph{sensitivity} and \emph{specificity}~\cite{Brodersen:2010:BAP:1904935.1905533}, and the weighted \emph{misclassification cost}~\cite{conf/ecml/AkbaniKJ04, Elkan:2001:FCL:1642194.1642224}. Both cost-sensitive classification and online learning have been studied extensively in data mining and machine learning communities, respectively. There are only a few works on \emph{cost-sensitive online learning}. For example, Wang et al.~\cite{DBLP:journals/tkde/WangZH14} proposed a family of cost-sensitive online classification framework, which are designed to directly optimize two well-known cost-sensitive measures. Zhao and Hoi~\cite{Zhao:2013:COA:2487575.2487647} tackle the same problem by adopting the double updating technique and propose Cost-Sensitive Double Updating Online Learning (CSDUOL).
\section{Sparse Online Learning for Data Stream Classification}
In this section, we first introduce a general sparse online learning framework for online data stream classification, and then provide the theoretical analysis on the framework. The framework will be used to derive the family of first-order and second-order sparse online classification algorithms in the following section.
\subsection{General Sparse Online Learning} Without loss of generality, we consider the sparse online learning algorithm for the binary classification problem, which is also mentioned as sparse online classification problem in this paper. The sparse online classification algorithm generally works in rounds. Specifically, at the round $t$, the algorithm is presented one instance $\x_t\in \R^d$, then the algorithm predicts its label as \begin{eqnarray} \hy_t=\sign(\w_t^\top\x_t), \nonumber \end{eqnarray}
where $\w_t\in \R^d$ is the linear classifier maintained by the algorithm. After the prediction, the algorithm will receive the true label $y_t\in\{+1,-1\}$, and suffer a loss $\ell_t(\w_t)$. Then, the algorithm would update its prediction function $\w_t$ based on the newly received $(\x_t,y_t)$. The standard goal of online learning is to minimize the number of mistakes suffered by the online algorithm. To facilitate the analysis, we first introduce several functions. First, the hinge loss $\ell_t(\w; (\x_t,y_t))=[1-y_t\w^\top\x_t]_+$, where $[a]_+=\max(a, 0)$, is the most popular loss function for the binary classification problem. Given a series of $\delta$-strongly convex functions $\Phi_{t=1,\ldots,T}$, with respect to the norms $\|\cdot\|_{\Phi_t}$ and the dual norms $\|\cdot\|^*_{\Phi_t}$, the proposed general sparse online classification (SOC) algorithm is shown in Algorithm~\ref{alg:general_frame}. \begin{algorithm}[htpb] \caption{General Sparse Online Learning (SOL)}\label{alg:general_frame} \begin{algorithmic} \STATE\textbf{INPUT :$\lambda$, $\eta$} \STATE\textbf{INITIALIZATION :} $\theta_1=0$. \FOR{$t=1,\ldots, T$} \STATE receive $\x_t\in \R^d$; \STATE $\u_t=\nabla \Phi^*_t(\theta_t)$;
\STATE $\w_t=\arg\min_{\w}\frac{1}{2}\|\u_t-\w\|^2_2+\lambda_t\|\w\|_1$; \STATE predict $\hy_t=\sign(\w_t^\top\x_t)$; \STATE receive $y_t$ and suffer $\ell_t(\w_t)=[1-y_t\w_t^\top\x_t]_+$; \IF{$\ell_t(\w_t)>0$}
\STATE $\theta_{t+1}=\theta_t-\eta_t \z_t$, where $\z_t=\nabla\ell_t(\w_t)$; \ENDIF \ENDFOR \end{algorithmic} \end{algorithm}
\subsection{Theoretical Bound Analysis} In this section, we analyze the regret $R_T$ of the general sparse online learning (SOL) algorithm. Firstly, we will present a key lemma, which will facilitate the following analysis. \begin{lemma}\label{lem:framework}
Let $\Phi_t, t=1,\ldots,T$ be $\delta$-strongly convex functions with respect to the norms $\|\cdot\|_{\Phi_t}$ and let $\|\cdot\|^*_{\Phi_t}$ be the respective dual norms. Let $\Phi(0)=0$, and $\x_1,\ldots,\x_T$ be an arbitrary sequence of vectors in $\R^d$. Assume that Algorithm~\ref{alg:general_frame} is run on this sequence with the function $\Phi_t$. Then, we have the following inequality \begin{eqnarray}\label{eqn:sparse-bound} &&\hspace{-0.3in}\sum^T_{t=1}\eta_t (\w_t-\w)^\top\z_t\le\Phi_T(\w)\\
&&\hspace{-0.2in}+\sum^T_{t=1}\Big[\Phi^*_t(\theta_t)-\Phi^*_{t-1}(\theta_t)+\frac{\eta^2_t}{2\delta}\|\z_t\|^2_{\Phi^*_t}+\eta_t\lambda_t\|\z_t\|_1\Big],\nonumber \end{eqnarray} for any $\w$, and any $\lambda>0$. \end{lemma} \begin{proof} Firstly, define $\Delta_t=\Phi^*_t(\theta_{t+1})-\Phi^*_{t-1}(\theta_t)$, then \begin{eqnarray*} \sum^T_{t=1}\Delta_t&=&\Phi^*_T(\theta_{T+1})-\Phi^*_0(\theta_1)=\Phi^*_T(\theta_{T+1})\\
&\ge&\w^\top\theta_{T+1}-\Phi_T(\w), \end{eqnarray*} where the final inequality is due to Fenchel's inequality. In addition, we have \begin{eqnarray*} &&\hspace{-0.3in}\Delta_t=\Phi^*_t(\theta_{t+1})-\Phi^*_t(\theta_t)+\Phi^*_t(\theta_t)-\Phi^*_{t-1}(\theta_t)\\
&&\hspace{-0.3in}\le\Phi^*_t(\theta_t)-\Phi^*_{t-1}(\theta_t)-\eta_t (\nabla \Phi^*_t(\theta_t))^\top\z_t+\frac{\eta^2_t}{2\delta}\|\z_t\|^2_{\Phi^*_t}. \end{eqnarray*} Combining the above two inequalities, we get \begin{eqnarray*} &&-\sum^T_{t=1}\eta_t\w^\top\z_t -\Phi_T(\w)\le \sum^T_{t=1}\Delta_t\\
&&\le\sum^T_{t=1}[\Phi^*_t(\theta_t)-\Phi^*_{t-1}(\theta_t)-\eta_t \u_t^\top\z_t+\frac{\eta^2_t}{2\delta}\|\z_t\|^2_{\Phi^*_t}]. \end{eqnarray*} Rearranging the above inequality, we get \begin{eqnarray}\label{eqn:dense-bound} &&\hspace{-0.3in}\sum^T_{t=1}\eta_t (\u_t-\w)^\top\z_t \nonumber\\
&&\hspace{-0.3in}\le\Phi_T(\w)+\sum^T_{t=1}[\Phi^*_t(\theta_t)-\Phi^*_{t-1}(\theta_t)+\frac{\eta^2_t}{2\delta}\|\z_t\|^2_{\Phi^*_t}]. \end{eqnarray} Now, we would connect $\w_t^\top\x_t$ and $\u_t^\top\x_t$ as follows: \begin{eqnarray*}
&&\hspace{-0.3in}\w_t^\top\z_t=\sum^d_{i=1}w_{t,i}z_{t,i}=\sum^d_{i=1}\sign(u_{t,i})[|u_{t,i}|-\lambda_t]_+ z_{t,i}\nonumber\\
&&\hspace{-0.3in}=\sum_{u_{t,i}z_{t,i}\ge 0}[|u_{t,i}|-\lambda_t]_+ |z_{t,i}|-\sum_{u_{t,i}z_{t,i}< 0}[|u_{t,i}|-\lambda_t]_+ |z_{t,i}|\nonumber\\
&&\hspace{-0.3in}\le\sum_{u_{t,i}z_{t,i}\ge 0}|u_{t,i}| |z_{t,i}|+\sum_{u_{t,i}z_{t,i}< 0}(-|u_{t,i}||z_{t,i}|+\lambda_t|z_{t,i}|)\nonumber\\
&&\hspace{-0.3in}\le\sum_{u_{t,i}z_{t,i}\ge 0}u_{t,i}z_{t,i}+\sum_{u_{t,i}z_{t,i}< 0}(u_{t,i}z_{t,i}+\lambda_t|z_{t,i}|)\nonumber\\
&&\hspace{-0.3in}\le\u_t^\top\z_t+\lambda_t\|\z_t\|_1. \end{eqnarray*} Plugging this inequality into inequality~(\ref{eqn:dense-bound}) will conclude the lemma. \end{proof} Given this general lemma, we would provide a general corollary, which could directly upper bound the regret suffered by this framework. To derive this kind of corollary, we only need to lower bound the left hand side of the inequality~(\ref{eqn:sparse-bound}) by using $\ell_t(\w_t)-\ell_t(\w)\le (\w_t-\w)^\top\z_t$, which is the property of convex function. \begin{cor}\label{cor:regret} Under the assumptions of Lemma 1, if we further assume $\ell$ is convex and $\eta_t=\eta$, then the regret $R_T=\sum^T_{t=1}\ell_t(\w_t)-\min_{\w}\sum^T_{t=1}\ell_t(\w) $ of the proposed framework~(\ref{alg:general_frame}) satisfies the following inequality \begin{eqnarray}
R_T\le\frac{\Phi_T(\w)}{\eta}+\sum^T_{t=1}[\frac{\eta}{2\delta}\|\z_t\|^2_{\Phi^*_t}+\lambda_t\|\z_t\|_1]+\frac{\sum^T_{t=1}\Delta^*_t}{\eta}, \end{eqnarray} where $\Delta^*_t=\Phi^*_t(\theta_t)-\Phi^*_{t-1}(\theta_t)$. \end{cor} Given this framework and this analysis, we will derive some specific algorithms and their regret bounds.
\section{Derived Algorithms}
In this section, we will first recover the RDA~\cite{xiao2010dual} algorithm and then derive an algorithm utilizing the second-order information. Throughout this section, we will adopt the hinge loss function and denote $\L=\{t|\ell_t(\w_t)>0\}$. We denote $L_t=\I_{(\ell_t(\w_t)>0)}$, where $\I_{v}$ is the indicator function, $\I_{v}=1$ if $v$ is true, otherwise $\I_v=0$.
\subsection{First Order Algorithm }
Set $\Phi_t(\w)=\frac{1}{2}\|\w\|^2_2$, which is 1-strongly convex with respect to $\|\cdot\|_2$. It is known that the dual norm of $\|\cdot\|_2$ is $\|\cdot\|_2$ itself, while $\Phi^*_t=\Phi_t$. Under these assumptions, we get the first order sparse online learning (FSOL) algorithm, which is the same as the Regularized Dual Averaging (RDA) algorithm with soft 1-norm regularization~\cite{xiao2010dual}. \begin{algorithm}[htpb] \caption{First Order Sparse Online Learning (FSOL)}\label{alg:First Order} \begin{algorithmic} \STATE\textbf{INPUT :$\lambda$, $\eta$} \STATE\textbf{INITIALIZATION :} $\theta_1=0$. \FOR{$t=1,\ldots, T$} \STATE receive $\x_t\in \R^d$;
\STATE $\w_t=\sign(\theta_t)\odot[|\theta_t|-\lambda_t]_+$; \STATE predict $\hy_t=\sign(\w_t^\top\x_t)$ and receive $y_t\in\{-1,1\}$; \STATE suffer $\ell_t(\w_t)=[1-y_t\w_t^\top\x_t]_+$; \STATE $\theta_{t+1}=\theta_t+ \eta L_t y_t \x_t$; \ENDFOR \end{algorithmic} \end{algorithm}
\begin{thm}
Let $(\x_1,y_1),\ldots,(\x_T,y_T)$ be a sequence of examples, where $\x_t\in \R^d$, $y_t\in\{-1,+1\}$ and $\|\x_t\|_1\le X$ for all $t$. If further set $\lambda_t=\eta\lambda$, then the regret $R_T=\sum^T_{t=1}\ell_t(\w_t)-\min_{\w}\sum^T_{t=1}\ell_t(\w) $ suffered by the algorithm~(\ref{alg:First Order}) is bounded as follows: \begin{eqnarray*}
R_T\le\frac{\frac{1}{2}\|\w\|^2_2}{\eta}+\frac{\eta}{2}\sum^T_{t=1}X^2+\sum^T_{t=1}\eta\lambda X, \end{eqnarray*}
for any $\w\in\R^d$. Further setting $\eta=\frac{\|\w\|_2}{\sqrt{ (X^2+2\lambda X)T}}$, we could have \begin{eqnarray*} R_T\le D\sqrt{ (X^2+2\lambda X)T}, \end{eqnarray*}
for any $\w\in\{\w\ |\|\w\|_2\le D\}$. \end{thm} \begin{proof} Firstly $\Delta^*_t=\Phi^*_t(\theta_t)-\Phi^*_{t-1}(\theta_t)=0$, then according to corollary~(\ref{cor:regret}), we have \begin{eqnarray*}
R_T&\le&\frac{\frac{1}{2}\|\w\|^2_2}{\eta}+\sum^T_{t=1}[\frac{\eta}{2}\|L_t y_t\x_t\|^2_2+\lambda_t\|L_t y_t\x_t\|_1]\\
&\le&\frac{\frac{1}{2}\|\w\|^2_2}{\eta}+\frac{\eta}{2}\sum^T_{t=1}X^2+\sum^T_{t=1}\eta\lambda X. \end{eqnarray*} \end{proof} {\bf Remark:} This bound indicates the regret of this algorithm is upper bounded by $O(\sqrt{T})$, which recovers the results in~\cite{xiao2010dual}.
\subsection{Second Order Algorithm}
Set $\Phi_t(\w)=\frac{1}{2}\w^\top A_t\w$, where $A_t=A_{t-1}+\frac{\x_t\x_t^\top}{r}, r>0$ and $A_0=I$. It is easy to verify that $\Phi_t$ is 1-strongly convex with respect to $\|\w\|^2_{\Phi_t}=\w^\top A_t \w$. Its dual function $\Phi^*_t(\w)$ is $\frac{1}{2}\w^\top A^{-1}_t\w$, while $\|\w\|^2_{\Phi^*_t}=\w^\top A^{-1}_t\w$. Using the Woodbury identity, we can incrementally update the inverse of $A_t$ as $A^{-1}_t=A^{-1}_{t-1}-\frac{A^{-1}_{t-1}\x_t\x_t^\top A^{-1}_{t-1}}{r+\x^\top_t A^{-1}_{t-1}\x_t}$. Under these assumptions, we get the second order sparse online learning (SSOL) algorithm. \begin{algorithm}[htpb] \caption{Second Order Sparse Online Learning (SSOL)}\label{alg:Second Order} \begin{algorithmic} \STATE\textbf{INPUT :$\lambda$, $\eta$} \STATE\textbf{INITIALIZATION :} $\theta_1=0$. \FOR{$t=1,\ldots, T$} \STATE receive $\x_t\in \R^d$; \STATE $A^{-1}_t=A^{-1}_{t-1}-\frac{A^{-1}_{t-1}\x_t\x_t^\top A^{-1}_{t-1}}{r+\x^\top_t A^{-1}_{t-1}\x_t}$; \STATE $\u_t=A^{-1}_t \theta_t$;
\STATE $\w_t=\sign(\u_t)\odot[|\u_t|-\lambda_t]_+$; \STATE predict $\hy_t=\sign(\w_t^\top\x_t)$ and receive $y_t\in\{-1,1\}$; \STATE suffer $\ell_t(\w_t)=[1-y_t\w_t^\top\x_t]_+$; \STATE $\theta_{t+1}=\theta_t+ \eta L_t y_t \x_t$; \ENDFOR \end{algorithmic} \end{algorithm}
\begin{thm}\label{thm:ssol}
Let $(\x_1,y_1),\ldots,(\x_T,y_T)$ be a sequence of examples, where $\x_t\in \R^d$, $y_t\in\{-1,+1\}$ and $\|\x_t\|_1\le X$ for all $t$. If further set $\lambda_t=\lambda/t$, then the regret $R_T=\sum^T_{t=1}\ell_t(\w_t)-\min_{\w}\sum^T_{t=1}\ell_t(\w) $ suffered by the algorithm~(\ref{alg:Second Order}) is bounded as \begin{eqnarray} &&\hspace{-0.3in}R_T \nonumber\le\frac{D^2}{2\eta}+\frac{\eta}{2}rd\log((1+\frac{X^2}{r}T))+\lambda X[\log(T)+1], \end{eqnarray}
for any $\w\in\{\w\ |\w^\top A_T\w\le D^2\}$. \end{thm} \begin{proof} Firstly, it is easy to observe \begin{eqnarray*} \Delta^*_t&=&\frac{1}{2}\theta_t^\top A^{-1}_t\theta_t-\frac{1}{2}\theta_t^\top A^{-1}_{t-1}\theta_t\\ &&=-\frac{(\x_t^\top A^{-1}_{t-1}\theta_t)^2}{2(r+\x^\top_t A^{-1}_{t-1}\x_t)}\le 0. \end{eqnarray*} Then according to the conclusion in the corollary~(\ref{cor:regret}), we have \begin{eqnarray*}
&&\hspace{-0.3in}R_T\le\frac{\w^\top A_T\w}{2\eta}+\sum^T_{t=1}[\frac{\eta}{2}L_t\x_t^\top A^{-1}_t \x_t+\lambda_t\|L_t y_t \x_t\|_1]\\ &&\hspace{-0.2in}\le\frac{\w^\top A_T\w}{2\eta}+\frac{\eta}{2}\sum^T_{t=1}\x_t^\top A^{-1}_t \x_t+X\sum^T_{t=1}\lambda_t\\ &&\hspace{-0.2in}\le\frac{\w^\top A_T\w}{2\eta}+\frac{\eta}{2}\sum^T_{t=1}\x_t^\top A^{-1}_t \x_t+\lambda X[\log(T)+1], \end{eqnarray*} where the final inequality used $\sum^T_{t=1}\frac{1}{t}\le [\log(T)+1]$. Secondly, the second term of the right hand side can be upper bounded as \begin{eqnarray*} &&\hspace{-0.4in}\sum^T_{t=1}\x_t^\top A^{-1}_t \x_t=r\sum^T_{t=1}(1-\frac{\det(A_{t-1})}{\det(A_t)})\nonumber\\ &&\hspace{-0.2in}\le- r\sum^T_{t=1}\log(\frac{\det(A_{t-1})}{\det(A_t)})=r\log(\det(A_T)). \end{eqnarray*} Combining the above two inequalities gives \begin{eqnarray}\label{eqn:temp-bound} &&\hspace{-0.3in}R_T \nonumber\le\frac{\w^\top A_T\w}{2\eta}\\ &&\hspace{-0.0in}+\frac{\eta}{2}r\log(\det(A_T))+\lambda X[\log(T)+1]. \end{eqnarray} Since $A_T=I+\sum^T_{t=1}\frac{\x_t\x_t^\top}{r}$, its eigenvalue $\mu_i$ satisfies \begin{eqnarray*}
\mu_i\le 1+ trace(\sum^T_{t=1}\frac{\x_t\x_t^\top}{r})=1+ \sum^T_{t=1}\frac{\|\x_t\|^2_2}{r}. \end{eqnarray*} As a result, we have \begin{eqnarray*} \det(A_T)=\prod^d_{i=1}\mu_i\le (1+\frac{X^2}{r}T)^d. \end{eqnarray*} Plugging the above inequality into~(\ref{eqn:temp-bound}) concludes this theorem. \end{proof} {\bf Remark:} According to this theorem, adopting the second order information for sparse online learning does further reduce the regret bound to an order of $O(\log(T))$.
\subsection{Diagonal Algorithm} Although the previous second order algorithm significantly reduces the regret bound compared with the first order algorithm, it consumes $O(d^2)$ time, which limits its application to real-world high-dimensional problems. To keep the computational time $O(d)$, similar to traditional online learning, we further explore its diagonal version, which only maintains a diagonal matrix. Its details are given in Algorithm~\ref{alg:diagonal}. \begin{algorithm}[htpb] \caption{Diagonal Second Order Sparse Online Learning}\label{alg:diagonal} \begin{algorithmic} \STATE\textbf{INPUT :$\lambda$, $\eta$} \STATE\textbf{INITIALIZATION :} $\theta_1=0$. \FOR{$t=1,\ldots, T$} \STATE receive $\x_t\in \R^d$; \STATE $A^{-1}_t=A^{-1}_{t-1}-\frac{A^{-1}_{t-1}\diag(\x_t\x_t^\top) A^{-1}_{t-1}}{r+\x^\top_t A^{-1}_{t-1}\x_t}$; \STATE $\u_t=A^{-1}_t \theta_t$;
\STATE $\w_t=\sign(\u_t)\odot[|\u_t|-\lambda_t]_+$; \STATE predict $\hy_t=\sign(\w_t^\top\x_t)$ and receive $y_t\in\{-1,1\}$; \STATE suffer $\ell_t(\w_t)=[1-y_t\w_t^\top\x_t]_+$; \STATE $\theta_{t+1}=\theta_t+ \eta L_t y_t \x_t$; \ENDFOR \end{algorithmic} \end{algorithm}
In the following experiments, we mainly adopt the diagonal second order sparse online learning algorithm unless otherwise specified, which is also denoted as ``SSOL".
\if 0 \begin{thm}
Let $(\x_1,y_1),\ldots,(\x_T,y_T)$ be a sequence of examples, where $\x_t\in \R^d$, $y_t\in\{-1,+1\}$ and $\|\x_t\|_1\le X$ for all $t$. If further set $\lambda_t=\lambda/t$, then the regret $R_T=\sum^T_{t=1}\ell_t(\w_t)-\min_{\w}\sum^T_{t=1}\ell_t(\w) $ suffered by the algorithm~(\ref{alg:diagonal}) is bounded as follows: \begin{eqnarray}
&&\hspace{-0.3in}R_T \nonumber\le\frac{\frac{1}{2}(\|\w\|^2_2+\frac{\sum^T_{t=1}(\w^\top\x_t)^2}{r})}{\eta}\\ &&\hspace{-0.0in}+\frac{\eta}{2}rd\log((1+\frac{X^2}{r}T))+\lambda X[\log(T)+1] \end{eqnarray} \end{thm} \fi
\subsection{Cost-Sensitive Algorithm} For the previous algorithms, the classifier is cost-insensitive, which suffers the same \emph{cost/loss} when the positive samples and the negative samples are misclassified. It is inappropriate for many data stream classification tasks in real-world applications, such as online anomaly detection, where the class distribution is often highly imbalanced. In this section, we propose a cost-sensitive sparse online classification algorithm by extending the sparse online learning framework for online anomaly detection tasks. Without loss of generality, we assume the positive class is the rare class in a set of streaming data, which contains more negative examples than positive examples. We will prefer a high \emph{cost/loss} value when a positive sample is misclassified, while a small \emph{cost/loss} value when a negative sample is misclassified.
Specifically, we respectively denote the number of positive samples and negative samples by $T_+$ and $T_-$; and $M_+, M_-$ are the numbers of false negatives and false positives, respectively. We denote $T = T_+ + T_-$ and $M = M_+ + M_-$. Instead of using the cost-insensitive metric $accuracy = \frac{T - M}{T}$, researchers have proposed a variety of cost-sensitive metrics. One well-known cost-sensitive metric is the weighted \emph{sum} of $sensitivity = \frac{T_+ - M_+}{T_+}$ and $specificity=\frac{T_- - M_-}{T_-}$, which is defined as follows: $$ sum = \mu_+ \frac{T_+ - M_+}{T_+} + \mu_- \frac{T_- - M_-}{T_-},$$ where $\mu_+ + \mu_- =1$ and $0 \leq \mu_+,\mu_- \leq 1$ are two parameters to trade off between sensitivity and specificity. In general, the higher the \emph{sum} value, the better the classification performance. Notably, when $\mu_+ = \mu_- = 0.5$, the corresponding sum is the well known balanced accuracy~\cite{Brodersen:2010:BAP:1904935.1905533}.
In general, the higher the \emph{sum} value, the better the classification performance. To maximize the sum value, based on the previous framework, we propose a cost-sensitive sparse online classification algorithm following the theoretical analysis in~\cite{DBLP:journals/tkde/WangZH14, Zhao:2013:COA:2487575.2487647}. In particular, we adopt a modified hinge loss function: $$ (\rho \I_{y_t=1} + \I_{y_t=-1}) [1 - y_t \w^\top \x_t]_{+},$$ where $\rho = \frac{\mu_+ T_-}{\mu_- T_+}$ and $\I_{v}$ is an indicator function such that $\I_{v}=1$ if $v$ is true, and $\I_{v}=0$ otherwise. In our experiment, we use the balanced accuracy as the metric and set $\mu_+ = \mu_- = 0.5$. Generally, it is difficult to predict the number of positive and negative samples $T_+$ and $T_-$ in advance. So a more realistic setting is to use two weight parameters $c_+$ and $c_-$ for the positive and negative losses, respectively. Hence, the loss function is reformulated as: $$(c_+ \I_{y_t=1} + c_- \I_{y_t=-1}) [1 - y_t \w^\top \x_t]_{+}.$$ Denoting $c_t = c_+ \I_{y_t=1} + c_- \I_{y_t=-1}$, the modified regret is derived as follows: $$R_T = \sum_t c_t \ell_t(\w_t) - \min_{\w}\sum_t c_t \ell_t(\w),$$ where $\ell_t(\w_t)=[1-y_t\w_t^\top\x_t]_+$.
Based on the proposed sparse online learning framework and the cost-sensitive loss function, we can achieve the cost-sensitive first order sparse online learning algorithm (CS-FSOL) shown in Algorithm~\ref{alg:cs-fsol}. \begin{algorithm}[htpb] \caption{Cost-Sensitive First Order Sparse Online Learning (CS-FSOL)}\label{alg:cs-fsol} \begin{algorithmic} \STATE\textbf{INPUT : $\lambda$, $\eta$, $c_{+1}$, $c_{-1}$ } \STATE\textbf{INITIALIZATION :} $\theta_1=0$, $A_0^{-1} = I$. \FOR{$t=1,\ldots, T$} \STATE receive $\x_t\in \R^d$;
\STATE $\w_t=\sign(\theta_t)\odot[|\theta_t|-\lambda_t]_+$; \STATE predict $\hy_t=\sign(\w_t^\top\x_t)$ and receive $y_t\in\{-1,1\}$; \STATE suffer $\ell_t(\w_t)=[1-y_t\w_t^\top\x_t]_+$; \STATE $\theta_{t+1}=\theta_t+ \eta c_{y_t} L_t y_t \x_t$; \ENDFOR \end{algorithmic} \end{algorithm}
For this algorithm, it is easy to observe that if we treat $\eta c_{y_t}$ as $\eta_t$, then it is the special case of the proposed framework~(\ref{alg:general_frame}) with $\Phi_t(\w)=\frac{1}{2}\|\w\|^2_2$. So, we would like to prove a new corollary for the proposed framework~(\ref{alg:general_frame}), under the situation that $\eta_t = \eta c_{y_t}$. This can be achieved by combining Lemma~\ref{lem:framework} with $\eta c_{y_t}[\ell_t(\w_t)-\ell_t(\w)]\le \eta_t (\w_t-\w)^\top\z_t$. Specifically, we have the following corollary:
\begin{cor}\label{cor:regret-csol} Under the assumptions of Lemma 1, if we further assume $\ell$ is convex and $\eta_t=\eta c_{y_t}$, then the regret $R_T=\sum^T_{t=1}c_{y_t}\ell_t(\w_t)-\min_{\w}\sum^T_{t=1}c_{y_t}\ell_t(\w) $ of the proposed framework~(\ref{alg:general_frame}) satisfies the following inequality \begin{eqnarray} &&\hspace{-0.4in}R_T\le\nonumber\\
&&\hspace{-0.4in}\frac{\Phi_T(\w)}{\eta}+\sum^T_{t=1}[\frac{\eta}{2\delta}\|c_{y_t}\z_t\|^2_{\Phi^*_t}+\lambda_t\|c_{y_t}\z_t\|_1]+\frac{\sum^T_{t=1}\Delta^*_t}{\eta}, \end{eqnarray} where $\Delta^*_t=\Phi^*_t(\theta_t)-\Phi^*_{t-1}(\theta_t)$. \end{cor}
Given the above corollary, we can prove the following theorem for Algorithm~\ref{alg:cs-fsol}.
\begin{thm}
Let $(\x_1,y_1),\ldots,(\x_T,y_T)$ be a sequence of examples, where $\x_t\in \R^d$, $y_t\in\{-1,+1\}$ and $\|\x_t\|_1\le X$ for all $t$. If further set $\lambda_t=\eta\lambda$, then the regret $R_T=\sum^T_{t=1}c_{y_t}\ell_t(\w_t)-\min_{\w}\sum^T_{t=1}c_{y_t}\ell_t(\w) $ suffered by the algorithm~(\ref{alg:cs-fsol}) is bounded as follows: \begin{eqnarray*}
R_T\le\frac{\frac{1}{2}\|\w\|^2_2}{\eta}+\frac{\eta}{2}\sum^T_{t=1}c_{y_t}X^2+\sum^T_{t=1}\eta\lambda c_{y_t} X, \end{eqnarray*} for any $\w\in\R^d$.
Further setting $\eta=\frac{\|\w\|_2}{\sqrt{ (X^2+2\lambda X)(T_+c_++T_-c_-)}}$, we could have \begin{eqnarray*} R_T\le D\sqrt{ (X^2+2\lambda X)(T_+c_++T_-c_-)}, \end{eqnarray*}
for any $\w\in\{\w\ |\|\w\|_2\le D\}$. \end{thm} We omit the proof, since it is easy.
In addition, we can also get the cost-sensitive second order sparse online classification (CS-SSOL) algorithm shown in Algorithm~\ref{alg:cs-ssol}. However, since its time complexity and space complexity are relatively high for high-dimensional datasets, we only use its diagonal variant in practice, where only a diagonal $A_t^{-1}$ is maintained and updated.
\begin{algorithm}[htpb] \caption{Cost-Sensitive Second Order Sparse Online Learning (CS-SSOL)}\label{alg:cs-ssol} \begin{algorithmic} \STATE\textbf{INPUT : $\lambda$, $\eta$, $c_{+1}$, $c_{-1}$ } \STATE\textbf{INITIALIZATION :} $\theta_1=0$, $A_0^{-1} = I$. \FOR{$t=1,\ldots, T$} \STATE receive $\x_t\in \R^d$; \STATE $A^{-1}_t=A^{-1}_{t-1}-\frac{A^{-1}_{t-1}\x_t\x_t^\top A^{-1}_{t-1}}{r+\x^\top_t A^{-1}_{t-1}\x_t}$; \STATE $\u_t=A^{-1}_t \theta_t$;
\STATE $\w_t=\sign(\u_t)\odot[|\u_t|-\lambda_t]_+$; \STATE predict $\hy_t=\sign(\w_t^\top\x_t)$ and receive $y_t\in\{-1,1\}$; \STATE suffer $\ell_t(\w_t)=[1-y_t\w_t^\top\x_t]_+$; \STATE $\theta_{t+1}=\theta_t + \eta c_{y_t} L_t y_t \x_t$; \ENDFOR \end{algorithmic} \end{algorithm}
It is easy to verify that this algorithm is the special case of the proposed framework~(\ref{alg:general_frame}), when $\eta_t=\eta c_{y_t}$ and $\Phi_t(\w)=\frac{1}{2}\w^\top A_t\w$. So, the corollary~\ref{cor:regret-csol} holds for this algorithm. Using this corollary, we can prove the following theorem for Algorithm~\ref{alg:cs-ssol}.
\begin{thm}
Let $(\x_1,y_1),\ldots,(\x_T,y_T)$ be a sequence of examples, where $\x_t\in \R^d$, $y_t\in\{-1,+1\}$ and $\|\x_t\|_1\le X$ for all $t$. If further set $\lambda_t=\lambda/t$, then the regret $R_T=\sum^T_{t=1}c_{y_t}\ell_t(\w_t)-\min_{\w}\sum^T_{t=1}c_{y_t}\ell_t(\w) $ suffered by the algorithm~(\ref{alg:Second Order}) is bounded as \begin{eqnarray} &&\hspace{-0.3in}R_T \nonumber\le\frac{D^2}{2\eta}+c_{max}\frac{\eta}{2}rd\log(1+\frac{X^2}{r}T)+c_{max}\lambda X[\log(T)+1], \end{eqnarray}
for any $\w\in\{\w\ |\w^\top A_T\w\le D^2\}$, where $c_{max}=\max(c_+,c_-)$. \end{thm} The proof of this theorem is omitted, since it is easy and mainly follows the one for Theorem~\ref{thm:ssol}.
\section{Experiments}
In this section, we conduct an extensive set of experiments to evaluate the performance of the proposed sparse online classification algorithms on both synthetic and real datasets.
\subsection{Experimental Setup}
In our experiments, we compare the proposed algorithms with a set of state-of-the-art algorithms, including the sparse online learning algorithms and the cost-sensitive online learning algorithms. The methodology details of these algorithms are listed in Table~\ref{tab:algs}. The three existing algorithms (CS-OGD, CPA and PAUM) are cost-sensitive online learning without sparsity regularizer.
\begin{table*}[htb] \caption{List of Compared Algorithms.}\label{tab:algs} \centering \begin{tabular}{llll} \toprule Algorithm & 1st/2nd Order & Sparsity & Description \\ \midrule STG & First Order & Truncate Gradient & Stochastic Gradient Descent~\cite{langford-2009-sparse} \\ FOBOS & First Order & Truncate Gradient & FOrward Backward Splitting~\cite{duchi-2009-sparse} \\ Ada-FOBOS & Second Order & Truncate Gradient & Adaptive regularized FOBOS~\cite{duchi2011adaptive} \\ Ada-RDA & Second Order & Dual Averaging & Adaptive regularized RDA~\cite{duchi2011adaptive} \\ FSOL & First Order & Dual Averaging & The proposed Algorithm~\ref{alg:First Order}\\ SSOL & Second Order & Dual Averaging & The proposed Algorithm~\ref{alg:diagonal} \\ \midrule CS-OGD & First Order & Non-Sparse & Cost-Sensitive Online Gradient Descent~\cite{DBLP:journals/tkde/WangZH14} \\ CPA & First Order & Non-Sparse & Cost-Sensitive Passive-Aggressive~\cite{DBLP:journals/jmlr/CrammerDKSS06} \\ PAUM & First Order & Non-Sparse & Cost-Sensitive Perceptron Algorithm with Uneven Margin~\cite{li2002perceptron} \\ CS-FSOL & First Order & Dual Averaging & The proposed Algorithm~\ref{alg:cs-fsol}\\ CS-SSOL & Second Order & Dual Averaging & The proposed Algorithm~\ref{alg:cs-ssol}\\ \bottomrule \end{tabular} \end{table*}
To examine the binary classification performance, besides the synthetic dataset, we evaluate all the previous algorithms on a number of benchmark datasets from web machine learning repositories. Table~\ref{tab:datasets} shows the details of all the datasets in our experiments. These datasets are selected to allow us to evaluate the algorithms on various characteristics of data, in which the number of training examples ranges from thousands to millions, feature dimensionality ranges from hundreds to about 16-million, and the total number of non-zero features on some datasets is more than one billion. For the very large-scale WEBSPAM dataset, we run the algorithms only once. The sparsity as shown in the last column of the table denotes the ratio of non-active feature dimensions, as some feature dimensions are never active in the training process, which is often the case for some real-world high-dimensional datasets, such as WEBSPAM.
\begin{table*}[htbp] \centering \caption{List of real-world datasets in our experiments.}\label{tab:datasets} \begin{tabular}{@{}llrrrrrl@{}} \toprule DataSet & Balance & \#Train & \#Test & \#Feature Dimension & \#Nonzero Features & Sparsity(\%) & $T_+ \setminus T_-$ \\ \midrule AUT & True & 40,000 & 22,581 & 20,707 & 1,969,407 & 3.07 & $1 \setminus 0.33$\\ PCMAC & True & 1,000 & 946 & 7,510 & 55,470 & 3.99 & $1 \setminus 1.00$\\ NEWS & True & 10,000 & 9,996 & 1,355,191 & 5,513,533 & 29.88 & $1 \setminus 1.50$\\ RCV1 & True & 781,265 & 23,149 & 47,152 & 59,155,144 & 8.80 & $1 \setminus 1.11$\\ URL & True & 2,000,000 & 396,130 & 3,231,961 & 231,249,028 & 7.44 & $1 \setminus 2.02$\\ WEBSPAM & True & 300,000 & 50,000 & 16,071,971 & 1,118,027,721 & 95.82 & $1 \setminus 0.64$\\ URL2 & False & 1,000,000 & 100,000 & 3,231,961 & 114,852,082 & 44.96 & $1 \setminus 99$\\ WEBSPAM2 & False & 100,000 & 10,000 & 16,071,971 & 224,201,808 & 96.19 & $1 \setminus 99$\\ \bottomrule \end{tabular} \end{table*}
We conduct experiments by following standard online learning settings for training a classifier, where an online learner receives a single training example at each iteration and updates the model sequentially. We will examine how different sparsity levels affect test error rate of the classifier trained from a single pass through the training data. Besides, we also measure time cost of different algorithms to evaluate the computational efficiency. To make a fair comparison, all the algorithms adopt the same experimental settings. We use hinge loss as the loss function for the applicable algorithms. To identify the best set of parameters, for each algorithm on each dataset, we conduct a 5-fold cross validation for grid searching the parameters with the fixed sparsity regularization parameter $\lambda=0$. In particular, the learning rates are searched from $2^{-1}$ to $2^9$ and the other parameters are searched from $2^{-5}$ to $2^5$. With the best tuned parameters, each algorithm is evaluated for $5$ times with a random permutation of a train set. All the experiments were conducted on a Linux server (with Intel Xeon CPU E5-2620 @2.00GHz, 4 CPU cores, 8GB memory) and the programming environment is based on C++ implementation compiled by g++.
\subsection{Experiment on Synthetic Dataset}
To evaluate if the proposed sparse online learning algorithm is able to identify effective features for learning the models, we design the first experiment on a synthetic dataset, which allows us to control the exact numbers of {\it effective/noisy} feature dimensions. In particular, we generate a synthetic dataset with high dimensionality and high sparsity by following the similar scheme in~\cite{DBLP:conf/nips/CrammerDP08, crammer2009adaptive}, which contains a set of \emph{effective} feature dimensions that are correlated with the class labels and a set of \emph{noisy} feature dimensions that are uncorrelated with the labels.
Specifically, we generate the synthetic dataset with $100,000$ training examples and $10,000$ test examples in $\mathbb{R}^{1000}$. For each example, the first $100$ dimensions are drawn from a multivariate Gaussian distribution with diagonal covariance. Each dimension of the mean vector is uniformly sampled from $-1$ to $1$, and each dimension of covariance is uniformly sampled from $0.5$ to $100$. We generate the split plane the same as the mean vector. To introduce noisy feature dimensions, we randomly choose $200$ noise dimensions out of the rest 900 dimensions for each example. Noises are drawn from a Gaussian distribution of $\mathcal{N}(0,100)$.
\begin{figure}
\caption{Test error rate of sparse online classification on synthetic dataset.}
\label{fig:synthetic_exp}
\end{figure}
We evaluate all the cost-insensitive sparse online classification algorithms on the synthetic dataset. Figure~\ref{fig:synthetic_exp} shows the test error rates of all the compared algorithms, where the right diagram is a sub-figure of the left one with sparsity from $80\%$ to $100\%$. Several observations can be drawn from the experimental results.
First of all, we observe that the test error rates of the \emph{truncate gradient} based algorithms (STG, FOBOS, Ada-FOBOS) decrease significantly when the sparsity level increases. By contrast, for the \emph{dual averaging} based algorithms (FSOL, Ada-RDA, SSOL), the test error rates keep stable or even decrease when the sparsity level increases; But the test error rate increases dramatically when the sparsity level is higher than $90\%$---the actual sparsity level used for generating the synthetic data. The result indicates that the dual averaging based algorithms more effectively exploit the sparsity in the dataset. Similar observation was also reported in~\cite{xiao2010dual} who argued that the dual averaging based methods take more aggressive truncations and thus can generate significantly more sparse solutions. Second, the proposed second-order algorithm SSOL achieves the lowest error rate among all the compared algorithms, especially for high sparsity level. This observation can be seen more clearly in the right diagram of Figure~\ref{fig:synthetic_exp}. The above encouraging experimental results indicate that the proposed SSOL algorithm can effectively exploit the sparsity for solving the sparse online classification tasks.
\subsection{Test Error Rate on Large Real Datasets} \label{sec:large_dataset} \begin{figure*}
\caption{Test error rate on $6$ large real datasets. (a)-(b) are two general datasets, (c)-(f) are four large-scale high-dimensional sparse datasets. The second and forth rows are the sub-figures of the first and the third rows with high sparsity level, respectively.}
\label{fig:err_rate}
\end{figure*}
In this experiment, we compare the proposed algorithms (FSOL and SSOL) with the other cost-insensitive algorithms on several real-world datasets. Table~\ref{tab:datasets} shows the details of six datasets, which can be roughly grouped into two major categories: the first two datasets (AUT and PCMAC) are general binary small-scale datasets and the corresponding experimental results are shown in Figure~\ref{fig:err_rate} (a)-(b); and the remaining four datasets (NEWS, RCV1, URL, and WEBSPAM) are large-scale high-dimensional sparse datasets and the corresponding experimental results are shown in Figure~\ref{fig:err_rate} (c)-(f). We can draw several observations from these results as follows.
First of all, we observe that most algorithms can learn an effective sparse classification model with only marginal or even no loss of accuracy. For example, in Figure~\ref{fig:err_rate} (d), the performances of all the algorithms are almost stable when the sparsity level is smaller than $80\%$. It indicates that all the compared sparse online classification algorithms can effectively exploit the low-level sparsity information.
Second, for most cases, we observe that there exists some sparsity threshold for each algorithm, below which test error rate does not change much; but when sparsity level is greater than the threshold, test error rate gets worse quickly.
Third, we observe that the dual averaging based second order algorithms (Ada-RDA and SSOL) consistently outperform the other algorithms (STG, FOBOS, FSOL, and Ada-FOBOS), especially for high sparsity level. This indicates that the dual averaging technique and second order updating rules are effective to boost the classification performance.
Finally, when the sparsity is high, an essential requirement for high-dimensional data stream classification tasks, the proposed SSOL algorithm consistently outperforms the other algorithms over all the evaluated datasets. For example, when the sparsity is about $99.8\%$ for the WEBSPAM dataset (the total feature dimensionality is $16,609,143$), the test error rate of SSOL is about $0.3\%$, while the Ada-RDA is $0.4\%$ and the Ada-FOBOS is $0.55\%$, as shown in Figure~\ref{fig:err_rate} (f).
\subsection{Running Time on Large Real Datasets}
We also examine time costs of different sparse online classification algorithms, and the experiment results are shown in Figure~\ref{fig:running_time}. In this experiment, we only adopt the four high-dimensional large-scale dataset. Several observations can be drawn from the results.
First of all, we observe that when the sparsity level is low, the time costs are generally stable; on the other hand, when the sparsity level is high, the time cost of the second order algorithms sometimes increases somewhat. For example, see the time costs of Ada-FOBOS, Ada-RDA and FSOL in Figure~\ref{fig:running_time} (b) \& (d). One possible reason may be that when the sparsity level is high, the model might not be informative enough for prediction and thus may suffer significantly more updates. Since second-order algorithms are more complicated than first-order algorithms, they are more sensitive to the increasing number of updates.
Second, we can see that the proposed SSOL algorithm runs more efficiently than the other second-order algorithms (Ada-RDA and Ada-FOBOS). It is even sometimes better than the first order algorithms (e.g., FOBOS and STG). However, the first order FSOL algorithm is consistently faster than the second order SSOL algorithm.
In summary, from the above analysis, we found that the proposed SSOL algorithm is able to achieve the comparable or even better accuracy of existing second-order algorithms, but has the comparably small time cost as state-of-the-art first-order algorithms with truncated gradient methods.
\begin{figure*}
\caption{Time cost on four large-scale datasets: NEWS, RCV1, URL, and WEBSPAM}
\label{fig:running_time}
\end{figure*}
\subsection{Applications on Online Anomaly Detection} Our last two experiments are to explore the proposed sparse online classification technique with application to an online anomaly detection task, i.e., malicious URL detection and web spam detection, where the class distribution is imbalanced in real-world scenarios.
\subsubsection{Malicious URL Detection}\label{sec:url} In this experiment, we evaluate the cost-sensitive based online learning algorithms for the malicious URL detection task with the benchmark dataset that can be downloaded from~\footnote{\url{http://sysnet.ucsd.edu/projects/url/}}. The original URL data set was created on purpose to be somewhat class-balanced, and it has already been used in some previous studies.
In this experiment, we create a subset (denoted as ``URL2'') by sampling from the original data set to make it close to a more realistic distribution scenario where the number of normal URLs is significantly larger than the number of malicious URLs. Following the experiment setting in~\cite{Zhao:2013:COA:2487575.2487647}, we choose $10,000$ positive (malicious) instances and $990,000$ negative (normal) instances. Hence, the ratio $T_+ \setminus T_- = 1 \setminus 99$. For the test dataset, we collect $100,000$ samples from the original test set with the same ratio. More details of the unbalanced URL dataset are shown in Table~\ref{tab:datasets}. \begin{figure}
\caption{Balanced accuracy of different algorithms for malicious URL detection.}
\label{fig:result_url}
\end{figure}
We compare the proposed CS-FSOL and CS-SSOL with three other cost-sensitive algorithms (CS-OGD, CPA, and PAUM), as shown in Table~\ref{tab:algs}. In addition, we compare all the cost-insensitive algorithms to evaluate the classification accuracy without adopting the cost-sensitive loss function. The experiment results are shown in Figure~\ref{fig:result_url}, where CS-OGD, CPA, and PAUM are non-sparse online learning algorithms and thus are invariant to the sparsity.
Several observations can be drawn from the results. First of all, all the cost-sensitive algorithms perform consistently better than their cost-insensitive versions. This indicates that the proposed cost-sensitive algorithm with cost-sensitive loss functions is able to effectively resolve the class-imbalance problem. Second, among all the cost-insensitive algorithms, the second order online learning algorithms are generally better than the first order algorithms. Third, among all the compared algorithms, the proposed CS-SSOL algorithm achieves the best performance, which again validates the efficacy of the proposed technique for real-world data stream classification applications.
\subsubsection{Web Spam Detection} \begin{figure}
\caption{Balanced accuracy of different algorithms for web spam detection.}
\label{fig:result_web}
\end{figure}
In this experiment, we evaluate the proposed cost-sensitive based online learning algorithms for web spam detection task. We constructed an unbalanced subset of the original web spam dataset used in Section~\ref{sec:large_dataset}. In particular, for the train dataset, we randomly choose $1,000$ positive instances and $99,000$ negative instances. Hence, the ratio $T_+ \setminus T_-$ of the training set is $1 \setminus 99$. For test dataset, we collect $10,000$ samples from the original test set with the same positive-negative ratio.
We denote the imbalanced web spam dataset as ``WEBSPAM2''. More details of the unbalanced web spam dataset are shown in Table~\ref{tab:datasets}. As we can see, the feature dimension of the WEBSPAM2 dataset ($16,071,971$) is much higher than the one of URL2 ($3,231,961$), and feature representations of the WEBSPAM2 dataset are extremely sparse (96.19\% versus 44.96\%). Hence, the anomaly detection task on the WEBSPAM2 dataset is very challenging with high-dimensional sparse features and unbalanced data distributions. The experiment settings in this section are the same as in Section~\ref{sec:url}, where all cost-sensitive and cost-insensitive algorithms are compared. The experiment results are shown in Figure~\ref{fig:result_web}.
Several observations can be drawn from the results. First of all, for this sparse classification problem, the performances of non-sparse cost-sensitive algorithms decrease significantly. In particular, the cost-insensitive algorithms SSOL and Ada-RDA outperform the cost-sensitive algorithms CPA and PAUM. Second, similar to the previous experiment, the second order online learning algorithms are generally better than the first order algorithms among all the cost-insensitive / cost-sensitive algorithms. Third, the proposed CS-SSOL algorithm consistently achieves the best performance, which again validates the efficacy of the proposed technique for real-world data stream classification applications.
\section{Conclusions and Future work} In this paper we introduced a framework of sparse online classification (SOC) for large-scale high-dimensional data stream classification tasks. We first showed that the framework essentially includes an existing first-order sparse online classification algorithm as a special case, and can be further extended to derive new sparse online classification algorithms by exploiting second-order information. We also extend the proposed technique to solve cost-sensitive data stream classification problems and explore its applications to online anomaly detection tasks: \emph{malicious URL detection} and \emph{web spam detection}. We analyzed the performance of the proposed algorithms with both theoretical analysis and empirical studies, in which our encouraging experimental results showed that the proposed algorithms are able to achieve the state-of-the-art performance in comparison to a large family of diverse online learning algorithms.
\end{document} |
\begin{document}
\title{Robust Macroscopic Quantum Measurements in the presence of limited control and knowledge}
\author{Marc-Olivier Renou} \affiliation{Groupe de Physique Appliqu\'ee, Universit\'e de Gen\`eve, CH-1211 Gen\`eve, Switzerland} \author{Nicolas Gisin} \affiliation{Groupe de Physique Appliqu\'ee, Universit\'e de Gen\`eve, CH-1211 Gen\`eve, Switzerland} \author{Florian Fr\"owis } \affiliation{Groupe de Physique Appliqu\'ee, Universit\'e de Gen\`eve, CH-1211 Gen\`eve, Switzerland}
\date{\today}
\begin{abstract} Quantum measurements have intrinsic properties which seem incompatible with our everyday-life macroscopic measurements. Macroscopic Quantum Measurement (MQM) is a concept that aims at bridging the gap between well understood microscopic quantum measurements and macroscopic classical measurements. In this paper, we focus on the task of the polarization direction estimation of a system of $N$ spin-$1/2$ particles and investigate the model some of us proposed in Barnea et al., 2017. This model is based on a von Neumann pointer measurement, where each spin component of the system is coupled to one of the three spatial components of a pointer. It shows traits of a classical measurement for an intermediate coupling strength. We investigate relaxations of the assumptions on the initial knowledge about the state and on the control over the MQM. We show that the model is robust with regard to these relaxations. It performs well for thermal states and a lack of knowledge about the size of the system. Furthermore, a lack of control on the MQM can be compensated by repeated ``ultra-weak'' measurements. \end{abstract}
\maketitle
\section{Introduction}
In our macroscopic world, we constantly measure our environment. For instance, to find north with a compass, we perform a direction measurement by looking at the pointer. Yet, finding a quantum model for this kind of macroscopic measurement faces several problems. Many characteristics of quantum measurements seem to be incompatible with our intuitive notion of macroscopic measurements. For example, to perfectly measure two noncommuting observables is impossible in quantum mechanics and any informative measurement has a nonvanishing invasiveness. Thus, if it exists, such a model can not be of the standard projective kind. Although we have a good intuition of what such a measurement is, the natural characteristics it should satisfy are not obvious. Even if these characteristics can be rigorously formulated, it is not clear whether there exists a quantum model that satisfies them all.
For concreteness, quantum models for macroscopic measurements can be considered as a parameter estimation task. In this paper, we focus on the estimation of the direction of polarization of $N$ qubits, oriented in a direction which is uniformly chosen at random. The question of the optimal way to estimate $N$ qubit polarization is already well studied \cite{Massar95, Gisin99} and can be seen as part of a larger class of covariant estimation problems \cite{Chiribella05}. It is linked to covariant cloning \cite{Scarani05} and purification of states \cite{Cirac99}. In the limit of macroscopic systems, those optimal measurements are arbitrarily precise and potentially with low disturbance of the system \cite{Bagan05, Bagan06}. A tradeoff between the quality of the guess and the disturbance of the state has been demonstrated \cite{Sacchi07}, as well as an improvement of the guess when abstention is allowed \cite{Gendra13}. However, these optimal measurements may not be satisfying models of our everyday-life macroscopic measurements as it is not clear how these optimal measurements could be physically implemented in a natural way. A first attempt to solve this issue has been to look for a reduction of the optimal POVM, which is continuous, into a POVM with a finite (and small) number of elements \cite{Latorre98, Chiribella07}. However, even if this reduction exists, the resulting POVM is difficult to interpret physically and to the best of our knowledge no family of reduced POVMs for every $N$ exists.
In \cite{Barnea17}, we argue that a good model of a macroscopic measurement should be highly noninvasive, collect a large amount of information in a single shot and be described by a ``fairly simple'' coupling between system and observer. Measurements which fulfill these requirements are called ``Macroscopic Quantum Measurements'' (MQM). Noninvasiveness seems to be difficult to satisfy with a quantum model. Indeed, the disturbance induced on the state by a measurement is generic in quantum mechanics. This has no counterpart in classical physics, where any measurement can ideally be done without disturbance of the system. However, it is now well known that this issue can be solved by accepting quantum measurements of finite accuracy. In \cite{Poulin05}, Poulin shows the existence of a trade-off between state disturbance and measurement resolution as a function of the size of the ensemble. One macroscopic observable can behave ``classically'', provided we measure it with sufficiently low resolution. Yet, the question is still open for several non commuting observables. Quantum physics allows precise measurements of only one observable among two non commuting ones.
In this paper, we study the behavior of an MQM model for the measurement of the polarization of a large ensemble of $N$ parallel spin $1/2$ particles, which implies the measurement of the noncommuting spin operators. In this model, the measured system is first coupled to a measurement apparatus through an intuitive Hamiltonian already introduced in \cite{Dariano02}. Then, the apparatus is measured. We extend our previous study to more general cases. In \cite{Barnea17}, it was shown that this model allows good direction estimation and low disturbance for systems of $N$ parallel spin $1/2$ particles. This system can be interpreted as the ground state of a product Hamiltonian. Here, we generalize the scenario to thermal states. We also study a different measurement procedure based on repeated weak measurements.
The paper is structured as follows: We first present a simplified technical framework which describes the measurement of a random direction for a given quantum state and observable. Considering an input state and an observable independent of the particle number and with no preferred direction, we show that the problem reduces to many sub-problems which correspond to systems of fixed total spin $j$. Then, we quantitatively treat the case of the thermal state, which generalizes the $N$ parallel spin 1/2 particle for non-zero temperature, showing that the discussed MQM is still close to the optimal measurement. In the proposed MQM, the precision of the estimated direction highly depends on the optimized coupling strength of the model. In section 4, we follow the ideas of \cite{Poulin05} and we show that one may relax this requirement by doing repeated "ultra-weak" measurements and a naive guess. We conclude and summarize in the last section.
\section{Estimation of a direction}
In this paper, we aim to study the behavior of a specific MQM model for a direction estimation task, e.g. the estimation of the direction of a magnet or a collection of spins. Hence, we first introduce an explicit (and specific) direction estimation problem, which is presented as a game. It concerns the direction estimation of a qubit ensemble. In the following, $S_{\vec{u}}=\vec{S}\cdot\vec{u}$ represents the spin operator projected in direction $\vec{u}$, i.e. the elementary generator of rotations around $\vec{u}$. For a given state $\rho_{\vec{u}}$ of $N=2J$ qubits, we say that $\rho_{\vec{u}}$ points in the direction $\vec{u}$ if it is positively polarized in the $\vec{u}$ direction, i.e. if $[\rho_{\vec{u}},S_{\vec{u}}]=0$ and $\Tr{\rho_{\vec{u}}S_{\vec{u}}}>0$. We consider the problem of polarization direction estimation from states which are all the same, but point in a direction which is chosen uniformly at random. This problem has already been widely studied \cite{Massar95}, \cite{Gisin99}, \cite{Chiribella05}, \cite{Bagan05}, \cite{Holevobook82} . We give here a unified framework adapted to our task.
\subsection{General framework}\label{generalframework}
We consider a game with a referee, Alice, and a player, Bob.
Alice and Bob agree on some initial state $\rho_z$. In each round of the game, Alice chooses a direction $\vec{u}$ from a uniform distribution on the unit sphere. She rotates $\rho_z$ to $\rho_{\vec{u}}=\mathcal{R}_{\vec{u}}^\dagger \rho_z \mathcal{R}_{\vec{u}}$, where $\mathcal{R}_{\vec{u}}$ is a rotation operator which maps $\vec{z}$ to $\vec{u}$. She sends $\rho_{\vec{u}}$ to Bob, who measures it with some given measurement device characterized by a Positive Operator Valued Measure (POVM) $\Omega_r$. He obtains a result $r$ with probability $p(r|\vec{u})=\Tr{\Omega_r \rho_{\vec{u}}}$, from which he deduces $\vec{v}_r$, his guess for $\vec{u}$. Bob's score is computed according to some predefined score function $g(\vec{u},\vec{v}_r)=\vec{u}\cdot\vec{v}_r$. Given his measurement result, Bob's goal is to find the optimal estimate, i.e. the one which optimizes his mean score \footnote{Often the considered score is $F=\int \dd r\int \dd\vec{u}~ p(r|\vec{u})f(\vec{u},\vec{v}_r)$, where $f(\vec{u},\vec{v}_r)=|\braket{\vec{u}}{\vec{v}_r}|^2$ can be seen as the fidelity between qubits $\ket{\vec{u}}$ and $\ket{\vec{v}_r}$, where a unit vector is associated to the corresponding qubit via the Bloch sphere identification. As $F=\frac{1}{2}(1+G)$, this is equivalent. We chose this formulation for practical reason.} \begin{equation}\label{score}
G=\int \dd r\int \dd\vec{u}~ p(r|\vec{u})g(\vec{u},\vec{v}_r) \end{equation}
For simplicity, we consider an equivalent but simplified POVM. In our description, Bob measures the system, obtains results $r$ and then post-processes this information to find his guess $\vec{v}_r$. We now regroup all POVM elements corresponding to the same guess and label them by the guessed direction. Formally, we go from $\Omega_r$ to $O_{\vec{v}}=\int \dd r \Omega_r \delta(\vec{v}_r-\vec{v})$.
Some assumptions are made about $\rho_z$ and $O_{\vec{v}}$. We suppose that $\rho_z$ points in the $z$ direction. Moreover, we assume that $\rho_z$ is symmetric under exchange of particles, which implies $[\rho_z,S^2]=0$. Let $\ket{\alpha,j,m}$ be the basis in which $S_z$ and $S^2$ are diagonal (where $j\in\{J,J-1,...\}$ is the total spin, $\alpha$ the multiplicity due to particle exchange and $m$ the spin along $z$). Then $\rho_z$ is diagonal in this basis, with coefficients independent of $\alpha$, denoted as $c_m^j = \sandwich{\alpha,j,m}{\rho_z}{\alpha,j,m}$.
We also suppose that the measurement device does not favor any direction and treats each particle equally. Mathematically, it means that $O_{\vec{v}}$ is covariant with respect to particle exchange and rotations. Then, any POVM element is generated from one kernel $O_z$ and the rotations $\mathcal{R}_{\vec{v}}$: $O_{\vec{v}} = \mathcal{R}_{\vec{v}}^\dagger O_z \mathcal{R}_{\vec{v}}$ (for more technical details, see \cite{Holevobook82}). With this, Eq.~(\ref{score}) simplifies to:
\begin{equation}\label{score2}
G=\int \dd \vec{v}\int \dd\vec{u}~ p(\vec{v}|\vec{u})g(\vec{u},\vec{v}), \end{equation}
\subsection{Score for given input state and measurement}
The following Lemma is already implicitly proven in \cite{Holevobook82}.
\begin{llemma}\label{LlemmaG} Bob's mean score is: \begin{equation}\label{LemmaG} G=\sum_{j}\frac{j A_j\Tr{\rho_z^{j}}}{j+1}\Tr{\frac{S_z}{j}\tilde{\rho}_z^{j}}\Tr{\frac{S_z}{j} \frac{O_z^{j}}{2j+1}} \end{equation}
where $A_j={2J \choose J-j}-{2J \choose J-j-1}$ is the degeneracy of the multiplicity $\alpha$ in a subspace of given $(j, m)$, $O_z^{j}$ is the projection of $O_z$ over all subspaces of fixed $(\alpha, j)$, $\rho_z^{j}$ is the projection of $\rho_z$ over all subspaces of fixed $(\alpha, j)$ and $\tilde{\rho}_z^{j}=\frac{\rho_z^{j}}{\Tr{\rho_z^{j}}}$. \end{llemma}
Lemma~\ref{LlemmaG} says that Bob cannot use any coherence between subspaces associated to different $(\alpha,j)$ to increase his score. In other words, the score Bob achieves is the weighted sum (where the weights are $\Tr{\rho_z^{\alpha,j}}$) of the scores $G^j$ Bob would achieve by playing with the states $\tilde{\rho}_z^{j}$. This property is a consequence of the assumption that no direction or particle is preferred by Bob's measurement or in the set of initial states. For self consistency, we prove this Lemma.
\begin{proof} Bob's mean score is: \begin{equation}\label{score3}
G=\int \dd r\int \dd\vec{u}~ p(\vec{v}|\vec{u})g(\vec{u},\vec{v})=\int \dd v~ \Tr{O_{\vec{v}}\Gamma_{\vec{v}}}, \end{equation} where $\Gamma_{\vec{v}}=\vec{v}\cdot\int \dd\vec{u}~\rho_{\vec{u}}~\vec{u}$. As $\rho_{\vec{u}}$ is the rotated $\rho_z$ and $O_{\vec{v}}$ is covariant, we have: \begin{equation} G=\Tr{O_z \Gamma_z}. \end{equation}
Let $P_{\alpha,j}=\sum_m\ketbra{\alpha,j,m}{\alpha,j,m}$ be projectors, $\Gamma_{z}^{\alpha,j}=P_{\alpha,j}\Gamma_{z}P_{\alpha,j}$ and $O_{z}^{\alpha,j}=P_{\alpha,j}O_{z}P_{\alpha,j}$. Here, as $\rho$ and $O_z$ do not depend on the particle number, $\alpha$ is only a degeneracy.
As $\Gamma_z$ is invariant under rotation around $z$ and commutes with $S^2$, we have $\Gamma_z=\sum_{\alpha,j}\Gamma_z^{\alpha,j}$. Then $G=\sum_{\alpha,j}\Tr{O_{z}^{\alpha,j}\Gamma_{z}^{\alpha,j}}=\sum_{j} A_j \Tr{O_{z}^{j}\Gamma_{z}^{j}}$ where $O_{z}^{j},\Gamma_{z}^{j}$ are respectively the projections of $O_z$, $\Gamma_z$ over any spin coherent subspace of fixed $\alpha, j$. Let $G^j=\Tr{O_{z}^{j}\Gamma_{z}^{j}}$.
$\Gamma_{z}^{j}=\sum_m c_m^j \int \dd\vec{u}~u_z \mathcal{R}_{\vec{u}}^\dagger \ketbra{\alpha,j,m}{\alpha,j,m} \mathcal{R}_{\vec{u}}$ is symmetric under rotations around $z$. Then, it is diagonal in the basis $\ket{\alpha,j,m}$ with fixed $j,\alpha$. As $\bra{\alpha,j,\mu}\int \dd\vec{u}~u_z \mathcal{R}_{\vec{u}}^\dagger \ketbra{\alpha,j,m}{\alpha,j,m} \mathcal{R}_{\vec{u}}\ket{\alpha,j,\mu}=\frac{m\mu}{j(j+1)(2j+1)}=\frac{m}{j(j+1)(2j+1)}\bra{\alpha,j,\mu}S_z^{\alpha,j}\ket{\alpha,j,\mu} $, we have: \begin{equation} \Gamma_{z}^{j}=\sum_{m} c_m^j \frac{m}{j(j+1)(2j+1)}S_z^{\alpha,j} \end{equation} and: \begin{equation} G^{j}=\frac{1}{j(j+1)(2j+1)}\Tr{S_z\rho_z^{j}}\Tr{S_z O_z^{j}}. \end{equation} \end{proof}
\subsection{State independent optimal measurement, optimal state for direction estimation}\label{State_indep_opt_measurement}
Given the state $\rho_z$, the measurement which optimizes Bob's score is the set of $\left\lbrace \Theta_{\vec{v}}^{\alpha,j} \right\rbrace$ such that $\Tr{S_z \Theta_z^{\alpha,j}}$ is maximal. The maximum is obtained when $\Theta_z^{\alpha,j}$ is proportional to a projector on the eigenspace of $S_z$ with the maximal eigenvalue, that is, for $\Theta_z^{\alpha,j}=(2j+1)\ketbra{\alpha,j,\pm j}{\alpha,j,\pm j}$. Here the sign depends on the sign of $\Tr{S_z\rho_z^{j}}$. In the following, we restrict ourselves to the case where the $\Tr{S_z\rho_z^{j}}$ are all positive (this is the case for the thermal state, considered below). Then: \begin{equation}\label{Gopt} G_{\mathrm{opt}}=\sum_{j}\frac{jA_j\Tr{\rho_z^{j}}}{j+1}\Tr{\frac{S_z}{j}\tilde{\rho}_z^{j}}. \end{equation} For $\rho_z=\ketbra{J,J}{J,J}$, the thermal state of temperature $T=0$, we find $G_{\mathrm{opt},T=0}=\frac{J}{J+1}$. Equivalently we recover the optimal fidelity $F_{\mathrm{opt},T=0}=\frac{1}{2}(1+G_{\mathrm{opt},T=0})=\frac{N+1}{N+2}$, already found in \cite{Massar95}. Asymptotically, we have $G_{\mathrm{opt},T=0}=1-1/J+O(1/J^2)$. This induces a natural characterization of the optimality of an estimation procedure. Writing $G_{T=0}$ as $G_{T=0}=1-\epsilon_J/J$ where $\epsilon_J= J(1-G_{T=0})\geq 1$, we say that the procedure is asymptotically optimal if $\epsilon_J=1+O(1/J)$ and almost optimal if $\epsilon_J-1$ is asymptotically not far from 0.
\subsection{Optimality of a state and a measurement for direction guessing}\label{Optimality}
Given the input state $\rho_z$, we can now compare the performances of a given measurement to the optimal measurement. From Eq.~(\ref{LemmaG}) and Eq.~(\ref{Gopt}), we have, for an arbitrary measurement:
\begin{equation}\label{DeltaG} \Delta G\equiv G_{\text{opt}}-G=\sum_{j}\frac{j A_j \Tr{\rho_z^{j}}}{j+1}\Tr{\frac{S_z}{j}\tilde{\rho}_z^{j}}\Tr{\frac{S_z}{j} \frac{\Theta_z^j-O_z^{j}}{2j+1}}. \end{equation}
For every $j$, the three terms of the product are positive. Then, qualitatively, the measurement is nearly optimal if for each $j$, the product of the three is small. We give here the interpretation of each of these terms: \begin{itemize} \item $A_j$ is the degeneracy under permutation of particles (labeled by $\alpha$) and $\Tr{\rho_z^{j}}$ the weight of $\rho_z$ over a subspace $j,\alpha$. Hence the first term, bounded by $j/(j+1)$, only contains the total weight of $\rho_z$ over a fixed total spin $j$. Hence, it is small whenever $\rho$ has little weight in the subspace $j$.
\item $\Tr{\frac{S_z}{j}\tilde{\rho}_z^{j}}$ is small whenever the component of $\rho_z$ on the subspace of total spin $j$, $\rho_z^{j}=P_z\rho_z P_z$, is small or not well polarized. It is bounded by 1. When $\rho_z^{j}$ is not well polarized, the optimality of the measurement in that subspace makes little difference. Then, this second term characterizes the quality of the component $\rho_z^{j}$ for the guess of the direction.
\item The last term is small when $O_z^{j}$ is nearly optimal and is also bounded by 1. More exactly, as $O_z^{j}$ is a covariant POVM, we have $\Tr{O_z^{j}}=2j+1$ and all diagonal coefficients are positive. Because of $S_z/j$, $O_z^{j}$ is (nearly) optimal when it projects (mainly) onto the subspace of $S_z$ with the highest eigenvalue. POVMs containing other projections are sub-optimal. This effect is amplified by the operator $S_z$: the further away these extra projections $\propto\ketbra{j,m}{j,m}$ are from the optimal projector $\propto\ketbra{j,j}{j,j}$ (in the sense of $j-m$), the stronger the sub-optimality is. Then, the last term corresponds to the optimality of the measurement component $O_z^{j}$ for the guess of the direction. \end{itemize} Interestingly, we see here that the state and measurement "decouple": The optimal measurement is independent of the considered state. However, if the measurement is not optimal only for subspaces where $\rho_z$ has low weight or is not strongly polarized, it will still result in a good mean score.
\subsection{Estimation from a thermal state}
We now consider the case where the game is played with a thermal state (with temperature $T=1/\beta$) of $N=2J$ spins: \begin{equation} \rho_z=\frac{1}{Z}\left(e^{-\beta \sigma_z/2}\right)^{\otimes N}=\frac{1}{Z}\sum_{\alpha,j,m}e^{-\beta m}\ketbra{\alpha,j,m}{\alpha,j,m}, \end{equation} where $Z=\left(2 \text{cosh}(\beta/2)\right)^N$ is the partition sum. $\rho_z$ is clearly invariant under rotations around $z$ and symmetric under particle exchange. For later purpose, we define $f_j(\beta)=Z \Tr{\frac{S_z}{j}\rho_z^{\alpha,j}}=\Big[(1+j)\sh{j\beta}-j\sh{(1+j)\beta}\Big]/\Big(2j\sh{\beta/2}^2\Big)$.
Eq.~(\ref{LemmaG}) now reads \begin{equation}\label{GT=0} G_{T=0}=\frac{J}{J+1}\Tr{\frac{S_z}{J}\frac{O_z}{2J+1}}, \end{equation} and for any temperature $\beta$: \begin{equation}
G=\frac{1}{Z}\sum_{j}A_j~f_j(\beta)~G^j_{T=0}, \end{equation} with the optimal measurement, $G_{\mathrm{opt},T}=\frac{1}{Z}\sum_j\frac{jA_j}{j+1}f_j(\beta)$. Note that for low temperatures, this expression can be approximated with $\mean{J}_\beta/(\mean{J}_\beta+1)$, where $\mean{J}_\beta$ is the mean value of the total spin operator for a thermal state.
\section{A Macroscopic Quantum Measurement (MQM)}
\subsection{The model} In the following, we consider a model already introduced in \cite{Dariano02, Barnea17} for polarization estimation. It is adapted from the Arthur Kelly model, which is designed to simultaneously measure momentum and position \cite{Arthurs65, Pal11, Levine89}. The model is expressed in the von Neumann measurement formalism \cite{Neumannbook55, Buschbook91, Peresbook02}. The measurement device consists of a quantum object -the pointer- which is first initialized in a well-known state and coupled to the system to be measured. At last, the pointer is measured in a projective way. The result of the measurement provides information about the state of the system. Tuning the initial state of the pointer and the strength of interaction, one can model a large range of measurements on the system, from projective measurements which are partially informative but destroy the state to weak measurements which acquire little information but do not perturb the state much.
More specifically, to measure the direction of $\rho_{\vec{u}}$, we use a pointer with three spatial degrees of freedom: \begin{equation} \ket{\phi}=\frac{1}{(2\pi\Delta^2)^{3/4}}\int \dd x\dd y\dd z e^{-\frac{x^2+y^2+z^2}{4\Delta^2}}\ket{x}\ket{y}\ket{z}, \end{equation} where $x,y,z$ are the coordinates of the pointer. The parameter $\Delta$ in $\ket{\phi}$ represents the width of the pointer: A small $\Delta$ corresponds to a narrow pointer and implies a strong measurement, while a large $\Delta$ gives a large pointer and a weak measurement. The interaction Hamiltonian reads: \begin{equation} H_{\text{int}}=\vec{S}\cdot\vec{p}\equiv p_x\otimes S_x + p_y\otimes S_y + p_z\otimes S_z, \end{equation} where $p_x,p_y,p_z$ are the conjugate variables of $x,y,z$. A longer interaction time or stronger coupling can always be renormalized by adjusting $\Delta$. Hence, we take the two equal to 1. Finally, a position measurement with outcome $\vec{r}$ is performed on the pointer. The POVM elements associated to this measurement are $O_{\vec{r}}=E_{\vec{r}}E_{\vec{r}}^\dagger$, where the Kraus operator $E_{\vec{r}}$ reads: \begin{equation}\label{defKrauss} E_{\vec{r}}\propto\int \dd \vec{p}e^{i\vec{r}\cdot\vec{p}}e^{-\Delta^2p^2} e^{-i\vec{p}\cdot\vec{S}} \end{equation} The POVM associated to this model is already covariant. Indeed, the index of each POVM element is the direction of guess \footnote{To exactly obtain the form given in Sec.~\ref{generalframework}, one has to introduce $O_{\vec{v}}=\int_{0}^{\infty}r^2 O_{\vec{r}}\dd r$, which is equivalent to identifying each vector with its direction.}
and any $O_{\vec{r}}$ is a rotation of $O_{z}$: $O_{\vec{r}}= \mathcal{R}_{\vec{r}}^\dagger O_{z} \mathcal{R}_{\vec{r}}$.
\subsection{Behavior for zero temperature states}\label{behavior_T0}
At zero temperature, it is already known that the score obtained for a game where Bob does the MQM remains close to the optimal one. In our previous study \cite{Barnea17}, we demonstrated a counter-intuitive behavior of the quality of the guess: a weaker coupling strength can achieve better results than a strong coupling, see Fig.~\ref{epsN_LargeJ_strange_behavior}(a). In particular, we show that for well chosen finite coupling strength, the score of the guess is almost optimal. The optimal value of the coupling is $\Delta=\sqrt{J/4}$: It scales with the square root of the number of particles. \begin{figure}\label{epsN_LargeJ_strange_behavior}
\end{figure}
Additional calculations confirm this first conclusion (see Fig.~\ref{epsN_LargeJ_strange_behavior}(b)). Exploiting the conclusion of the discussion of Sec.~\ref{Optimality}, we only considered the first diagonal coefficient of $O_z$, $o_J=\sandwich{J,J}{O_z}{J,J}$, to lower bound the performance of the POVM
\footnote{This method is equivalent to the one previously used in \cite{Barnea17}. There, $\bra{z}^{\otimes N}\Omega_{\vec{r}}\ket{z}^{\otimes N}$ is lower bounded by $|\bra{z}^{\otimes N}E_{\vec{r}}\ket{\vec{r}}^{\otimes N}|^2$, but as the Kraus operator is diagonal, this last term is nothing else than $o_J|\bra{z}^{\otimes N}\ket{\vec{r}}^{\otimes N}|^2$.}. Numerical simulations suggest that for a coupling strength $\Delta=\sqrt{J/4}$, only considering the bound over $o_J$, $G_{T=0}$ develops as $G_{T=0}=1-\epsilon_J/J$ with $\epsilon_J= J(1-G_J)\lesssim 19/18$ for large $J$. Hence, the asymptotic difference between $G_{\mathrm{opt},T=0}$ and $G_{MQM,T=0}$ is such that $J\Delta G_{T=0}$ remains bounded, in the order of 0.05.
From Eq.~(\ref{LemmaG}) and the discussion about Eq.~(\ref{DeltaG}), we see that, to achieve optimality, the first diagonal coefficient $o_J$ must be maximal \footnote{We can interpret this physically. We see from Sec.~\ref{State_indep_opt_measurement} that the best covariant measurement is obtained from $O_z\propto\ketbra{J,J}{J,J}$. Other covariant measurements can be obtained with $O_z\propto\ketbra{J,m}{J,m}$ for $0 \leq m<J$. The coefficients $o_m$ can be interpreted as how much each of these measurements is done. The term $\ketbra{J,m}{J,m}$ can also be thought as the physical system used to measure. When it is highly polarized ($m=J$), the measurement is efficient. But when the polarization is low, the information gain is weak. E.g., for $m=0$, we clearly see that all POVM elements are $\propto1\!\!1$. } , that is equal to $2J+1$. When it is not the case, as $\Tr{O_z}=2J+1$, the difference $(2J+1)-o_J=\Tr{O_z}-o_J=\sum_{m\neq J}o_m$ is distributed between the other diagonal coefficients $o_m=\sandwich{J,m}{O_z}{J,m}$, for $m\neq J$. The score achieved by the measurement is given by Eq.~(\ref{GT=0}): \begin{equation} G_{T=0}=\Tr{\frac{S_z}{J} \frac{O_z^{J}}{2J+1}}=\frac{J}{J+1}\sum_m \frac{m}{J}\frac{o_m}{2J+1}. \end{equation} Our bound only considers the coefficient $o_J$. However, a simple calculation shows that this is enough to deduce the strict suboptimality of the measurement. Indeed, one can derive: \begin{align*} \epsilon_J&=J\left(1-\frac{J}{J+1}\left(\frac{o_J}{2J+1}+\sum_{m\neq J}\frac{m}{J}\frac{o_m}{2J+1}\right)\right)\\ &\geq J\left(1-\frac{J}{J+1}\left(\frac{o_J}{2J+1}+\frac{J-1}{J}\left(1-\frac{o_J}{2J+1}\right)\right)\right)\\ &\geq 2-\frac{o_J}{2J+1}+o(1), \end{align*} where $o(1)\rightarrow 0$ when $J\rightarrow \infty$. Hence if $o_J$ is not asymptotically $2J+1$, $\epsilon_J$ cannot be asymptotically 1.
In the following, we show that a lower bound on G for thermal states can be calculated with methods based on the $T=0$ case.
\subsection{Behavior for finite temperature states}\label{behaviorTnon0}
As it is built from the spin operators only, the measurement scheme depends only on the properties of the system with respect to the spin operators. More precisely, for a given system size $N=2J$, we consider the basis $\{\ket{\alpha,j,m}^{(N)}\}$ and for given total spin $j$ and permutation multiplicity $\alpha$ the projector $P_{\alpha,j}^{(N)}=\sum_{m}\ketbra{\alpha,j,m}{\alpha,j,m}^{(N)}$. Then, the projection of Eq.~(\ref{defKrauss}) for $N=2J$ spins onto the subspace $j,\alpha$ is equivalent to the projected Kraus operator for $n=2j$ spins onto $j$: \begin{equation}\label{independentN} P_{\alpha,j}^{(N)}E_{\vec{r}}^{(N)}P_{\alpha,j}^{(N)}\equiv P^{(n)}_{j}E_{\vec{r}}^{(n)}P^{(n)}_{j}, \end{equation} where the equivalence $\equiv$ is interpreted as $\ket{\alpha,j,m}^{(N)}\equiv\ket{m}^{(n)}$ (there is no multiplicity for $n$ and $j=n/2$).
\begin{figure}
\caption{$J \Delta G$ (Eq.~(\ref{DeltaG})) as a function of $J$, for various $\beta$ chosen such that $\left<S_z\right>=J \tanh{\beta/2}$. The MQM is close to optimal even for finite temperature. See Sec.~\ref{behaviorTnon0} for further details.}
\label{thermal}
\end{figure} For non zero temperature, we adapt the numerical estimation model of \cite{Barnea17}. Due to Lemma~\ref{LlemmaG} and Eq.~(\ref{independentN}), we can directly exploit the same model and combine the results for the different subspaces for given $j$. However, in this case, we are limited by the choice of the coupling strength $\Delta$ of the pointer with the system. At zero temperature, only the total spin subspace which corresponds to $j=J$ is involved. The optimal coupling strength is then $\Delta=\sqrt{J/4}$. For a non zero temperature, all possible $j$ appear and the value of $\Delta$ cannot be optimized for each one. Our strategy is to choose the optimal coupling value for the equivalent total spin $J_{\mathrm{eq}}$ satisfying $\mean{S^2}=J_{\mathrm{eq}}(J_{\mathrm{eq}}+1)$, which can be deduced from $\mean{S^2}=\frac{1}{2} (3J+J(2J-1)\text{tanh}^2{\beta/2})$ (for a thermal state). Depending on the sensitivity of the MQM guessing scheme with respect to a change in the value of $\Delta$, this method may work or not. Numerical simulations show that a change of order $O(\sqrt{J})$ perturbs the score. However, one can hope that for smaller variation, the perturbation is insignificant.
We tested the method for different values of the temperature $T=1/\beta$ corresponding to spin polarization $\left<S_z\right>=J \tanh{\beta/2}$. We find again that the asymptotic difference between $G_{\mathrm{opt}}$ and $G_{MQM}$ is small. More precisely, Fig.~\ref{thermal} shows $J\Delta G_{\beta}$ as a function of $J$, for different temperatures corresponding to $\mean{S_z}=c J$, for various $c$. For each $\Delta$, the error $J\Delta G_{\beta}$ seems to be bounded for large $J$.
\section{Estimation of a direction through repeated weak measurements}
In the previous section, we considered a specific MQM and studied the mean score of the state direction for pure states as well as for more realistic thermal states. We compared it to its optimal value, obtained with the optimal theoretical measurement. We showed that the difference remained bounded. As the model makes use of a simple Hamiltonian coupling between system and observer, it satisfies the requirements of an MQM as stated in the introduction for thermal states.
However, this model requires that three one-dimensional (1D) pointers (or equivalently one three-dimensional (3D) pointer) are coupled to the system at the very same time, to be then measured. This requirement is difficult to meet. Moreover, an optimized coupling strength between system and pointer is necessary: the pointer width has to be $\Delta=\sqrt{J/4}$ within relatively tight limits. This requires a good knowledge about the system to be measured (its size, its temperature, ...) and fine control over the measurement. Following \cite{Poulin05}, we can overcome this problem by implementing many ultra-weak measurements. To this end, we focus on a relaxation of the measurement procedure, where we consider repeated very weak measurements (with $\Delta\gg \sqrt{J/4}$) in successive orthogonal directions on the state, which is gradually disturbed by the measurements. This idea has already been implemented experimentally \cite{Hacohen16}. The guessed state is obtained by averaging the results in each of the three directions. Note that this is not optimal, as the first measurements are more reliable than the last. However, we show in the following that this intuitive approach gives almost optimal results. For simplicity, we restrict ourselves to the case of a perfectly polarized state, or equivalently a thermal state at a zero temperature. \subsection{The model}
We modify the game considered so far in the following way. Bob now uses a modified strategy, in which he successively repeats the same measurement potentially in different measurement bases. First, he weakly couples the state to a 1D Gaussian pointer through an interaction Hamiltonian in some direction $w$. The pointer state is \begin{equation} \ket{\phi}=\frac{1}{(2\pi\Delta^2)^{1/4}}\int \dd w e^{-\frac{w^2}{4\Delta^2}}\ket{w}, \end{equation} and the Hamiltonian reads \begin{equation} H_w\propto p_w\otimes S_w, \end{equation} where $w\in \{x,y,z\}$. Then, Bob measures the pointer. The post-measurement state is used again for the next measurement and is disturbed in each round. We first analytically derive the case where Bob only measures in one direction ($w=z$). Then, we consider the case where Bob does $t$ measurements successively in each orthogonal direction $x, y, z$. He obtains results $x_1, y_1, z_1, x_2, y_2, ..., z_t$ and estimates the direction with the vector whose coordinates are the average of the $x_i$, the $y_i$ and the $z_i$.
\subsection{Measurement in a single direction}\label{ultraweak1D}
We first study the 1D case. First, note that the optimal strategy when the measurement operators $O_r$ are required to measure in a fixed direction $z$ (i.e. $[O_r,S_z]=0$) is to measure the operators $S^j_z$: As the $O_r$ commute with $S_z$, they can be simulated with a measurement of $S^j_z$. The optimum is to answer $\pm z$ depending on the sign of the result. The obtained score is then $G=\frac{J}{2J+1}$ for integer $J=N/2$ and $G=\frac{2J+1}{4(J+1)}$ otherwise.
In our model, we consider an interaction Hamiltonian $H_w$ taken in a constant direction $w=z$. The total number of measurements is $t$.
\begin{figure}
\caption{Score for repeated weak measurement in a single fixed direction with $\Delta=10$ and $J=2, 4, 8, 16$. See Sec.~\ref{ultraweak1D} for further details.}
\label{repetitiveweak1d}
\end{figure}
The measurement results form a vector $\vec{r}=\{r_1, ..., r_t\}$. The POVM of the full measurement sequence is: \begin{equation} \Omega_{\vec{r}} =
\begin{bmatrix}
\ddots & & \\
& F_m({\vec{r}}) & \\
& & \ddots
\end{bmatrix} \end{equation} where: \begin{equation}
F_m(\vec{r})=\frac{1}{\left(\Delta\sqrt{2\pi}\right)^t}~ e^{\frac{-||\vec{r}-m\vec{1}||^2}{2\Delta^2}}, \end{equation} where $\vec{1}=\{1, ..., 1\}$. As all measurements for each step commute, this case can be solved analytically. Note first that the ordering of the measurement results is irrelevant. From Eq.~(\ref{score}), we find: \begin{align*} G&=\frac{1}{(J+1)(2J+1)}\Tr{S_z O_z}\\
&=\frac{2}{(J+1)(2J+1)\left(\Delta\sqrt{2\pi}\right)^t}\int \dd \vec{r} \delta(\vec{v}_{\vec{r}}-\vec{z})e^{-\frac{||\vec{r}||^2}{2\Delta^2}}\left(\sum_{m>0} m~e^{-\frac{m^2t}{2\Delta^2}}\text{sinh}\left(\frac{m~\vec{r}\cdot\vec{1}}{\Delta^2}\right)\right), \end{align*} where $\vec{v}_{\vec{r}}$ is the optimal guess. For $\vec{r}$ such that $\vec{r}\cdot\vec{1}\geq 0$, the optimal guess is clearly $\vec{v}_{\vec{r}}=\vec{z}$. By symmetry, $\vec{v}_{-\vec{r}}=-\vec{v}_{\vec{r}}$ and the optimal guess is $\vec{v}_{\vec{r}}=\text{sign}(\vec{r}\cdot\vec{1})\vec{z}$. Then: \begin{equation} G=\frac{2}{(J+1)(2J+1)}\sum_{m>0}m~\text{erf}\left(\frac{m}{\Delta}\sqrt{\frac{t}{2}}\right) \end{equation} is easily computed by integration over $\vec{r}$ and by decomposition into its parallel and orthogonal components to $\vec{1}$. We see here that the score only depends on the ratio $\frac{\sqrt{t}}{\Delta}$ and reaches the 1D strong measurement limit for $\frac{\sqrt{t}}{\Delta}\gg 1$ (see Fig.~\ref{repetitiveweak1d}). Here erf is the error function. We see that $G\rightarrow 1/2$ for $J\rightarrow \infty$, which is the optimal value for optimal measurements lying on one direction.
\begin{figure}\label{Goptsaturation}
\end{figure}
\subsection{Ultra-weak measurements in three orthogonal directions}\label{ultraweak3D}
We now study the relaxation of our initial MQM model. In this case, for a large number of measurements $t$, we could not analytically derive the mean score. We hence implemented a numerical simulation of the model. We fix the number of qubits $N=2J$ and pointer width $\Delta$. The vector $\vec{u}$ is drawn at random on the Bloch sphere. Then, we simulate $\tau$ successive weak measurements in directions $x, y, z$ of the system $\ket{\vec{u}}^{\otimes N}$. For each $t \leq \tau$, we guess $\vec{u}$ from the mean of the results for $x, y, z$ for measurements up to $t$.
For large $\Delta$, our procedure can be seen as successive weak measurements of the system. Each measurement acquires a small amount of information and weakly disturbs the state. We attribute the same weight to each measurement result to find the estimated polarization. As each measurement disturbs the state, this strategy is not optimal. However, keeping the heuristic of "intuitive measurement", we consider this guessing method as being natural.
The results from the numerical simulation suggest that for a fixed number of particles $N=2J$ and fixed pointer width $\Delta$, the score as a function of $t$ increases and then decreases (see Fig.~\ref{Goptsaturation} (a)), which is intuitive. Indeed, for few measurements, the state is weakly disturbed and each measurement acquires only a small amount of information about the original state. Then, after a significant number of measurements, the state is strongly disturbed and each measurement is done over a noisy state and gives no information about the initial state. Hence, there is an optimal number of measurements $t^{\mathrm {max}}(N,\Delta)$ which gives a maximal score $G^{\mathrm {max}}(N,\Delta)$. Moreover, for a fixed $N=2J$, $G^{\mathrm {max}}(N,\Delta)$ increases smoothly as the measurements are weaker, i.e. as $\Delta$ increases. It reaches a limit $G^{\mathrm {max}}(N)$ (see Fig.~\ref{Goptsaturation} (a)). This suggests that for weak enough measurements, we observe the same behavior as in the 1D case. More measurements compensate a weaker interaction strength, without loss of precision. Hence, the precision of a single measurement is not important, as long as the measurement is weak enough. Moreover, in that case, we observe a plateau which suggests that the exact value of $t$ is not important. For $N\gg1$, even with $t$ far from $t^\mathrm {max}$, the mean score is close to $G^{\mathrm {max}}$. Interestingly, the trade-off between $t^{\mathrm {max}}$ and $\Delta$ found for the 1D case seems to repeat here. We numerically find that $\sqrt{t^{\mathrm {max}}}/\Delta$ is constant for a given $N=2J$ (see Fig.~\ref{Goptsaturation} (c)) and scales as $1/\sqrt{N}$.
Most importantly, for weak enough measurements, the obtained score is close to the optimal one, as shown in Fig.~\ref{Consecutive3dVariousDelta}. Numerical fluctuations prevent any precise statements about an estimation of the error, but the error is close to what was obtained with the initial measurement procedure, see Fig.~\ref{Consecutive3dVariousDelta}.
\begin{figure}\label{Consecutive3dVariousDelta}
\end{figure}
\section{Conclusion}
In this paper, we asked the question of how to model everyday measurements of macroscopic systems within quantum mechanics. We introduced the notion of Macroscopic Quantum Measurement and argued that such a measurement should be highly non-invasive, collect a large amount of information in a single shot and be described by a ``fairly simple'' coupling between system and observer. We proposed a concrete model based on a pointer von Neumann measurement inspired by the Arthur-Kelly model, where a pointer is coupled to the macroscopic quantum system through a Hamiltonian and then measured. This approach applies to many situations, as long as a natural Hamiltonian for the measured system can be found.
Here, we focused on the problem of a direction estimation. The Hamiltonian naturally couples the spin of the macroscopic quantum state to the position of a pointer in three dimensions, which is then measured. This reveals information about the initial direction of the state. We extended our previous study to consider a collection of aligned spins, which exploits the non monotonic behavior of the mean score as a function of the coupling strength. We presented more precise results. We relaxed the assumptions about the measured system, by considering a thermal state of finite temperature, and showed that our initial conclusions are still valid. We also relaxed the assumptions over the measurement scheme, looking at its approximation by a repetition of ultra weak measurements in several orthogonal directions. Here again, we obtained numerical results supporting the initial conclusion. In summary, this MQM proposal tolerates several relaxations regarding lack of control or knowledge.
It is likely that these two relaxations can be unified: polarization measurement of systems with unknown number of particles or temperature should be accessible via the repeated 1D ultra-weak measurement way. However, this claim has to be justified numerically. Further open questions include the behavior of Arthur-Kelly models in other situations where two or more noncommuting quantities have to be estimated, e.g. for position and velocity estimation.
\section*{Author Contributions} Nicolas Gisin suggested the study. Marc-Olivier Renou and Florian Fröwis performed the simulations and worked out the theory. Marc-Olivier Renou wrote the paper. All authors discussed the results and implications and commented on the manuscript at all stages. All authors have read and approved the final manuscript.
\end{document} |
\begin{document}
\begin{abstract} In this paper we have introduced two new classes $\mathcal{H}\mathcal{M}(\beta, \lambda, k, \nu)$ and $\overline{\mathcal{H}\mathcal{M}} (\beta, \lambda, k, \nu)$ of complex valued harmonic multivalent functions of the form $f = h + \overline g$, satisfying the condition \[ Re \left\{ (1 - \lambda) \frac{\Omega^vf}{z} + \lambda(1-k) \frac{(\Omega^vf)'}{z'} + \lambda k \frac{(\Omega^vf)''}{z''}
\right\} > \beta ,~ (z\in \mathcal{D})\] where $h$ and $g$ are analytic in the unit disk $\mathcal{D} = \{ z : |z| < 1\}.$ A sufficient coefficient condition for this function in the class $\mathcal{H}\mathcal{M}(\beta, \lambda, k, \nu)$ and a necessary and sufficient coefficient condition for the function $f$ in the class $\overline{\mathcal{H}\mathcal{M}}(\beta, \lambda, k, \nu)$ are determined. We investigate inclusion relations, distortion theorem, extreme points, convex combination and other interesting properties for these families of harmonic functions. \end{abstract}
\maketitle \section{Introduction} Let $u,v$ be real harmonic functions in a simply connected domain $\Omega$ , then the continuous function $f=u+iv$ defined in $\Omega$ is said to be harmonic in $\Omega$. If $f=u+iv$ is harmonic in $\Omega$ then there exist analytic functions $G,H$ such that $u=Re~ G$ and $v=Im~ H$ , therefore $f=u+iv=h+\overline g$ where $h=\frac{G+H}{2},~ \overline g=\frac{\overline G-\overline H}{2}$ and we call $h$ and $g$ the analytic part and the co-analytic part of $f$, respectively. The Jacobian of $f$ is given by
$J_f(z)=|h'(z)|^2-|g'(z)|^2$ , also we denote by $w(z)$ the dilatation function for $f$ and define $w(z)=\frac {g'(z)}{h'(z)}.$ Lewy [6], Clunie and Small [3] have shown that the mapping $z\longrightarrow f(z)$ is sense preserving and injective in $\Omega$ if and only if
$J_f(z)>0$ in $\Omega$. The function $f=h+\overline g$ is said to be univalent in $\Omega$ if the mapping $z\longrightarrow f(z)$ is sense preserving and injective in $\Omega$. Denote by $\mathcal{H}$ the class of all harmonic functions $f=h+\overline g$ that are univalent and sense preserving in the open unit disk $\mathcal{D}$ where \begin{equation} h(z)=z+\sum_{n=2}^\infty a_nz^n,~ g(z)=\sum_{n=1}^\infty b_n z^n~~
|b_1|<1. \end{equation} Here $f$ satisfies the normalization conditions $f(0)=0,~ f_z(0)=1$, where $f_z(0)$ denotes the partial derivative of $f(z)$ at $z=0.$ In case $g=0$ this class reduces to the class $\mathcal{S}$ consisting of all analytic univalent functions.
\begin{definition} \label{th2.2}( See [7] and [9]) Let the function $f(z)$ be analytic in a simply-connected region of the $z$-plane containing the origin. The fractional derivative of $f$ of order $\nu$ is defined by \[D_z^\nu f(z)=\frac{1}{\Gamma(1-\nu)}\frac{d}{dz}\int_0^1\frac{f(\zeta)}{(z-\zeta)^\nu}d\zeta, ~~0\leq\nu<1\] where the multiplicity of $(z-\zeta)^\nu$ is removed by requiring $\log (z-\zeta)$ to be real when $z-\zeta>0 .$ \end{definition} Making use of fractional derivative and its known extensions involving fractional derivatives and fractional integrals, Owa and Srivastava [8] introduced the operator $\Omega_z^\nu:\mathcal{A}_0\longrightarrow \mathcal{A}_0$ defined by \[ \Omega_z^\nu f(z):=\Gamma(2-\nu)z^\nu D_z^\nu f(z) ~~ \nu\neq 2,3,4,...\] where $\mathcal{A}_0$ denote the class of functions which are analytic in the unit disk $\mathcal{D}$, satisfying normalization conditions $f(0)=f'(0)-1=0.$
It is easy to see that \[ \Omega_z^\nu f(z)=z+\sum_{n=2}^\infty \frac{\Gamma(2-\nu)\Gamma(n+1)}{\Gamma(n+1-\nu)}a_nz^n.~~ f\in \mathcal{A}_0\]
\begin{definition} \label{th2.2} Suppose that $f=h+\overline g$ where $h$ and $g$ are in (1.1), define $\Omega_z^\nu f(z)=\Omega_z^\nu h(z)+\overline {\Omega_z^\nu g(z)}.$ \end{definition} Then we obtain \[\Omega_z^\nu f(z)=z+\sum_{n=2}^\infty \frac{\Gamma(2-\nu)\Gamma(n+1)}{\Gamma(n+1-\nu)}a_nz^n+ \sum_{n=1}^\infty \frac{\Gamma(2-\nu)\Gamma(n+1)} {\Gamma(n+1-\nu)}b_n{\overline z}^n.\]
By making use of Definition 1.2, we introduce a new class of harmonic univalent functions in the unit disk $\mathcal{D}$ as in Definition 1.3.
\begin{definition} \label{th2.2} Let $\mathcal{H}\mathcal{M}(\beta, \lambda, k, \nu)~ (0\leq k\leq 1,~0<\beta\leq 1,~0\leq\lambda,~0\leq\nu <1)$ be the class of functions $f\in \mathcal{H}$ satisfying the following inequality: \[ Re~ \left\{ (1 - \lambda) \frac{\Omega^vf}{z} + \lambda(1-k) \frac{(\Omega^vf)'}{z'} + \lambda k \frac{(\Omega^vf)''}{z''} \right\} > \beta .~ (z=re^{i\theta})\] where \[z'=\frac{\partial}{\partial\theta}\left(re^{i\theta}\right),~~ z''=\frac{\partial}{\partial\theta}( z') ,\] and \[(\Omega^\nu f(z))'= \frac{\partial}{\partial\theta}\left(\Omega^\nu f(z)\right)=iz(\Omega^\nu h(z))'-i\overline {z(\Omega^\nu g(z))'},\] \[(\Omega^\nu f(z))''=\frac{\partial}{\partial\theta}(\Omega^\nu f(z))'=-z(\Omega^\nu h(z))'-z^2(\Omega^\nu h(z))''-\overline {z(\Omega^\nu g(z))'}-\overline {z^2(\Omega^\nu g(z))''},\] also we denote by $\overline{\mathcal{H}\mathcal{M}}(\beta, \lambda, k, \nu)$ the subclass of $\mathcal{H}\mathcal{M}(\beta, \lambda, k, \nu)$ consisting of functions $f=h+\overline g$ such that \begin{equation}
h(z)=z-\sum_{n=2}^\infty |a_n|z^n,~~ g(z)=\sum_{n=1}^\infty
|b_n|z^n,~~|b_1|<1. \end{equation} \end{definition} In [9] H. M. Srivastava and S. Owa investigated this class with $D^\nu f(z)$ instead of $\Omega^\nu f(z)$ where $D^\nu f(z)$ is the Ruscheweyh derivative of $f$, for $p$-valent harmonic functions. In special cases this class involves the work studied by previous authors such as Bhoosnurmath and Swamay [2], Ahuja and Jahangiri [1,5].
In this paper the coefficient inequalities for the classes $\mathcal{H}\mathcal{M}(\beta, \lambda, k, \nu)$ and $\overline {\mathcal{H}\mathcal{M}}(\beta, \lambda, k, \nu)$ are obtained, and some other interesting properties of these classes are investigated.
\section{Coefficient Bounds}
In the first theorem we give the sufficient condition for $f\in\mathcal{H}$ to be in the class $\mathcal{H}\mathcal{M}(\beta, \lambda, k, \nu).$
\begin{theorem} \label{th2.2} Let $f\in\mathcal{H},$ and
\[\sum_{n=2}^\infty\phi(n,k,\lambda,\nu)|a_n|+\sum_{n=1}^\infty|\psi(n,k,\lambda,\nu)||b_n|<1-\beta,\]where \begin{equation} \phi(n,k,\lambda,\nu):=\frac{[1+\lambda(n-1)(1+nk)]\Gamma(n+1)\Gamma(2-\nu)}{\Gamma(n+1-\nu)}, \end{equation} and \begin{equation} \psi(n,k,\lambda,\nu):=\frac{[1-\lambda(n+1)(1-nk)]\Gamma(n+1)\Gamma(2-\nu)}{\Gamma(n+1-\nu)}, \end{equation} then $f\in \mathcal{H}\mathcal{M}(\beta, \lambda, k, \nu).$ The result is sharp for the function $f(z)$ given by \begin{eqnarray} f(z)&&=z+\sum_{n=2}^\infty\frac{\gamma_n\Gamma(n+1-\nu)z^n}{[1+\lambda(n-1)(1+nk)]\Gamma(n+1)\Gamma(2-\nu)}\nonumber\\
&&+\sum_{n=1}^\infty\frac{\delta_n\Gamma(n+1-\nu)}{|1-\lambda(n+1)(1-nk)|\Gamma(n+1)\Gamma(2-\nu)}\overline z^n\nonumber \end{eqnarray}
where $\sum_{n=2}^\infty |\gamma_n|+\sum_{n=1}^\infty
|\delta_n|=1-\beta.$ \end{theorem} \begin{proof} Suppose \[E(z)=(1-\lambda)\frac{\Omega^\nu f(z)}{z}+\lambda(1-k)\frac{(\Omega^\nu f(z))'}{z'}+\lambda k\frac{(\Omega^\nu f(z))''}{z''}.\]
It suffices to show that $|1-\beta+E(z)|\geq |1+\beta-E(z)|.$ A simple calculation by substituting for $h$ and $g$ in $E(z)$ shows \begin{eqnarray} E(z)&&=1+\sum_{n=2}^\infty\frac{[1+\lambda(n-1)(1+nk)]\Gamma(n+1)\Gamma(2-\nu)}{\Gamma(n+1-\nu)}a_nz^{n-1}\nonumber\\&&+ \sum_{n=1}^\infty\frac{[1-\lambda(n+1)(1-nk)]\Gamma(n+1)\Gamma(2-\nu)}{\Gamma(n+1-\nu)}b_n\frac{\overline z^n}{z},\nonumber \end{eqnarray} Considering (2.1) and (2.2) we have \[ \phi(n,k,\lambda,\nu)=n(n-1)[1+\lambda(n-1)(1+nk)]B(n-1,2-\nu), \] and \[ \psi(n,k,\lambda,\nu)=n(n-1)[1-\lambda(n+1)(1-nk)]B(n-1,2-\nu), \] where $B(\alpha,\beta)=\int_0^1 t^{\alpha-1}(1-t)^{\beta-1}dt=\frac{\Gamma(\alpha)\Gamma(\beta)}{\Gamma(\alpha+\beta)}$ is the familiar Beta function. Then we obtain \[E(z)=1+\sum_{n=2}^\infty\phi(n,k,\lambda,\nu)a_nz^{n-1}+\sum_{n=1}^\infty\psi(n,k,\lambda,\nu)b_n\frac{\overline z^n}{z}.\] Now we have \begin{eqnarray}
&&|1-\beta+E(z)|-|1+\beta-E(z)|\nonumber\\
&&=|2-\beta+\sum_{n=2}^\infty\phi(n,k,\lambda,\nu)a_nz^{n-1}+\sum_{n=1}^\infty\psi(n,k,\lambda,\nu)b_n\frac{\overline z^n}{z}|\nonumber\\&&-|\beta-\sum_{n=2}^\infty\phi(n,k,\lambda,\nu)a_nz^{n-1}-\sum_{n=1}^\infty\psi(n,k,\lambda,\nu)b_n\frac{\overline z^n}{z}|\nonumber\\
&&\geq 2-\beta+\sum_{n=2}^\infty\phi(n,k,\lambda,\nu)|a_n||z|^{n-1}+\sum_{n=1}^\infty|\psi(n,k,\lambda,\nu)||b_n||\frac{\overline z^n}{z}|\nonumber\\
&&-\beta-\sum_{n=2}^\infty\phi(n,k,\lambda,\nu)|a_n||z|^{n-1}-\sum_{n=1}^\infty|\psi(n,k,\lambda,\nu)||b_n||\frac{\overline z^n}{z}|\nonumber\\
&&=2-2\beta-2\sum_{n=2}^\infty\phi(n,k,\lambda,\nu)|a_n||z|^{n-1}-2\sum_{n=1}^\infty|\psi(n,k,\lambda,\nu)||b_n||\frac{\overline z^n}{z}|\nonumber\\
&&>2-2\beta-2\sum_{n=2}^\infty\phi(n,k,\lambda,\nu)|a_n|-2\sum_{n=1}^\infty|\psi(n,k,\lambda,\nu)||b_n|\nonumber\\ &&\geq 0,\nonumber \end{eqnarray} and the proof is complete. \end{proof} In our next theorem we obtain the necessary and sufficient coefficient condition for $f\in\mathcal{H}$ to be in $\overline{\mathcal{H}\mathcal{M}}(\beta,\lambda,k,\nu).$ \begin{theorem} \label{th2.2} Let $f\in\mathcal{H}$; then $f\in\overline{\mathcal{H}\mathcal{M}}(\beta,\lambda,k,\nu)$ if and only if \begin{equation}
\sum_{n=2}^\infty\phi(n,k,\lambda,\nu)|a_n|+\sum_{n=1}^\infty
|\psi(n,k,\lambda,\nu)||b_n|<1-\beta. \end{equation} \end{theorem} \begin{proof} Since $\overline{\mathcal{H}\mathcal{M}}(\beta,\lambda,k,\nu)\subset\mathcal{H}\mathcal{M}(\beta,\lambda,k,\nu)$ the ``if'' part of the theorem follows from Theorem 2.1. For the ``only if'' part we show that if the condition (2.3) does not hold then $f\notin\overline{\mathcal{H}\mathcal{M}}(\beta,\lambda,k,\nu).$ Let $f\in\overline{\mathcal{H}\mathcal{M}}(\beta,\lambda,k,\nu)$ then we have \begin{eqnarray} 0&&\leq Re~ \left\{(1-\lambda)\frac{\Omega^\nu f(z)}{z}+\lambda(1-k)\frac{(\Omega^\nu f(z))'}{z'}+\lambda k\frac{(\Omega^\nu f(z))''}{z''}-\beta\right\}\nonumber\\ &&=Re~ \left\{1-\beta-\sum_{n=2}^\infty\phi(n,k,\lambda,\nu)a_nz^{n-1}-\sum_{n=1}^\infty\psi(n,k,\lambda,\nu)b_n\frac{\overline z^n}{z}\right\}.\nonumber \end{eqnarray}
This inequality holds for all values of $z$ for which $|z|=r<1$, so we can choose the values of $z$ on the positive real axis such that $0\leq z=r<1$; therefore we get the following inequality \[ 0\leq 1-\beta-\sum_{n=2}^\infty
\phi(n,k,\lambda,\nu)|a_n|r^{n-1}-\sum_{n=1}^\infty
|\psi(n,k,\lambda,\nu)||b_n|r^{n-1}.\] Now by letting $r\longrightarrow 1^-$ we have \begin{equation}
0\leq 1-\beta-\sum_{n=2}^\infty\phi(n,k,\lambda,\nu)|a_n|-\sum_{n=1}^\infty
|\psi(n,k,\lambda,\nu)||b_n|. \end{equation} If the condition (2.3) does not hold then the right-hand side of (2.4) is negative for $r$ sufficiently close to $1.$ Thus there exists a $z_0=r_0\in (0,1)$ for which the right-hand side of (2.4) is negative. This contradicts the required condition for $f\in\overline{\mathcal{H}\mathcal{M}}(\beta,\lambda,k,\nu)$ and so the proof is complete. \end{proof} Putting $\lambda=0$ in Theorem 2.2 we get: \begin{corollary} \label{th2.2} $f\in\overline{\mathcal{H}\mathcal{M}}(\beta,0,k,\nu)=\left\{f:~ Re~ \left(\frac{\Omega^\nu f(z)}{z}\right)>\beta\right\}$ if and only if \[
\sum_{n=1}^\infty n(n-1)B(n-1,2-\nu)|a_n|+\sum_{n=1}^\infty n(n-1)B(n-1,2-\nu)|b_n|< 1-\beta. \] \end{corollary} Putting $\lambda=1$ in Theorem 2.2 we have: \begin{corollary} \label{th2.2} $f\in\overline{\mathcal{H}\mathcal{M}}(\beta,1,k,\nu)=\left\{f:~ Re~ \left((1-k)\frac{(\Omega^\nu f(z))'}{z'}
+k\frac{(\Omega^\nu f(z))''}{z''}\right)>\beta\right\}$ if and only if \[\sum_{n=2}^\infty n^2(n-1)(1-k+nk)B(n-1,\nu)|a_n|+\sum_{n=1}^\infty n^2(n-1)|nk+k-1|B(n-1,2-\nu)|b_n|<1-\beta.\] \end{corollary} Putting $k=1$ in Theorem 2.2 we have: \begin{corollary} \label{th2.2} $f\in\overline{\mathcal{H}\mathcal{M}}(\beta,\lambda,1,\nu)=\left\{f:~ Re~ \left((1-\lambda)\frac{\Omega^\nu f(z)}{z}
+\lambda\frac{(\Omega^\nu f(z))''}{z''}\right)>\beta\right\}$ if and only if \[\sum_{n=2}^\infty n(n-1)[1+\lambda(n^2-1)]B(n-1,2-\nu)(|a_n|+|b_n|)<1-\beta.\] \end{corollary} Finally putting $k=0$ in Theorem 2.2 we obtain: \begin{corollary} \label{th2.2} $f\in\overline{\mathcal{H}\mathcal{M}}(\beta,\lambda,0,\nu)=\left\{f:~ Re~ \left((1-\lambda)\frac{\Omega^\nu f(z)}{z}
+\lambda\frac{(\Omega^\nu f(z))'}{z'}\right)>\beta\right\}$ if and only if \[\sum_{n=2}^\infty n(n-1)[1+\lambda(n-1)]B(n-1,2-\nu)|a_n|+\sum_{n=1}^\infty n(n-1)|1-\lambda(n+1)|B(n-1,2-\nu)|b_n|<1-\beta.\] \end{corollary} \begin{theorem} \label{th2.2} $f\in\overline{\mathcal{H}\mathcal{M}}(\beta,\lambda,k,\nu)$ if and only if \begin{equation} f(z)=t_1z+\sum_{n=2}^\infty t_nf_n(z)+\sum_{n=1}^\infty s_ng_n(z) ~~ (z\in\mathcal{D}), \end{equation} where $t_i\geq 0,~ s_i\geq 0,~ t_1+\sum_{n=2}^\infty t_n+\sum_{n=1}^\infty s_n=1$ and \[f_n(z)=z-\frac{1-\beta}{\phi(n,k,\lambda,\nu)}z^n,\]
\[g_n(z)=z+\frac{1-\beta}{|\psi(n,k,\lambda,\nu)|}\overline z^n.\] \end{theorem} \begin{proof} Let $f$ be of the form (2.5) then we have \begin{eqnarray}
f(z)&&=t_1z+\sum_{n=2}^\infty t_n\left(z-\frac{1-\beta}{\phi(n,k,\lambda,\nu)}z^n\right)+\sum_{n=1}^\infty s_n \left(z+\frac{1-\beta}{|\psi(n,k,\lambda,\nu)|}\overline z^n\right)\nonumber\\ &&=z-\sum_{n=2}^\infty \frac{1-\beta}{\phi(n,k,\lambda,\nu)}t_nz^n+\sum_{n=1}^\infty
\frac{1-\beta}{|\psi(n,k,\lambda,\nu)|}s_n\overline z^n.\nonumber \end{eqnarray} Therefore we have \begin{eqnarray} &&\sum_{n=2}^\infty\phi(n,k,\lambda,\nu)
\frac{1-\beta}{\phi(n,k,\lambda,\nu)}t_n+\sum_{n=1}^\infty|\psi(n,k,\lambda,\nu)|
\frac{1-\beta}{|\psi(n,k,\lambda,\nu)|}s_n\nonumber\\ &&=(1-\beta)\left[\sum_{n=2}^\infty t_n+\sum_{n=1}^\infty s_n\right]=(1-\beta)(1-t_1)\nonumber\\ &&<1-\beta.\nonumber \end{eqnarray} This shows that $f\in\overline{\mathcal{H}\mathcal{M}}(\beta,\lambda,k,\nu).$ Conversely suppose that $f\in\overline{\mathcal{H}\mathcal{M}}(\beta,\lambda,k,\nu)$ letting \[t_1=1-\sum_{n=2}^\infty t_n-\sum_{n=1}^\infty s_n,\] where
\[t_n=\frac{\phi(n,k,\lambda,\nu)}{1-\beta}|a_n|,~
s_n=\frac{|\psi(n,k,\lambda,\nu)|}{1-\beta}|b_n|.\] We obtain \begin{eqnarray}
f(z)&&=z-\sum_{n=2}^\infty |a_n|z^n+\sum_{n=1}^\infty |b_n|\overline z^n\nonumber\\ &&=z-\sum_{n=2}^\infty \frac{1-\beta}{\phi(n,k,\lambda,\nu)}t_nz^n+\sum_{n=1}^\infty
\frac{1-\beta}{|\psi(n,k,\lambda,\nu)|}s_n\overline z^n.\nonumber\\ &&=z-\sum_{n=2}^\infty(z-f_n(z))t_n+\sum_{n=1}^\infty(g_n(z)-z)s_n\nonumber\\ &&=\left(1-\sum_{n=2}^\infty t_n-\sum_{n=1}^\infty s_n\right)z+\sum_{n=2}^\infty t_nf_n(z)+\sum_{n=1}^\infty s_ng_n(z)\nonumber\\ &&=t_1z+\sum_{n=2}^\infty t_nf_n(z)+\sum_{n=1}^\infty s_ng_n(z).\nonumber \end{eqnarray} This completes the proof. \end{proof} \section{Convolution and Convex combinations} In the present section we investigate the convolution properties of the class $\overline{\mathcal{H}\mathcal{M}}(\beta,\lambda,k,\nu).$ The convolution of two harmonic function $f_1$ and $f_2$ given by \begin{equation}
f_1(z)=z-\sum_{n=2}^\infty |a_n|z^n+\sum_{n=1}^\infty |b_n|\overline z^n,\\
f_2(z)=z-\sum_{n=2}^\infty |c_n|z^n+\sum_{n=1}^\infty |d_n|\overline z^n, \end{equation} is defined by \begin{equation}
(f_1*f_2)(z)=z-\sum_{n=2}^\infty |a_nc_n|z^n+\sum_{n=1}^\infty
|b_nd_n|\overline z^n. \end{equation} \begin{theorem} \label{th2.2} For $0\leq\beta<\alpha<1 $ let $f_1,~
f_2$ be of the form (3.1) such that for every $n,~ |c_n|<1,~
|d_n|<1.$ If $f_1,~ f_2\in\overline{\mathcal{H}\mathcal{M}}(\alpha,\lambda,k,\nu)$ then \[f_1*f_2\in\overline{\mathcal{H}\mathcal{M}}(\alpha,\lambda,k,\nu)\subset\mathcal{H}\mathcal{M}(\beta,\lambda,k,\nu).\] \end{theorem} \begin{proof} Considering (3.2) we have \begin{eqnarray}
&&\sum_{n=2}^\infty\phi(n,k,\lambda,\nu)|a_nc_n|+\sum_{n=1}^\infty
|\psi(n,k,\lambda,\nu)||b_nd_n|\nonumber\\
&&<\sum_{n=2}^\infty\phi(n,k,\lambda,\nu)|a_n|+\sum_{n=1}^\infty
|\psi(n,k,\lambda,\nu)||b_n|\nonumber\\ &&<1-\alpha, \end{eqnarray} and the proof is complete. \end{proof} In the last theorem we examine the convex combination properties of the elements of $\overline{\mathcal{H}\mathcal{M}}(\beta,\lambda,k,\nu).$ \begin{theorem} \label{th2.2} The class $\overline{\mathcal{H}\mathcal{M}}(\beta,\lambda,k,\nu)$ is closed under convex combination. \end{theorem} \begin{proof} Suppose that
\[f_i(z)=z-\sum_{n=2}^\infty |a_{n,i}|z^n+\sum_{n=1}^\infty
|b_{n,i}|\overline z^n,~ i=1,2,...\] then the convex combinations of $f_i$ may be written as
\[\sum_{i=1}^\infty t_if_i(z)=z-\sum_{n=2}^\infty \left(\sum_{i=1}^\infty t_i|a_{n,i}|\right)z^n+\sum_{n=1}^\infty \left(\sum_{i=1}^\infty t_i|b_{n,i}|\right)\overline z^n,\] where $\sum_{i=1}^\infty t_i=1,~ 0\leq t_i\leq 1.$ Since
\[\sum_{n=2}^\infty\phi(n,k,\lambda,\nu)|a_{n,i}|+\sum_{n=1}^\infty
|\psi(n,k,\lambda,\nu)||b_{n,i}|<1-\beta,\] so we have \begin{eqnarray}
&&\sum_{n=2}^\infty\phi(n,k,\lambda,\nu)\left(\sum_{i=1}^\infty t_i|a_{n,i}|\right)+\sum_{n=1}^\infty
|\psi(n,k,\lambda,\nu)|\left(\sum_{i=1}^\infty t_i|b_{n,i}|\right)\nonumber\\
&&=\sum_{i=1}^\infty t_i\left\{\sum_{n=2}^\infty\phi(n,k,\lambda,\nu)|a_{n,i}|+\sum_{n=1}^\infty
|\psi(n,k,\lambda,\nu)||b_{n,i}|\right\}\nonumber\\ &&<(1-\beta)\sum_{i=1}^\infty t_i=1-\beta.\nonumber \end{eqnarray} This shows that $\sum_{i=1}^\infty t_if_i(z)\in\overline{\mathcal{H}\mathcal{M}}(\beta,\lambda,k,\nu)$ and the proof is complete. \end{proof}
\end{document} |
\begin{document}
\title{Neostability-properties of Fra\"{\i}ss\'e limits of 2-nilpotent groups of exponent $p> 2$} \author{Andreas Baudisch} \date{\today} \maketitle
\begin{abstract} \noindent Let $L(n)$ be the language of group theory with $n$ additional new constant symbols $c_1,\ldots,c_n$. In $L(n)$ we consider the class ${\mathbb K}(n)$ of all finite groups $G$ of exponent $p > 2$, where $G'\subseteq\langle c_1^G,\ldots,c_n^G\rangle \subseteq Z(G)$ and $c_1^G,\ldots,c_n^G$ are linearly independent. Using amalgamation we show the existence of Fra\"{\i}ss\'e limits $D(n)$ of ${\mathbb K}(n)$. $D(1)$ is Felgner's extra special $p$-group. The elementary theories of the $D(n)$ are supersimple of SU-rank 1. They have the independence property. \end{abstract}
\section{Introduction}
We consider the variety ${\mathbb G}_{2,p}$ of nilpotent groups of class 2 of exponent $p>2$ in the language $L$ of group theory. To get the Amalgamation Property (AP) in \cite{Bau} an additional predicate $P(G)$ for $G\in{\mathbb G}_{2,p}$ with $G'\subseteq P(G)\subseteq Z(G)$ is introduced. Let ${\mathbb G}_{2,p}^P$ be the category of these groups in the extended language $L_P$ where the morphisms are embeddings. Using the class ${\mathbb K}_{2,p}^P$ of finite structures in ${\mathbb G}_{2,p}^P$ we get a Fra\"{\i}ss\'e limit $D$. If we build $D$ by amalgamation then $P(a)$ says that $a$ will become an element of the commutator subgroup $D'$ of $D$ in that process. In \cite{Bau} it is shown that ${\rm Th}(D)$ is not simple. Here we point out that $D$ has the tree property of the second kind (TP$_2$). This is easily seen.
Let $L(n)$ be the language of group theory with $n$ additional new constant symbols $c_1,\ldots,c_n$. In $L(n)$ we consider the class ${\mathbb G}(n)$ of all groups $G\in {\mathbb G}_{2,p}$, where $G'\subseteq\langle c_1^G,\ldots,c_n^G\rangle \subseteq Z(G)$ and $c_1^G,\ldots,c_n^G$ are linearly independent. We use linear independence, since we can consider an abelian group of exponent $p$ as a vector space over ${\mathbb F}_p$. $\langle X \rangle$ denotes the substructure generated by $X$. Hence $\langle c_1^G,\ldots,c_n^G\rangle = \langle \emptyset \rangle$. ${\mathbb G}(n)$ is uniformly locally finite. Let ${\mathbb K}(n)$ be the class of finite structures in ${\mathbb G}(n)$. ${\mathbb K}(n)$ has the Hereditary Property (HP), the Joint Embedding Property (JEP) and the Amalgamation Property (AP). Hence the Fra\"{\i}ss\'e limit $D(n)$ of the class ${\mathbb K}(n)$ exists. Note that $D(1)$ is the extra special $p$-group considered by U.~Felgner in \cite{Fe}. In \cite{MacSt} the corresponding bilinear alternating map is obtained as an ultraproduct of finite structures. It is a well-known example of a supersimple theory of SU-rank 1.
We show that the theories of all Fra\"{\i}ss\'e limits $D(n)$ are supersimple of SU-rank 1. To prove this we check the properties of non-forking that characterize simple theories \cite{KP}. Before that, we show that each group $G$ in ${\mathbb G}(n)$ with $G/Z(G)$ infinite has the Independence Property, in particular all $D(n)$.
\section{TP{\boldmath$_2$} of {\boldmath${\rm Th}(D)$}}\label{s1}
\begin{prop}\label{p2.1} In ${\rm Th}(D)$ the formula $[x,y_1]=[y_2,y_3]$ has the tree property of the second kind. \end{prop}
{\em Proof\/}. Since $D$ is the Fra\"{\i}ss\'e limit of ${\mathbb K}_{2,p}^P$ there is an embedding of an infinite free group of $G_{2,p}$ in $D$. Assume that $\{b_\alpha:\alpha<\omega\}\cup\{c_{\alpha,i},d_{\alpha,i}:\alpha<\omega, i<\omega\}$ are free generators of such an infinite free subgroup. We consider the array \[
\overline{a}_{\alpha,i}=\{(b_{\alpha,i}, c_{\alpha,i},d_{\alpha,i}):\alpha<\omega, i<\omega\} \] where $b_{\alpha,i}=b_\alpha$ for all $\alpha$ and $i$: \[ \mbox{Then }\; D\vDash\neg\exists x([x,b_\alpha]=[c_{\alpha,i},d_{\alpha,i}]\wedge[x,b_\alpha] = [c_{\alpha,j},d_{\alpha,j}]) \] for fixed $\alpha$ and $i\ne j$. Now let $f$ be any map of $\omega$ into $\omega$. Then the set \[
\{[x,b_\alpha]=[c_{\alpha,f(\alpha)},d_{\alpha,f(\alpha)}]:\alpha<\omega\} \] is consistent, since $D$ is a Fra\"{\i}ss\'e limit.
$\Box$
\section{The amalgamation property for {\boldmath${\mathbb K}(n)$}}\label{s3}
Let $G$ be a group in ${\mathbb G}(n)$ with the elements $c_1^G,\ldots,c_n^G$ short $c_1,\ldots,c_n$. Let $P(G)$ be the subgroup generated by $c_1,\ldots,c_n$. In the language $L(n)$ \, $P(G)$ is the $L(n)$-substructure generated by the empty set. By definition $G'\subseteq P(G)\subseteq Z(G)$ and the linear dimension ${\rm ldim}(P(G))$ of $P(G)$ is $n$.
In \cite{Bau} a functor $F$ from ${\mathbb G}_{2,p}^P$ into the category ${\mathbb B}^P$ of bilinear alternating maps $(V,W,\beta)$ is defined where $V$ and $W$ are ${\mathbb F}_p$-vector spaces and $\beta$ is a bilinear alternating map from $V\times V$ into $W$. Morphisms of ${\mathbb B}^P$ from $(V_1,W_1,\beta_1)$ to $(V_2,W_2,\beta_2)$ consist of vector space embeddings $f:V_1$ into $V_2$ and $g:W_1$ into $W_2$ that commute with the bilinear maps $\beta_i$. \[ \begin{xy} \xymatrix{ V_1\ar[d]^{\textstyle f}&\times&V_1\ar[d]^{\textstyle f}\ar[r]^{\textstyle\beta_1} &W_1\ar[d]^{\textstyle g, }\\ V_2&\times&V_2 \ar[r]^{\textstyle\beta_2}&W_2 } \end{xy} \] $F$ is defined in the following way: $F(G)$ is $(V,W,\beta)$ where $V=G/P(G)$, $W= P(G)$ and $\beta$ is induced by $[\;,\;]$. If $f: G\to H$ then $F(f)=(\overline{f}, f\restriction P)$ where $\overline{f}:G/P(G)\to H/P(H)$ is induced by $f$. $F$ is a bijection on the level of objects up to isomorphisms.
If we consider the category ${\mathbb G}(n)$, then the morphisms $f:G\to H$ send $c_i^G$ to $c_i^H$. Hence $f$ induces an isomorphism of $P(G)$ onto $P(H)$. We call ${\mathbb B}(n)$ the corresponding category of bilinear alternating maps $(V,P,\beta)$ where $P=\langle c_1,\ldots,c_n\rangle$ is fixed. The morphisms have the form $(g,{\rm id})$. We define the functor $F$ from ${\mathbb G}(n)$ to ${\mathbb B}(n)$ as above and obtain as in \cite{Bau2}:
\begin{lemma}\label{l3.1} \begin{enumerate}
\item[{\rm i)}] $F$ is a functor of ${\mathbb G}(n)$ onto ${\mathbb B}(n)$ that is a bijection for the objects of the categories up to isomorphisms.
\item[{\rm ii)}] If $G_0 \in {\mathbb G}(n)$ and $(g,{\rm id})$ is an embedding of $F(G_0)$ into some $(V,P,\beta)$, then there are some $G \in {\mathbb G}(n)$ and some embedding $f$ of $G_0$ into $G$, such that $F(G) = (V,P, \beta)$ and $F(f) = (g,{\rm id})$.
\item[{\rm iii)}] In ${\mathbb G}(n)$ we consider $e_0:G_0\to G$, $e_1:H_0\to H$ where $f_0$ is an isomorphism of $G_0$ onto $H_0$. In ${\mathbb B}(n)$ we assume that there is $g$ such that \[ \begin{xy} \xymatrix{ F(G_0)\ar[d]^{F(f_0)}\ar[r]^{F(e_0)} &F(G)\ar[d]^{(g,{\rm id})}\\ F(H_0) \ar[r]^{F(e_1)}&F(H)\;\;. } \end{xy} \] \end{enumerate} Then there is an embedding $f$ of $G$ into $H$ such that $F(f)=(g,{\rm id})$ and \[ \begin{xy} \xymatrix{ G_0\ar[d]^{f_0}\ar[r]^{e_0} &G\ar[d]^{f}\\ H_0 \ar[r]^{e_1}&H\;\;. } \end{xy} \] \end{lemma}
Lemma~\ref{l3.1} shows that AP for ${\mathbb B}(n)$ implies AP for ${\mathbb G}(n)$ as in \cite{Bau}. To show AP for ${\mathbb B}(n)$ we cannot use the free amalgam as in \cite{Bau}.
Assume \begin{eqnarray*} (f_A,{\rm id}):&&(V_B,P,\beta_B)\longrightarrow(V_A,P,\beta_A),\\ (f_C,{\rm id}):&&(V_B,P,\beta_B)\longrightarrow(V_C,P,\beta_C). \end{eqnarray*} W.l.o.g. $V_B$ is a common subspace of $V_A$ and $V_C$. Let $V_D$ be the vector space amalgam $V_C\bigoplus\limits_{V_B} V_A$ with respect to $f_A$ and $f_C$. We get the desired amalgam $(V_D,P,\beta_D)$ if \[ \beta_D=\beta_A\;\mbox{ on }\; V_A \quad\mbox{ and } \quad \beta_D=\beta_C\;\mbox{ on }\;V_C \] and the rest is obtained in the following way: If $X$ is a basis of $V_A$ over $V_B$ and $Y$ is a basis of $V_C$ over $V_B$ then we can choose for each pair $x\in X$ and $y\in Y$ \, $\beta_D(x,y)$ in $P$ as we want.
In our context AP implies JEP.
\begin{theorem}\label{t3.1} ${\mathbb K}(n)$ has HP, JEP and AP. Hence the Fra\"{\i}ss\'e limit $D(n)$ exists. It is $\aleph_0$-categorical. ${\rm Th}(D(n))$ has the elimination of quantifiers. \end{theorem} The theorem uses the known theory. See \cite{Ho}. Uniform local finiteness and finite signature for ${\mathbb K}(n)$ imply $\aleph_0$-categoricity and elimination of quantifiers. ${\rm Th}(D(n))$ can be axiomatized by the following sentences: Let $M$ be a model of ${\rm Th}(D(n))$. \begin{enumerate}
\item[$\Sigma\,1)$] $M$ is a nilpotent group of class 2 with exponent $p$. \item[$\Sigma\,2)$] $M'=Z(M)=\langle c_1,\ldots,c_n\rangle$ is of linear dimension $n$. \item[$\Sigma\,3)$] For $B\subseteq A$ in ${\mathbb K}(n)$ it holds: If $B'\subseteq M$ and $B'\cong B$, then this embedding of $B$ into $M$ can be extended to $A$. \end{enumerate}
In the case $n=1$ these axioms imply that $M$ is infinite and $M'=Z(M)$ is cyclic. By U.~Felgner \cite{Fe} $D(1)$ is the extra special $p$-group, since his axiomatization consists of $\Sigma\,1)$, the condition that $M'=Z(M)$ is cyclic, and infiniteness.
{\bf Question} \, Is there an easier axiomatization of ${\rm Th}(D(n))$ for $n\ge 2$?
\section{Independence property in {\boldmath${\mathbb G}(n)$}}\label{s4}
Assume $M\vDash {\rm Th}(D(1))$ and $M$ is countable. We write $c$ instead of $c_1$. By \cite{Fe} $M$ is a central product over $\langle c\rangle$: \[
M=\bigodot\limits_{\stackrel{\scriptstyle\langle c\rangle}{i<\omega}}\langle c,a_ib_i\rangle \] where $c$ is a generator of the cyclic subgroup $M'=Z(M)$ and $[b_i,a_i]=c$.
By the elimination of quantifiers of ${\rm Th}(D(1))$ \, $a_0\hat{\;}b_0,a_1\hat{\;}b_1,\ldots,a_n\hat{\;}b_n,\ldots$ is an indiscernible sequence in $M$. Then $a_1\hat{\;}b_1,a_2\hat{\;}(b_2\circ b_0),a_3\hat{\;}b_3,a_4\hat{\;}(b_4\circ b_0),\ldots$ and $b_1,b_2\circ b_0,b_3,b_4\circ b_0,\ldots$ are indiscernible sequences in $M$. We have $M\vDash[b_{2i+1},a_0]=1$ for $i<\omega$ and $M\vDash[b_{2i}\circ b_0,a_0]=c$ for $1\le i<\omega$.
We have shown (see \cite{A}):
\begin{lemma}\label{l4.1} The formula $[y,x]=1$ has the independence property in ${\rm Th}(D(1))$. \end{lemma}
Let $G$ be in ${\mathbb G}(n)$ with $G/Z(G)$ is infinite. For $a\in G\setminus Z(G)$ choose a maximal linearly independent subset $\{e_1,\ldots,e_m\}=X_a$ of $P(G)$ such that for every $1\le i\le m\le n$ there is some $b_i\in G$ with $[a,b_i]=e_i$. Let $E_a$ be $\{b_1,\ldots,b_m\}$. If $[a,b]=t\ne 1\in P(G)$, then $t=\sum\limits_{1\le i\le m} e_i^{r_i}$ and $[a,b\cdot b_1^{p-r_1}\cdot\ldots\cdot b_m^{p-r_m}]=1$. Hence every element $a\in G$ has a centralizer $C(a)$ of index $\le n$ and $G=\langle a,E_a\rangle\circ C(a)$.
Now we start again with $d_0\in G\setminus Z(G)$. Then $X_{d_0}\ne \emptyset$ and $E_0=E_{d_0}\ne\emptyset$ and we choose $e_0\in E_0$ with $[d_0,e_0]\ne 1$. Since $C(\langle d_0,E_0\rangle)$ has finite index in $G$ there is some \[
d_1\in C(\langle d_0,E_0\rangle)\quad\mbox{ with }\; d_1\not\in Z(G). \] We get $E_1=E_{d_1}\ne\emptyset$ and choose $b_1\in E_1$. We can repeat this argument and get \[
d_2\in C(\langle d_0,e_0,E_0,E_1\rangle), \quad d_2\not\in Z(G). \] Finally we have $d_0,e_0, d_1,e_1,\ldots$ with $[d_i,e_i]\ne 1$ and $[d_i,d_j]=1$, $[d_i,e_j]=1$ and $[e_i,e_j]=1$ for $i\ne j$. We can select a subsequence with $[d_i,e_i]=c\ne 1$ for some $c\in P(G)$. Assume w.l.o.g. $[d_i,e_i]=c$ for all $i<\omega$. We have shown that $D(1)$ is a subgroup of $G$. Since the independence property of $D(1)$ is given by a quantifier-free formula we get
\begin{theorem}\label{t4.2} For every $G\in{\mathbb G}(n)$ with $G/Z(G)$ infinite we have: \begin{enumerate}
\item[{\rm i)}] There is an embedding of $D(1)$ in $G$. \item[{\rm ii)}] $G$ has the independence property. \end{enumerate} \end{theorem}
\begin{corollary}\label{c4.3} The Fra\"{\i}ss\'e limits $D(n)$ of ${\mathbb K}(n)$ have the independence property. \end{corollary}
\section{Superstability of {\boldmath$Th(D(n))$}}\label{s5}
Let ${\mathbb C}(n)$ be a monster model of $Th(D(n))$. We define \[ A\mathop{\mathpalette\Ind{}}\limits_B{}\!\!^0 C, \mbox{ if }\; \langle A\rangle\cap\langle C\rangle=\langle B\rangle. \] Note that all substructures as $\langle A\rangle$ contain $P({\mathbb C}(n))$.
We have to check that $\mathop{\mathpalette\Ind{}}^0$ fulfils the conditions of B.~Kim and A.~Pillay \cite{KP} that characterize Non-forking. Working in the vector space ${\mathbb C}(n)/P({\mathbb C}(n))$ Monotonicity, Transitivity, Symmetry, Finite Character, and Local Character are easily shown.
{\bf Existence:} $\overline{a},B\subseteq A$ are considered in ${\mathbb C}(n)$. Then there is some $\overline{d}$ in ${\mathbb C}(n)$ with ${\rm tp}(\overline{a}/B)={\rm tp}(\overline{d}/B)$ and $\overline{d}\mathop{\mathpalette\Ind{}}\limits_B{}\!\!^0A$.
W.l.o.g. $B$ and $A$ are $L(n)$-substructures. Since $P\subseteq B$ we can assume that $\overline{a}$ is linearly independent over $B$. Choose $X_B$ and $X_A$ such that the images of $X_B$ and $X_BX_A$ are vector space bases of $B/P$ and $A/P$, respectively. Let $\beta((X_BX_A)^2)$ be the set of all $\beta(b_1,b_2)=[b_1,b_2]$ where $b_1,b_2\in X_BX_A$. Then $A$ is uniquely determined by $X_BX_A$ and $\beta((X_BX_A)^2)$.
Now we define an extension $G$ of $A$. Let $\overline{e}$ be linearly independent over $A$. $\overline{e}X_BX_A$ is linearly independent over $P$. $\beta((\overline{e}\,\hat{\;}X_BX_A)^2)$ is chosen as any extension of $\beta((X_BX_A)^2)$ and $\beta((\overline{e}X_B)^2)$, where the last set is obtained from $\beta((\overline{a}X_B)^2)$ by replacing $a_i$ in $\overline{a}$ by $e_i$ in $\overline{e}$. $G=\langle\overline{e}A\rangle$ is a structure in ${\mathbb K}(n)$. By the axioms $\Sigma\,3)$ of ${\rm Th}(D(n))$ there is an embedding of $\overline{e}$ onto $\overline{d}$ over $A$ in ${\mathbb C}(n)$. By quantifier elimination ${\rm tp}(\overline{d}/B)={\rm tp}(\overline{a}/B)$. Furthermore $\overline{d}\mathop{\mathpalette\Ind{}}\limits_B{}\!\!^0A$ by construction.
Finally we have to show:
\subsubsection*{Independence over Models}
Let $M\preceq {\mathbb C}(n)$, ${\rm tp}(\overline{a}{}^0/M)={\rm tp}(\overline{a} {}^1/M)$ \[ \overline{b}{}^0\mathop{\mathpalette\Ind{}}\limits_M{}\!\!^0\;\overline{b}{}^1,\qquad \overline{a}{}^0\mathop{\mathpalette\Ind{}}\limits_M{}\!\!^0\;\overline{b}{}^0,\qquad \overline{a}{}^1\mathop{\mathpalette\Ind{}}\limits_M{}\!\!^0\;\overline{b}{}^1. \] Then there is some $\overline{e}$ with \[
{\rm tp}(\overline{e}/M\overline{b}{}^0)={\rm tp}(\overline{a}{}^0/M\overline{b}{}^0),\qquad {\rm tp}(\overline{e}/M\overline{b}{}^1)={\rm tp}(\overline{a}{}^1/M\overline{b}{}^1), \] and \[
\overline{e}\mathop{\mathpalette\Ind{}}\limits_M{}\!\!^0\;\overline{b}{}^0\overline{b}{}^1. \] Let $X_M$ be a set in $M$ such that its image is a vector space basis of $M/P$. By assumption we can assume that w.l.o.g. $\overline{b}{}^0\overline{b}{}^1$ is linearly independent over $M$ modulo~$P$. We choose $\overline{d}$ linearly independent over $\overline{b}{}^0\overline{b}{}^1X_M$ modulo~$P$. Now we extend $\langle \overline{b}{}^0,\overline{b}{}^1,X_M\rangle$ to a group $G$ in ${\mathbb G}(n)$ defined on $\overline{d}\,\overline{b}{}^0\overline{b}{}^1X_M$. We extend $\beta((\overline{b}{}^0\overline{b}{}^1X_M)^2)$ to $\beta((\overline{d}\,\overline{b}{}^0\overline{b}{}^1X_M)^2)$ by the following: \begin{eqnarray*} \beta(d_i,m)&&\mbox{for }\:d_i\in\overline{d}\;\mbox{ and }\; m\in X_M\;\mbox{ is given by }\;\beta(a_i^0,m)=\beta(a_i^1,m),\\ \beta(d_i,b_j^0)&&\mbox{is given by }\;\beta(a_i^0,b_j^0), \mbox{ and}\\ \beta(d_i,b_j^1)&&\mbox{is given by }\; \beta(a_i^1,b_j^1). \end{eqnarray*} Now we find an image $\overline{e}$ of $\overline{d}$ in ${\mathbb C}(n)$ over $\langle\overline{b}{}^0,\overline{b}{}^1,M\rangle$ by axioms $\Sigma\,3)$ that defines an embedding. By elimination of quantifiers and the construction $\overline{e}$ has the desired properties.
\begin {theorem}\label{t5.1} $\mathop{\mathpalette\Ind{}}^0$ is non-forking for $D(n)$. $D(n)$ is supersimple of SU-rank $1$. It is not stable. \end {theorem}
{\em Proof\/}. As shown above $\mathop{\mathpalette\Ind{}}^0$ is non-forking and $D(n)$ is simple. Any type ${\rm tp}(\overline{a}/A)$ does not fork over a finite subset of $A$. By the description of non-forking we have SU-rank 1. In Chapter~4 it is shown that $D(n)$ has the independence property.
\end{document}
\begin{document}
\title{Towards Turing-Complete Quantum Computing Coming From Classical Assembler }
\author{\IEEEauthorblockN{Thomas Gabor, Marian Lingsch Rosenfeld, Claudia Linnhoff-Popien} \IEEEauthorblockA{\textit{LMU Munich} \\ thomas.gabor@ifi.lmu.de, M.Rosenfeld@campus.lmu.de} }
\maketitle
\begin{abstract} Instead of producing quantum languages that are fit for current quantum computers, we build a language from standard classical assembler and augment it with quantum capabilities so that quantum algorithms become a subset of it. This paves the way for the development of hybrid algorithms directly from classical software, which is not feasible on today's hardware but might inspire future quantum programmers. \end{abstract}
\begin{IEEEkeywords} quantum computing, quantum language, assembler \end{IEEEkeywords}
\section{Introduction}
The capabilities of current quantum computers remain very limited, most importantly with respect to the depth of the executed algorithm. As astonishing as it is that we can still run notably important algorithms (maybe even better than classical ones~\cite{bravyi2018quantum,arute2019quantum}) on this kind of hardware, the algorithms that can be fully implemented on a quantum computer lack several characteristics that programmers are used to for classical algorithms, most notably: recursion. The absence of arbitrary-depth recursion (or simply jumps) prohibits pure quantum algorithms from emulating every algorithm that can be run on a Turing machine.\footnote{We leave out the discussion that finite-memory systems can never execute all algorithms a Turing machine can execute.} For today's quantum computers, this is a more than sensible assumption as decoherence times only allow for a couple of gates to be executed in sequence before all useful information is lost. But what if we had the best quantum computer we could wish for?
In contrast to most other approaches (most notably quantum assembly languages~\cite{cross2021openqasm}) we attempt to design an assembly language not for quantum computers with today's capabilities but for an imaginary quantum computer that is not limited by today's challenges in hardware design and maybe not even limited by all the rules of quantum mechanics at all times. However, we argue that such a language (even though conceivably it will not run on any real quantum computer for a long time) can still be useful for two main reasons:
\begin{itemize}
\item First, for now and at least for the immediate future a large amount of quantum algorithms will be run on classical simulators. As these classical simulators run on classical machines, it is reasonable to write algorithms that can access these classical machines' capabilities directly and thus forego any quantum simulation when possible. This may produce algorithms that -- while not compatible with (current) quantum computers -- may at least not be as superfluously slow when run on a quantum simulator for testing purposes or simply due to a lack of better quantum options.
\item Second, we need tools to discover new quantum algorithms that are more accessible to programmers well versed in standard programming languages. As of now, most setups of quantum software are built around a few algorithm archetypes which are known or suspected to provide some form of quantum advantage. As these few archetypes already hold up all the fascination regarding quantum computers, the discovery of new archetypes should be a big step towards more usage scenarios for quantum computers. Enabling programmers to write an algorithm classically first and then translate it part by part to quantum-suitable reformulations (while maintaining a runnable program) could enable new creativity in algorithm design. \end{itemize}
\section{Approach}
In this work-in-progress paper we provide a very simple assembly language that we augmented with a few quantum-specific instructions and capabilities. Table~\ref{tab:instructions} shows all instructions at our disposal. We provide basic mathematical and logical operations as well as the ``set'' and ``swap'' instructions. We then provide a set of instructions to manipulate program flow including the very simple ``setpc'' and ``jump'' instructions and the more elaborate ``ifte'' instruction, which provides the functionality of a standard if-then-else expression in most programming languages. However, all of our registers can be quantum registers, which means that they can contain a superposition instead of a single scalar value. This also means that our program counter can be in superposition, which causes the entire program state to be in superposition and possibly execute different instructions in each of its superposed branches.
To actually introduce said superpositions into the registers, Table~\ref{tab:instructions} also shows a few quantum-specific instructions. Most notably, we can apply a Hadamard gate to a single qubit or a range of qubits. We also provide a shortcut for the diffusion operator used within Grover's algorithm as well as for phase multiplication.
Obviously, this set of instructions is far from complete even for classical algorithms, but it allows us to formulate a few simple examples of how to use our language.\footnote{See \url{https://github.com/marian-lingsch/quantum-assembler}.}
\begin{table}[t] \centering
\begin{tabular}{p{9em} p{19em}}
\hline
Classic Instructions & \\ \hline
add d$i$ d$j$ d$k$ & add the value of cell $i$ with the value of cell $j$ and write the result to cell $k$ \\
mul d$i$ d$j$ d$k$ & multiply the value of cell $i$ with the value of cell $j$ and write the result to cell $k$ \\
div d$i$ d$j$ d$k$ & divide the value of cell $i$ with the value of cell $j$ and write the result to cell $k$ \\
sub d$i$ d$j$ d$k$ & subtract the value of cell $j$ from the value of cell $i$ and write the result to cell $k$ \\
sqrt d$i$ d$j$ & write the square root of the value of cell $i$ into cell $j$ \\
mod d$i$ d$j$ d$k$ & writes into cell $k$ the modulo of the value of cell $i$ with respect to the value of cell $j$ \\
neg d$i$ & negate the boolean value of the cell $i$ \\
and d$i$ d$j$ d$k$ & write into cell $k$ the boolean value resulting from v$i$ $\land$ v$j$, where v$l$ is the value of cell d$l$ \\
or d$i$ d$j$ d$k$ & write into cell $k$ the boolean value resulting from v$i$ $\lor$ v$j$, where v$l$ is the value of cell d$l$ \\
set d$i$ $j$ & set the value of cell $i$ to $j$ \\
swap d$i$ d$j$ & swap the values of cells $i$ and $j$ \\
setpc d$i$ & set the program counter to the value of cell $i$ \\
jump $i$ & set the program counter to the value of $i$ \\
skip & do nothing \\
stop & remain in this instruction indefinitely \\
ifte d$i$ $j$ $k$ & if cell $i$ is true, set the pc to the value of $j$ else set the pc to the value of $k$ \\ \hline
Quantum Instructions & \\ \hline
havoc d$i$ $j$ $k$ & apply Hadamard gates to the qubits in range from $j$ to $k$ of cell $i$ \\
havocb $i$ & apply a Hadamard gate to the data qubit $i$ \\
diffusion & apply Grover's diffusion operator \\
phase $x$ $y$ & multiply the phase of the state by $x + \mathrm{i}\mkern1mu \cdot y$ \\
\end{tabular}
\caption{Instruction set for assembly with quantum capabilities}
\label{tab:instructions}
\end{table}
\section{Examples}
Listing~1 shows a simple example of what we can easily produce when we apply superposition to the program counter. Lines 12--15 all multiply the value in d4 by 2 (see line~6). However, we do not always execute all of them. Instead, we initialize the program counter to the start of these lines (see line~5) and then add the equally distributed superposition of the values $\langle 0,1,2,3 \rangle$ to that value. When all superposed branches of the program have been executed, we end up with an equal superposition of values $\langle 1, 2, 4, 8 \rangle$ saved in register d4. We were able to define such a superposed value using patterns familiar from imperative programming and the approach can be generalized to more complicated superposed program flows.
Listing~2 shows how we could evolve quantum algorithms from classical ones. We use a staple example of quantum computing, i.e., finding the prime factors of a given number; but we start out with the most common classical approach to that problem, i.e., the sieve of Eratosthenes~\cite{hoare1972proof}. If we assume that in the classical world we would simply derive the prime factors of a number by trying out all possible divisors, it appears evident that such a search for a divisor might be accelerated by employing Grover's algorithm~\cite{mandviwalla2018implementing}. Naturally, this is a bit counterintuitive to anyone familiar with Shor's algorithm~\cite{lanyon2007experimental}, which we will discuss in a bit. However, the approach still works: In Listing~2, we assume that the number we want to split into factors is given as ``NUMBER1'' (see line~4) and just to keep things simple we assume that said number's bit count is given as ``NUMBER2''. Obviously, a slightly more powerful assembly language could just derive that information with a single instruction. Line~8 then creates a superposition of all numbers of the same length as our input ``NUMBER1'' and with line~9 we ensure that this superposition no longer contains the values $0$ and $1$ so that we can try all remaining values as possible divisors. In line~10 we again utilize the superposed value in d1 to compute the remainder for all divisors. We then set up the parameters for Grover's search (lines 11--16) and subsequently perform the calculated amount of iterations within Grover's search, looking for a single $0$ remainder within the superposed value of d2. Note that we implemented the instructions ``phase'' and ``diffusion'' within the instruction set specifically to enable a straightforward definition of algorithms like Listing~2 based on Grover's search. 
Specialized instructions like these not only increase readability but might also allow future versions of the simulator to implement classical shortcuts for certain behavior.
\lstdefinelanguage{tcqc} {morekeywords={set,havoc,add,mod,sqrt,div,mul,ifte,phase,diffusion,skip,stop,setpc,sub,jump}, sensitive=false, morecomment=[l]{;}}
\lstset{language=tcqc} \lstset{frameround=tttt}
\begin{lstlisting}[float,caption={Nondeterministic multiplication},basicstyle=\small,captionpos=b,frame=single,numbers=left, numberstyle=\tiny, stepnumber=2, numbersep=5pt,belowcaptionskip=2pt] ; nondeterministic multiplication ; produces multiple powers of 2 start:
havoc d0 0 2
set d1 subMul
set d2 2
set d4 1
add d0 d1 d3
setpc d3
subMul:
mul d4 d2 d4
mul d4 d2 d4
mul d4 d2 d4
mul d4 d2 d4
stop \end{lstlisting}
\begin{lstlisting}[float,caption={Factoring via Grover},basicstyle=\small,captionpos=b,frame=single,numbers=left, numberstyle=\tiny, stepnumber=2, numbersep=5pt,belowcaptionskip=2pt] start:
; set number for which is to
; be factorized
set d0 NUMBER1
; havoc is used in range format
; each int is represented by 8 bits
; NUMBER2 equals log2(sqrt(NUMBER1))
havoc d1 0 NUMBER2
add d1 2 d1
mod d0 d1 d2
; d3 is the amount of iterations
; of Grover's algorithm
add d0 0 d3
sqrt d3 d3
div d3 4 d3
mul d3 3 d3
grover:
ifte d2 jumpTrue jumpFalse
jumpFalse:
phase -1.0 0.0
jump subDiffusion
jumpTrue:
skip
jump subDiffusion
subDiffusion:
diffusion
sub d3 1 d3
ifte d3 subStop grover
subStop:
stop \end{lstlisting}
We compared the quasi-quantum program in Listing~2 to its purely classical counterpart, for which we do not provide an additional listing to save space (and because it looks entirely as expected). It is important to note that we also wrote the classical algorithm for the sieve of Eratosthenes using the same quantum-enabled assembly language and ran it using the same quantum simulator \cite{gabor2022simple}. Figure~\ref{fig:benchmark} shows the respective run times of both algorithms for the whole range of inputs. We can see that the classical algorithm takes the longest on actual prime numbers where it needs to try out every possible divisor while the quantum algorithm needs to produce the superposition of all possible divisors for every single input value regardless. However, it is interesting to note that within our simulator, both approaches show a remarkably similar performance. This justifies our approach to offer a ``soft transition'' from a purely classical execution of a purely classical algorithm to a classical simulation of an algorithm with quantum parts without necessarily blowing up the run time disproportionately.
\begin{figure}
\caption{Comparison between classical and quantum factoring algorithms written in our assembly language.}
\label{fig:benchmark}
\end{figure}
\section{Conclusion}
The assembly language shown here is very limited but is intended to provide an idea of how to re-think hybrid programming coming from full-fledged classical languages. We have analyzed what quantum program flow might look like at the assembly level and how it may allow us to construct complex quantum states in an intuitive and imperative manner (Listing~1). We have also presented how very versatile quantum approaches like Grover's search can be integrated into well-known classical algorithms in a straightforward way (Listing~2), even though we sacrifice the ability to run the resulting algorithms on today's quantum computers immediately. In that case, we can also see where the approach still falls short: While implementing Grover's search here may provide some benefits, quantum programmers know that Shor's algorithm provides an even better way to solve the factorization problem on a quantum computer. However, that algorithm is not as easily derived from its classical counterpart -- or at least our language does not yet allow to do so. Future versions of the language should provide more versatile and more powerful instruction sets in order to provide a ``soft transition'' into quantum software for more quantum algorithms.
In principle, there is no reason to keep this ``transition language'' at an assembly level; in fact, for usability it might be especially important to translate our concepts of quantum program flow to more high-level language models (and perhaps offer automatic translation back to assembly level to maintain compatibility). An ideal case might be to allow a programmer to use a modern programming language and insert quantum-compatible expressions one by one until an interpreter or compiler notices that a certain module is now fully quantum-compatible and then offers the option to also run it on quantum hardware (compare Tyagi et al.~\cite{tyagi2016toward} for the case of reversible computing). However, high-level abstractions for quantum computing that are both intuitive and powerful are still very much sought after.
\end{document}
\begin{document}
\begin{frontmatter} \title{Numerical Solution of the Robin Problem of Laplace Equations with a Feynman-Kac Formula and Reflecting Brownian Motions} \author[UNCC]{Yijing Zhou}, \author[UNCC]{Wei Cai} \address[UNCC]{Department of Mathematics and Statistics, University of North Carolina at Charlotte, Charlotte, NC 28223-0001}
\begin{abstract} In this paper, we present numerical methods to implement the probabilistic representation of third kind (Robin) boundary problem for the Laplace equations. The solution is based on a Feynman-Kac formula for the Robin problem which employs the standard reflecting Brownian motion (SRBM) and its boundary local time arising from the Skorohod problem. By simulating SRBM paths through Brownian motion using Walk on Spheres (WOS) method, approximation of the boundary local time is obtained and the Feynman-Kac formula is calculated by evaluating the average of all path integrals over the boundary under a measure defined through the local time. Numerical results demonstrate the accuracy and efficiency of the proposed method for finding a local solution of the Laplace equations with Robin boundary conditions. \end{abstract} \begin{keyword} Skorohod problem, boundary local time, Feynman-Kac formula, Reflecting Brownian Motion, Brownian motion, Laplace equation, WOS, Robin boundary problem \end{keyword} \end{frontmatter} \numberwithin{equation}{section}
\section{Introduction}
Partial differential equations (PDEs) have been widely used to describe a variety of phenomena such as electrostatics, electrodynamics, fluid flow or quantum mechanics. Traditionally, finite difference, finite element and boundary element methods are the mainstream numerical approaches to solve the PDEs. Recently, using the Feynman-Kac formula \cite{[5]}\cite{[6]}\cite{[7]} which connects solutions of differential equations of diffusion and heat flow and random processes of Brownian motions, numerical methods based on random walks or Monte Carlo diffusions have been explored for solving parabolic and elliptic PDEs \cite{[11]}\cite{[24]}.
The Feynman-Kac formula represents the solutions of parabolic and elliptic PDEs as the expectation functionals of stochastic processes (specifically Brownian motions), and conversely, the probabilistic properties of diffusion processes can be obtained through investigating related PDEs characterized by corresponding generators \cite{[21]}. The formula involves the path integrals of the diffusion process starting from an arbitrarily prescribed location, and this enables us to find a local numerical solution without constructing space and time meshes as in traditional deterministic numerical methods mentioned above, which incur expensive costs in high dimensions. In many applications it is also of practical importance and necessity to seek a local solution of PDEs at some interested points. If the sample paths of a diffusion process are simulated, then by computing the average of path integrals we can obtain approximations to the exact solutions of the PDEs. For second order elliptic PDEs with Dirichlet and Neumann boundaries, the average of path integrals is reduced to the average of boundary integrals under certain measure where the detailed trajectories of the diffusion process have no effect on the averages except the hitting locations on the boundaries.
Simulations of diffusion paths can be done by random walk methods \cite{[3]}\cite{[8]} \cite{[11]} \cite{[13]} either on lattice or in continuum space. In some cases such as for the Poisson equation, the Feynman-Kac formula has a pathwise integral requiring the detailed trajectory of each path. Moreover, one may need to adopt random walks on a discrete lattice in order to incorporate inhomogeneous source terms. As for the continuum space approach, the Walk on Spheres (WOS) method is preferred where the path of diffusion process within the domain does not appear in the Feynman-Kac formula. For both approaches, the geometry of the boundaries needs special care for accurate results \cite{[14]}. In our previous work on Laplace equation with Neumann boundary conditions \cite{[4]}, we proposed a numerical method to simulate the standard reflecting Brownian motion (SRBM) path using WOS and obtained the boundary local time of the SRBM. As a result, a local numerical solution of the PDE is achieved by using the Feynman-Kac formula. Other works \cite{[9]}\cite{[10]}\cite{[13]}\cite{[14]} have also explored similar problems. In particular, in \cite{[13]} schemes based on the WOS, Euler schemes and kinetic approximations are proposed to treat inhomogeneous Neumann problems. It turns out that the pointwise resolution is much harder due to the choice of the truncation of time. However, the local time was not handled explicitly in \cite{[13]}. On the other hand, Monte Carlo simulations were discussed in \cite{[14]} where the positive part of the boundary needs to be identified first. In this paper, following \cite{[4]} we continue the use of SRBM to solve Robin boundary problems for the Laplace operator, which has many applications in heat transfer and impedance tomography. Our goal again is to obtain a local approximation to the exact solution of the Robin problem.
The rest of the paper is organized as follows. Firstly, the Skorohod problem is introduced in section 2, where both the concepts of standard reflecting Brownian motion and boundary local time will be reviewed briefly. This lays the foundation for the underlying diffusion process of the Robin boundary problem and the sampling of the diffusion paths. Secondly, an overview of the Feynman-Kac formula is given in section 3. Thirdly, the probabilistic representation of the solution for the Robin boundary value problem proposed in \cite{[2]}\cite{[12]} is discussed in section 4, and we will see the relation between the Neumann and Robin problems and gain a new perspective. Section 5 presents the numerical approaches and test results. Finally, conclusions and future work are given in section 6.
\section{Skorohod problem, SRBM and boundary local time}
Assume that $D$ is a domain with a $C^{1}$ boundary in $R^{3}$. The generalized Skorohod problem is stated as follows: \newline
\begin{defn} Let $f\in C([0,\infty),R^{3})$, a continuous function from $[0,\infty]$ to $R^{3}$. A pair $(\xi_{t},L_{t})$ is a solution to the Skorohod equation $S(f;D)$ if
\begin{enumerate} \item $\xi$ is continuous in $\bar{D}$;
\item $L(t)$ is a nondecreasing function which increases only when $\xi \in\partial D$, namely, \begin{equation} L(t)=\int_{0}^{t}I_{\partial D}(\xi(s))L(ds); \label{eq1} \end{equation}
\item The Skorohod equation holds: \begin{equation} S(f;D):\qquad\ \xi(t)=f(t)-\frac{1}{2}\int_{0}^{t}n(\xi(s))L(ds), \label{eq3} \end{equation} where $n(x)$ denotes the outward unit normal vector at $x\in\partial D$. \end{enumerate} \end{defn}
The Skorohod problem was first studied in \cite{[1]} by A.V. Skorohod in addressing the construction of paths for diffusion processes with boundaries, which results from the instantaneous reflection behavior of the processes at the boundaries. Skorohod presented the result in one dimension in the form of an Ito integral and Hsu \cite{[12]} later extended the concept to $d$-dimensions ($d\geq2$).
In the simple case that $D=[0,\infty)$, the solution to the Skorohod problem uniquely exists and can be explicitly given by \begin{equation} \centering\xi(t)=\left\{ \begin{aligned} &f(t), &&\text{if } t\leq\tau;\\ &f(t)-\inf_{\tau\leq s\leq t}f(s), &&\text{if } t>\tau;\\ \end{aligned}\right. \end{equation} where $\tau=\inf\left\{ t>0:f(t)<0\right\} $. In general, solvability of the Skorohod problem is closely related to the smoothness of the domain $D$. For higher dimensions, the existence of ($\ref{eq3}$) is guaranteed for $C^{1}$ domains while uniqueness can be achieved for a $C^{2}$ domain by assuming the convexity for the domain \cite{[15]}. Later, it was shown by Lions and Sznitman \cite{[16]} that the constraints on $D$ can be relaxed to some locally convex properties.
Next we introduce the concept of SRBM and boundary local time which play important roles in solving Robin boundary problem by probabilistic approaches.
Suppose that $f(t)$ is a standard Brownian motion (SBM) starting at $x\in \bar{D}$ and $(X_{t},L_{t})$ is the solution to the Skorohod problem $S(f;D)$, then $X_{t}$ will be the standard reflecting Brownian motion (SRBM) on $D$ starting at $x$. Because the transition probability density of the SRBM satisfies the same parabolic differential equation as that of a BM, a sample path of the SRBM can be simulated simply as that of the BM within the domain. However, the zero Neumann boundary condition for the density of SRBM implies that the path be pushed back at the boundary along the inward normal direction whenever it attempts to cross the latter. The full construction of an SRBM from an SBM can be found in our previous work \cite{[4]}.
The boundary local time $L_{t}$ is not an independent process but associated with SRBM $X_{t}$ and defined by \begin{equation} L(t)\equiv\lim_{\epsilon\rightarrow0}\frac{\int_{0}^{t}I_{D_{\epsilon}} (X_{s})ds}{\epsilon}, \label{eq5} \end{equation} where $D_{\epsilon}$ is a strip region of width $\epsilon$ containing $\partial D$ and $D_{\epsilon}\subset\overline{D}$. Here $L_{t}$ is called the local time of $X_{t}$, a notion invented by P. L\'{e}vy \cite{[22]}. This limit exists both in $L^{2}$ and $P^{x}$-$a.s$. for any $x\in\overline{D}$.
It is obvious that $L_{t}$ measures the amount of time that the standard reflecting Brownian motion $X_{t}$ spends in a vanishing neighborhood of the boundary within the time period $[0,t]$. Besides, it is the unique continuous nondecreasing process that appears in the Skorohod equation. An interesting part of ($\ref{eq5}$) is that the set $\left\{ t\in R_{+}:X_{t}\in\partial D\right\} $ has a zero Lebesgue measure while the sojourn time of the set is nontrivial \cite{[22]}. This concept is not just a mathematical one but also has physical relevance in understanding the \textquotedblleft crossover exponent" associated with \textquotedblleft renewal rate" in modern renewal theory \cite{[17]}.
In \cite{[12]}, an alternative explicit form of the local time was found, \begin{equation} L(t)=\sqrt{\frac{\pi}{2}}\int_{0}^{t}I_{\partial D}(X_{s})\sqrt{ds}, \label{eq7} \end{equation} where the right-hand side of (\ref{eq7}) is understood as the limit of \begin{equation} \sum_{i=1}^{n-1}
\smash{\displaystyle\max_{s\in\Delta_i}I_{\partial D}(X_s)\sqrt{|\Delta_i|}},\quad
\smash{\displaystyle\max_i}|\Delta_{i}|\rightarrow0, \label{eq9} \end{equation} where $\Delta=\{\Delta_{i}\}$ is a partition of the interval $[0,t]$ and each $\Delta_{i}$ is an element in $\Delta$. ($\ref{eq5}$) and ($\ref{eq7}$) provide us different ways to approximate local time and in \cite{[4]}, it was found that ($\ref{eq5}$) yields better approximations in Neumann problem than ($\ref{eq7}$). Therefore, in this paper, we will also choose ($\ref{eq5}$) as the approach to estimate the local time here.
\section{A Feynman-Kac formula}
The Feynman-Kac formula, named after Richard Feynman and Mark Kac, establishes a link between PDEs and stochastic processes. It first arose in the potential theory for Schr\"{o}dinger equations, leading to a profound reformulation of quantum mechanics by means of path integrals. Later, the formula also finds its applications in mathematical finance, where the probabilistic and the PDE representations in derivative pricing are connected.
Let us first look at the Dirichlet problems. Given a domain $D\subset R^{d}$ with a boundary $\partial D$, \begin{equation} \left\{ \begin{aligned} Lu(x)-c(x)u(x)&=f(x), \ x\in D\\ u(x)&=\phi (x), \ x\in\partial D\\ \end{aligned}\right. , \label{eq11} \end{equation} where the operator $L=-\frac{1}{2}\sum_{i,j=1}^{d}a_{ij}(x)\frac{\partial^{2} }{\partial x^{i}\partial x^{j}}-\sum_{i=1}^{d}b_{i}(x)\frac{\partial}{\partial x^{i}}$ and both the coefficients in $L$ and $c(x)$ are Lipschitz continuous and bounded.
The Feynman-Kac formula in this case \cite{[18]} represents the solution to ($\ref{eq11}$) in terms of an Ito diffusion process $X_{t}(\omega),$ \begin{equation} u(x)=E^{x}[\int_{0}^{\tau_{D}}f(X_{t})exp\left\{ \int_{0}^{t}c(X_{s} )ds\right\} dt]+E^{x}[\phi(X_{\tau_{D}})exp\left\{ \int_{0}^{\tau_{D} }c(X_{s})ds\right\} ], \label{eq13} \end{equation} with $\tau_{D}=\inf\{t:X_{t}\in\partial D\}$ and $X_{t}(\omega)$ is defined by \begin{equation} dX_{t}=b(X_{t})dt+\alpha(X_{t})dB_{t}, \label{eq15} \end{equation} where $B_{t}$ is the Brownian motion and $[a_{ij}]=\frac{1}{2}\alpha (x)\alpha^{T}(x),[b_{ij}]=b$.
The expectation $E^{x}$ is an integration with respect to a measure $P_{x}$ taken over all sample paths $X_{t=0}(\omega)=x$, thus ($\ref{eq13}$) is a representation of a solution of Dirichlet problem in the form of functional integral. Moreover, ($\ref{eq13}$) is obtained by killing process $X_{t}$ at a stopping time $\tau_{D}$ at which $X_{t}$ will be absorbed on the boundary. If $c(x)\geq0$, then the function $c(x)$ can be interpreted as the killing rate \cite{[21]}. It should be pointed out that (\ref{eq13}) is equivalent to the formulation of weak solution and it is a classical solution as well if some smoothness conditions are satisfied.
The Feynman-Kac formula above offers a method for solving certain PDEs by simulating random paths of a stochastic process. Conversely, an important class of expectations of random processes can be computed by deterministic methods. For the Neumann boundary condition, a similar formula was derived by Hsu \cite{[12]} for the Poisson equation, which is in the form of a functional integral based on the boundary local time introduced in section 2. In this case, though the Feynman-Kac formula remains in a similar form, it should be understood as a path integral over the stochastic process $L_{t}$ associated with the standard reflecting Brownian motion.
\section{Robin boundary value problem}
We focus on Robin boundary value problem for the time-independent Schr\"{o}dinger equation. \begin{equation} \centering\left\{ \begin{aligned} \frac{1}{2}\Delta u+qu&=0,\quad in\ D;\\ \frac{\partial u}{\partial n}-cu&=f,\quad on\ \partial D.\\ \end{aligned}\right. \label{eq17} \end{equation}
A generalization of the Feynman-Kac formula of section 3 in \cite{[2]} gives a probabilistic solution of ($\ref{eq17}$) as follows, \begin{equation} u(x)=E^{x}\left\{ \int_{0}^{\infty}e_{q}(t)\hat{e}_{c}(t)f(X_{t} )dL_{t}\right\} , \label{eq19} \end{equation} where $X_{t}$ is a SRBM starting at $x$. The Feynman-Kac functional $e_{q}(t)$, which also appeared in the Neumann problem \cite{[12]}, is defined as \begin{equation} e_{q}(t)=\exp\left[ \int_{0}^{t}q(X_{s})\,ds\right] , \label{eq21} \end{equation} and a second functional is introduced for the Robin boundary problem, for $c\in\Sigma_{d}(\partial D)$ \begin{equation} \hat{e}_{c}(t)=\exp\left[ \int_{0}^{t}c(X_{s})dL_{s}\right] . \label{eq23} \end{equation}
Using these two functionals, we have \begin{equation} u(x)=E^{x}\left\{ \int_{0}^{\infty}\exp\left[ \int_{0}^{t}\left( q(X_{s})ds+c(X_{s})dL_{s}\right) \right] f(X_{t})dL_{t}\right\} . \label{eq25} \end{equation}
Recalling the definition of the local time in ($\ref{eq5}$), we have the following approximation \begin{equation} L(t)\approx\frac{1}{\epsilon}\int_{0}^{t}I_{D_{\epsilon}}(X_{s})ds, \label{eq27} \end{equation} thus, \begin{equation} dL(s)\approx\frac{1}{\epsilon}I_{D_{\epsilon}}(X_{s})ds. \label{eq29} \end{equation}
Therefore, ($\ref{eq25}$) can be modified as \begin{equation} u(x)\approx E^{x}\left\{ \int_{0}^{\infty}\exp\left[ \int_{0}^{t}\left( q(X_{s})+\frac{1}{\epsilon}c(X_{s})I_{D_{\epsilon}}(X_{s})\right) ds\right] f(X_{t})dL_{t}\right\} . \label{eq31} \end{equation} It can also be shown that as $\epsilon$ goes to zero, ($\ref{eq31}$) converges to ($\ref{eq25}$) uniformly on $\bar{D}$.
As (\ref{eq31}) resembles the Feynman-Kac formula for the Neumann problem with a modified $q(x)$ \cite{[4]}, it indicates a connection between the Robin and the Neumann problems, namely, we may introduce \begin{equation} q_{\epsilon}(x)=q(x)+\frac{1}{\epsilon}c(x)I_{D_{\epsilon}}(x), \label{eq33} \end{equation} then, the Robin boundary problem ($\ref{eq17}$) can be viewed as a limiting case ($\epsilon\rightarrow0$) of Neumann problems \begin{equation} \centering\left\{ \begin{aligned} \frac{1}{2}\Delta u+q_\epsilon u&=0,\quad in\ D;\\ \frac{\partial u}{\partial n}&=f,\quad on\ \partial D.\\ \end{aligned}\right. \label{eq35} \end{equation}
\section{Numerical approach and results}
In the present work, we only consider the case of the Laplace equation where $q=0$ in ($\ref{eq35}$). From ($\ref{eq25}$), \begin{equation} u(x)=E^{x}\left\{ \int_{0}^{\infty}e^{\int_{0}^{t}c(X_{t})dL_{t}} f(X_{t})dL_{t}\right\} , \label{eq37} \end{equation} where $X_{t}$ represents the standard reflecting Brownian motion. For the sake of computer simulation, the time period is truncated into $[0,T]$ to produce an approximation for $u(x)$, i.e., \begin{equation} \tilde{u}(x)=E^{x}\left\{ \int_{0}^{T}e^{\int_{0}^{t}c(X_{t})dL_{t}} f(X_{t})dL_{t}\right\} . \label{eq39} \end{equation}
Next we will give a general description of the realization of SRBM paths and the calculation of the corresponding local time, as implemented in \cite{[4]}. A SRBM path can be constructed by pulling a BM path back onto the boundary whenever it runs out of the domain. Specifically, a SRBM path behaves exactly the same way as a BM which is simulated by the WOS method.
\subsection{Simulating SRBM by the method of Walk on Spheres (WOS)}
\begin{itemize} \item Method of WOS for Brownian paths \end{itemize}
The random walk on spheres (WOS) method was first proposed by M\"{u}ller \cite{[7]}; it can solve the Dirichlet problem for the Laplace operator efficiently \cite{[8],[10]}.
To illustrate the WOS method for the Dirichlet problem (\ref{eq11}), let us consider the Laplace equation again where $f=0,a_{ij}=\delta_{ij}$ and $b_{i}=0$ in (\ref{eq11}) and the It\^{o} diffusion is then simply the standard Brownian motion with no drift. The solution to the Laplace equation can be rewritten in terms of a measure $\mu_{D}^{x}$ defined on the boundary $\partial D$, \begin{equation} u(x)=E^{x}(\phi(X_{\tau_{D}}))=\int_{\partial D}\phi(y)d\mu_{D}^{x}, \end{equation} where $\mu_{D}^{x}$ is the harmonic measure\ defined by \begin{equation} \mu_{D}^{x}(F)=P^{x}\left\{ X_{\tau_{D}}\in F\right\} ,F\subset\partial D,x\in D. \end{equation} It can be shown easily that the harmonic measure is related to the Green's function $g(y,x)$ for the domain with a homogeneous boundary condition \cite{CKL}, i.e., \begin{equation} \left\{ \begin{aligned} -\Delta g(x,y) &= \delta(x-y), \ &x\in D,\\ g(x,y) &= 0, \ &x\in\partial D\\ \end{aligned}\right. , \end{equation} as follows \begin{equation} p(\mathbf{x},\mathbf{y})=-\frac{\partial g(x,y)}{\partial n_{y}}. \end{equation}
If the starting point $x$ of a Brownian motion is at the center of a ball, the probability of the BM exiting a portion of the boundary of the ball will be proportional to the portion's area. Therefore, sampling a Brownian path by drawing balls within the domain can significantly reduce the path sampling time. To be specific, given a starting point $x$ inside the domain $D$, we simply draw a ball of largest possible radius fully contained in $D$ and then the next location of the Brownian path on the surface of the ball can be sampled, using a uniform distribution on the sphere, say at $x_{1}$. Treat $x_{1}$ as the new starting point, draw a second ball fully contained in $D$, make a jump from $x_{1}$ to $x_{2}$ on the surface of the second ball as before. Repeat this procedure until the path hits an absorption $\epsilon $-shell of the domain (see Fig. 2) \cite{[5]}. When this happens, we assume that the path has hit the boundary $\partial D$ (see Fig. 1(a) for an illustration).
\begin{figure}
\caption{Walk on Spheres method}
\label{fig:subfig:a}
\label{fig:subfig:b}
\label{fig:subfig}
\end{figure}
Now we can define an estimator of (\ref{eq13}) with $c=0$ by \begin{equation} u(x)\approx\frac{1}{N}\sum_{i=1}^{N}u(x_{i}), \end{equation} where $N$ is the number of Brownian paths sampled and $x_{i}$ is the first hitting point of each path on the boundary. To speed up the WOS process, choosing the maximum possible sphere size at each step allows a faster first hitting on the boundary.
\begin{itemize} \item
WOS and RBM \end{itemize}
For the reflecting boundary, we will construct a strip region around the boundary (see Fig. 2) and allow the process $X_{t}$ to move according to the law of BM continuously. Before the path enters the strip region, the radius of WOS is chosen to be of a maximum possible size less than the distance to the boundary. Once the particle is in the strip region, the radius of the WOS sphere is fixed at a constant $\Delta x$ (or $2\Delta x$, see Fig. 3). With this approach, according to the definition ($\ref{eq5}$), the local time may be interpreted as \begin{equation} dL(t)\approx\frac{\int_{t_{j-1}}^{t_{j}}I_{D_{\epsilon}}(X_{s})ds}{\epsilon}, \label{eq41} \end{equation} which is \begin{equation} dL(t)\approx\frac{\int_{t_{j-1}}^{t_{j}}I_{D_{\epsilon}}(X_{s})ds}{\epsilon }=(n_{t_{j}}-n_{t_{j-1}})\frac{(\Delta x)^{2}}{3\epsilon}, \label{eq43} \end{equation} given a prefixed constant $\Delta x$ in the strip region, with $n_{t_{j}}$ being the cumulative number of steps that the path stays within the $\epsilon$-region from the beginning until time $t_{j}$ (see Remark below for definition). Notice that only those steps where the path of $X_{t}$ remains in the $\epsilon$-region will contribute to $n_{t_{j}}$ because the SRBM may lie out of the $\epsilon $-region at other steps. More details can be found in \cite{[4]}, where the same construction is applied for the Neumann boundary value problem. One may refer to Fig. 3 for an illustration of the behavior of the path near the boundary.
\begin{figure}
\caption{An $\epsilon$-region for a bounded domain in $R^{3}$}
\end{figure}
\begin{figure}
\caption{WOS in the $\epsilon$-region. At point $x_{1}$, the BM path first hits the $\epsilon$-region. By WOS with a prefixed radius $\Delta x$, the path continues moving subsequently to $x_{2}$ where the distance to the boundary is less than $\Delta x$. After enlarging the radius to $2\Delta x$, the path then has a probability to run out of the domain to $x_{3}$. Pull back to the closest point $x_{4}$ on the boundary, record $\phi(x_{4})$ and continue WOS-sampling starting at $x_{4}$.}
\end{figure}
\begin{rem} Occupation time of SRBM $X_{t}$ in the numerator of ($\ref{eq41}$) was calculated in terms of that of BM sampled by the walks on spheres. Notice here that within the $\epsilon$-region, the radius of the WOS may be $\Delta x$ or $2\Delta x$, which implies that the corresponding elapsed time of one step for local time could be $(\Delta x)^{2}/3$ or $(2\Delta x)^{2}/3$. The latter is four times bigger than the former. But if we absorb the factor $4$ into $n_{t}$, $(\ref{eq43})$ still holds. In practical implementation, we treat $n_{t}$ as a vector of entries of increasing value, the increment of each component of $n_{t}$ over the previous one after each step of WOS will be 0, 1 or 4, corresponding to the scenarios that $X_{t}$ is out of the $\epsilon $-region, in the $\epsilon$-region while sampled on the sphere of a radius $\Delta x$, or in the $\epsilon$-region while sampled on the sphere of a radius $2\Delta x$, respectively. \end{rem}
Robin boundaries represent a general form of an insulating boundary condition for convection-diffusion equations where $c(x)$ stands for the positive diffusive coefficients. For our numerical test, we will consider two cases: a positive constant $c$ and a positive function $c(x)$.
\subsection{Numerical Tests}
The numerical approximations obtained are compared to the true solutions on a selected circle and a line segment, respectively, for the following three test domains in $R^{3}$:
\begin{enumerate} \item A cube centered at the origin with a length 2;
\item A sphere centered at the origin with a radius 1;
\item An ellipsoid centered at the origin with axial lengths [3, 2, 1]. \end{enumerate}
The location of the circle is given by \begin{equation} \{(x,y,z)^{T}=(r\cos\theta_{1}\sin\theta_{2},r\sin\theta_{1}\sin\theta _{2},r\cos\theta_{2})^{T}\}\label{eq45} \end{equation} with $r=0.6$, $\theta_{1}=0:k\cdot2\pi/30:2\pi$, $\theta_{2}=\pi/4$ with $k=1,...,15$. The line segment is defined by the endpoints $(0.4,0.4,0.6)^{T}$ and $(0.1,0,0)^{T}$. Fifteen uniformly spaced points on the line are selected to monitor the accuracy of the numerical solutions.
Finally, we set the true solution of the Robin boundary problem (\ref{eq17}) to be \begin{equation} u(x)=\sin3x\sin4y\ e^{5z}+5.\label{eq47} \end{equation}
\subsubsection{Constant $c(x)$}
\textbf{Example 1} \quad$c(X_{t})=1$
In this case, ($\ref{eq37}$) is reduced to \begin{equation} u(x)=E^{x}\{\int_{0}^{\infty}e^{\int_{0}^{t}dL_{t}}f(X_{t})dL_{t}\}, \label{eq49} \end{equation} which is equivalent to \begin{equation} u(x)=E^{x}\{\int_{0}^{\infty}e^{L_{t}-L_{0}}f(X_{t})dL_{t}\} \label{eq51} \end{equation} or \begin{equation} u(x)=E^{x}\{\int_{0}^{\infty}e^{L_{t}}f(X_{t})dL_{t}\}, \label{eq53} \end{equation} for a starting point $x$ belonging to the interior of the solution domain.
We will truncate the time interval to $[0,T]$, an approximation to ($\ref{eq53}$) will be \begin{equation} \tilde{u}(x)=E^{x}\{\int_{0}^{T}e^{L_{t}}f(X_{t})dL_{t}\}.\label{eq55} \end{equation} Using the fact that \begin{equation} dL_{t}\approx(n_{t}-n_{t-1})\frac{(\Delta x)^{2}}{3\epsilon},\label{eq57} \end{equation}
we can rewrite ($\ref{eq55}$) as \begin{equation} \tilde{u}(x)=E^{x}\{\int_{0}^{T}e^{n_{t}\frac{(\Delta x)^{2}}{3\epsilon} }f(X_{t})(n_{t}-n_{t-1})\frac{(\Delta x)^{2}}{3\epsilon}\}. \end{equation}
Next identifying the time interval with the length of sample path NP, we have \begin{equation} \tilde{u}(x)=E^{x}\left\{ \sum_{j^{\prime}=0}^{NP}e^{n_{t_{j}}\frac{(\Delta x)^{2}}{3\epsilon}}f(X_{t_{j}})(n_{t_{j}}-n_{t_{j-1}})\frac{(\Delta x)^{2} }{3\epsilon}\right\} ,\label{eq59} \end{equation} where $j^{\prime}$ denotes each step of the path and $j$ denotes the steps where the path hits the boundary.
At each step along a path we first evaluate \[ e^{n_{t_{j}}\frac{(\Delta x)^{2}}{3\epsilon}}f(X_{t_{j}})(n_{t_{j}} -n_{t_{j-1}})\frac{(\Delta x)^{2}}{3\epsilon}, \] if $X_{t_{j}}$ hits the boundary, we then compute $f(X_{t_{j}})(n_{t_{j} }-n_{t_{j-1}})\frac{(\Delta x)^{2}}{3\epsilon}$, followed by multiplying it by $e^{n_{t_{j}}\frac{(\Delta x)^{2}}{3\epsilon}}$, which uses the cumulative time of $L_{t_{j}}$ from $t=0$ to $t_{j}$. Finally, the expectation is done via the average over $N$ sample paths.
The simulation results of a cubic domain are presented in Fig. 4 and 5. The two figures show the convergence of the approximations as the length of path increases from $1.35e4$ to $1.43e4$ and $1.6e4$ to $1.7e4$ over the circle and the line segment, respectively. Some deviations are seen at the tail in Figure 4(a) and among the middle points in Figure 4(b). Meanwhile, for the spherical and ellipsoid domains (Figure 6 and 7), the approximations are better and the errors are relatively smaller especially over the line segments, which are below 3\% in Figure 6(b) and Figure 7(b).
\begin{figure}
\caption{Cubic domain: number of paths $N=2e5$ and $c(X_{t})=1$. (Left - circle; right - line segment) }
\label{fig:subfig:a}
\label{fig:subfig:b}
\label{fig:subfig}
\end{figure}
\begin{figure}
\caption{Cubic domain: number of paths $N=2e5$ and $c(X_{t})=1$. (Left - circle; right - line segment) }
\label{fig:subfig:a}
\label{fig:subfig:b}
\label{fig:subfig}
\end{figure}
\begin{figure}
\caption{Spherical domain: number of paths $N=2e5$ and $c(X_{t})=1$. (Left - circle; right - line segment)}
\label{fig:subfig:a}
\label{fig:subfig:b}
\label{fig:subfig}
\end{figure}
\begin{figure}
\caption{Ellipsoid domain: number of paths $N=2e5$ and $c(X_{t})=1$. (Left - circle; right - line segment)}
\label{fig:subfig:a}
\label{fig:subfig:b}
\label{fig:subfig}
\end{figure}
\subsubsection{Variable c(x)}
\textbf{Example 2}\ $c(X_{t})=|x|$, $x$ is the first component of $X_{t}$ on the boundary. Similar to Example 1, we have
\begin{equation} u(x)=E^{x}\left\{ \int_{0}^{\infty}e^{\int_{0}^{t}c(X_{s})dL_{s}} f(X_{t})dL_{t}\right\} .\label{eq61} \end{equation} It can be seen that $c(X_{s})dL_{s}$ and $f(X_{t})dL_{t}$ have the same form, so we can handle $c(X_{s})dL_{s}$ exactly the same way as $f(X_{t})dL_{t}$. Then, we have
\begin{equation} u(x)=E^{x}\left\{ \sum_{j^{\prime}=0}^{NP}e^{\sum_{k=0}^{j}c(X_{t_{k} })(n_{t_{k}}-n_{t_{k-1}})\frac{h^{2}}{3\epsilon}}f(X_{t_{j}})(n_{t_{j} }-n_{t_{j-1}})\frac{h^{2}}{3\epsilon}\right\} . \label{eq63} \end{equation}
Notice that the term \begin{equation} e^{\sum_{k=0}^{j}c(X_{t_{k}})(n_{t_{k}}-n_{t_{k-1}})\frac{h^{2}}{3\epsilon} }\label{eq65} \end{equation}
cumulates all the information of $c(X_{t})$ with respect to the local time from the beginning to the current time. If $c(X_{t})=|x|$, then
\begin{equation}
u(x)=E^{x}\left\{ \sum_{j^{\prime}=0}^{NP}e^{\sum_{k=0}^{j}|x_{t_{k}}| (n_{t_{k}}-n_{t_{k-1}})\frac{h^{2}}{3\epsilon}}f(X_{t_{j}})(n_{t_{j} }-n_{t_{j-1}})\frac{h^{2}}{3\epsilon}\right\} , \label{eq67} \end{equation} where $j^{\prime}$ denote each step for the path and $j$ denotes the steps where the path hits the boundary.
Numerical results are shown in Figures 8-10 for a cubic, a spherical and an ellipsoid domain, respectively, with some adjustment in $\Delta x$ and $NP$. Here we still have similar results for the cube with errors around 6.5\%. For the sphere, we change $\Delta x$ to $4e-4$; there are deviations around the middle in Figure 9(a), which may explain the overall error of 6.74\%, while the method performs well over the line segment in Figure 9(b) with a smaller error of 3.1\%. For the ellipsoid, the results are similar as in Example 1 and maintain an error below 4\%.
\begin{figure}
\caption{Cubic domain:
number of paths $N=2e5$ and $c(X_{t})=|x|$. (Left - circle; right - line segment) }
\label{fig:subfig:a}
\label{fig:subfig:b}
\label{fig:subfig}
\end{figure}
\begin{figure}
\caption{Spherical domain:
number of paths $N=2e5$ and $c(X_{t})=|x|$. (Left - circle; right - line segment)}
\label{fig:subfig:a}
\label{fig:subfig:b}
\label{fig:subfig}
\end{figure}
\begin{figure}
\caption{Ellipsoid domain:
number of paths $N=2e5$ and $c(X_{t})=|x|$. (Left - circle; right - line segment)}
\label{fig:subfig:a}
\label{fig:subfig:b}
\label{fig:subfig}
\end{figure}
\section{Conclusions and future work}
This paper presents a Monte Carlo simulation method to solve the third (Robin) boundary value problems associated with Laplace equations. The idea of simulating sample paths of SRBM by the WOS within the strip region shows its efficiency and accuracy in estimating local time and evaluating the Feynman-Kac formula. It should be noted that the case $q\neq0$ needs further work due to the unknown exit time out of the sphere at each step. For the Poisson equation, the contribution of the source term might be computed as a conditional integral \cite{[19]}. Moreover, the proper truncation of the time period is unknown, though it is proven that the variance of the approximation increases linearly in $T$ \cite{[13]}.
For future work, more flexible domains with local convexity will be considered as it relates to the calculations of electrical properties such as the conductivity of composite materials, where the particle shapes play an important role \cite{[20]}.
\end{document} |
\begin{document}
\title{Lower Bounds for Special Cases of Syntactic Multilinear ABPs}
\begin{abstract} Algebraic Branching Programs (ABPs) are standard models for computing polynomials. Syntactic multilinear ABPs (smABPs) are restrictions of ABPs where every variable is allowed to occur at most once in every path from the start to the terminal node. Proving lower bounds against syntactic multilinear ABPs remains a challenging open question in Algebraic Complexity Theory. The current best known bound is only quadratic [Alon-Kumar-Volk, ECCC 2017].
In this article we develop a new approach for upper bounding the rank of the partial derivative matrix of syntactic multilinear ABPs: convert the ABP to a syntactic multilinear formula with a super-polynomial blow-up in the size and then exploit the structural limitations of the resulting formula to obtain a rank upper bound.
Using this approach, we prove exponential lower bounds for special cases of smABPs and circuits---namely sum of Oblivious Read-Once ABPs, $r$-pass multilinear ABPs and sparse ROABPs. En route, we also prove a super-polynomial lower bound for a special class of syntactic multilinear arithmetic circuits.
\end{abstract}
\section{Introduction}
\paragraph*{}Algebraic Complexity Theory investigates the inherent complexity of computing polynomials with arithmetic circuit as the computational model. Arithmetic circuits introduced by Valiant~\cite{Val79} are standard models for computing polynomials over an underlying field.
An {\em arithmetic formula} is a subclass of arithmetic circuits corresponding to arithmetic expressions. For circuits and formulas, the parameters of interest are {\em size} and {\em depth}, where size represents the number of nodes in the graph and depth the length of the longest path in the graph. Although arithmetic formulas appear to be computationally weaker than circuits, a proper separation between them is not known.
Nested in-between the computational power of formulas and circuits is yet another well-studied model for computing polynomials referred to as {\em Algebraic Branching Programs} (ABPs for short).
We know,
\begin{center} Arithmetic Formula $\subseteq_{\P}$ ABP $\subseteq_{\P}$ Arithmetic Circuits. \end{center}
where the subscript $\P$ denotes the containment upto polynomial blow-up in size. Most of algebraic complexity theory revolves around understanding whether these containments are strict or not.
Separation of complexity classes of polynomials involves obtaining lower bounds for specific polynomials against classes of arithmetic circuits. For general classes of arithmetic circuits, Baur and Strassen~\cite{BS83} proved that any arithmetic circuit computing an explicit $n$-variate degree $d$ polynomial must have size $\Omega(n\log d)$. In fact, this is the only super-linear lower bound we know for general arithmetic circuits.
While the challenge of proving lower bounds for general classes of circuits still seems to be afar, recent research has focused on circuits with additional structural restrictions such as multilinearity, bounded read etc. We now look at some of the models based on these restrictions in more detail.
An arithmetic circuit~(formula,ABP) is said to be {\em multilinear} if every gate (node) computes a multilinear polynomial. A seminal work of Raz~\cite{Raz09} showed that multilinear formulas computing $\det_n$ or $perm_n$ must have size $n^{\Omega(\log n)}$.
Although we know strong lower bounds for multilinear formulas, the best known lower bound against syntactic multilinear circuits is almost quadratic in the number of variables~\cite{AKV17}.
Note that any multilinear ABP of $n^{O(1)}$ size computing $f$ on $n$ variables can be converted to a multilinear formula of size $n^{O(\log n)}$ computing $f$. In order to prove super-polynomial lower bounds for ABPs, it is enough to obtain a multilinear formula computing $f$ of size $n^{o(\log n)}$ or prove a lower bound of $n^{\omega(\log n)}$ for multilinear formulas, both of which are not known.
Special cases of multilinear ABPs have been studied time and again. In this work, we focus on the class of Read-Once Oblivious Algebraic Branching Programs~(ROABPs for short). ROABPs are ABPs where every edge is labeled by a variable and every variable appears as an edge label in at most one layer. There are explicit polynomials with a $2^{\Omega(n)}$ ROABP size lower bound \cite{Nis91,Jan,KNS16}. Also, ROABPs have been well studied in the context of polynomial identity testing algorithms~(see, e.g., \cite{For14}).
In this article, we prove lower bounds against sum of multilinear ROABPs and other classes of restricted multilinear ABPs and circuits. Definitions of the models considered in this article can be found in Section~\ref{sec:prelim}.
\paragraph*{Our Results} Let $X=\{x_1,\ldots,x_N\}$ be a set of variables and $\mathbb{F}$ be a field. Let $g$ denote the family of $N$-variate polynomials (for $N$ even) defined by Raz and Yehudayoff~\cite{RY08}. (See Definition~\ref{def:raz-poly} for more details.) As our main result, we show that representing $g$ as a sum of sub-exponential ($2^{o(N^{\epsilon})}$) size ROABPs requires $2^{N^{\epsilon}}$ many summands:
\begin{theorem} \label{thm:lb-roabp} Let $f_1,\ldots f_m$ be polynomials computed by oblivious ROABPs such that $g= f_1+\cdots + f_m$. Then, $m = \frac{2^{\Omega(N^{1/5})}}{s^{c\log N}}$, where $c$ is a constant and $s=\max\{s_1,s_2,\ldots,s_m\}$, $s_i$ is the size of the ROABP computing $f_i$. \end{theorem}
Further, we show that Theorem~\ref{thm:lb-roabp} extends to the case of $r$-pass multilinear ABPs (Theorem~\ref{thm:lb-rpass}) for $r=o(\log n)$ and $\alpha$-sparse multilinear ABPs (Theorem~\ref{thm:sparse-factor}) for $1/1000 \leq \alpha \leq 1/2$.
Finally, we develop a refined approach to analyze syntactic multilinear formulas based on the central paths introduced by Raz~\cite{Raz09}. Using this, we prove exponential lower bound against a class of $O(\log N)$ depth syntactic multilinear circuits (exact definition can be found in Section~\ref{sec:signature}, Definition~\ref{def:variable-close}).
\begin{theorem} \label{thm:lb-delta-close} Let $\delta < N^{1/5}/10$ and $c= N^{o(1)}$. Any $O(\log N)$ depth $(c,\delta)$ variable close syntactically multilinear circuit computing the polynomial $g $ requires size $ 2^{\Omega(N^{1/5}/\log N)}$. \end{theorem} \paragraph*{Our approach} Our proofs are a careful adaptation of the rank argument developed by Raz~\cite{Raz09}. This involves upper bounding the dimension of the partial derivative matrix (Definition~\ref{def:partition}) of the given model under a random partition of variables. However, upper bounding the rank of the partial derivative matrix of a syntactic multilinear ABP is a difficult task and there are no known methods for the same. To the best of our knowledge, there is no non-trivial upper bound on the rank of the partial derivative matrix of polynomials computed by ABPs (or special classes of ABPs) under a random partition.
Our crucial observation is, even though conversion of a syntactic multilinear ABP of size $s$ into a syntactic multilinear formula blows the size to $s^{O(\log s)}$, the resulting formula is much simpler in structure than an arbitrary syntactic multilinear formula of size $n^{O(\log s)}$. For each of the special classes of multilinear ABPs (ROABPS, $r$-pass ABPs etc) ) considered in the article, we identify and exploit the structural limitations of the formula obtained from the corresponding ABP to prove upper bound on the rank of the partial derivative matrix under a random partition. Overall our approach to upper bound the rank can be summarized as follows: \begin{enumerate} \item Convert the given multilinear ABP $P$ of size $s$ to a multilinear formula $\Phi$ of size $s^{O(\log s)}$ (Lemmas~\ref{lem:abptoformula},~\ref{lem:rpasstoformula} and ~\ref{lem:sparsetoformula}); \item Identify structural limitations of the resulting formula $\Phi$ and exploit it to prove upper bound on the rank of the partial derivative matrix under a random partition (Lemmas~\ref{lem:kbl},~\ref{lem:rankub}, ~\ref{lem:sparse-ub} and ~\ref{lem:covering-sign}); \item Exhibit a hard polynomial that has full rank under all partitions. (Lemma~\ref{lem:ry}.) \end{enumerate}
\subsubsection*{Related Results}
Anderson et al.~\cite{AFSSV16} obtained an exponential lower bound against oblivious read $k$ branching programs. Kayal et al.~\cite{KNS16} obtained a polynomial that can be written as a sum of three ROABPs each of polynomial size such that any ROABP computing it has exponential size. Arvind and Raja~\cite{AR16} show that if the permanent can be written as a sum of $N^{1-\epsilon}$ many ROABPs, then at least one of the ROABPs must be of exponential size. Further, sum of read-once polynomials, a special class of oblivious ROABPs, was considered by Mahajan and Tawari~\cite{MT15}, and independently by the authors~\cite{CR16}. Recently, Chillara et al.~\cite{CLS18} show that any $o(\log N)$ depth syntactic multilinear circuit cannot compute a polynomial that is computable by width-2 ROABPs.
The existing lower bounds against ROABPs or sm-ABPs,
implicitly restrict the number of different orders in which the variables can be read along any $s$ to $t$ path. In fact, the lower bound given in Arvind and Raja~\cite{AR16} allows only $N^{1-\epsilon}$ different orderings of the variables. To the best of our knowledge, this is the state of the art with respect to the number of variable orders allowed in ABPs. Without any restriction on the orderings, the best known lower bound is only quadratic up to polylogarithmic factors~\cite{AKV17}. In this light, our results in Theorems~\ref{thm:lb-roabp} and~\ref{thm:lb-rpass} can be seen as the first of the kind where the number of different orders allowed is sub-exponential.
Proofs omitted due to space constraints can be found in the Appendix.
\section{Preliminaries} \label{sec:prelim}
In this section we include necessary definitions and notations used. We begin with the formal definition of the models considered in this article.
An {\em arithmetic circuit}~$\mathcal{C}$ over a field $\mathbb{F}$ and variables $X={x_1,\ldots, x_N}$ is a directed acyclic graph with vertices of in-degree 0 or 2 and exactly one vertex of out-degree 0 called the output gate. The vertices of in-degree 0 are called input gates and are labeled by elements from $X \cup \mathbb{F}$. The vertices of in-degree 2 are labeled by either $+$ or $\times$. Every gate in $\cal{C}$ naturally computes a polynomial. The polynomial $f$ computed by $\mathcal{C}$ is the polynomial computed by the output gate of the circuit. The {\em size} of an arithmetic circuit is the number of gates in $\mathcal{C}$ and {\em depth} of $\mathcal{C}$ is the length of the longest path from an input gate to the output gate in $\mathcal{C}$.
An {\em arithmetic formula} is an arithmetic circuit where the underlying undirected graph is a tree.
An {\em Algebraic Branching Program} $P$ (ABP for short) is a layered directed acyclic graph with two special nodes, a start node $s$ and a terminal node $t$.
Each edge in $P$ is labeled by either an $x_i\in X$ or $\alpha\in\mathbb{F}$. The size of $P$ is the total number of nodes, and the width is the maximum number of nodes in any layer of $P$. Each path $\gamma$ from $s$ to $t$ in $P$ computes the product of the labels of the edges in $\gamma$, which is a polynomial. The ABP $P$ computes the sum over all $s$ to $t$ paths of such polynomials.
An ABP $P$ is said to be {\em syntactic multilinear} (sm-ABP for short) if every variable occurs at most once in every path in $P$. An ABP is said to be {\em oblivious} if for every layer $L$ in $P$ there is at most one variable that labels edges from $L$.
\begin{defn}{\em (Read-Once Oblivious ABP.)} An ABP $P$ is said to be Read-Once Oblivious (ROABP for short) if $P$ is oblivious and each $x_i\in X$ appears as an edge label in at most one layer. \end{defn}
In any Oblivious ROABP, every variable appears in exactly one layer and all variables in a particular layer are the same. Hence, variables appear in layers from the start node to the terminal node in the {\em variable order} $x_{i_1},x_{i_2},\ldots,x_{i_n}$ where $(i_1,i_2,\ldots,i_n)\in S_n$ is a permutation on $[n]$.
A natural generalization of ROABPs is the $r$-pass ABPs defined in~\cite{AFSSV16}:
\begin{defn}{\em ($r$-pass multilinear ABP).} An oblivious sm-ABP $P$ is said to be $r$-pass if there are permutations $\pi_1,\pi_2,\ldots,\pi_r\in S_n$ such that $P$ reads the variables from $s$ to $t$ in the order $(x_{\pi_1(1)},x_{\pi_1(2)},\ldots,x_{\pi_1(n)}),\ldots$,$(x_{\pi_r(1)},x_{\pi_r(2)},\ldots,x_{\pi_r(n)}).$ \end{defn}
Recall that a polynomial $f\in\mathbb{F}[X]$ is $s$-sparse if it has at most $s$ monomials with non-zero coefficients. \begin{defn}{\em ($\alpha$-Sparse ROABP).}~\cite{For14}
A $(d+1)$-layer ABP $P$ is said to be an $\alpha$-sparse ROABP if there is a partition of $X$ into $d = \Theta(N^{\alpha})$ sets $X_1,X_2,\ldots,X_d$ with $|X_i| = N/d$ such that every edge label in layer $L_i$ is an $s$-sparse multilinear polynomial in $\mathbb{F}[X_i]$ for $s=N^{O(1)}$. \end{defn}
Let $\Psi$ be a circuit over $\mathbb{F}$ with $X=\{x_1,\ldots, x_N\}$ as inputs. For a gate $v$ in $\Psi$, let $X_{v}$ denote the set of variables that appear in the sub-circuit rooted at $v$. The circuit $\Psi$ is said to be {\em syntactic multilinear} (sm for short), if for every $\times$ gate $v= v_1\times v_2$ in $\Psi$, we have $X_{v_1} \cap X_{v_2} =\emptyset $. By definition, every syntactic multilinear circuit is a multilinear circuit. In~\cite{Raz09}, it was shown that every multilinear formula can be transformed into a syntactic multilinear formula of the same size, computing the same polynomial.
Let $\Psi$ be a circuit (formula) and $v$ be a gate in $\Psi$. The {\em product-height} of $v$ is the maximum number of $\times$ gates along any $v$ to root path in $\Psi$.
We now review the partial derivative matrix of a polynomial introduced in~\cite{Raz09}. Let $Y = \{ y_1,\ldots,y_m\}$ and $Z=\{z_1,\ldots,z_m\}$ be disjoint sets of variables.
\begin{defn}{\em (Partial Derivative Matrix.)} \label{def:partition} Let $f\in\mathbb{F}[Y,Z]$ be a polynomial. The {\em partial derivative matrix} of $f$ (denoted by $M_f$) is a $2^m\times 2^m$ matrix defined as follows. For monic multilinear monomials $p$ and $q$ in variables $Y$ and $Z$ respectively, the entry $M_f [p,q]$ is the coefficient of the monomial $pq$ in $f$. \end{defn}
For a polynomial $f$, let ${\sf rank}(M_f)$ denote the rank of the matrix $M_f$ over the field $\mathbb{F}$. It is known that ${\sf rank}(M_f)$ satisfies sub-additivity and sub-multiplicativity:
\begin{lemma}{{\em \cite{Raz09}(Sub-additivity, sub-multiplicativity) .}} \label{lem:sub-aditivity} Let $f,g \in \mathbb{F}[Y,Z]$. Then, we have that ${\sf rank}(M_{f+g}) \leq {\sf rank}(M_f)+{\sf rank}(M_g).$ Further, if ${\sf var}(f) \cap {\sf var}(g) = \emptyset$, then ${\sf rank}(M_{fg}) = {\sf rank}(M_f){\sf rank}(M_g)$.
\end{lemma} Further, since row-rank of a matrix is equal to its column rank, we have:
\begin{lemma}{\em \cite{Raz09}} \label{lem:rankub}
For $f\in\mathbb{F}[Y_1,Z_1]$, ${\sf rank}(M_f)\leq 2^{\min\{|Y_1|,|Z_1|\}}$, where $Y_1\subseteq Y, Z_1\subseteq Z$. \end{lemma}
For $f\in\mathbb{F}[X]$, it may be noted that the partial derivative matrix $M_f$ is dependent on the partition of the variable set $X$ into variables in $Y\cup Z$. In most cases, the partition of the variable set is not apparent. In such cases, we need to consider a distribution over the set of all such partitions. We represent a partition as a bijective function $\varphi : X \rightarrow Y \cup Z$, where $|Y| = |Z| = |X|/2$.
Let ${\cal D}$ be the uniform distribution on the set of all partitions $\varphi: X \to Y \cup Z$, with $|Y| = |Z| = |X|/2$.
Now, we state a useful property of the standard hypergeometric distribution that will be needed later.
\begin{prop} \label{prop:hyper}{\em \cite{Raz06,RP15}} {\em(Hypergeometric Distribution).} Let $M_1,M_2\leq S$ be integers. Let ${\cal{H}}(M_1,M_2,S)$ denote the distribution of size of the intersection of a random set of size $M_2$ and a set of size $M_1$ in a universe of size $S$. Let $\chi$ be a random variable distributed according to ${\cal{H}}(M_1,M_2,S)$ : \begin{enumerate} \item If $S^{1/2} \leq M_1 \leq S/2$ and $S/4 \leq M_2 \leq 3S/4$ then $\Pr[\chi=a]\leq O(S^{-1/4})$. \item If $0 \leq M_1 \leq 2S/3$ and $S/4 \leq M_2 \leq 3S/4$ then $\Pr[\chi=a]\leq O(M_1^{-1/2})$ for any $a\leq M_1$. \end{enumerate} \end{prop}
We consider the full rank polynomial $g$ defined by Raz and Yehudayoff~\cite{RY08} to prove lower bounds for all models that arise in this work.
\begin{defn}{\em (Hard Polynomial.)} \label{def:raz-poly}
Let $N\in\mathbb{N}$ be an integer. Let $X=\{x_1,\ldots, x_{N}\}$ and $\mathcal{W} = \{w_{i,k,j} \}_{i,k,j\in[N]}$. For any two integers $i,j\in\mathbb{N}$, we define an interval $[i,j] = \{ k\in\mathbb{N}, i\leq k\leq j \}$. Let $|[i,j]|$ be the length of the interval $[i,j]$. Let $X_{i,j} = \{ x_p \mid p\in [i,j]\} $ and $W_{i,j}=\{ w_{i',k,j'}\mid i',k,j'\in[i,j] \}$. Let $\mathbb{G}=\mathbb{F}(\mathcal{W})$, the rational function field. For every $[i,j]$ such that $|[i,j]|$ is even we define a polynomial $g_{i,j}\in\mathbb{G}[X]$ as
$g_{i,j}=1$ when $|[i,j]|=0$ and
if $|[i,j]|>0$ then, {\small $g_{i,j }\triangleq (1+x_ix_j)g_{i+1,j-1} + \sum_{k}w_{i,k,j}g_{i,k}g_{k+1,j}.$}
where $x_k$, $w_{i,k,j}$ are distinct variables, $1\le k\le j$ and the summation is over $k\in [i+1,j-2]$ such that $|[i,k]|$ is even. Let $g\triangleq g_{1,N}$. \end{defn}
\begin{lemma}\cite[Lemma~4.3]{RY08} \label{lem:ry} Let $X=\{x_1,\ldots, x_{N}\}$ and $\mathcal{W} = \{w_{i,k,j} \}_{i,k,j\in[N]}$. Let $\mathbb{G}=\mathbb{F}(\mathcal{W})$ be the set of rational functions over field $\mathbb{F}$ and $\mathcal{W}$. Let $g\in\mathbb{G}[X]$ be the polynomial in Definition \ref{def:raz-poly}. Then for any $\varphi\sim {\cal D}$, ${\sf rank}(M_{g^\varphi})= 2^{N/2}$. \end{lemma}
\section{Lower Bounds for Special cases of sm-ABPs}
\label{sec:explb} In this section, we obtain exponential lower bound for sum of ROABPs and related special classes of syntactic multilinear ABPs. \subsection{Sum of ROABPs: Proof of Theorem~\ref{thm:lb-roabp}} \label{subsec:sumroabp} Let $P$ be an ROABP with $\ell+1$ layers $L_0,L_1,L_2,\ldots,L_{\ell}$ computing a multilinear polynomial $f\in\mathbb{F}[x_1,x_2,\ldots,x_N]$. For every $i\in \{0,1,\ldots,\ell-1\}$, we say a layer $L_i$ is a {\em constant} layer if every edge going out of a vertex in $L_i$ is labeled by a constant from $\mathbb{F}$, else we call the layer $L_i$ a {\em variable} layer. For any {\em variable} layer $L_i$ denote by ${\sf var}(L_i)$ the variable in $X$ that labels edges going out of vertices in $L_i$. For nodes $u,v$ in $P$, we denote by $[u,v]$ the polynomial computed by the subprogram with $u$ as the start node and $v$ as the terminal node and let $X_{u,v}$ be the set of variables that occur in $P$ between layers containing $u$ and $v$ respectively. We can assume without loss of generality that $P$ does not have any two consecutive constant layers and that every ROABP $P$ has exactly $2N$ layers by introducing dummy constant layers in between consecutive variable layers. Further, we assume that the variables occur in $P$ in the order $x_1,\ldots x_N$, and hence indices of variables in $X_{u,v}$ is an interval $[i,j]=\{t\in\mathbb{N}\mid i\leq t\leq j\}$ for some $i< j$. (In case of a different order $\pi$ for occurrence of variables, the interval would be $[i, j ] = \{\pi(i), \pi(i+1), \ldots, \pi(j)\}$.)
\noindent{\bf Approach:} In order to prove Theorem~\ref{thm:lb-roabp}, we use ${\sf rank}(M_{f^\varphi})$ as a complexity measure, where $\varphi\sim {\cal D}$. The outline is as follows:
\begin{enumerate} \item Convert the ROABP $P$ into a multilinear formula $\Phi$ with a small (super-polynomial) blow up in size (Lemma~\ref{lem:abptoformula}). \item Obtain a partition $B_1,\ldots, B_t$ of the variable set with $O(\sqrt{N})$ parts of almost equal size, so that there is at least one set that is highly unbalanced under a random $\varphi$ drawn from ${\cal D}$. (Observation~\ref{obs:blocks} and Lemma~\ref{lem:kbl}.) \item Using the structure of the formula $\Phi$, show that if at least one of the $B_i$ is highly unbalanced, then the formula $\Phi$ has low rank (Lemma~\ref{lem:rank}). \item Combining with Lemma~\ref{lem:ry} gives the required lower bound. \end{enumerate} The following lemma lists useful properties of the straightforward conversion of an ROABP into a multilinear formula:
\begin{lemma} \label{lem:abptoformula} Let $P$ be an ROABP of size $s$ computing a polynomial $f\in\mathbb{F}[x_1,\ldots,x_N]$. Then $f$ can be computed by a syntactic multilinear formula $\Phi$ of size $s^{O(\log N)}$ and depth $O(\log N)$ such that \begin{enumerate} \item $\Phi$ has an alternative of layers of $+$ and $\times$ gates; and \item $\times$ gates have fan-in bounded by two; and \item Every $+$ gate $g$ in $\Phi$ computes a polynomial $[u,v]$ for some $u, v$ in $P$; and \item Every $\times$ gate computes a product $[u,v]\times [v,w]$, for some $u,v$ and $w$ in $P$. \item The root of $\Phi$ is a $+$ gate. \end{enumerate} \end{lemma}
\makeproof{lem:abptoformula}{
The proof is a simple divide-and-conquer conversion of branching programs to formulas. Let $P$ be an ROABP with $\ell+1$ layers $L_0,L_1,\ldots,L_{\ell}$ with $s$ and $t$ as the start and terminal nodes respectively. Let $L_i$ be such that $|{\sf var}(L_0)\cup{\sf var}(L_1) \cup \cdots \cup {\sf var}(L_i)|, |{\sf var}(L_{i+1})\cup \cdots\cup {\sf var}(L_\ell)| \in \{\lceil{N/2}\rceil, \lfloor N/2\rfloor\}$ and $u_{i_1},u_{i_2},\ldots,u_{i_k} (k\leq s)$ be the nodes at the layer $L_i$. Then, \begin{equation} \label{eqn:formula1} f = \sum\limits_{j=1}^{k} [s,u_{i_j}]\times [u_{i_j},t] \end{equation} where $[u,v]$ is the polynomial computed by the subprogram with start node $u$ and $v$ as the terminal node. By induction on $N$, let $\phi_j$ (respectively $\psi_j$) be the formula computing $[s,u_{i_j}]$ (respectively $[u_{i_j},t]$). Then $\Phi = \sum_{j=1}^k \phi_j\times \psi_j.$ By induction, it follows that the resulting formula $\Phi$ has size $s^{O(\log N)}$, depth $O(\log N)$ and is syntactic multilinear. Also, by the construction above, it can be verified that $\Phi$ satisfies the conditions $1$ to $5$. \qed
}
Let $P$ be an ROABP and $\Phi$ be the syntactic multilinear formula obtained from $P$ as in Lemma~\ref{lem:abptoformula}. Let $g$ be a $+$ (respectively $\times$) gate in $\Phi$ computing $[u_g, v_g]$ (respectively $[u_g,v_g]\times [v_g, w_g]$) for some nodes $u_g$, $v_g$ and $w_g$ in $P$. Since $P$ is an ROABP with variable order $x_1,x_2,\ldots x_N$, the set $X_{u_g, v_g}$ (respectively $ X_{u_g,v_g} \cup X_{v_g, w_g}$) corresponds to an interval $I_g$ in $\{1,\ldots, N\}$. We call $I_g$ the {\em interval associated with } $g$. By the construction of $\Phi$ in Lemma \ref{lem:abptoformula}, the intervals have the following properties : \begin{enumerate}
\item For any gate $g$ in $\Phi$ at product-height $i$, $|I_g| \in [ N/2^i - i,N/2^i+i]$. \item For any $+$ gate $g$ in $\Phi$ with children $g_1,\ldots,g_w$, we have $I_g=I_{g_1}=\cdots=I_{g_w}$.
\item Let $\cal{I}$ be the set of all distinct intervals associated with gates at product-height $\frac{\log N}{2}$ in $\Phi$. The intervals in ${\cal I}$ are disjoint and $|{\cal{I}}|= \Theta(\sqrt{N})$. For any $I_j\in{\cal{I}}$, $\sqrt{N} -\log N \leq |I_j|\leq \sqrt{N}+\log N$. \end{enumerate}
We call the intervals in ${\cal{I}}$ as {\em blocks} $B_1,B_2,\ldots,B_{t}$ in $\Phi$ where $t=\Theta(\sqrt{N})$. For any block $B_\ell=[i_\ell,j_\ell]$, $X_{\ell} = \{x_{i_a}\mid i_\ell\leq i_a\leq j_\ell\}={\sf var}(L_{i_\ell})\cup {\sf var}(L_{i_\ell+1})\cup \cdots \cup {\sf var}(L_{j_\ell})$.
Let $\varphi: X \to Y \cup Z$ be a partition. We say a block $B_\ell$ is $k\mbox{-}unbalanced$ with respect to $\varphi$ iff $||Y\cap \varphi(X_\ell)|-|Z\cap \varphi(X_\ell)||>k$. For any two intervals $I_1=[i_1,j_1]$ and $I_2=[i_2,j_2]$ we say $I_1\subseteq I_2$ iff $i_2\leq i_1\leq j_1\leq j_2$.
\begin{obs} \label{obs:blocks} Let $P$ be an ROABP and $\Phi$ be the syntactic multilinear formula obtained from $P$ and $B_1,\ldots, B_t$ be the blocks in $\Phi$. Then, for any gate $v$ in $\Phi$, \begin{itemize} \item[(1)] If $v$ is at a product-height $<\frac{\log N}{2}$ in $\Phi$, then $B_i\subseteq I_v$ for some block $B_i$.
\item[(2)]If $v$ is at product-height $>\frac{\log N}{2}$ in $\Phi$, then for every $1 \le i \le t$, either $I_v \subseteq B_i$ or $B_i \cap I_v = \emptyset$.
\item[(3)]If $v$ is at product-height $\frac{\log N}{2}$ in $\Phi$, then for every $1 \le i \le t$, either $I_v = B_i$ or $B_i \cap I_v = \emptyset$. \end{itemize} \end{obs} We need the following before formalizing Step 3 in the approach outlined. \begin{defn}({$k_B$-hitting~formula}.) Let $\varphi: X \to Y \cup Z$ be a partition and $B$ be a $k$-unbalanced block in $\Phi$ with respect to $\varphi$. A gate $v$ with product-height $\le \frac{\log N}{2}$ in $\Phi$ is $k_B$-hitting~ if either \begin{itemize} \item[(i)] $I_v=B$; Or \item[(ii)] $B\subseteq I_v$ and,
\begin{itemize}
\item If $v$ is a sum gate with children $v_1,\ldots,v_w$, the gates $v_1,\ldots,v_w$ are $k_B$-hitting.
\item If $v$ is a product gate with children $v_1,v_2$, then at least one of $v_1$ or $v_2$ is $k_B$-hitting. \end{itemize} \end{itemize} A formula $\Phi$ is $k_B$-hitting\ with respect to $\varphi$ if the root $r$ is $k_B$-hitting\ for some $k$-unbalanced block $B\in \{ B_1,B_2,\ldots,B_t\}$ where $t=\Theta(\sqrt{N})$. \end{defn}
In the following, we note that the partial derivative matrix of $k_B$-hitting~ formulas have low rank:
\begin{lemma} \label{lem:rank}
Let $P$ be an ROABP computing $f$ and $\Phi_P$ be the multilinear formula obtained from $P$ computing $f$. Let $\varphi\sim{\cal D}$ such that block $B$ is k\mbox{-}unbalanced~ in $\Phi$ with respect to $\varphi$. Let $v$ be a gate in $\Phi$ that is $k_B$-hitting~then ${\sf rank}(M_{f_v^\varphi})\leq |\Phi_v|\cdot 2^{|X_v|/2-k/2}$. \end{lemma} \makeproof{lem:rank}{
Proof is by induction on the structure of the formula. \\
For the base case, let $v$ be a gate in $\Phi$ at product-height $(\log N)/2$. By Observation \ref{obs:blocks}, either $I_v = B$ or $I_v\cap B=\emptyset$. As $v$ is $k_B$-hitting, $I_v = B$. Since $B$ is $k\mbox{-}unbalanced$, we have $X_{v}$ is $k\mbox{-}unbalanced$. By Lemma \ref{lem:rankub}, ${\sf rank}(M_{f_v^\varphi})\leq 2^{\min\{|Y_v|,|Z_v|\}} \leq 2^{|X_v|/2-k/2}$. For the induction step, let $v$ be a node at product depth $ \geq (\log N)/2$. \begin{description}
\item[Case~1] $v$ is a product gate with two children $v_1,v_2$. Since $v$ is $k_B$-hitting, at least one of $v_1$ or $v_2$ is $k_B$-hitting. Without loss of generality let $v_1$ be $k_B$-hitting. By induction hypothesis, ${\sf rank}(M_{f_{v_1}^\varphi})\leq |\Phi_{v_1}|\cdot2^{|X_{v_1}|/2-k/2}$ and ${\sf rank}(M_{f_{v_2}^\varphi})\leq 2^{|X_{v_2}|/2}$. Then ${\sf rank}(M_{v})\leq {\sf rank}(M_{v_1})\cdot {\sf rank}(M_{v_2}) \leq |\Phi_{v_1}|\cdot2^{|X_{v_1}|/2+|X_{v_2}|/2-k/2}\leq |\Phi_v|\cdot 2^{|X_v|/2-k/2}$ as $X_v=X_{v_1}\cup X_{v_2}$.
\item[Case~2] $v$ is a sum gate with children $v_1,v_2,\ldots,v_w$. Since $v$ is $k_B$-hitting, every child of $v$ is $k_B$-hitting. Then by induction hypothesis, ${\sf rank}(M_{v_i})\leq |\Phi_{v_i}|\cdot2^{|X_{v_i}|/2-k/2}$. As $X_{v_1}=X_{v_2}=\cdots=X_{v_w}$, ${\sf rank}(M_{v})\leq |\Phi_v|\cdot 2^{|X_v|/2-k/2}$. \qed \end{description}
}
\begin{obs} \label{obs:not-kbl} Let $\varphi: X \to Y \cup Z$ be a partition and $B$ be a $k$-unbalanced block in $\Phi$ with respect to $\varphi$. \begin{enumerate} \item If a $+$ gate $v$ in $\Phi$ with children $v_1,\ldots,v_w$ is not $k_B$-hitting~ then $I_{v_j}\cap B =\emptyset$ for some $j\in[w]$. \item If a $\times$ gate $v$ with children $v_1,v_2$ is not $k_B$-hitting~ then $I_{v_1}\cap B =\emptyset$ and $I_{v_2}\cap B =\emptyset$. \end{enumerate} \end{obs}
Further, we observe that, proving that a formula $\Phi$ is $k_B$-hitting\ with respect to a partition, is equivalent to showing existence of a $k$-unbalanced block among $B_1,\ldots, B_t$. \begin{obs} \label{obs:hitting} Let $B_1,\ldots, B_t$ be the blocks of the formula $\Phi$ obtained from an ROABP $P$. Let $B\in\{B_1,\ldots, B_t\}$ be a $k$-unbalanced block with respect to a partition $\varphi$. Then, $\Phi$ is $k_B$-hitting\ with respect to $\varphi$. \end{obs} \makeproof{obs:hitting}{
Suppose not; let $B\in\{B_1,\ldots, B_t\}$ be a $k$-unbalanced block with respect to a partition $\varphi$ such that $\Phi$ is not $k_B$-hitting\ with respect to $\varphi$. Let gate $g$ be at product-height $(\log N)/2$ in $\Phi$ such that $I_g=B$. Since $\Phi$ is not $k_B$-hitting, the root gate $r$ of $\Phi$ is not $k_B$-hitting. We know $r$ is a $+$ gate with children, say, $r_1,r_2,\ldots,r_w$. By Observation \ref{obs:not-kbl}, there exists $i\in[w]$ such that $r_i$ is not $k_B$-hitting, i.e., $I_{r_i}\cap B=\emptyset$. Also, as $r$ is a $+$ gate, $I_{r_1}=I_{r_2} = \cdots = I_{r_w}$. This implies that none of $r_1,r_2,\ldots,r_w$ are $k_B$-hitting. $r_1,r_2,\ldots,r_w$ being product gates, $r_1,r_2,\ldots,r_w$ are not $k_B$-hitting\ implies that none of their children are $k_B$-hitting\ by Observation \ref{obs:not-kbl}. In this way, we get that no descendant of $r$ is $k_B$-hitting, which is a contradiction to the fact that gate $g$ is $k_B$-hitting. \qed
} In the remainder of the section, we estimate the probability that at least one of the blocks among $B_1,\ldots, B_t$ is $k$-unbalanced.
\begin{lemma} \label{lem:kbl} Let $P$ be an ROABP computing a polynomial $f\in\mathbb{F}[x_1,\ldots,x_N]$ and $\Phi_P$ be the syntactic multilinear formula computing $f$. Let $\varphi\sim{\cal D}$. Then, for any $k \le N^{1/5}$, there exists a block $B$ in $\Phi$ such that $$\Pr\limits_{\varphi\sim {\cal D}}[\text{$\Phi$ is $k_B$-hitting~}]\geq 1-2^{-\Omega(\sqrt{N}\log N)}$$ \end{lemma}
\makeproof{lem:kbl}{
By Observation \ref{obs:hitting}, $\Pr\limits_{\varphi\sim {\cal D}}[\text{$\Phi$ is $k_{B_i'}$-hitting~}]\geq \Pr[\exists~i,\text{$B_i$ is $k\mbox{-}unbalanced$}]$. Here we estimate $\Pr[\exists~i,\text{$B_i$ is $k\mbox{-}unbalanced$}]$. Let $P$ be an ROABP and $B_1,\ldots,B_{t}$ be blocks in $\Phi$. Note that for any $\ell\in[t],~\sqrt{N} - \log N\leq|X_\ell|\leq \sqrt{N} + \log N$. Let ${\cal{E}}_i$ be the event that the block $B_i$ is not k\mbox{-}unbalanced. For any block $\ell\in[t]$, denote $Y_\ell =\varphi(X_\ell)\cap Y$. Let $\chi =|Y_\ell|$ be a random variable. Observe that $\chi$ has the distribution ${\cal{H}}(M_1,M_2,S)$ with \begin{align*}
S &= N-(|X_1|+\cdots+|X_{\ell-1}|) \in [ N-(\ell-1)(\sqrt{N} + \log N), N-(\ell-1)(\sqrt{N} - \log N)] \\
M_1 &= |X_\ell| \\
M_2 &= N/2-(|Y_1|+\cdots+|Y_{\ell-1}|)\in [ N/2-(\ell-1)(\sqrt{N} + \log N), N/2-(\ell-1)(\sqrt{N} - \log N)] \end{align*} For $(\ell-1)<\sqrt{N}/4$, we have: \begin{itemize} \item[(i)] $3N/4\leq S \leq N$; and \item[(ii)] $S/4\leq N/4\leq M_2 \leq N/2\leq 2S/3 \leq 3S/4 $; and \item[(iii)] $S^{1/2}\leq \sqrt{N}\leq M_1 \leq 2\sqrt{N}\leq 3N/8 \leq S/2$ for large enough $N$. \end{itemize} By Proposition \ref{prop:hyper} (1), we have $\Pr[\chi=a]\leq O(S^{-1/4}) = O(N^{-1/4})$. Therefore, for $i<\sqrt{N}/4,~\Pr[{\cal{E}}_i] \leq O(k\cdot N^{-1/4}) = O(N^{-1/20})$ for $k\leq N^{1/5}$. Let ${\cal{E}}$ be the event that for all $i\in[\sqrt{N}/4]$, block $B_i$ is not k\mbox{-}unbalanced. \begin{align*} \mathcal{E} &= \mathcal{E}_1 \cap \mathcal{E}_2 \cap \cdots \cap \mathcal{E}_{\sqrt{N}/4} \\ \Pr[\mathcal{E}] &= \Pr[\mathcal{E}_1 \cap \mathcal{E}_2 \cap \cdots \cap \mathcal{E}_{\sqrt{N}/4}] \\ &= \Pr[\mathcal{E}_1]\cdot \prod\limits_{i=2}^{\sqrt{N}/4}\Pr[\mathcal{E}_i\mid \cap_{j=1}^{i-1}\mathcal{E}_{j}] \\ & \leq O(2^{-\sqrt{N}\log N/80}) \end{align*} Note $\bar{{\cal{E}}}$ is the event that there exists an $i\in [\sqrt{N}/4]$ such that $B_i$ is k\mbox{-}unbalanced. $\Pr[\bar{{\cal{E}}}] =1- \Pr[{\cal{E}}]\geq 1- 2^{-\frac{1}{80}\sqrt{N}\log N}$. \qed
}
\begin{corollary} \label{cor:rank1}
Let $P$ be an ROABP and $\Phi_P$ be the multilinear formula obtained from $P$ computing $f$. Let $\varphi\sim{\cal D}$. Then with probability $1-2^{-\Omega(\sqrt{N}\log N)}$, ${\sf rank}(M_{f^\varphi})\leq |\Phi|\cdot 2^{N/2-N^{1/5}}$. \end{corollary} \begin{proof} Follows directly from Lemmas \ref{lem:rank} and \ref{lem:kbl}. \qed \end{proof}
We are ready to combine the above to prove Theorem~\ref{thm:lb-roabp}:
\begin{proof}[of Theorem~\ref{thm:lb-roabp}] Suppose, $f_i$ has an ROABP $P_i$ of size $s_i$. Then, by Lemma~\ref{lem:abptoformula}, there is a multilinear formula $\Phi_i$ computing $f_i$. By Lemma~\ref{lem:kbl}, probability that $\Phi_i$ is not $k_B$-hitting\ is at most $2^{-\Omega(\sqrt{N}\log N)}$. Therefore, if $m < 2^{cN^{1/5}}$, there is a partition $\varphi\sim{\cal D}$ such that $\Phi_i$ is $k_B$-hitting\ for every $1\leq i\le m$. Therefore, by Lemma~\ref{lem:rank}, there is a partition $\varphi\sim{\cal D}$ such that ${\sf rank}(M_{g^\varphi}) \le m\cdot s^{O(\log N)}\cdot 2^{N/2 - k}$. If $m < 2^{c(N^{1/5})}/s^{\log N}$, we have ${\sf rank}(M_{g^\varphi}) < 2^{N/2}$, a contradiction to Lemma~\ref{lem:ry}. \qed \end{proof}
\subsection{Lower Bound against multilinear $r$-pass ABPs} \label{subsec:rpass} In this section, we extend Theorem~\ref{thm:lb-roabp} to the case of $r$-pass ABPs. Let $P$ be a multilinear $r$-pass ABP of size $s$ having $\ell$ layers. Let $\pi_1,\pi_2,\ldots,\pi_r$ be the $r$ orders associated with the $r$-pass ABP. Lemmas~\ref{lem:rpasstoformula} and Lemma~\ref{lem:rpass} show that techniques in Section~\ref{subsec:sumroabp} can be adapted to the case of $r$-pass sm-ABPs. Proofs are deferred to the appendix. \begin{lemma} \label{lem:rpasstoformula} Let $P$ be a multilinear $r$-pass ABP of size $s$ having $\ell$ layers computing a polynomial $f\in\mathbb{F}[x_1,\ldots,x_N]$. Then there exists a syntactic multilinear formula $\Psi_P=\Psi_1 +\Psi_2 + \cdots + \Psi_t, t= s^{O(r)}$ where each $\Psi_i$ is a syntactic multilinear formula obtained from an ROABP. \end{lemma}
\makeproof{lem:rpasstoformula}{
Let $P$ be a multilinear $r$-pass ABP of size $s$ computing a polynomial $f\in\mathbb{F}[x_1,\ldots,x_N]$. Then, there exist $i_1,i_2,\ldots,i_{r+1}\in [\ell]$ such that for $j\in[r]$, the subprogram $[u,v]$ is an ROABP for any nodes $u$ and $v$ in layers $L_{i_j}$ and $L_{i_{j+1}}$ respectively. The polynomial $f$ computed by $P$ can be expressed as \begin{equation} \label{eqn:k-pass} f=\sum\limits_{\bar{u}}\prod\limits_{i=1}^r [u_{i},u_{i+1}] \end{equation} where the summation is over $\bar{u}=(u_1,u_2,\ldots,u_r)$ where $u_1,u_2,\ldots,u_r$ are nodes in layers $L_{i_1},L_{i_2},\ldots,L_{i_r}$ respectively. As $P$ is a syntactic multilinear ABP, the product term $\prod_{i=1}^r [u_{i},u_{i+1}]$ in Equation (\ref{eqn:k-pass}) is an ROABP of size at most $s$ and has a syntactic multilinear formula $\Psi_{\bar{u}}$ of size $s^{O(\log N)}$. Thus, $\Psi_P=\Psi_1 +\Psi_2 + \cdots + \Psi_t, t= s^{O(r)}$ where each $\Psi_i$ is a syntactic multilinear formula obtained from an ROABP. The formula $\Psi$ computing $f$ has size $rs^{O(r+\log N)}$. \qed
}
\begin{lemma} \label{lem:rpass}
Let $P$ be a multilinear $r$-pass ABP computing a polynomial $f\in\mathbb{F}[x_1,\ldots,x_N]$ and $\Psi_P=\Psi_1+\Psi_2+\cdots+\Psi_t,~ t=s^{O(r)}$ be the syntactic multilinear formula computing $f$. Let $\varphi\sim{\cal D}$ and $k\leq N^{1/5}$. Then with probability $1-2^{-\Omega(\sqrt{N}\log N)}$, ${\sf rank}(M_{f})\leq |\Psi|\cdot 2^{N/2-k/2}$. \end{lemma} \makeproof{lem:rpass}{
Let $P$ be a multilinear $r$-pass ABP computing a polynomial $f\in\mathbb{F}[x_1,\ldots,x_N]$. By Lemma \ref{lem:rpasstoformula}, let $\Psi_P=\Psi_1+\Psi_2+\cdots+\Psi_t,~ t=s^{O(r)}$ be the syntactic multilinear formula computing $f$. Note that $\Psi_i$ is a multilinear formula obtained from an ROABP computing a polynomial $f_i$. By Corollary \ref{cor:rank1}, with probability $1-2^{-\Omega(\sqrt{N}\log N)}$, we have ${\sf rank}(M_{f_i^\varphi})\leq |\Psi_i|2^{|X|/2-k/2}$ for $k\leq N^{1/5}$. By sub-additivity in Lemma~\ref{lem:sub-aditivity}, ${\sf rank}(M_{f^\varphi})\leq {\sf rank}(M_{f_1^\varphi})+\ldots+{\sf rank}(M_{f_t^\varphi})\leq (|\Psi_1|+|\Psi_2|+\cdots +|\Psi_t|)2^{|X|/2-k/2}\leq |\Psi|\cdot 2^{N/2-k/2}$. \qed
}
Combining the above Lemmas with Lemma~\ref{lem:ry} we get: \begin{theorem} \label{thm:lb-rpass} Let $f_1,\ldots f_m$ be polynomials computed by multilinear $r$-pass ABPs of size $s_1,s_2,\ldots,s_m$ respectively such that $g= f_1+\cdots + f_m$. Then, $m = \frac{2^{\Omega(N^{1/5})}}{s^{c(r +\log N)}}$, where $c$ is a constant and $s=\max\{s_1,s_2,\ldots,s_m\}$. \end{theorem}
\makeproof{thm:lb-rpass}{
Suppose $f_j$ has a multilinear $r$-pass ABP $P$ of size $s$. Then, by Lemma~\ref{lem:rpasstoformula}, there is a multilinear formula $\Psi_j = \Psi_{j_1} + \dots + \Psi_{j,t}$ computing $f_j$ such that $t \le s^{O(r)}$ and each $\Psi_{j_i}$ is a syntactic multilinear formula of size $s^{O(\log N)}$ obtained from an ROABP of size at most $s$. By Lemma~\ref{lem:rpass}, ${\sf rank}(M_{f_j})\leq t \max_{i}\{|\Psi_{j_i}|\}\cdot 2^{N/2-k/2} \le s^{O(r+\log N)} 2^{N/2 - k/2}$ with probability at least $1-2^{-\Omega(\sqrt{N})}$. Therefore, if $s< 2^{o(\sqrt{N})}$, there is a partition $\varphi\sim{\cal D}$ such that ${\sf rank}(M_{g^\varphi}) \le m\cdot s^{O(r+\log N)}\cdot 2^{N/2 - k}$. If $m < 2^{c(N^{1/5})}/s^{O(r+\log N)}$, we have ${\sf rank}(M_{g^\varphi}) < 2^{N/2}$, a contradiction to Lemma~\ref{lem:ry}. \qed
}
\input{sparse}
\section{Super-polynomial lower bounds for special classes of multilinear circuits}
\label{sec:signature} In this section, we develop a framework for proving super polynomial lower bound against syntactic multilinear circuits and ABPs based on Raz~\cite{Raz09}. Our approach involves a more refined analysis of central paths introduced by Raz~\cite{Raz09}.
\begin{defn}{\em (Central Paths.)}
Let $\Phi$ be a syntactic multilinear formula. For node $v$ in $\Phi$, let $X_v$ denote the set of variables appearing in the sub-formula rooted at $v$. A leaf to root path $\rho = v_1,\ldots, v_\ell$ in $\Phi$ is said to be {\em central}, if $|X_{v_{i+1}}| \le 2 |X_{v_i}|$ for $1\le i\le \ell-1$. \end{defn}
For a leaf to root path $\rho: v_1,\ldots, v_\ell$ in $\Phi$, $X_{v_1}\subseteq \ldots \subseteq X_{v_\ell}$ is called the {\em signature} of the path $\rho$. A signature $X_{v_1}\subseteq \ldots \subseteq X_{v_\ell}$ is called central if $|X_{v_{i+1}}| \le 2 |X_{v_i}|$ for $1\le i\le \ell-1$.
Let $\varphi: X \to Y\cup Z$ be a partition. A central signature $X_{v_1}\subseteq \ldots \subseteq X_{v_\ell}$ of a formula $\Phi$ is said to be $k$-unbalanced with respect to $\varphi$ if for some $i\in[\ell]$, $X_{v_i}$ is $k$-unbalanced with respect to $\varphi$, i.e., $\bigl| |\varphi(X_{v_i}) \cap Y| - |\varphi(X_{v_i}) \cap Z| \bigr| \ge k $.
The formula $\Phi$ is said to be $k$-weak with respect to $\varphi$, if every central signature that terminates at the root is $k$-unbalanced. Our first observation is, we can replace central paths in Lemma~4.1, \cite{Raz09} with central signatures. Using the same arguments as in~\cite{Raz09} we get:
\begin{obs} \label{obs:central-sign-prob} Let $\varphi:X\rightarrow Y \cup Z$ be a partition of $X=\{x_1,\ldots,x_N\}$. Let $\Phi$ be any multilinear formula computing a polynomial $f\in\mathbb{F}[x_1,\ldots,x_N]$. \begin{enumerate}
\item If $\Phi$ is $k$-weak with respect to $\varphi$, then ${{\sf rank}}(M_{f^\varphi}) \le |\Phi|\cdot 2^{N/2 - k}$.
\item Let $C: X_{v_1} \subseteq X_{v_2} \subseteq \cdots \subseteq X_{v_\ell}$ be a central signature in $\Phi$ such that $ k <|X_{v_1}| \le 2k$. Then ${\sf Pr}_{\varphi \sim {\cal D}}[\mbox{$C$ is not $k$-unbalanced}] = N^{-\Omega(\log N)}.$ \end{enumerate} \end{obs}
Unfortunately, it can be seen that even when $P$ is an ROABP, the number of central signatures in the formula obtained from $P$ can be $N^{\Omega(\log N)}$. In Section~\ref{subsec:super-poly} we show that a careful bound on the number of central signatures yields super-polynomial lower bounds for sum of ROABPs.
Now, we consider a subclass of syntactic multilinear circuits where we can show that the equivalent formula obtained by duplicating nodes as and when necessary, has small number of central signatures. To start, we consider a refinement of the set of central signatures of a formula, so that Lemma~\ref{lem:rank-full-sign} is applicable to a subset of central signatures in a formula.
Let $\Phi$ be a syntactic multilinear formula of $O(\log N)$ depth. Two central paths $\rho_1$ and $\rho_2$ in $\Phi$ are said to {\em meet at $\times$}, if their first common node along leaf to root is labeled by $\times$. A set ${\cal T}$ of central paths in $\Phi$ is said to be {\em $+$-covering}, if for every central path $\rho\notin {\cal T}$, there is a $\rho' \in {\cal T}$ such that $\rho$ and $\rho'$ meet at $\times$. A {\em signature-cover} ${\cal C}$ of $\Phi$ is the set of all signatures of the $+$-covering set ${\cal T}$ of central paths in $\Phi$.
\begin{lemma} \label{lem:covering-sign} Let $\Phi$ be a syntactic multilinear formula. Let $\varphi$ be a partition. If there is a signature-cover~${\cal C}$ of $\Phi$ such that every signature in ${\cal C}$ is $k\mbox{-}unbalanced$ with respect to $\varphi$, then
${\sf rank}(M_{f^\varphi}) \le |\Phi| \cdot 2^{N/2 - k/2}$. \end{lemma}
\makeproof{lem:covering-sign}{
We prove by induction on the structure of the formula. Let $v$ be the root gate of $\Phi$. Without loss of generality, assume that $|X_v| > 2k$. Base case is when $X_v$, is $k$-unbalanced. Then clearly, ${\sf rank}(M_{f^\varphi}) \le 2^{N/2 - k/2}$. \\ {\bf Case 1} $v$ is a $\times$ gate with children $v_1$ and $v_2$.
Then, there is an $i\in\{1,2\}$ such that every central signature containing $X_{v_i}$ is contained in ${\cal C}$. Suppose not, let $\rho_1$ and $\rho_2$ be central signatures in $\Phi$ containing $X_{v_1}$ and $X_{v_2}$ respectively such that $\rho_1,\rho_2\not\in {\cal C}$. Note that $\rho_1$ and $\rho_2$ meet at $\times$ a contradiction to the fact that ${\cal C}$ is an signature-cover. By induction, we have ${\sf rank}(M_{f_{v_i}^{\varphi}}) \le |\Phi_{v_i}|2^{|X_{v_i}|/2 - k/2}$. The required bound follows, since $|X_v| = |X_{v_1}| + |X_{v_2}|$.\\ {\bf Case 2} $v$ is a $+$ gate with children $v_1,\ldots, v_r$. Then, for every $i \in [r]$, \begin{itemize} \item Either every central signature in $\Phi$ containing $X_{v_i}$ is in ${\cal C}$; or
\item $|X_{v_i}| < |X_{v}|/2$. \end{itemize}
In first of the above cases, we have ${\sf rank}(M_{f_{v_i}^{\varphi}}) \le |\Phi_{v_i}|2^{|X_{v_i}|/2 - k/2}$ by inductive hypothesis. In the second case, we have ${\sf rank}(M_{f_{v_i}^{\varphi}}) < 2^{|X_v|/4} \le 2^{|X_v|/2 - k/2}$ since $|X_v|> 2k$. By sub additivity, we have ${\sf rank} (M_{f^\varphi}) \le \sum_{i=1}^r {\sf rank}(M_{f_{v_i}^{\varphi}}) \le \sum_{i=1}^r |\Phi_{v_i}|2^{|X_v|/2 - k/2} \le |\Phi|\cdot 2^{|X_v|/2 - k/2}$. \qed
}
Let $X_1,\ldots, X_r \subseteq X$, be subsets of variables. Let $\Delta(X_i, X_j)$ denote the Hamming distance between $X_i$ and $X_j$, i.e, $\Delta(X_i, X_j) = |(X_i \setminus X_j) \cup (X_j \setminus X_i)|$. Let $C_1: X_{11} \subseteq X_{12} \subseteq \cdots \subseteq X_{1\ell}$ and $C_2: X_{21} \subseteq X_{22} \subseteq \cdots \subseteq X_{2\ell}$ be two central signatures in $\Phi$. Define $\Delta(C_1,C_2) = \max_{1\le i\le \ell} \Delta(X_{1i}, X_{2i})$. Let ${\cal C}$ be signature-cover ~in $\Phi$.
For $\delta>0$, a {\em $\delta$-cluster} of ${\cal C}$ is a set of signatures $C_1,\ldots, C_t \in {\cal C}$ such that for every $C \in {\cal C}$, there is a $j\in [t]$ with $\Delta(C,C_j) \le \delta$. The following is immediate:
\begin{obs} \label{lem:delta-unbalanced} Let ${\cal C}$ be a signature-cover, and $C_1,\ldots, C_t$ be a $\delta$-cluster of ${\cal C}$. If $\varphi$ is a partition of $X$ such that for every $i\in [t]$, signature $C_i$ is $k\mbox{-}unbalanced$, then for every $C \in {\cal C}$, signature $C$ is $k-2\delta$ unbalanced. \end{obs}
We are ready to define the special class of sm-circuits where the above mentioned approach can be applied. For $X_1,\ldots, X_r \subseteq X$ and $\delta>0$, a $\delta$-equivalence class of $X_1,\ldots, X_r$, is a minimal set of indices $i_1,\ldots, i_t$ such that for $1\le i\le r$, there is an $i_j, 1\le j\le t$ such that $\Delta(X_i, X_{i_j}) \le \delta$.
\begin{defn} \label{def:variable-close} Let $\delta \le N \in \mathbb{N}$. Let $\Psi$ be an sm-circuit with alternating layers of $+$ and $\times$ gates. $\Psi$ is said to be $(c,\delta)$-variable close, if for for every $+$ gate $v = v_{11}\times v_{12} + \cdots + v_{r1}\times v_{r2}$, there are indices $b_1, b_2, \ldots, b_r \in \{1,2\}$ such that there is a $\delta$-equivalence class of $X_{v_{1b_1}}, \ldots, X_{v_{rb_r}}$ with at most $c$ different sets. \end{defn}
Now, we show that $(c,\delta)$ close circuits have small number of signatures: \begin{lemma} \label{lem:delta-signature-bound} Let $\Psi$ be a $(c,\delta)$-variable close syntactic multilinear arithmetic circuit of size $s$ and depth $O(\log N)$. Let $\Phi$ be the syntactic multilinear formula of size $s^{O(\log N)}$ and depth $O(\log N)$ obtained by duplicating gates in $\Psi$ as and when necessary. Then there is a signature-cover~${\cal C}$ for $\Phi$ such that ${\cal C}$ has a $\delta$-cluster consisting of at most $c^{O(\log N)}$ sets. \end{lemma}
\makeproof{lem:delta-signature-bound}{
Without loss of generality, assume the root gate of $\Phi$ is a $+$ gate, $\times$ gates have fan-in bounded by $2$, and the layers of $+$ and $\times$ gates are alternating. We construct the required $\delta$-cluster $D$ in a top down fashion as follows. \begin{enumerate} \item Initialize $D = X_v$, where $v$ is the root gate in the formula. \item For a $+$ gate $v = v_{11}\times v_{12} + \dots + v_{r1}\times v_{r2}$, let $b_1,\ldots b_r \in \{1,2\}$ be the indices guaranteed by Definition~\ref{def:variable-close}. Let the $c$ different sets in the $\delta$-equivalence class of $\{X_{v_{1b_1}}, \ldots, X_{v_{rb_r}}\}$ be $X_{i_1,b_{i_1}}, \ldots X_{i_c,b_{i_c}}$. For each partial signature $C'= C_1\subseteq C_{2} \subseteq \cdots \subseteq C_{\ell'}$ such that $C_1 = X_v$: Add to set $D$, the signatures $C'^{j} = X_{i_j,b_{i_j}} \subseteq C_1\subseteq C_2 \subseteq \dots \subseteq C_{\ell'}$ for $1\le j\le c$. Now, mark the $+$ gates $v_{i_1b_{i_1}},\ldots, v_{i_c, b_{i_c}}$. \item Repeat $2$ for every marked node until there are no marked nodes left. \end{enumerate}
The set $D$ thus obtained is a $\delta$-cluster for some signature-cover~${\cal C}$ of $\Phi$. $|D|$ is at most $c^{O(\log N)}$, since at every iteration, at most $c$ new signatures might be included for each marked node. \qed
}
Finally we conclude with the proof of Theorem~\ref{thm:lb-delta-close}: \begin{proof}[of Theorem~\ref{thm:lb-delta-close}]
Let $\Psi$ be a $(c,\delta)$-variable close circuit of depth $O(\log N)$. Let $\Phi$ be the formula obtained by duplicating nodes in $\Psi$ as necessary. By Lemma~\ref{lem:delta-signature-bound}, let $\{C_1,\ldots, C_t\}$ be a $\delta$-cluster of a signature-cover~${\cal C}$ of $\Phi$, for $t = N^{o(\log N)}$. Then, by Observations~\ref{obs:central-sign-prob} and~\ref{lem:delta-unbalanced}, the probability that there is a signature in ${\cal C}$ that is not $k-2\delta$ unbalanced is at most $t\cdot N^{-\Omega(\log N)} <1$ for $\varphi \sim {\cal D}$. Therefore, there is a $\varphi$ such that every signature in $\{C_1,\ldots, C_t\}$ is $k-2\delta$ unbalanced. By Lemma \ref{lem:covering-sign}, there is a $\varphi$ such that ${\sf rank}(M_{g^\varphi}) \le |\Phi|\cdot 2^{N/2 - (k-2\delta)} \le s^{O(\log N)} 2^{N/2 - k/5} < 2^{N/2}$ for $s< 2^{k/10\log N}$, a contradiction to Lemma \ref{lem:ry}. \qed \end{proof}
\appendix \appendixproofsection{Appendix}
\subsection{Oblivious Read-Once Algebraic Branching Programs} \label{subsec:super-poly}
In this section we demonstrate the usefulness of central signatures in the case of oblivious ROABPs. This exposition is only for demonstrative purpose, the lower bound obtained here is subsumed by Theorem~\ref{thm:lb-roabp}.
Let $P$ be an oblivious ROABP and $\Phi$ the multilinear formula for $P$ as in Lemma~\ref{lem:abptoformula}. For a gate $v$ in $\Phi$, let $I_v=[i_{v},j_{v}]$ denote the {\em interval associated with gate $v$} as in Section~\ref{sec:explb}. Let $S_v=\{x_{\ell}\mid i_v\leq \ell \leq j_v\}$ be the set of variables. Note that $X_v \subseteq S_v$. A full central signature in $\Phi$ is a sequence of sets $S_{v_1} \subseteq S_{v_2} \subseteq \cdots \subseteq S_{v_\ell}$, with $|S_{v_{i+1}}| \le 2|S_{v_i}|$ where $v_1, \ldots, v_{\ell}$ is a leaf to root path in $\Phi$.
\begin{obs} \label{obs:full-sign} Let $P$ be an oblivious ROABP computing $f$ and $\Phi$ be a multilinear formula obtained from $P$ for $f$. Let $N$ be a power of $2$ and $C : S_{v_1} \subseteq S_{v_2} \subseteq \cdots \subseteq S_{v_\ell}$
be a full central signature in $\Phi$. For $i\in \{2,\ldots\ell\}$, we have, either $|S_{v_i}| = 2|S_{v_{i-1}}|$ or $|S_{v_i}| = |S_{v_{i-1}}|$. Further, the number of full central signatures in $\Phi$ is $O(N)$. \end{obs}
\makeproof{obs:full-sign}{
Let $P$ be an oblivious ROABP and $\Phi$ be a multilinear formula obtained from $P$ for polynomial $f$. Let $N$ be a power of $2$ and $C : S_{v_1} \subseteq S_{v_2} \subseteq \cdots \subseteq S_{v_\ell}$ be a full central signature in $\Phi$. For every $2 \leq i\leq \ell$, \begin{enumerate} \item $v_i$ is a $+$ gate : $v_{i-1}$ is a child of $v_i$. Then, $I_{v_{i-1}}=I_{v_i}$. Since $S_{v_i}$ is the set of variables corresponding to the interval $I_{v_i}$ and $I_{v_{i-1}}=I_{v_i}$, we have $S_{v_{i-1}}=S_{v_i}$.
\item $v_i$ is a $\times$ gate : $v_{i-1}$ is a child of $v_i$. Let $w$ be the other child of $v_i$. Then, $I_{v_i}=I_{v_{i-1}}\cup I_w$. Since $N$ is a power of $2$, from the construction of $\Phi$ in Lemma \ref{lem:abptoformula}, we have $|I_{v_{i-1}}| = |I_w|$. Hence, $ S_{v_i} = S_{v_{i-1}} \cup S_w$ implying that $|S_{v_i}| = 2|S_{v_{i-1}}|$. \end{enumerate} For any child $u$ of a $+$ gate $v$, we have $S_u=S_v$. Therefore, we only consider full central signatures where $v_1,v_2,\ldots,v_{\ell}$ are product gates. From construction of $\Phi$ in Lemma \ref{lem:abptoformula}, depth of $\Phi$ is $O(\log N)$ and every $\times$ gate has fan-in 2. Hence, $\ell=O(\log N)$ and number of full central signatures is $2^{O(\log N)}=O(N)$. \qed
}
Let $\varphi :X \to Y \cup Z$ be a partition. We say a gate $v$ in $\Phi$ is $k\mbox{-}weak$ with respect to $\varphi$ if every full central signature in $\Phi$ that terminates at $v$ is $k\mbox{-}unbalanced$ with respect to $\varphi$.
\begin{lemma} \label{lem:rank-full-sign}
Let $P$ be an oblivious ROABP and $\Phi$ be a multilinear formula obtained from $P$ for polynomial $f$. Let $N$ be a power of $2$. If $\varphi :X \to Y \cup Z$ is such that the root gate of $\Phi$ is $k\mbox{-}weak$ with respect to $\varphi$, then ${\sf rank}(M_{f^\varphi}) \le |\Phi|\cdot 2^{N/2 - k/2}$. \end{lemma}
\makeproof{lem:rank-full-sign}{
The proof is by induction on the structure of the formula $\Phi$. Let $v$ be the root gate of $\Phi$. Assume that $|S_v| >2k$.
\noindent \textbf{Case 1 :} $v$ is $k\mbox{-}unbalanced$. Then, ${\sf rank}(M_{f^\varphi}) \le 2^{N/2 - k} \le |\Phi|\cdot 2^{N/2 - k/2}$.
\noindent \textbf{Case 2 :} $v$ is a sum gate. Let $v_1,v_2,\ldots,v_r$ be the children of $v$ in $\Phi$, $r\leq w$. Assume w.l.o.g that $v$ is not $k\mbox{-}unbalanced$, else apply Case 1. Since $v$ is $k\mbox{-}weak$ and gate $v$ is not $k\mbox{-}unbalanced$, for every $i\in [r]$ either $v_i$ is $k\mbox{-}weak$ or $k<|S_{v_i}| < |S_{v_{i+1}}|/2$. In any case, ${\sf rank}(M_{f_{v_i}}) \leq |\Phi_{v_i}|\cdot 2^{|S_{v_i}|/2-k/2}$. By sub-additivity, \begin{align*}
{\sf rank}(M_{f_v}) &\leq \sum_{i=1}^r{\sf rank}(M_{f_{v_i}}) \leq |\Phi_{v_1}|2^{|S_{v_1}|/2-k/2} + \cdots + |\Phi_{v_r}|2^{|S_{v_r}|/2-k/2} \\
& \le |\Phi|\cdot 2^{|S_v|/2 - k/2} \le |\Phi|\cdot 2^{N/2 - k/2} \text{~as $|S_v|=N$} \end{align*}
\noindent\textbf{Case 3 :} $v$ is a product gate with children $v_1$ and $v_2$. Assume w.l.o.g that $v$ is not $k\mbox{-}unbalanced$, else apply Case 1. Since $v$ is $k\mbox{-}weak$ and gate $v$ is not $k\mbox{-}unbalanced$, at least one of $v_1$ or $v_2$ is $k\mbox{-}weak$. W.l.o.g, let $v_1$ be $k\mbox{-}weak$. By induction, ${\sf rank}(M_{f_{v_1}^\varphi}) \leq |\Phi_{v_1}|\cdot 2^{|S_{v_1}|/2-k/2}$ and ${\sf rank}(M_{f_{v_2}^\varphi}) \le 2^{|S_{v_2}|/2}$. By sub-multiplicativity, ${\sf rank}(M_{f^\varphi})\leq |\Phi_{v_1}|\cdot 2^{|S_{v_1}|/2+|S_{v_2}|/2-k/2} \leq |\Phi|\cdot 2^{|S_v|/2 - k/2} \le |\Phi|\cdot 2^{N/2 - k/2}$ as $S_v = S_{v_1} \cup S_{v_2}$ and $|S_v|=N$. \qed
}
Combining Observation~\ref{obs:full-sign} and Lemma~\ref{lem:rank-full-sign}, we get \begin{corollary} Let $f_1,\ldots, f_m$ be polynomials computed by oblivious ROABPs such that $g = f_1+\ldots +f_m$, where $g=g_{1,N}$. Then, $m = N^{\Omega(1)}$. \end{corollary} \begin{remark} The above result is only to demonstrate the usefulness of full central signatures over central paths or central signatures. However, the lower bound above is far inferior to the one in Theorem~\ref{thm:lb-roabp}. \end{remark}
\subsection*{Proofs from Section \ref{subsec:sumroabp}}
\appendixproof{lem:abptoformula} \appendixproof{lem:rank} \appendixproof{obs:hitting} \appendixproof{lem:kbl}
\subsection*{Proofs from Section \ref{subsec:rpass}}
\appendixproof{lem:rpasstoformula} \appendixproof{lem:rpass} \appendixproof{thm:lb-rpass}
\subsection*{Proofs from Section \ref{subsec:sparse}}
\appendixproof{lem:sparsetoformula} \appendixproof{lem:sparse-ub} \appendixproof{thm:sparse-factor}
\subsection*{Proofs from Section \ref{subsec:super-poly}}
\appendixproof{obs:full-sign} \appendixproof{lem:rank-full-sign}
\subsection*{Proofs from Section \ref{sec:signature}}
\appendixproof{lem:covering-sign} \appendixproof{lem:delta-signature-bound}
\end{document} |
\begin{document}
\title{An $O(n \log n)$ time Algorithm for computing the Path-length Distance between Trees}
\author{David Bryant \thanks{Department of Mathematics and Statistics,
University of Otago, Dunedin, New Zealand. {\tt david.bryant@otago.ac.nz}. {\tt https://orcid.org/0000-0003-1963-5535}}
\and
Celine Scornavacca
\thanks{ISEM, CNRS, Universit\'{e} de Montpellier, IRD, EPHE,
Montpellier, France {\tt celine.scornavacca@umontpellier.fr}} }
\maketitle
\begin{abstract} Tree comparison metrics have proven to be an invaluable aid in the reconstruction and analysis of phylogenetic (evolutionary) trees. The path-length distance between trees is a particularly attractive measure as it reflects differences in tree shape as well as differences between branch lengths. The distance equals the sum, over all pairs of taxa, of the squared differences between the lengths of the unique path connecting them in each tree. We describe an $O(n \log n)$ time algorithm for computing this distance, making extensive use of tree decomposition techniques introduced by Brodal et al. \cite{Brodal04}.\\ {\bf keywords} Phylogeny; Tree comparison metrics; Path-length metric; Tree decomposition.\\ {\bf Mathematics subject classification (2010)} 68Q25 $\cdot$ 92D15 $\cdot$ 05C05 \end{abstract}
\section{Introduction} A \emph{phylogenetic tree} is a tree describing the evolution of a set of entities $X$ (species, genes etc.), which will be called \emph{taxa} from now onwards. Degree-one nodes are called \emph{leaves} and a bijective function associates each taxon to a leaf. Internal nodes represent putative ancestral taxa and branch lengths quantify the evolutionary distances between nodes.
Tree comparison metrics provide a quantitative measure of the similarity or difference between two phylogenetic trees. They have proven invaluable for statistical testing (e.g. \cite{Penny93,Holmes05,Susko11}), for visualisation \cite{Hillis05}, and for the construction of consensus trees \cite{Swofford91,Bryant03,Lapointe97}. By far the most well-known tree comparison metric is the Robinson-Foulds metric \cite{Robinson81}, which equals the number of bipartitions\footnote{A bipartition $A|B$ with $A\cup B=X$ is in a phylogenetic tree $T=(V,E)$ if there exists an edge $e\in E$ such that its removal creates two trees with taxon sets $A$ and $B$.} that are in one tree and not the other. However many other different metrics have also been proposed, each one based on a different characteristic of the trees being compared.
Here we consider pairs of trees on the same set of taxa. Also, our trees are \emph{binary}, i.e. each internal node has degree three. The {\em path-length} between two taxa in a phylogenetic tree is the sum of the branch lengths along the unique path between them. The {\em path-length distance} between two trees $T_1$ and $T_2$ is given by \begin{equation} \Delta(T_1,T_2) = \sum_{ij} (p_{ij} - q_{ij})^2, \label{eq:Delta} \end{equation} where $p_{ij}$ is the path length between taxa $i$ and $j$ in the first tree and $q_{ij}$ is the path length in the second tree. We note that $\sqrt{\Delta(T_1,T_2)}$ is a metric in the mathematical sense. The first explicit description of the metric appears in \cite{Penny93} (without branch lengths) and \cite{Lapointe97} (with branch lengths), though closely related ideas appear much earlier (e.g. \cite{Hartigan67,Farris69,Williams71}).
Given a phylogeny with $n$ leaves, it takes $O(n^2)$ time to construct the set of all path-lengths $p_{12},p_{13},\ldots,p_{(n-1)n}$, using the dynamic programming algorithm presented in \cite{Bryant97}. Hence the path-length distance can be easily computed in $O(n^2)$ time. Our main contribution in this paper is to show that we can compute this distance in $O(n \log n)$ time, which is almost, but not quite, linear in the size of the problem input.
Expanding \eqref{eq:Delta} gives \begin{equation} \Delta(T_1,T_2) = \sum_{ij} (p_{ij})^2 + \sum_{ij} (q_{ij})^2 - 2 \sum_{ij} p_{ij} q_{ij}.\label{eq:DistExpand} \end{equation} The first two terms can be evaluated in linear time using dynamic programming, as outlined in Section 2. To compute the second term efficiently we first introduce a tree decomposition technique (Section 3) allowing the sum to be evaluated in $O(n \log n)$ time (Section 4). Both the tree decomposition and algorithm of Section 4 draw heavily on an algorithm of \cite{Brodal04} for computing the quartet distance between two trees.
\section{Sums of squared distances}
In this section we show how to compute the sum of squared distances $\sum_{ij} p_{ij}^2$ in a tree in linear time. We begin by introducing some notation that will be used in the rest of the paper.
Select an arbitrary leaf $\rho$ and consider both $T_1$ and $T_2$ as rooted trees with root $\rho$. We think of $\rho$ being at the top of the tree and the other leaves being at the bottom of the tree. For any two edges $e,e'$ we write $e \preceq e'$ if the path from $e$ to $\rho$ passes through $e'$. We write $e \prec e'$ if $e \preceq e'$ and $e \neq e'$. Hence if $e$ is the edge incident with the root $\rho$ then $e' \prec e$ for all other edges $e'$. We say that $e$ is external if it is incident to a leaf other than $\rho$; otherwise $e$ is internal. When $e$ is internal let $e_L$ and $e_R$ denote the edges incident and immediately below $e$.
We will use $e,e'$ to denote edges in $T_1$ and $f,f'$ to denote edges in $T_2$. We let $x_e$ denote the length of an edge $e$ in $T_1$ and $y_f$ the length of an edge $f$ in $T_2$. Let $A_{ij}$ denote the set of edges on the path from $i$ to $j$ in $T_1$ and let $B_{ij}$ denote the corresponding set in $T_2$. Hence \[p_{ij} = \sum_{e \in A_{ij}} x_e \quad\quad q_{ij} = \sum_{f \in B_{ij}} y_f.\] Let $n(e)$ denote the number of leaves $\ell$ such that the path from $\ell$ to $\rho$ passes through $e$. Define \[\alpha(e) = \sum_{e' \preceq e} n(e') x_{e'}.\]
\begin{prop} \label{prop:singleTree} \[ \sum_{ij} (p_{ij})^2 = \sum_{\mbox{ \footnotesize $e$ internal } } \Big[ x_e (n-n(e)) (2\alpha(e) - n(e)x_e) + 2\alpha(e_L)\alpha(e_R) \Big] .\] \end{prop} \begin{proof} Given two edges $e_1,e_2$ we let \begin{equation}
\chi(e_1,e_2) = | \{ \mbox{pairs $ij$} : e_1,e_2 \in A_{ij}\} |,\label{eq:chi}\end{equation} the number of pairs having both $e_1$ and $e_2$ on the path between them. Then \begin{align} \sum_{ij} (p_{ij})^2 & = \sum_{ij} \left(\sum_{e_1 \in A_{ij}} x_{e_1} \right) \left( \sum_{e_2 \in A_{ij}} x_{e_2} \right) \nonumber \\ &= \sum_{ij} \sum_{e_1,e_2 \in A_{ij}} x_{e_1} x_{e_2}\\ & = \sum_{e_1} \sum_{e_2} \sum_{ij: e_1, e_2 \in A_{ij}} x_{e_1} x_{e_2}\\ & = \sum_{e_1} \sum_{e_2} x_{e_1} x_{e_2} \chi(e_1,e_2) \label{eq:pij2} \end{align}
If $e_1 \prec e_2$ then
$\chi(e_1,e_2) = n(e_1) (n - n(e_2))$. Hence, for any $e_2$ we have
\begin{align*}
\sum_{e_1:e_1 \prec e_2} x_{e_1} x_{e_2} \chi(e_1,e_2) &= x_{e_2} (n-n(e_2))
\sum_{e_1:e_1 \prec e_2} x_{e_1} n(e_1) \\
& =x_{e_2} (n-n(e_2)) (\alpha(e_2)-n(e_2)x_{e_2}).
\end{align*}
If $e_1 \not \preceq e_2$ and $e_2 \not \preceq e_1$ then $\chi(e_1,e_2) = n(e_1) n(e_2)$. Furthermore there is an edge $e$ with children $e_L,e_R$ such that, without loss of generality, $e_1 \preceq e_L$ and $e_2 \preceq e_R$. For such an edge $e$ we have
\begin{align*}
\sum_{e_1:e_1 \preceq e_L} \sum_{e_2:e_2 \preceq e_R} x_{e_1} x_{e_2} \chi(e_1,e_2) & = \sum_{e_1:e_1 \preceq e_L} \sum_{e_2:e_2 \preceq e_R} x_{e_1} x_{e_2} n(e_1)n(e_2) \\ & = \alpha(e_L) \alpha(e_R). \end{align*}
Summing up over all $e_1,e_2$ in \eqref{eq:pij2} we have \begin{align*} \sum_{ij} (p_{ij})^2 & = \sum_{e_1} \sum_{e_2} x_{e_1} x_{e_2} \chi(e_1,e_2) \\ & = 2\sum_{e_2} \sum_{e_1 \prec e_2} x_{e_1} x_{e_2} \chi(e_1,e_2) + 2 \sum_e \sum_{e_1 \preceq e_L} \sum_{e_2 \preceq e_R} x_{e_1} x_{e_2} \chi(e_1,e_2) + \sum_e x_{e} x_{e} \chi(e,e) \\ & = 2 \sum_{e_2} x_{e_2} (n-n(e_2)) (\alpha(e_2)-n(e_2)x_{e_2}) + 2 \sum_e \alpha(e_L) \alpha(e_R) + \sum_e x_{e} x_{e} n(e)(n-n(e)) \end{align*} and the result follows. \end{proof}
\begin{prop} \label{prop:sumSquared} The sum $\sum_{ij} (p_{ij})^2$ can be computed in linear time. \end{prop} \begin{proof} If $e$ is external, $n(e) = 1$ and $\alpha(e) = x_e$. Otherwise \begin{align*} n(e) & = n(e_L) + n(e_R) \\ \alpha(e) &= \alpha(e_L) + \alpha(e_R) + n(e) x_e. \end{align*} Hence with a post-order traversal of the tree we can compute $n(e)$ and $\alpha(e)$ for all edges $e$ in $O(n)$ time. Computing the sum takes a further $O(n)$ time by Proposition~\ref{prop:singleTree}. $ \sum_{ij} (q_{ij})^2$ can be computed in the same way.
\end{proof}
\section{Segment decomposition}
In this section we introduce a hierarchical decomposition of the edge set of $T_2$ that forms the structure used in our dynamical programming algorithm in Section 4.
Let $Q$ be a connected subset of $E(T_2)$, the set of edges of $T_2$. We define the {\em boundary} of $Q$ to be the set of vertices incident both to edges within $Q$ and to edges outside $Q$: \[ \partial Q = \{v : \mbox{ there are $e \in Q$, $e' \not \in Q$ incident with $v$ }\}.\] The {\em degree} of $Q$ is the cardinality of $\partial Q$. A {\em segment} of $T_2$ is a connected subset of $E(T_2)$ with degree at most two.
A {\em segment decomposition} for $T_2$ is a binary tree $T_D$ such that \begin{enumerate} \item[(D1)] The leaves of $T_D$ correspond to edges in $E(T_2)$ (i.e. minimal segments); \item[(D2)] Each node of $T_D$ corresponds to a segment of $T_2$; \item[(D3)] The segment corresponding to an internal node of $T_D$ equals the disjoint union of the segments corresponding to its children. \end{enumerate} An example of segment decomposition is given in Figure \ref{segDec}.
The main result in this section is that we can, in linear time, construct a segment decomposition for $T_2$ with height $O( \log n)$.
\begin{figure}
\caption{(a) A phylogenetic tree and (b) a segment decomposition for it.}
\label{segDec}
\end{figure}
The definition of a segment decomposition is based on the tree decomposition used by \cite{Brodal04} to compute quartet-based distances, which in turn are based on techniques for efficient parsing of expressions \cite{Brent74,Cohen95}. The main difference with \cite{Brodal04} is that the segment decomposition is based on partitioning the set of edges, rather than the set of vertices, and that we were able to obtain a tighter bound on the height.
Our algorithm for constructing $T_D$ is agglomerative: we start with a degree one vertex for each edge in $E(T_2)$; these form the leaves of $T_D$. Each iteration, we identify pairs of maximal nodes corresponding to pairs of segments which can be combined to give new segments. We make the nodes in each pair children of a new node. The process continues until one node remains and $T_D$ is complete.
The following Proposition shows that in any partition of $E(T_2)$ into segments we can always find a large number of pairs of disjoint segments which can be merged to give other segments.
\begin{prop}~\label{manypairs}
Let $T$ be a binary tree. Let $\mathcal{M}$ be a collection of segments which partition $E(T)$. Then there are at least $\frac{|\mathcal{M}|}{4}$ non-overlapping pairs $(A,B)$ such that $A,B \in \mathcal{M}$ and $A \cup B$ is a segment of $T$. \end{prop} \begin{proof} Let $\mathcal{G}_\mathcal{M} = (\mathcal{V}_\mathcal{M},\mathcal{E}_\mathcal{M})$ be the graph with vertex set \[ \mathcal{V}_\mathcal{M} = \bigcup_{A \in \mathcal{M}} \partial A\] and edge set \[ \mathcal{E}_\mathcal{M} = \left \{ \{u,v\}: \partial A = \{u,v\} \mbox{ for some $A \in \mathcal{M}$ } \right\}.\] Decompose $\mathcal{G}_\mathcal{M}$ into maximal paths $P_1,P_2,\ldots,P_\kappa$ which contain no degree three vertices in their interiors. For each $i$, let $\mathcal{M}_i$ be the set of elements $A \in \mathcal{M}$ such that $\partial A \subseteq P_i$. The sets $\mathcal{M}_i$ partition $\mathcal{M}$.
Fix one path $P_i = v_1,v_2,\ldots,v_\ell$. We order the elements of $\mathcal{M}_i$ lexicographically with respect to the indices of their boundary vertices. In other words, if $A,B \in \mathcal{M}_i$ satisfy $\partial A = \{v_j,v_k\}$ and $\partial B = \{v_\ell,v_m\}$ (where we might have $j=k$ or $\ell = m$) then we write $A < B$ if $\max(j,k) < \max(\ell,m)$ or $\max(j,k) = \max(\ell,m)$ and $\min(j,k)<\min(\ell,m)$. With this ordering, if $A_k$ and $A_{k+1}$ are adjacent then $(A_k \cup A_{k+1})$ is connected and has degree at most two. Hence by pairing off $A_1$ and $A_2$, $A_3$ and $A_4$, and so on, we can construct $\lfloor \frac{|\mathcal{M}_i|}{2} \rfloor$ disjoint pairs. An example is given in Figure \ref{paring}.
\begin{figure}\label{paring}
\end{figure}
The total number of pairs we obtain this way is given by $\sum_{i=1}^\kappa \lfloor \frac{|\mathcal{M}_i|}{2} \rfloor$. We will determine a lower bound for this sum. Let $d$ be the number of degree three vertices in $\mathcal{G}_\mathcal{M}$. \ Since $\mathcal{G}_\mathcal{M}$ is connected and acyclic there are $d+2$ paths $P_i$ which contain a degree one vertex in $\mathcal{G}_\mathcal{M}$ and $d-1$ paths which do not. If $P_i$ contains a degree one vertex then $\mathcal{M}_i$ contains at least one component with degree two and another component with boundary equal to the degree one vertex, so $|\mathcal{M}_i| \geq 2$.
If $P_i$ contains no degree one vertices then $|\mathcal{M}_i|$ is at least one. Let $x$ denote the number of paths $P_i$ which contain a degree one vertex and for which $|\mathcal{M}_i|$ is odd (and hence at least three). We have
\[|\mathcal{M}| = \sum_{i=1}^\kappa |\mathcal{M}_i| \geq 3x + 2(d+2 - x) + (d-1) = x + 3d + 3\] as well as $0 \leq x \leq d+2$ and $d \geq 0$.
We have that $|\mathcal{M}_i|$ is even for at least $(d+2) - x$ paths ending in a degree one vertex, and for these paths $\frac{|\mathcal{M}_i|}{2} = \lfloor \frac{|\mathcal{M}_i|}{2} \rfloor$. Thus
\[\frac{|\mathcal{M}|}{2} - \sum_{i=1}^\kappa \lfloor \frac{|\mathcal{M}_i|}{2} \rfloor \leq \frac{x}{2} + \frac{d-1}{2}.\] To bound the right hand side, note that the linear program \begin{align*} \max \quad & x+d \\ \mbox{subj. to} \quad & x -d \leq 2 \\
& x + 3d \leq |\mathcal{M}| - 3 \end{align*}
has solution $d = \frac{|\mathcal{M}|-5}{4}$, $x = \frac{|\mathcal{M}|+3}{4}$ and so $x+d \leq \frac{2|\mathcal{M}| - 2}{4}$. Hence
\[ \frac{|\mathcal{M}|}{2} - \sum_{i=1}^\kappa \lfloor \frac{|\mathcal{M}_i|}{2} \rfloor \leq \frac{|\mathcal{M}|}{4} - \frac{3}{4} \]
and $\sum_{i=1}^\kappa \lfloor \frac{|\mathcal{M}_i|}{2} \rfloor$, the number of pairs, is bounded below by $\frac{|\mathcal{M}|}{4}$.
\end{proof}
We can now state the algorithm for constructing $T_D$. Initially $T_D$ is a set of isolated vertices. As the algorithm progresses, vertices are combined into larger trees, so that each iteration $T_D$ is a forest. The algorithm terminates when $T_D$ contains a single tree.
At each iteration let $\mathcal{M}$ denote the partition of the edge set of $E(T)$ into segments corresponding to the maximal elements of the incomplete tree $T_D$. Rather than store this partition explicitly, we maintain a linked list $\mathcal{B}$ of boundary nodes. For each element $v$ in the list we maintain pointers to maximal nodes $T_D$ corresponding to segments in $\mathcal{M}$ having $v$ in their boundaries. In addition, we maintain pointers from each node in $T_D$ to the boundary nodes of the corresponding segments.
\begin{enumerate} \item Initialize $T_D$ with a forest of degree-one vertices corresponding to each edge of $E(T_2)$. Hence we initialise $\mathcal{B}$ with one element for each vertex in $V(T_2)$, with the associated pointers. At this point, $\mathcal{M}$ is the partition of $E(T_2)$ putting each edge into a separate block. \item {\bf While} $T_D$ is disconnected {\bf do} \begin{enumerate}
\item Using the construction in Proposition~\ref{manypairs} determine a set of at least $k \geq \frac{|\mathcal{M}|}{4}$ pairs $(A_1,B_1),\ldots,(A_k,B_k)$ of disjoint elements of $\mathcal{M}$ such that $A_j \cup B_j$ has at most two boundary points. \label{lemstep} \item For each pair $(A_i,B_i)$, $i=1,2,\ldots,k$, create a new node of $T_D$ corresponding to $A_i \cup B_i$ and with children corresponding to $A_i$ and $B_i$. \item Update the list $\mathcal{B}$ of boundary vertices and the associated pointers. \end{enumerate} \end{enumerate}
\begin{thm} We can construct a segment decomposition tree $T_D$ for $T_2$ with height $O(\log n)$ in $O(n)$ time. \end{thm} \begin{proof} We only merge nodes if the union of their corresponding segments is also a segment. Hence $T_D$ will be a segment decomposition tree. It remains to prove the bound on height and running time.
We note that $|\mathcal{M}|$ reduces by a factor of $\frac{3}{4}$ each iteration. Hence the number of iterations is at most $ \log_{\frac{4}{3}} (2n-3)$, which is also a bound on the height of the tree.
Using the list of boundary points $\mathcal{B}$ we can construct $\mathcal{G}_{\mathcal{M}}$ and identify pairs, in $O(|\mathcal{M}|)$ time each iteration. Thus the total running time is at most $O( n (\sum_{i=0}^{\infty} \left( \frac{3}{4} \right)^i )) = O(n)$ time.
\end{proof}
We can strengthen the height bound. We say that a tree is $k$-locally balanced if, for all nodes $v$ in the tree, the height of the subtree rooted at $v$ is at most $k \cdot (1 + \log|v|)$. As the algorithm can be applied recursively on each node of $T_D$ we have that the global height bound applies to each node. Hence
\begin{cor} The segment decomposition $T_D$ is $(1/\log \frac{4}{3})$-locally balanced. \end{cor}
\section{Computing the inner product}
In this section we show that $\sum_{ij} p_{ij} q_{ij}$ can be computed in $O(n \log n)$ time, so that the main result follows from Eq.~\eqref{eq:DistExpand}.
A \emph{(taxon) colouring} is an assignment $c$ of the colors black and white to the taxa. For each edge $e$ of $T_1$ we let $c_e$ denote a coloring assigning black to those taxa on one side of $e$ and white to those on the other. For each edge $f$ in $E(T_2)$ and each colouring $c$ of the set of taxa, we let $\widetilde{\chi}(c,f)$ denote the number of pairs of taxa $ij$ such that $i$ and $j$ have different colours and they label leaves on different sides of $f$.
\begin{lem} \label{lem:InnerIsSum} \begin{equation} \sum_{ij} p_{ij} q_{ij} = \sum_{e \in E(T_1) } \sum_{f \in E(T_2) } x_{e} y_{f} \widetilde{\chi}(c_e,f) \label{eq:pijqij} \end{equation} \end{lem} \begin{proof} \begin{align} \sum_{ij} p_{ij} q_{ij} & = \sum_{ij} \left(\sum_{e:e \in A_{ij}} x_{e} \right) \left( \sum_{f:f \in B_{ij}} y_{f} \right) \nonumber \\ &= \sum_{ij} \sum_{e \in A_{ij}} \sum_{f \in B_{ij}} x_e y_{f}\\ & = \sum_{e \in E(T_1)} \sum_{f \in E(T_2)} x_{e} y_{f} \widetilde{\chi}(c_e,f) . \end{align}
\end{proof}
For the remainder of this section we will assume that the vertices in $T_2$ are indexed $v_1,v_2,\ldots,v_{2n-3}$. The actual ordering does not matter; it is only used to help presentation.
Let $T_D$ be the segment decomposition tree constructed for $T_2$ using the Algorithm in Section 3. For each node $v$ of $T_D$ we let $Q_v \subseteq E(T_2)$ denote corresponding segment in $T_2$. The overall strategy at this point is to compute values for each node in $T_D$ which will allow us to: (i) compute, for an initial choice of $e \in E(T_1)$, the sum $ \sum_{f \in E(T_2) } x_{e} y_{f} \widetilde{\chi}(c_e,f) $ in linear time, and (ii) update this computation efficiently as we iterate in a particular way through edges $e$ of $T_1$.
We will store three pieces of information at every non-root node $v$ of $T_D$, the exact type of information stored being dependent on the degree of the segment $Q_v$ corresponding to $v$.\\ If $Q_v$ is degree one then we store: \begin{itemize} \item[$\circ$] Two integer counts $w_v,b_v$ \item[$\circ$] A description (e.g. coefficients) for a quadratic polynomial $\phi_v(\cdot,\cdot)$ with two variables. \end{itemize} If $Q_v$ has degree two then we store: \begin{itemize} \item[$\circ$] Two integer counts $w_v,b_v$ \item[$\circ$] A description (e.g. coefficients) for a quadratic polynomial $\phi_v(\cdot,\cdot,\cdot,\cdot)$ with four variables. \end{itemize}
We now show how the values $b_v,w_v$ and $\phi_v$ are computed using a colouring $c$ of the taxa. We start at the leaves of $T_D$ and work upwards towards the root.
Suppose that $v$ is a leaf of $T_D$, so that $Q_v$ contains a single edge $f$ of $T_2$. There are two cases. \begin{enumerate} \item The edge $f$ is incident with a leaf $u$ of $T_2$, so $Q_v$ has degree one. If $c(u)$ is black then $b_v = 1$ and $w_v = 0$, while if $c(u)$ is white we have $w_v = 1$ and $b_v = 0$. In either case \begin{equation} \phi_v(b,w) = y_f (b \cdot w_v + w \cdot b_v).\end{equation} \item The edge $f$ is not incident with a leaf of $T_2$, so $Q_v$ has degree two. Then $b_v = w_v = 0$ and \begin{equation} \phi_v(b_1,w_1,b_2,w_2) = (b_1 w_2 + b_2 w_1) y_f.\end{equation} \end{enumerate}
Now suppose that $v$ is an internal vertex of $T_D$.
Once again there are several cases, however in all cases we have \begin{align*} b_v & = b_{v_L} + b_{v_R} \\ w_v & = w_{v_L} + w_{v_R}. \end{align*} \begin{enumerate}
\setcounter{enumi}{2} \item Suppose $Q_{v_L}$ and $Q_{v_R}$ have degree one. Then \begin{equation} \phi_v(b,w) = \phi_{v_L}(b+b_{v_R},w+w_{v_R}) + \phi_{v_R}(b+b_{v_L},w+w_{v_L}). \label{eq:Qv1} \end{equation} \item Suppose $Q_{v_L}$ has degree two and $Q_{v_R}$ has degree one, where $\partial Q_{v_L} = \{v_i,v_j\}$ and $Q_{v_R} = \{v_j\}$. \begin{align} \intertext{(a) If $Q_v$ has degree one and $i<j$ then} \phi_v(b,w) &= \phi_{v_L}(b,w,b_{v_R},w_{v_R}) + \phi_{v_R}(b+b_{v_L},w+w_{v_L});\\ \intertext{(b) If $Q_v$ has degree one and $i>j$ then} \phi_v(b,w) &= \phi_{v_L}(b_{v_R},w_{v_R},b,w) + \phi_{v_R}(b+b_{v_L},w+w_{v_L});\\ \intertext{(c) If $Q_v$ has degree two and $i<j$ then} \phi_v(b_1,w_1,b_2,w_2) &= \phi_{v_L}(b_1,w_1,b_2+b_{v_R},w_2+w_{v_R}) + \phi_{v_R}(b_1+b_2+b_{v_L},w_1+w_2+w_{v_L});\\ \intertext{(d) If $Q_v$ has degree two and $i>j$ then} \phi_v(b_1,w_1,b_2,w_2) &= \phi_{v_L}(b_1+b_{v_R},w_1+w_{v_R},b_2,w_2) + \phi_{v_R}(b_1+b_2+b_{v_L},w_1+w_2+w_{v_L}). \end{align} \item The case when $Q_{v_L}$ has degree one and $Q_{v_R}$ has degree two is symmetric. \item Suppose that $Q_{v_L}$ and $Q_{v_R}$ have degree two, that $\partial Q_{v_L} = \{v_i,v_j\}$ and $\partial Q_{v_R} = \{v_j,v_k\}$. We can assume that $i<k$ since the alternative case follows by symmetry. This leaves three possibilities: \begin{align} \intertext{(a) If $i<j$ and $j<k$ then} \phi_v(b_1,w_1,b_2,w_2) &= \phi_{v_L}(b_1,w_1,b_2+b_{v_R},w_2+w_{v_R}) + \phi_{v_R}(b_1+b_{v_R},w_1+w_{v_R},b_2,w_2);\\ \intertext{(b) If $i<j$ and $j>k$ then}
\phi_v(b_1,w_1,b_2,w_2) &= \phi_{v_L}(b_1,w_1,b_2+b_{v_R},w_2+w_{v_R}) + \phi_{v_R}(b_2,w_2,b_1+b_{v_L},w_1+w_{v_L});\\ \intertext{(c) If $j<i$ and (hence) $j<k$ then} \phi_v(b_1,w_1,b_2,w_2) &= \phi_{v_L}(b_1+b_{v_R},w_1+w_{v_R},b_1,w_1) + \phi_{v_R}(b_1+b_{v_R},w_1+w_{v_R},b_2,w_2). \label{eq:Qvend} \end{align} \end{enumerate}
An illustration for several of these cases can be found in Figure \ref{cases} below. \begin{figure}
\caption{Cartoons of segment merging for several cases discussed in the main text.}
\label{cases}
\end{figure}
\begin{lem} Suppose that $b_v,w_v$ and $\phi_v$ have been computed as above for all nodes of $T_D$ except the root. Let $v_L$ and $v_R$ be the children of the root of $T_D$. Then \[\sum_{f \in E(T_2)} \widetilde{\chi}(c,f) y_f = \phi_{v_L}(b_{v_R},w_{v_R}) + \phi_{v_R}(b_{v_L},w_{v_L}).\] \end{lem} \begin{proof} For any node $v$ of $T_D$ we let $L_v$ denote the set of leaves of $T_2$ not incident with an edge of $Q_v$. If $Q_v$ has degree two and boundary $\{v_i,v_j\}$, $i<j$, then we let $L^{(1)}_v$ be the leaves in $L_v$ which are closest to $v_i$ and $L^{(2)}_v$ the leaves in $L_v$ which are closest to $v_j$. Let $\tilde{c}$ be any colouring of the leaves of $T_2$, possibly distinct from $c$. Let $B$ and $W$ be the sets of leaves that $\tilde{c}$ colours black and white respectively.
We will establish the following claims for all nodes $v$ in $T_D$, using induction on the height of the node. \begin{enumerate} \item[(C1)] $b_v$ and $w_v$ are the number of leaves incident with edges in $Q_v$ which are coloured black and white by $c$ (and hence by $\tilde{c}$).
\item[(C2)] If $Q_v$ has degree one, $b = |B \cap L_v|$ and $w = |W \cap L_v|$ then \[ \sum_{f \in Q_v} \widetilde{\chi}(\tilde{c},f) y_f = \phi_v( b,w ).\]
\item[(C3)] If $Q_v$ has degree two, $b_1 = |B \cap L^{(1)}_v|$, $w_1 = |W \cap L^{(1)}_v|$, $b_2 = |B \cap L^{(2)}_v|$, and $w_2 = |W \cap L^{(2)}_v|$ then \[ \sum_{f \in Q_v} \widetilde{\chi}(\tilde{c},f) y_f = \phi_v( b_1,w_1,b_2,w_2 ).\] \end{enumerate}
We start by considering any leaf $v$ of $T_D$. In this case, $Q_v$ contains a single edge $f$. If $f$ is an edge incident to a leaf coloured white then $b_v = 0$, $w_v = 1$ as required, and $\widetilde{\chi}(\tilde{c},f)$ equals the number of leaves coloured black by $\tilde{c}$, so
\[ \widetilde{\chi}(\tilde{c},f) y_f = |B \cap L_v| y_f = (b w_v + w b_v)y_f = \phi_v(b,w).\] The same holds if the leaf is coloured black.\\
If the edge $f$ is internal then $b_v=w_v=0$, and $\widetilde{\chi}(\tilde{c},f)$ is equal to the number of paths crossing $f$ connecting leaves with different colours, or
\[ |B \cap L_v^{(1)}| |W \cap L_v^{(2)}| + |W \cap L_v^{(1)}| |B \cap L_v^{(2)}| = b_1 w_2 + w_1 b_2,\] so $\widetilde{\chi}(\tilde{c},f) y_f = \phi_v(b_1,w_1,b_2,w_2)$.
Now consider the case when $v$ is an internal node of $T_D$, other than the root. Let $v_L$ and $v_R$ be the two children of $v$. Note that $Q_v$ is the disjoint union of $Q_{v_L}$ and $Q_{v_R}$, so $b_v = b_{v_L}+b_{v_R}$ and $w_v = w_{v_L}+w_{v_R}$, proving (C1). \\
Furthermore, we have \[\sum_{f \in Q_v} \widetilde{\chi}(\tilde{c},f) = \sum_{f \in Q_{v_L}} \widetilde{\chi}(\tilde{c},f) + \sum_{f \in Q_{v_R}} \widetilde{\chi}(\tilde{c},f).\] If $Q_{v_L}$ has degree one then, by the induction hypothesis, \[\sum_{f \in Q_{v_L}} \widetilde{\chi}(\tilde{c},f) = \phi_{v_L}(b',w')\] where $b'$ and $w'$ are the numbers of leaves coloured black and white that are not incident with edges in $Q_{v_L}$. Similarly, if $Q_{v_L}$ has degree two then, by the induction hypothesis, \[\sum_{f \in Q_{v_L}} \widetilde{\chi}(\tilde{c},f) = \phi_{v_L}(b_1',w_1',b_2',w_2')\] where $b_1'$ and $w_1'$ are the numbers of leaves coloured black and white that are not incident with edges in $Q_{v_L}$ and are closer to the boundary vertex of $Q_{v_L}$ with the smallest index, while $b_2'$ and $w_2'$ are the numbers of leaves coloured black and white that are not incident with edges in $Q_{v_L}$ and are closer to the boundary vertex of $Q_{v_L}$ with the largest index. The symmetric result holds for $Q_{v_R}$. \\
The different cases in Eq. \eqref{eq:Qv1} to Eq. \eqref{eq:Qvend} now correspond to the different counts for $b',w'$ or for $b_1',w_1',b_2',w_2'$ depending on whether $Q_{v_L}$ and $Q_{v_R}$ have degree one or two, and whether the boundary vertices in common had the highest or lowest index for each segment.
Now suppose that $v_L$ and $v_R$ are the children of the root of $T_D$. Then $\partial (Q_{v_L} \cup Q_{v_R}) = \emptyset$ so $Q_{v_L}$ and $Q_{v_R}$ must both have degree one. We have that $E(T_2)$ is the disjoint union of $Q_{v_L}$ and $Q_{v_R}$. Any leaf not incident to an edge in $Q_{v_L}$ is incident to an edge in $Q_{v_R}$ and vice versa. Hence, by (C1) and (C2), \[\sum_{f \in E(T_2)} \widetilde{\chi}(c,f) y_f = \sum_{f \in Q_{v_L}} \widetilde{\chi}(c,f) y_f + \sum_{f \in Q_{v_R}} \widetilde{\chi}(c,f) y_f = \phi_{v_L}(b_{v_R},w_{v_R}) + \phi_{v_R}(b_{v_L},w_{v_L}),\]
as required.
\end{proof}
Evaluating Eq. \eqref{eq:Qv1} to Eq. \eqref{eq:Qvend} takes constant time and space per node of $T_D$, since we manipulate and store a constant number of polynomials with at most four variables and total degree at most two. Thus, evaluating Eq. \eqref{eq:Qv1} to Eq. \eqref{eq:Qvend} takes $O(n)$ time and space for each colouring, and since we want to sum this quantity over all colourings $c_e$ from edges $e \in E(T_1)$ a naive implementation would still take $O(n^2)$ time. The key to improving this bound is in the use of efficient updates.
\begin{lem} Suppose that we have computed $b_v$, $w_v$ and the functions $\phi_v$ for all $v \in T_D$, using a leaf colouring $c$. Let $\tilde{c}$ be a colouring which differs from $c$ at $k$ leaves. Then we can update the values $b_v$, $w_v$ and the functions $\phi_v$ in $O(k + k \log(n/k))$ time. \end{lem} \begin{proof}
Let $F'$ be the set of edges of $T_2$ which are incident to a leaf for which $c$ and $\tilde{c}$ have a different colour, so $|F'| = k$. The only nodes $v$ in $T_D$ which need to be updated are those with $f \in Q_v$ for some $f \in F'$. This is a union of the paths from $k$ leaves of $T_D$ to the root of $T_D$, and so by Lemma 2 of \cite{Brodal04}, it has size $O(k+k \log(n/k))$.
\end{proof}
The final step is to show that we can navigate the edges in $E(T_1)$ so that the total number of changes in the colourings is bounded appropriately. Suppose that $T_1$ is rooted at the leaf $\rho$ (the same as $T_2$). For each internal node $u$ in $T_1$ we let Small($u$) denote the child of $u$ with the smallest number of leaf descendants and let Large($u$) denote the child with the largest number of leaf descendants, breaking ties arbitrarily.
The following recursive procedure returns the sum of \[\sum_{f \in E(T_2) } x_{e} y_{f} \widetilde{\chi}(c_e,f) \] over all edges $e \in E(T_1)$. Initially we let $e$ be the edge incident with the root $\rho$. Let $c$ be the colouring where $\rho$ is black and all other leaves white. We initialise $T_D$ and fill out the values $b_v$, $w_v$ and $\phi_v$ for all nodes $v$ of $T_D$ using the colouring $c$. We then call {\sc Sum}($u$) where $u$ is the unique internal node adjacent to $\rho$.
\begin{figure}
\caption{Recursive algorithm {\sc Sum}}
\end{figure}
We see that the algorithm makes a pre-order traversal of $T_1$, evaluating the sum \[\sum_{f \in E(T_2) } x_{e} y_{f} \widetilde{\chi}(c_e,f)\] for each edge $e$ and accumulating the total. Thus by Lemma~\ref{lem:InnerIsSum}, the algorithm returns $\sum_{ij} p_{ij} q_{ij}$.
The running time is dominated by the time required to update $T_D$. For each leaf, the update is made after only one leaf changes colour, so this takes $O(n \log n)$ summed over all leaves. For every other node $u$ in the tree, the number of nodes of $T_D$ to update is $O(k + k \log(n/k))$ where $k$ is the number of leaves in the subtree rooted at Small($u$).
\begin{lem} Let $T$ be a rooted binary tree with $n$ leaves and for each internal node $u$ of $T$ let $k_u$ denote the number of leaves in the smallest subtree rooted at a child of $u$. Then \[\sum_{\mbox{\footnotesize $u$ internal}} k_u \log(n/k_u) \leq n \log n.\] \end{lem} \begin{proof} This is a restatement of Lemma 7 in \cite{Brodal04}.
\end{proof}
\begin{thm} Algorithm {\sc Sum} computes $\sum_{ij} p_{ij} q_{ij}$ in $O(n \log n)$ time. Hence the path length distance between $T_1$ and $T_2$ can be computed in $O(n \log n)$ time. \end{thm}
\end{document} |
\begin{document}
\pagestyle{headings} \flushbottom \maketitle
\subsection*{} Keywords : scalar conservation laws, level sets, kinetic approximation, maximal monotone operator \\ \\ AMS classification~: 35L65, 47H05
\section*{Abstract}
We show that Kruzhkov's theory of entropy solutions to multidimensional scalar conservation laws \cite{Kr} can be entirely recast in $L^2$ and fits into the general theory of maximal monotone operators in Hilbert spaces. Our approach is based on a combination of level-set, kinetic and transport-collapse approximations, in the spirit of previous works by Giga, Miyakawa, Osher, Tsai and the author \cite{Br1,Br2,Br3,Br4,GM,TGO}.
\section{A short review of Kruzhkov's theory}
First order systems of conservation laws read: $$ \partial_t u+\sum_{i=1}^d \partial_{x_i}(Q_i(u))=0, $$ or, in short, using the nabla notation, \begin{equation} \label{scalar} \partial_t u+\nabla_x\cdot (Q(u))=0, \end{equation} where $u=u(t,x)\in {\Bbb R}^m$ depends on $t\ge 0$, $x\in {\Bbb R}^d$, and $\cdot$ denotes the inner product in ${\Bbb R}^d$. The $Q_i$ (for $i=1,\dots,d$) are given smooth functions from ${\Bbb R}^m$ into itself. The system is called hyperbolic when, for each $\tau\in{\Bbb R}^d$ and each $U\in{\Bbb R}^m$, the $m\times m$ matrix $\sum_{i=1}^{d}\tau_i Q'_i(U)$ can be put in diagonal form with real eigenvalues. There is no general theory to solve globally in time the initial value problem for such systems of PDEs. (See \cite{BDLL,Da,Ma,Se} for a general introduction to the field.) In general, smooth solutions are known to exist for short times but are expected to blow up in finite time. Therefore, it is usual to consider discontinuous weak solutions, satisfying additional 'entropy' conditions, to address the initial value problem, but nothing is known, in general, about their existence. Some special situations are far better understood. First, for some special systems (enjoying 'linear degeneracy' or 'null conditions'), smooth solutions may be global (shock free), at least for 'small' initial data (see \cite{Kl}, for instance). Next, in one space dimension $d=1$, for a large class of systems, existence and uniqueness of global weak entropy solutions have been (recently) proven for initial data of sufficiently small total variation \cite{BB}. Still, in one space dimension, for a limited class of systems (typically for $m=2$), existence of global weak entropy solutions has been obtained for large initial data by 'compensated compactness' arguments \cite{Ta,Di,LPS}. Finally, there is a very comprehensive theory in the much simpler case of a \emph{single} conservation law, i.e. when $m=1$. 
Then, equation (\ref{scalar}) is called a 'scalar conservation law'. Kruzhkov \cite{Kr} showed that such a scalar conservation law has a unique 'entropy solution' $u\in L^\infty$ for each given initial condition $u_0\in L^\infty$. (If the derivative $Q'$ is further assumed to be bounded, then we can substitute $L^1_{loc}$ for $L^\infty$ in this statement.) An entropy (or Kruzhkov) solution is an $L^\infty$ function that satisfies the following distributional inequality \begin{equation} \label{entropy} \partial_t C(u)+\nabla_x\cdot (Q^C(u))\le 0, \end{equation} for all Lipschitz convex function $C:{\Bbb R}\rightarrow {\Bbb R}$, where the derivative of $Q^C$ is defined by $(Q^C)'=C'Q'$. In addition, the initial condition $u_0$ is prescribed in $L^1_{loc}$, namely: \begin{equation} \label{continuity}
\lim _{t\rightarrow 0} \int_{B} |u(t,x)-u_0(x)|dx=0, \end{equation} for all compact subset $B$ of ${\Bbb R}^d$. Beyond their existence and uniqueness, the Kruzhkov solutions enjoy many interesting properties. Each entropy solution $u(t,\cdot)$, with initial condition $u_0$, continuously depends on $t\ge 0$ in $L^1_{loc}$ and can be written $T(t)u_0$, where $(T(t),t\ge 0)$ is a family of order preserving operators: \begin{equation} \label{order T} T(t)u_0\;\ge\; T(t)\tilde u_0\;,\;\;\forall t\ge 0, \end{equation} whenever $u_0\ge \tilde u_0$. Since constants are trivial entropy solutions to (\ref{scalar}), it follows that if $u_0$ takes its values in some fixed compact interval, so does $u(t,\cdot)$ for all $t\ge 0$. Next, two solutions $u$ and $\tilde u$, with $u_0-\tilde u_0\in L^1$, are $L^1$ stable with respect to their initial conditions: \begin{equation} \label{L1 stability}
\int|u(t,x)-\tilde u(t,x)|dx \le
\int|u_0(x)-\tilde u_0(x)|dx, \end{equation} for all $t\ge 0$. As a consequence, the total variation $TV(u(t,\cdot))$ of a Kruzhkov solution $u$ at time $t\ge 0$ cannot be larger than the total variation of its initial condition $u_0$. This easily comes from the translation invariance of (\ref{scalar}) and from the following definition of the total variation of a function $v$: \begin{equation} \label{TV} TV(v)=\sup_{\eta\in{\Bbb R}^d,\;\;\eta\ne 0}
\int\frac{|v(x+\eta)-v(x)|}{||\eta||}dx, \end{equation}
where $||\cdot||$ denotes the Euclidean norm on ${\Bbb R}^d$. The space $L^1$ plays a key role in Kruzhkov's theory.
There is no $L^p$ stability with respect to initial conditions in any $p>1$. Typically, for $p>1$, the Sobolev norm $||u(t,\cdot)||_{W^{1,p}}$ of a Kruzhkov solution blows up in finite time. This fact has induced a great amount of pessimism about the possibility of a unified theory of global solutions for general multidimensional systems of hyperbolic conservation laws. Indeed, simple linear systems, such as the wave equation (written as a first order system) or the Maxwell equations, are not well posed in any $L^p$ but for $p=2$ \cite{Brn}. However, as shown in the present work, $L^2$ is a perfectly suitable space for entropy solutions to multidimensional scalar conservation laws, provided a different formulation is used, based on a combination of level-set, kinetic and transport-collapse approximations, in the spirit of previous works by Giga, Miyakawa, Osher, Tsai and the author \cite{Br1,Br2,Br3,Br4,GM,TGO}.
\section{Kruzhkov solutions revisited} \subsection{A maximal monotone operator in $L^2$}
Subsequently, we restrict ourself, for simplicity, to initial conditions $u_0(x)$ valued in $[0,1]$ and spatially periodic of period 1 in each direction. In other words, the variable $x$ will be valued in the flat torus ${\Bbb T}^d={\Bbb R}^d/{\Bbb Z}^d$. \\ \\ Let us now introduce: \\ 1) the space $L^2([0,1]\times {\Bbb T}^d)$ of all square integrable functions $$(a,x)\in [0,1]\times {\Bbb T}^d\rightarrow Y(a,x)\in{\Bbb R}\;,$$ 2) the closed convex cone $K$ of all $Y\in L^2$ such that $\partial_a Y\ge 0$ (in the sense of distributions), \\ 3) the subdifferential of $K$ defined at each point $Y\in K$ by: \begin{equation} \label{subdifferential} \partial K(Y)=\{Z\in L^2,\;\;\; \int (\tilde Y-Y)Z\;dadx\le 0\;, \;\;\;\forall \tilde Y\in K\}\;, \end{equation} 4) the maximal monotone operator (MMO) (see \cite{Brz}): \begin{equation} \label{operator} Y\rightarrow - q(a)\cdot\nabla_x Y+\partial K(Y), \end{equation} where $q(a)=Q'(a)$, and the corresponding subdifferential equation \cite{Brz}: \begin{equation} \label{inclusion} 0\;\in\;\; \partial_t Y+ q(a)\cdot\nabla_x Y+\partial K(Y). \end{equation}
From maximal monotone operator theory \cite{Brz}, we know that, for each initial condition $Y_0\in K$, there is a unique solution $Y(t,\cdot)\in K$ to (\ref{inclusion}), for all $t\ge 0$. More precisely, we will use the following definition (which includes the possibility of a left-hand side $q_0\in L^2([0,1])$):
\begin{Definition} \label{def} $Y$ is a solution to \begin{equation} \label{inclusion bis} q_0(a)\;\in\;\; \partial_t Y+ q(a)\cdot\nabla_x Y+\partial K(Y), \end{equation} with initial value $Y_0\in K$ and left-hand side $q_0\in L^2([0,1])$, if: \\ 1) $t\rightarrow Y(t,\cdot)\in L^2$ is continuous and valued in $K$, with $Y(0,\cdot)=Y_0,$ \\ 2) $Y$ satisfies, in the sense of distribution, \begin{equation} \label{semi-integral}
\frac{d}{dt} \int |Y-Z|^2 dadx \le 2\;\int (Y-Z)(q_0(a)-\partial_t Z- q(a)\cdot\nabla_x Z)dadx, \end{equation} for each smooth function $Z(t,a,x)$ such that $\partial_a Z\ge 0$.
\end{Definition}
\begin{Proposition} \label{Proposition} For each $Y_0\in K$, and $q_0\in L^2([0,1])$, there is a unique solution $Y$ to (\ref{inclusion bis}) in the sense of Definition \ref{def}. If both $Y_0$ and $q_0$ belong to $L^\infty$, then we have for all $t\ge0$: \begin{equation} \label{maxi} -t\sup (-q_0)_++\inf Y_0\le Y(t,\cdot)\le \sup Y_0+t\sup (q_0)_+. \end{equation} If $\nabla_x Y_0$ belongs to $L^2$, then so do $\partial_t Y(t,\cdot)$ and $\nabla_x Y(t,\cdot)$ for all $t\ge 0$. Two solutions $Y$ and $\tilde Y$ to (\ref{inclusion bis}) (with different left-hand side $q_0$ and $\tilde q_0$) are $L^2$ stable with respect to their initial conditions $Y_0$ and $\tilde Y_0$ in $K$: \begin{equation} \label{L2 stability}
||Y(t,\cdot)-\tilde Y(t,\cdot)||_{L^2}
\le ||Y_{0}-\tilde Y_{0}||_{L^2}+t||q_0-\tilde q_0||_{L^2}. \end{equation} for all $t\ge 0$. This is also true for all $p\ge 1$, when both $Y_0-\tilde Y_0$ and $q_0-\tilde q_0$ belong to $L^p$: \begin{equation} \label{Lp stability}
||Y(t,\cdot)-\tilde Y(t,\cdot)||_{L^p}
\le ||Y_{0}-\tilde Y_{0}||_{L^p}+t||q_0-\tilde q_0||_{L^p}. \end{equation} \end{Proposition}
For the sake of completeness, a brief proof of these (standard) results will be provided at the end of the paper.
\subsection{The main result}
Our main result is \begin{Theorem} \label{main} Let $Y=Y(t,a,x)$ be a solution to the subdifferential equation (\ref{inclusion}) with initial condition $Y_0\in L^\infty$, with $\partial_a Y_0\ge 0$. Then, \begin{equation} \label{solution} u(t,y,x)=\int_0^1 H(y-Y(t,a,x))da, \end{equation} defines a one parameter family (parameterized by $y\in{\Bbb R}$) of Kruzhkov solution to (\ref{scalar}), valued in $[0,1]$. In addition, all Kruzhkov solutions, with initial values in $L^\infty$, can be recovered this way (up to a trivial rescaling). \end{Theorem}
Let us rapidly check the last statement of our main result. We must show that any Kruzhkov solution $U(t,x)$ with initial condition $U_0(x)$ valued in $L^\infty$ can be recovered from a solution to (\ref{inclusion}). To do that, according to the first part of the theorem, it is enough to find an $L^\infty$ function $Y_0(a,x)$ such that $\partial_a Y_0\ge 0$ and $$ U_0(x)=\int_0^1 H(y-Y_0(a,x))da, $$ for some $y\in{\Bbb R}$, say $y=1$. This is always possible, up to rescaling, by assuming: $$r\le U_0(x)\le 1-r$$ for some constant $r>0$. Indeed, we set $$ u_0(y,x)=\max(0,\min(1,y\;U_0(x))) $$ so that $U_0(x)=u_0(1,x)$ and $\partial_y u_0\ge 0$. \\ Then, for each fixed $x$, we solve $u_0(y,x)=a$ by $y=Y_0(a,x)$, setting: $$ Y_0(a,x)=\frac{a}{U_0(x)},\;\;\;\forall a\in [0,1],\;\;\;\forall x\in{\Bbb T}^d, $$ so that $$ u_0(y,x)=\int_0^1 H(y-Y_0(a,x))da. $$ (Notice that $Y_0$ is valued in $[0,r^{-1}]$.) Finally, according to the first part of the theorem, we get $$ U(t,x)=\int_0^1 H(1-Y(t,a,x))da, $$ where $Y$ is the solution to (\ref{inclusion}) with initial condition $Y_0$.
\subsubsection{Remark}
Notice that, for all $t\ge 0$, the level sets of $Y$ and $U$ are related by: $$ \{(a,x),\;\;\;U(t,x)\ge a\}=\{(a,x),\;\;\;Y(t,a,x)\le 1\}. $$ Thus, the method of construction of $Y_0$ out of $U_0$ and the derivation of $U(t,x)$ from $Y(t,a,x)$ can be related to level-set methods in the spirit of \cite{FSS,Gi1,OF,TGO}. This is why we may call 'level-set formulation' of the scalar conservation law (\ref{scalar}) the subdifferential equation given by (\ref{inclusion}).
\subsubsection{Remark}
The solutions $(t,x)\rightarrow u(t,y,x)$, parameterized by $y\in{\Bbb R}$, are automatically ordered in $y$. Indeed, $\partial_y u\ge 0$ immediately follows from representation formula (\ref{solution}). This is consistent with the order preserving property of Kruzhkov's theory (as explained in the first section).
\subsection{A second result}
The function $u(t,y,x)$, given by (\ref{solution}), can also be considered as a $single$ Kruzhkov solution of a scalar conservation law in the enlarged $(1+d)$ dimensional space ${\Bbb R}\times{\Bbb T}^d$, namely \begin{equation} \label{scalar bis} \partial_t u+\partial_y (Q_0(u))+\nabla_x\cdot(Q(u))=0, \end{equation} with $(y,x)\in{\Bbb R}\times{\Bbb T}^d$, provided: \\ \\ 1) $Q_0$ is zero, \\ 2) the initial condition $u_0(y,x)$ is valued in $[0,1]$ and $\partial_y u_0\ge 0$. \\ Furthermore, it turns out that, if we add the left-hand side $q_0(a)=Q'_0(a)$ to (\ref{inclusion}), so that we get (\ref{inclusion bis}): $$ q_0(a)\;\in\;\; \partial_t Y+ q(a)\cdot\nabla_x Y+\partial K(Y), $$ and solve for $Y$, then the corresponding $u$ given by (\ref{solution}) is a Kruzhkov solution to (\ref{scalar bis}). \\ As a matter of fact, our proof will be done in this larger framework. We assume that $q_0$, $q$ and $Y_0$ are given in $L^\infty$, for simplicity. Without loss of generality, up to easy rescalings, we may assume that both $q_0$ and $Y_0$ are nonnegative, which simplifies some notations.
\begin{Theorem} \label{main bis} Assume that $q_0$ and $q$ are given in $L^\infty$, with $q_0\ge 0$. Let $Y=Y(t,a,x)$ be a solution to the subdifferential equation (\ref{inclusion bis}), with initial condition $Y_0\in L^\infty$, $Y_0\ge 0$ and $\partial_a Y_0\ge 0$. Then, \begin{equation} \label{solution bis} u(t,y,x)=\int_0^1 H(y-Y(t,a,x))da, \end{equation} is the unique Kruzhkov solution to (\ref{scalar bis}) with initial condition: \begin{equation} \label{initial bis} u_0(y,x)=\int_0^1 H(y-Y_0(a,x))da. \end{equation} In addition, $Y$ is nonnegative and can be recovered from $u$ as: \begin{equation} \label{level} Y(t,a,x)=\int_0^\infty H(a-u(t,y,x))dy. \end{equation} \end{Theorem}
Before proving the theorem, let us observe that the recovery of $Y$ from $u$ through (\ref{level}) is just a consequence of the following elementary lemma which generalizes (in a standard way) the inversion of a strictly increasing function of one real variable: \begin{Lemma} \label{lemma} Let: $a\in[0,1]\rightarrow Z(a)\in {\Bbb R}_+$ with $Z'\ge 0$. We define the generalized inverse of $Z$: $$ v(y)=\int_0^1 H(y-Z(a))da,\;\;\;\forall y\in{\Bbb R}. $$ Then $v'\ge 0$, $H(y-Z(a))=H(v(y)-a)$ holds true a.e. in $(a,y)\in [0,1]\times{\Bbb R}$ and: $$ Z(a)=\int_0^\infty H(a-v(y))dy. $$ In addition, for a pair $(Z,v)$, $(\tilde Z,\tilde v)$ of such functions, we have the co-area formula: \begin{equation} \label{coarea}
\int_0^1|Z(a)-\tilde Z(a)|da=
\int_0^1\int_0^\infty|H(y-Z(a))-H(y-\tilde Z(a))|dyda \end{equation} $$
=\int_0^1\int_0^\infty|H(v(y)-a)-H(\tilde v(y)-a)|dyda=
\int_0^\infty|v(y)-\tilde v(y)|dy. $$ \end{Lemma}
To recover (\ref{level}), we notice first that $\partial_a Y\ge 0$ follows from the very definition \ref{def} of a solution to (\ref{inclusion bis}). Next, $Y\ge 0$ follows from (\ref{maxi}) and the assumptions $q_0\ge 0$, $Y_0\ge 0$. Then, we apply lemma \ref{lemma}, for each fixed $x\in{\Bbb T}^d$ and $t\ge 0$, by setting $Z(a)=Y(t,a,x)$ and $u(t,y,x)=v(y)$.
\subsubsection{Remark}
The function $f(t,a,y,x)=H(y-Y(t,a,x))=H(u(t,y,x)-a)$ valued in $\{0,1\}$ is nothing but the solution of the Lions-Perthame-Tadmor \cite{LPT} 'kinetic formulation' of (\ref{scalar bis}), which satisfies: $$ \partial_t f+q_0(a)\partial_y f+q(a)\cdot\nabla_x f=\partial_a \mu, $$ for some nonnegative measure $\mu(t,a,y,x)$.
\subsubsection{Remark}
As already mentioned, the solutions of (\ref{inclusion bis}) enjoy the $L^p$ stability property with respect to initial conditions (\ref{Lp stability}), not only for $p=2$ but also for all $p\ge 1$. The case $p=1$ is of particular interest. Let us consider two solutions $Y$ and $\tilde Y$ of (\ref{inclusion bis}) and the corresponding Kruzhkov solutions $u$ and $\tilde u$ given by Theorem \ref{main bis}. Using the co-area formula (\ref{coarea}), we find, for all $t\ge 0$, $$
\int_{{\Bbb R}}\int_{{\Bbb T}^d}|u(t,y,x)-\tilde u(t,y,x)| dxdy= $$ $$
=\int_0^1\int_{{\Bbb R}}\int_{{\Bbb T}^d}|H(u(t,y,x)-a)-H(\tilde u(t,y,x)-a)| dadxdy $$ $$
=\int_0^1\int_{{\Bbb R}}\int_{{\Bbb T}^d}|H(y-Y(t,a,x))-H(y-\tilde Y(t,a,x))| dadxdy $$ $$
=\int_0^1\int_{{\Bbb T}^d}|Y(t,a,x)-\tilde Y(t,a,x)| dxda
\le \int_0^1\int_{{\Bbb T}^d}|Y_0(a,x)-\tilde Y_0(a,x)| dxda $$ $$
=\int_{{\Bbb R}}\int_{{\Bbb T}^d}|u_0(y,x)-\tilde u_0(y,x)| dxdy. $$
Thus, Kruzhkov's $L^1$ stability property is nothing but a $very$ incomplete output of the much stronger $L^p$ stability property provided by equation (\ref{inclusion bis}) for all $p\ge 1$.
\subsubsection{Remark}
As a matter of fact, in Theorem \ref{main bis}, it is possible to translate the $L^p$ stability of the level set function $Y$ in terms of the Kruzhkov solution $u$ by using Monge-Kantorovich (MK) distances. Let us first recall that for two probability measures $\mu$ and $\nu$ compactly supported on ${\Bbb R}^D$, their $p$ MK distance can be defined (see \cite{Vi} for instance), for $p\ge 1$, by: $$ \delta_p^p(\mu,\nu)=\sup\int \phi(x)d\mu(x)+\int \psi(y)d\nu(y), $$ where the supremum is taken over all pairs of continuous functions $\phi$ and $\psi$ such that: $$
\phi(x)+\psi(y)\le |x-y|^p,\;\;\;\forall x,y\in{\Bbb R}^D. $$ In dimension $D=1$, this definition reduces to: $$
\delta_p(\mu,\nu)=||Y-Z||_{L^p}, $$ where $Y$ and $Z$ are respectively the generalized inverse (in the sense of Lemma \ref{lemma}) of $u$ and $v$ defined on ${\Bbb R}$ by: $$ u(y)=\mu([-\infty,y]),\;\;\;v(y)=\nu([-\infty,y]),\;\;\;\forall y\in{\Bbb R}. $$ Next, observe that, for each $x\in{\Bbb T}^d$, the $y$ derivative of the Kruzhkov solution $u(t,y,x)$, as described in Theorem \ref{main bis}, can be seen as a probability measure compactly supported on ${\Bbb R}$. (Indeed, $\partial_y u\ge 0$, $u=0$ near $y=-\infty$ and $u=1$ near $y=+\infty$.) Then, the $L^p$ stability property simply reads: $$ \int_{{\Bbb T}^d} \delta_p^p(\partial_y u(t,\cdot,x),\partial_y \tilde u(t,\cdot,x))dx \le \int_{{\Bbb T}^d} \delta_p^p(\partial_y u_0(\cdot,x),\partial_y \tilde u_0(\cdot,x))dx. $$ We refer to \cite{BBL} and \cite{CFL} for recent occurences of MK distances in the field of scalar conservation laws.
\section{Proofs}
Let us now prove Theorem \ref{main bis} (which contains the first part of Theorem \ref{main} as the special case $q_0=0$). The main idea is to provide, for both formulations (\ref{scalar bis}) and (\ref{inclusion bis}), the same time-discrete approximation scheme, namely the 'transport-collapse' method \cite{Br1,Br2,Br3,GM}, and get the same limits.
\subsection{A time-discrete approximation}
We fix a time step $h>0$ and approximate $Y(nh,a,x)$ by $Y_n(a,x)$, for each positive integer $n$. To get $Y_{n}$ from $Y_{n-1}$, we perform two steps, making the following induction assumptions: \begin{equation} \label{induction} \partial_a Y_{n-1}\ge 0,\;\;\;0\le Y_{n-1} \le \sup Y_0+(n-1)h\sup q_0, \end{equation} which are consistent with our assumptions on $Y_0$.
\subsubsection*{Predictor step}
The first 'predictor' step amounts to solve the linear equation \begin{equation} \label{linear} \partial_t Y+ q(a)\cdot\nabla_x Y=q_0(a) \end{equation} for $nh-h<t<nh$, with $Y_{n-1}$ as initial condition at $t=nh-h$. We exactly get at time $t=nh$ the predicted value: \begin{equation} \label{predictor} Y^*_{n}(a,x)=Y_{n-1}(a,x-h\;q(a))+h\;q_0(a). \end{equation} Notice that, since $q_0$ is supposed to be nonnegative, the induction assumption (\ref{induction}) implies: \begin{equation} \label{induction bis} 0\le Y^*_{n}\le \sup Y_0+nh\sup q_0. \end{equation} However, although $\partial_a Y_{n-1}$ is nonnegative, the same may not be true for $\partial_a Y^*_n$. This is why, we need a correction step.
\subsubsection*{Rearrangement step}
In the second step, we 'rearrange' $Y^*$ in increasing order with respect to $a\in [0,1]$, for each fixed $x$, and get the corrected function $Y_{n}$. Let us recall some elementary facts about rearrangements:
\begin{Lemma} \label{lemma bis} Let $a\in[0,1]\rightarrow X(a)\in {\Bbb R}_+$ be an $L^\infty$ function. Then, there is a unique $L^\infty$ function $Y:[0,1]\rightarrow {\Bbb R}_+$, such that $Y'\ge 0$ and: $$ \int_0^1 H(y-Y(a))da=\int_0^1 H(y-X(a))da,\;\;\;\forall y\in {\Bbb R}. $$ We say that $Y$ is the rearrangement of $X$. In addition, for all $Z\in L^\infty$ such that $Z'\ge 0$, the following rearrangement inequality: \begin{equation} \label{inequality}
\int |Y(a)-Z(a)|^p da\le \int |X(a)-Z(a)|^p da. \end{equation} holds true for all $p\ge 1$. \end{Lemma}
So, we define $Y_n(a,x)$ to be, for each fixed $x$, the rearrangement of $Y^*_n(a,x)$ in $a\in [0,1]$: \begin{equation} \label{corrector} \partial_a Y_n \ge 0,\;\;\; \int_0^1 H(y-Y_n(a,x))da=\int_0^1 H(y-Y^*_n(a,x))da,\;\;\;\forall y\in {\Bbb R}. \end{equation} Equivalently, we may define the auxiliary function: \begin{equation} \label{u} u_n(y,x)=\int_0^1 H(y-Y^*_n(a,x))da,\;\;\;\forall y\in {\Bbb R}, \end{equation} i.e. \begin{equation} \label{predictor bis} u_n(y,x)=\int_0^1 H(y-h\;q_0(a)-Y_{n-1}(a,x-h\;q(a)))da, \end{equation} and set: \begin{equation} \label{corrector bis} Y_n(a,x)=\int_0^\infty H(a-u_n(y,x))dy. \end{equation} At this point, $Y_n$ is entirely determined by $Y_{n-1}$ through formulae (\ref{predictor}), (\ref{corrector}), or, equivalently, through formulae (\ref{predictor bis}), (\ref{corrector bis}). Notice that, from the very definition (\ref{corrector}) of the rearrangement step, $u_n$, defined by (\ref{u}), can be equivalently written: \begin{equation} \label{u bis} u_n(y,x)=\int_0^1 H(y-Y_n(a,x))da. \end{equation} Also notice that, for all function $Z(a,x)$ such that $\partial_a Z\ge 0$, and all $p\ge 1$: \begin{equation} \label{comparison}
\int |Y_n(a,x)-Z(a,x)|^p dadx\le \int |Y^*_n(a,x)-Z(a,x)|^p dadx \end{equation} follows from the rearrangement inequality (\ref{inequality}). Finally, we see that $\partial_a Y_n \ge 0$ is automatically satisfied (this was the purpose of the rearrangement step) and $$ 0\le Y_{n}\le \sup Y_0+nh\sup q_0 $$ follows from (\ref{induction bis}) (since the range of $Y^*_n$ is preserved by the rearrangement step). So, the induction assumption (\ref{induction}) is enforced at step $n$ and the scheme is well defined.
\subsubsection{Remark}
Observe that, for any fixed $x$, $u_n(y,x)$, as a function of $y$, is the (generalized) inverse of $Y_n(a,x)$, viewed as a function of $a$, in the sense of Lemma \ref{lemma}. Also notice that the level sets $\{(a,y);\;\;y\ge Y_n(a,x)\}$ and $\{(a,y);\;\;a\le u_n(y,x)\}$ coincide.
\subsection{The transport-collapse scheme revisited}
The time-discrete scheme can be entirely recast in terms of $u_n$ (defined by (\ref{u bis})). Indeed, introducing \begin{equation} \label{tcm1} ju_n(a,y,x)=H(u_n(y,x)-a), \end{equation} we can rewrite (\ref{predictor bis}), (\ref{corrector bis}) in terms of $u_n$ and $ju_n$ only: \begin{equation} \label{tcm2} u_n(y,x)=\int_0^1 ju_{n-1}(a,y-h\;q_0(a),x-h\;q(a))da. \end{equation} We observe that, formulae (\ref{tcm1},\ref{tcm2}) exactly define the 'transport-collapse' (TC) approximation to (\ref{scalar bis}), or, equivalently, its 'kinetic' approximation, according to \cite{Br1,Br2,Br3,GM}.
\subsection{Convergence to the Kruzhkov solution}
We are now going to prove that, on one hand, $Y_n(a,x)$ converges to $Y(t,a,x)$ as $nh\rightarrow t$, and, on the other hand, $u_n(y,x)$ converges to $u(t,y,x)$, where $Y$ and $u$ are respectively the unique solution to subdifferential equation (\ref{inclusion bis}) with initial condition $Y_0(a,x)$ and the unique Kruzhkov solution to (\ref{scalar bis}) with initial condition \begin{equation} \label{initial} u_0(y,x)=\int_0^1 H(y-Y_0(a,x))da. \end{equation} \\ From the convergence analysis of the TC method \cite{Br1,Br2,Br3,GM}, we already know that, as $nh\rightarrow t$, $$
\int |u_n(y,x)-u(t,y,x)|dydx\rightarrow 0, $$ where $u$ is the unique Kruzhkov solution with initial value $u_0$ given by (\ref{initial}). More precisely, if we extend the time discrete approximations $u_n(y,x)$ to all $t\in [0,T]$ by linear interpolation in time: \begin{equation} \label{interpo bis} u^h(t,y,x)=u_{n+1}(y,x)\frac{t-nh}{h}+u_n(y,x)\frac{nh+h-t}{h}, \end{equation} then $u^h-u$ converges to $0$ in the space $C^0([0,T],L^1({\Bbb R}\times{\Bbb T}^d))$ as $h\rightarrow 0$. Following (\ref{level}), it is now natural to introduce the level-set function $Y$ defined by (\ref{level}) from the Kruzhkov solution: $$ Y(t,a,x)=\int_0^\infty H(a-u(t,y,x))dy. $$ (Notice that, at this point, we do not know that $Y$ is a solution to the subdifferential formulation (\ref{inclusion bis})!) Let us interpolate the $Y_n$ by \begin{equation} \label{interpo} Y^h(t,a,x)=Y_{n+1}(a,x)\frac{t-nh}{h}+Y_n(a,x)\frac{nh+h-t}{h}, \end{equation} for all $t\in [nh,nh+h]$ and $n\ge 0$. By the co-area formula (\ref{coarea}), we have $$
\int |Y(t,a,x)-Y_n(a,x)|dadx=\int |u(t,y,x)-u_n(y,x)|dydx. $$ Thus: $$
\sup_{t\in [0,T]}||Y(t,\cdot)-Y^h(t,\cdot)||_{L^1}
\le \sup_{t\in [0,T]}||u(t,\cdot)-u^h(t,\cdot)||_{L^1}\rightarrow 0, $$ and we conclude that the approximate solution $Y^h$ must converge to $Y$ in $C^0([0,T],L^1([0,1]\times{\Bbb T}^d))$ as $h\rightarrow 0$. Notice that, since the $Y^h$ are uniformly bounded in $L^\infty$, the convergence also holds true in $C^0([0,T],L^2([0,1]\times{\Bbb T}^d))$.
We finally have to prove that $Y$ is the solution to the subdifferential formulation (\ref{inclusion bis}) with initial condition $Y_0$.
\subsection{Consistency of the transport-collapse scheme}
Let us check that the TC scheme is consistent with the subdifferential formulation (\ref{inclusion bis}) in its semi-integral formulation (\ref{semi-integral}). For each smooth function $Z(t,a,x)$ with $\partial_a Z\ge 0$ and $p\ge 1$, we have $$
\int |Y_{n+1}(a,x)-Z(nh+h,a,x)|^p dadx $$ $$
\le \int |Y_{n+1}^*(a,x)-Z(nh+h,a,x)|^p dadx $$ (because of property (\ref{comparison}) due to the rearrangement step (\ref{corrector})) $$
=\int |Y_{n}(a,x-h\;q(a))+h\;q_0(a)-Z(nh+h,a,x)|^p dadx $$ (by definition of the predictor step (\ref{predictor})) $$
=\int |Y_{n}(a,x)+h\;q_0(a)-Z(nh+h,a,x+h\;q(a))|^p dadx $$ $$
=\int |Y_{n}-Z(nh,\cdot)|^p dadx+h\;\Gamma+o(h) $$ where: $$
\Gamma=p\int (Y_{n}-Z(nh,\cdot))|Y_{n}-Z(nh,\cdot)|^{p-2} \{q_0-\partial_t Z(nh,\cdot)-q\cdot\nabla_x Z(nh,\cdot)\}dadx $$ (by Taylor expanding $Z$ about $(nh,a,x)$). Since the approximate solution provided by the TC scheme has a unique limit $Y$, as shown in the previous section, this limit must satisfy: $$
\frac{d}{dt} \int |Y-Z|^p dadx
\le p\;\int (Y-Z)|Y-Z|^{p-2} (q_0(a)-\partial_t Z-q(a)\cdot\nabla_x Z)dadx, $$ in the distributional sense in $t$. In particular, for $p=2$, we exactly recover the semi-integral version (\ref{semi-integral}) of (\ref{inclusion bis}). We conclude that the approximate solutions generated by the TC scheme do converge to the solutions of (\ref{inclusion bis}) in the sense of Definition \ref{def}, which completes the proof of Theorem \ref{main bis}.
\section{Viscous approximations}
A natural regularization for subdifferential equation (\ref{inclusion bis}) amounts to substitute a barrier function for the convex cone $K$ in $L^2([0,1]\times {\Bbb T}^d)$ of all functions $Y$ such that $\partial_a Y\ge 0$. Typically, we introduce a convex function $\phi:{\Bbb R}\rightarrow ]-\infty,+\infty]$ such that $\phi(\tau)=+\infty$ if $\tau<0$, we define, for all $Y\in K$, \begin{equation} \label{potential} \Phi(Y)=\int \phi(\partial_a Y)dadx, \end{equation} and set $\Phi(Y)=+\infty$ if $Y$ does not belong to $K$. Typical examples are: $$\phi(\tau)=-\log(\tau),\;\;\; \phi(\tau)=\tau\log(\tau),\;\;\; \phi(\tau)=\frac{1}{\tau},\;\;\;\forall\tau>0.$$ Then, we considered the perturbed subdifferential equation \begin{equation} \label{perturbed} 0\;\in\;\; \partial_t Y+ q(a)\cdot\nabla_x Y-q_0(a) +\varepsilon\partial \Phi(Y), \end{equation} for $\varepsilon>0$. The general theory of maximal monotone operators guarantees the convergence of the corresponding solutions to those of (\ref{inclusion bis}) as $\varepsilon\rightarrow 0$. It is not difficult (at least formally) to identify the corresponding perturbation to scalar conservation (\ref{scalar bis}). Indeed, assuming $\phi(\tau)$ to be smooth for $\tau>0$, we get, for each smooth function $Y$ such that $\partial_a Y>0$: $$ \partial\Phi(Y)=-\partial_a(\phi'(\partial_a Y)). $$ Thus, any smooth solution $Y$ to (\ref{perturbed}), satisfying $\partial_a Y>0$, solves the following parabolic equation: \begin{equation} \label{viscous} \partial_t Y+ q(a)\cdot\nabla_x Y-q_0(a) =\varepsilon\partial_a(\phi'(\partial_a Y)). \end{equation} Introducing, the function $u(t,y,x)$ implicitely defined by $$ u(t,Y(t,a,x),x)=a, $$ we get (by differentiating with respect to $a$, $t$ and $x$): $$ (\partial_y u)(t,Y(t,a,x),x)\partial_a Y(t,a,x)=1, $$ $$ (\partial_t u)(t,Y,x)+(\partial_y u)(t,Y,x)\partial_t Y=0, $$ $$ (\nabla_x u)(t,Y,x)+(\partial_y u)(t,Y,x)\nabla_x Y=0. 
$$ Multiplying (\ref{viscous}) by $(\partial_y u)(t,Y(t,a,x),x)$, we get: \begin{equation} \label{viscous bis} -\partial_t u-q(u)\cdot\nabla_x u-q_0(u)\partial_y u =\varepsilon \partial_y (\phi'(\frac{1}{\partial_y u})). \end{equation} In particular, in the case $\phi(\tau)=-\log\tau$, we recognize a linear viscous approximation to scalar conservation law (\ref{scalar bis}): \begin{equation} \label{viscous ter} \partial_t u+q(u)\cdot\nabla_x u+q_0(u)\partial_y u =\varepsilon \partial^2_{yy} u, \end{equation} with viscosity only in the $y$ variable.
\subsubsection{Remark}
Of course, these statements are not rigorous since the parabolic equations we have considered are degenerate and their solutions may not be smooth.
\subsubsection{Remark}
In the case of our main result, Theorem \ref{main}, we have $q_0=0$ and the variable $y$ is just a dummy variable in (\ref{scalar}). Thus, the corresponding regularized version \begin{equation} \label{viscous quart} -\partial_t u-q(u)\cdot\nabla_x u =\varepsilon \partial_y (\phi'(\frac{1}{\partial_y u})). \end{equation} includes viscous effects not on the space variable $x$ but rather on the 'parameter' $y\in{\Bbb R}$. This unusual type of regularization has already been used and analyzed in the level-set framework developed by Giga for Hamilton-Jacobi equations \cite{Gi2}, and by Giga, Giga, Osher, Tsai for scalar conservation laws \cite{GG,TGO}.
\section{Related equations}
A similar method can be applied to some special systems of conservation laws. A typical example (which was crucial for our understanding) is the 'Born-Infeld-Chaplygin' system considered in \cite{Br4}, and the related concept of 'order-preserving strings'. This system reads: \begin{equation} \label{bi} \partial_t(hv)+\partial_y(hv^2-hb^2)-\partial_x(hb)=0, \end{equation} $$ \partial_t h+\partial_y(hv)=0,\;\;\; \partial_t (hb)-\partial_x(hv)=0, $$ where $h,b,v$ are real valued functions of time $t$ and two space variables $x,y$. In \cite{Br4}, this system is related to the following subdifferential system: \begin{equation} \label{bi-subdif} 0\in \partial_t Y-\partial_x W+\partial K(Y), \;\;\;\partial_t W=\partial_x Y, \end{equation} where $(Y,W)$ are real valued functions of $(t,a,x)$ and $K$ is the convex cone of all $Y$ such that $\partial_a Y\ge 0$. The (formal) correspondence between (\ref{bi}) and (\ref{bi-subdif}) is obtained by setting: $$ h(t,x,Y(t,x,a))\partial_a Y(t,x,a)=1, $$ $$ v(t,x,Y(t,x,a))=\partial_t Y(t,x,a),\;\;\; b(t,x,Y(t,x,a))=\partial_x Y(t,x,a). $$ Unfortunately, this system is very special (its smooth solutions are easily integrable). In our opinion, it is very unlikely that $L^2$ formulations can be found for general hyperbolic conservation laws as easily as in the multidimensional scalar case.
\section{Appendix: proof of Proposition \ref{Proposition}}
In the case when $q_0$ and $Y_0$ belong to $L^\infty$ and are nonnegative, we already know, from the convergence of the TC scheme, that there is a solution $Y$ to (\ref{inclusion bis}), with initial value $Y_0$, in the sense of definition \ref{def}. From (\ref{induction}), we also get for such solutions, when $q_0\ge 0$ and $Y_0\ge 0$, $$ 0\le Y(t,\cdot)\le \sup Y_0+t\sup q_0,\;\;\;\forall t\ge 0. $$ By elementary rescalings, we can remove the assumptions that both $Y_0$ and $q_0$ are nonnegative and get estimate (\ref{maxi}). \\ Let us now examine some additional properties of the solutions to (\ref{inclusion bis}) obtained from the TC approximations. First, we observe that, in the TC scheme, \\ 1) the predictor step (a translation in the $x$ variable by $h\;q(a)$ plus an addition of $h\;q_0(a)$) is isometric in all $L^p$ spaces, \\ 2) the corrector step (an increasing rearrangement in the $a$ variable) is non-expansive in all $L^p$. \\ Thus the scheme is non-expansive in all $L^p([0,1]\times {\Bbb T}^d)$. More precisely, for two different initial conditions $Y_0$ and $\tilde Y_0$, and two different data $q_0$ and $\tilde q_0$, all in $L^\infty$, we get for the corresponding approximate solutions $Y_n$ and $\tilde Y_n$: \begin{equation} \label{non expansive}
||Y_{n}-\tilde Y_{n}||_{L^p}
\le ||Y_{n-1}-\tilde Y_{n-1}||_{L^p}+h||q_0-\tilde q_0||_{L^p}\;. \end{equation} This shows that (\ref{Lp stability}) holds true for all solutions of (\ref{inclusion bis}) generated by the TC scheme. \\ Since the scheme is also invariant under translations in the $x$ variable, we get the following a priori estimate: \begin{equation} \label{esti2}
||\nabla_x Y_n||_{L^p}\le ||\nabla_x Y_0||_{L^p}. \end{equation} Finally, let us compare two solutions of the scheme $Y_n$ and $\tilde Y_n=Y_{n+1}$ obtained with initial condition $\tilde Y_0=Y_1$. Using (\ref{non expansive}), we deduce: $$
\int |Y_{n+1}(a,x)- Y_{n}(a,x)|^p dadx
\le \int |Y_{1}(a,x)- Y_{0}(a,x)|^p dadx $$ $$
\le \int |Y^*_{1}(a,x)- Y_{0}(a,x)|^p dadx
=\int |Y_{0}(a,x-h\;q(a))+h\;q_0(a)-Y_0(a,x)|^p dadx. $$ So we get a second a priori estimate: \begin{equation} \label{esti3}
||Y_{n+1}-Y_n||_{L^p}\le
(||q_0||_{L^p}+||q||_{L^\infty}||\nabla_x Y_0||_{L^p})h. \end{equation} Thus the solutions $Y$ to (\ref{inclusion bis}) obtained from the TC scheme satisfy the a priori bounds: \begin{equation} \label{esti2 bis}
||\nabla_x Y(t,\cdot)||_{L^p}\le ||\nabla_x Y_0||_{L^p}, \end{equation} \begin{equation} \label{esti3 bis}
||\partial_t Y(t,\cdot)||_{L^p}\le
||q_0||_{L^p}+||q||_{L^\infty}||\nabla_x Y_0||_{L^p}. \end{equation} Notice that, at this level, we still do not know if solutions, in the sense of Definition \ref{def} exist when $Y_0\in K$ and $q_0\in L^2([0,1])$ are not in $L^\infty$ and we know nothing about their uniqueness. This can be easily addressed by standard functional analysis arguments.
\subsubsection*{Existence for general data}
Let $Y_0\in K$ and $q_0\in L^2([0,1])$. We can find two Cauchy sequences in $L^2$, labelled by $k\in{\Bbb N}$, namely $Y_0^k\in K$ and $q_0^k\in L^2([0,1])$, made of smooth functions, with limits $Y_0$ and $q_0$ respectively. Let us denote by $Y^k$ the corresponding solutions, generated by the TC scheme. Because of their $L^2$ stability, they satisfy: $$
\sup_{t\in [0,T]}||Y^k(t,\cdot)-Y^{k'}(t,\cdot)||_{L^2}
\le ||Y_0^k-Y_0^{k'}||_{L^2}+T||q_0^k-q_0^{k'}||_{L^2}. $$ So, $Y^k$ is a Cauchy sequence in $C^0([0,T],L^2)$ of solutions of (\ref{inclusion bis}) in the sense of Definition \ref{def}, with a definite limit $Y$. Definition \ref{def} is clearly stable under this convergence process. So, we conclude that $Y$ satisfies the requirements of Definition \ref{def} and is a solution with initial condition $Y_0$ and left-hand side $q_0$. Notice that, through our approximation process, we keep the a priori estimates (\ref{esti2 bis}),(\ref{esti3 bis}), for general data $q_0\in L^2([0,1])$.
\subsubsection*{Uniqueness}
Let us consider a solution $Y$ to (\ref{inclusion bis}), with initial condition $Y_0\in K$ and left-hand side $q_0\in L^2([0,1])$, in the sense of Definition \ref{def}. By definition $Y(t,\cdot)\in K$ depends continuously on $t\in [0,T]$ in $L^2$. From definition (\ref{semi-integral}), using $Z=0$ as a test function, we see that: $$
\frac{d}{dt}||Y(t,\cdot)||^2_{L^2} \le 2\int Y(t,a,x)q_0(a)\; dadx \le
||Y(t,\cdot)||^2_{L^2}+||q_0||^2_{L^2}, $$ which implies that the $L^2$ norm of $Y(t,\cdot)$ stays uniformly bounded on any finite interval $[0,T]$. Thus, $T>0$ being fixed, we can mollify $Y$ and get, for each $\epsilon\in ]0,1]$ a smooth function $Y_\epsilon$, valued in $K$, so that: \begin{equation} \label{error}
\sup_{t\in [0,T]}||Y(t,\cdot)-Y_\epsilon(t,\cdot)||_{L^2}\le \epsilon. \end{equation} Let us now consider an initial condition $Z_0$ such that $\nabla_x Z_0$ belongs to $L^2$. We know that there exists a solution $Z$ to (\ref{inclusion bis}), still in the sense of Definition \ref{def}, obtained by TC approximation, for which both $\partial_t Z(t,\cdot)$ and $\nabla_x Z(t,\cdot)$ stay uniformly bounded in $L^2$ for all $t\in [0,T]$. This function $Z$ has enough regularity to be used as a test function in (\ref{semi-integral}) when expressing that $Y$ is a solution in the sense of Definition \ref{def}. So, for each smooth nonnegative function $\theta(t)$, compactly supported in $]0,T[$, we get from (\ref{semi-integral}): $$
\int \{\theta'(t)|Y-Z|^2 +2\theta(t)(Y-Z)(q_0(a)-\partial_t Z- q(a)\cdot\nabla_x Z)\}dadxdt\ge 0. $$ Substituting $Y_\epsilon$ for $Y$, we have, thanks to estimate (\ref{error}), $$
\int \{\theta'(t)|Y_\epsilon-Z|^2 +2\theta(t)(Y_\epsilon-Z)(q_0(a)-\partial_t Z- q(a)\cdot\nabla_x Z)\}dadxdt \ge -C\epsilon, $$ where $C$ is a constant depending on $\theta$, $Z$, $q_0$ and $q$ only. Since $Z$ is also a solution, using $Y_\epsilon$ as a test function, we get from formulation (\ref{semi-integral}): $$
\int \{\theta'(t)|Z-Y_\epsilon|^2 +2\theta(t)(Z-Y_\epsilon) (q_0(a)-\partial_t Y_\epsilon- q(a)\cdot\nabla_x Y_\epsilon)\}dadxdt \ge 0. $$ Adding up these two inequalities, we deduce: $$
\int\{2\theta'(t)|Y_\epsilon-Z|^2 +2\theta(t)(Y_\epsilon-Z)(\partial_t (Y_\epsilon-Z)+ q(a)\cdot\nabla_x(Y_\epsilon- Z))\}dadxdt \ge -C\epsilon. $$ Integrating by part in $t\in [0,T]$ and $x\in{\Bbb T}^d$, we simply get: $$
\int \theta'(t)|Y_\epsilon-Z|^2 dadxdt \ge -C\epsilon. $$ Letting $\epsilon\rightarrow 0$, we deduce: $$
\frac{d}{dt}\int |Y-Z|^2 dadx\le 0. $$ We conclude, at this point, that: $$
||Y(t,\cdot)-Z(t,\cdot)||_{L^2}\le ||Y_0-Z_0||_{L^2},\;\;\;\forall t\in [0,T]. $$ This immediately implies the uniqueness of $Y$. Indeed, any other solution $\tilde Y$ with initial condition $Y_0$ must also satisfy: $$
||\tilde Y(t,\cdot)-Z(t,\cdot)||_{L^2}\le ||Y_0-Z_0||_{L^2}. $$ Thus, by the triangle inequality: $$
||\tilde Y(t,\cdot)-Y(t,\cdot)||_{L^2}\le 2||Y_0-Z_0||_{L^2}. $$
Since $Z_0\in K$ is any function such that $\nabla_x Z_0$ belongs to $L^2$, we can make $||Y_0-Z_0||_{L^2}$ arbitrarily small and conclude that $\tilde Y=Y$, which completes the proof of uniqueness.
\subsection*{Acknowledgments} This article was written at the Bernoulli Centre, EPFL, Lausanne, in September 2006, during the program ``Asymptotic Behaviour in Fluid Mechanics''. The author is grateful to the organizers, Dragos Iftimie, Genevi\`eve Raugel and Tudor Ratiu for their kind invitation.
\end{document} |
\begin{document}
\begin{center} \begin{large} Logarithmic intertwining operators \\ and\\ the space of conformal blocks over the projective line \end{large} \end{center} \vskip5ex \begin{center} Yusuke Arike \vskip1ex Department of Mathematics, Graduate School of Science\\ Osaka University\\ y-arike@cr.math.sci.osaka-u.ac.jp \end{center} \vskip2ex \abstract{ We show that the space of logarithmic intertwining operators among logarithmic modules for a vertex operator algebra is isomorphic to the space of $3$-point conformal blocks over the projective line. This is considered as a generalization of Zhu's result for ordinary intertwining operators among ordinary modules.} \vskip2ex
\section{Introduction} One of the most important problems in representation theory of vertex operator algebras is to determine fusion rules which are the dimensions of intertwining operators among three modules for vertex operator algebras. Intertwining operators of the type $\fusion{M^1}{M^2}{M^3}$ are linear maps $I(-,z):M^1\to\Hom_\mathbb{C}(M^2,M^3)[[z,z^{-1}]]$ with several axioms (see \cite{FHL}) where $M^i\,(i=1,2,3)$ are modules for a vertex operator algebra.
The definition of intertwining operators given in \cite{FHL} treats modules on which $L_0$ acts as a semisimple operator. However, in general, we have to consider modules which do not decompose into $L_0$-eigenspaces but do decompose into generalized $L_0$-eigenspaces.
Such modules are called {\it logarithmic modules} in \cite{M1}.
A notion of {\it logarithmic intertwining operators} among logarithmic modules is introduced in \cite{M1}. Logarithmic intertwining operators may involve logarithmic terms. It is shown in \cite{M1} that a logarithmic intertwining operator among ordinary modules is nothing but the so-called intertwining operator. Several examples of logarithmic modules are found and logarithmic intertwining operators among these modules are constructed (see e.g. \cite{M1}, \cite{M2}, \cite{AM}).
On the other hand, an important feature of conformal field theory is the notion of conformal blocks associated with vertex operator algebras. Mathematically rigorous formulation of $N$-point conformal blocks on Riemann surfaces associated with vertex operator algebras is given in \cite{Z1} with the assumption that the corresponding vertex operator algebra is quasi-primary generated. It is shown in \cite{Z1} that the space of $3$-point conformal blocks over the projective line $\mathbb{P}^1$ is isomorphic to the space of intertwining operators among ordinary modules for a vertex operator algebra.
In this paper we give a sort of generalization of Zhu's result in the case that the modules are logarithmic. More precisely we are going to prove that the space of $3$-point conformal blocks over the projective line is isomorphic to the space of logarithmic intertwining operators without the assumption that a vertex operator algebra is quasi-primary generated. Taking the formulation of the space of coinvariants in \cite{NT} we do not have to assume that a vertex operator algebra is quasi-primary generated.
The study of logarithmic intertwining operators is very important; one would like to know whether their dimensions can be obtained from the $S$-matrix given by formal characters. In fact, if a vertex operator algebra is rational and satisfies several conditions, the dimension of the space of intertwining operators is completely determined by the $S$-matrix. However, this is left for further studies.
The paper is organized as follows. In section 2 we recall the definition of vertex operator algebras and their modules. The definition of logarithmic modules is located here. Also we describe the space of conformal blocks over $\mathbb{P}^1$ according to \cite{NT}.
In section 3 we recall the definition and properties of logarithmic intertwining operators which are given in \cite{M1} and \cite{HLZ}.
We state the main theorem in this paper and give a proof in section 4. The linear maps between the space of logarithmic intertwining operators and the space of $3$-point conformal blocks are defined and it is proved that these maps are well-defined and mutually inverse.
\section{Vertex operator algebras and the space of conformal blocks over the projective line} Throughout this paper we use the notation $\mathbb{N}=\{0,1,2,\dots\}$. \subsection{Vertex operator algebras and current Lie algebras} A {\it vertex operator algebra} is a $\mathbb{N}$-graded vector space $V=\bigoplus_{k=0}^\infty V_k$ with $\dim V_k<\infty \,(k\in\mathbb{Z}_{\ge0})$ equipped with a linear map \begin{equation} Y(-,z):V\to\End(V)[[z,z^{-1}]],\,Y(a,z)=\sum_{n\in\mathbb{Z}}a_{(n)}z^{-n-1} \end{equation} and with distinguished vectors $\mathbf{1}\in V_0$ called the {\it vacuum vector} and $\omega\in V_2$ called the {\it Virasoro vector} satisfying the following axioms (see e.g. \cite{FHL}, \cite{MN}): \vskip 1ex \noindent (1) For any pair of vectors in $V$ there exists a nonnegative integer $N$ such that $a_{(n)}b=0$ for all integers $n\ge N$.\\ \noindent (2) For any vectors $a, b, c\in V$ and integers $p, q, r\in\mathbb{Z}$, \begin{equation} \begin{split} &\sum_{i=0}^\infty\binom{p}{i}(a_{(r+i)}b)_{(p+q-i)}c\\ &\qquad\qquad=\sum_{i=0}^\infty(-1)^i\binom{r}{i}(a_{(p+r-i)}b_{(q+i)}c-(-1)^rb_{(q+r-i)}a_{(p+i)}c) \end{split} \end{equation} hold.\\ \noindent (3) $Y(\mathbf{1},z)=\id_V$.\\ (4) $Y(a,z)\mathbf{1}\in V[[z]]$ and $a_{(-1)}\mathbf{1}=a$.\\ \noindent (5)
Set $L_n=\omega_{(n+1)}$. Then $\{L_n\,|\,n\in\mathbb{Z}\}$ together with the identity map on $V$ give a representation of the Virasoro algebra on $V$ with central charge $c_V\in\mathbb{C}$.\\ \noindent (6) $L_0a=ka$ for any $a\in V_k$ and nonnegative integers $k$.
\vskip 1ex \noindent (7) $\dfrac{d}{dz}Y(a,z)=Y(L_{-1}a,z)$ for any $a\in V$.
\noindent (8)
Denote $|a|=k$ for any $a\in V_k$ and then \begin{equation}
|a_{(n)}b|=|a|+|b|-1-n \end{equation} for any homogeneous $b\in V$ and $n\in\mathbb{Z}$.
In order to define the space of conformal blocks we introduce the spaces $V^{(1)}=\bigoplus_{k=0}^\infty V_k\otimes \mathbb{C}((\xi))(d\xi)^{1-k}$ and $V^{(0)}=\bigoplus_{k=0}^\infty V_k\otimes\mathbb{C}((\xi))(d\xi)^{-k}$. Let $\nabla:V^{(0)}\to V^{(1)}$ be the linear map defined by \begin{equation} v\otimes f(\xi)(d\xi)^{-k}\mapsto L_{-1}v\otimes f(\xi)(d\xi)^{-k}+v\otimes\frac{df(\xi)}{d\xi}(d\xi)^{1-k}\qquad(v\in V_k). \end{equation} We set $\mathfrak{g}=V^{(1)}/\nabla V^{(0)}$ and denote the image of $a\otimes f(\xi)(d\xi)^{1-k}\in V_k\otimes\mathbb{C}((\xi))(d\xi)^{1-k}$ by $J(a,f)$. Then we have:
\begin{proposition}[\cite{NT}, Proposition 2.1.1]\label{prop-current} The vector space $\mathfrak{g}$ is a Lie algebra with the bracket \begin{equation}
[J(a,f), J(b,g)]=\sum_{m=0}^{|a|+|b|-1}\frac{1}{m!}J\bigl(a_{(m)}b, \frac{d^mf}{d\xi^m}g\bigr) \end{equation} for homogeneous $a, b\in V$. \end{proposition}
The Lie algebra $\mathfrak{g}$ is called the {\it current Lie algebra}. Let us denote $J_n(a)=J(a,\xi^{n+|a|-1})$.
Applying the construction of the current Lie algebra to the vector space $\bigoplus_{k=0}^\infty V_k\otimes\mathbb{C}[\xi,\xi^{-1}](d\xi)^{1-k}$, we have a graded Lie algebra $\bar{\mathfrak{g}}=\bigoplus_{n\in\mathbb{Z}}\bar{\mathfrak{g}}_n$ where the vector space $\bar{\mathfrak{g}}_n$ is linearly spanned by $J_n(a)\,(a\in V)$. The Lie algebra $\bar{\mathfrak{g}}$ is a Lie subalgebra of $\mathfrak{g}$. The following proposition plays an important role when we define duality functor on the category of $V$-modules.
\begin{proposition}[\cite{NT}, Proposition 4.1.1]\label{prop-dual} The linear map $\theta:\bar{\mathfrak{g}}\to\bar{\mathfrak{g}}$ defined by \begin{equation}\label{eq-involution}
\theta(J_n(a))=(-1)^{|a|}\sum_{j=0}^\infty\frac{1}{j!}J_{-n}(L_1^ja) \end{equation} for $a\in V$ and $n\in\mathbb{Z}$ is an anti-Lie algebra involution. \end{proposition}
\subsection{Modules for vertex operator algebras} Let $M$ be a weak $V$-module (see \cite{DLM} for the definition). A weak $V$-module $M$ is called $\mathbb{N}$-gradable if it admits a decomposition $M=\bigoplus_{n\in\mathbb{N}}M_{(n)}$ such that \begin{equation}
a_{(n)}M_{(k)}\subseteq M_{(|a|+k-1-n)} \end{equation} for homogeneous $a\in V$ and $n\in\mathbb{Z}$.
Let $M$ be a weak $V$-module. A weak $V$-module is called a {\it logarithmic module} if $M$ decomposes into a direct sum of generalized $L_0$-eigenspaces.
Let $M=\bigoplus_{n\in\mathbb{Z}_{\ge0}}M_{(h+n)}$ be a logarithmic module with a complex number $h$ and \begin{equation}
M_{(h+n)}=\{x\in M\,|\,(L_0-h-n)^{k+1}x=0 \text{ for a nonnegative integer }k \}. \end{equation} Obviously $M$ is an $\mathbb{N}$-gradable $V$-module.
In this paper a $V$-module $M$ is always a logarithmic $V$-module which satisfies the following conditions.
\noindent i) There exist a complex number $h$ and a nonnegative integer $k$ such that $M=\bigoplus_{n=0}^\infty M_{(h+n)}$ with
$M_{(h+n)}=\{u\in M\,|\,(L_0-h-n)^{k+1}u=0\}$ for all $n\in\mathbb{N}$.\\
\noindent ii) $\dim M_{(h+n)}<\infty$ for all nonnegative integers $n$. We denote $|u|=h+n$ for $u\in M_{(h+n)}$ for short.
We remark that any $V$-module in this paper is a $V$-module in the sense of \cite{NT} and \cite{MNT}.
Let $k$ be a nonnegative integer and let $\mathscr{C}_k$ be the category consisting of $V$-modules whose homogeneous subspaces are annihilated by $(L_0-h-n)^{k+1}$. Then it follows that $\mathscr{C}_0\subseteq \mathscr{C}_1\subseteq\dotsb \subseteq\mathscr{C}_k\subseteq\dotsb$.
Any $V$-module $M$ is a $\bar{\mathfrak{g}}$-module by the action \begin{equation}\label{eq-action}
J_n(a)u=a_{(|a|-1+n)}u \end{equation} for any homogeneous $a\in V$ and $u\in M$ (cf. \cite{DLM}, \cite{NT}). For any $a\in V$ and $u\in M$, there exists a nonnegative integer $n_0$ such that $a_{(n)}u=0$ for all $n\ge n_0$. Therefore, the $V$-module $M$ is also a $\mathfrak{g}$-module by the action \eqref{eq-action}.
Let us denote the restricted dual of a $V$-module $M=\bigoplus_{n=0}^\infty M_{(h+n)}$ by $D(M)=\bigoplus_{n=0}^\infty M_{(h+n)}^\ast$ where $M_{(h+n)}^\ast=\Hom_\mathbb{C}(M_{(h+n)},\mathbb{C})$. A $\bar{\mathfrak{g}}$-module structure on $D(M)$ can be defined by letting \begin{equation} \langle J_n(a)\varphi, u\rangle=\langle \varphi, \theta(J_n(a))u\rangle \end{equation} for all $\varphi\in D(M)$ and $u\in M$. The following proposition is known.
\begin{proposition}[\cite{NT}, Proposition 4.2.1, cf. \cite{FHL}, Theorem 5.2.1] There exists a unique $V$-module structure on $D(M)$ which extends $\bar{\mathfrak{g}}$-module structure. \end{proposition}
Since $\langle L_n\varphi,u\rangle=\langle \varphi, L_{-n}u\rangle$ for all $\varphi\in D(M)$ and $u\in M$, we see that $D(M)=\oplus_{n=0}^{\infty}D(M)_{(h+n)}$ and $D(M) \in\mathscr{C}_k$ for any $M\in\mathscr{C}_k$.
\subsection{The space of conformal blocks over the projective line}
Let $\mathbb{P}^1=\mathbb{C}\cup\{\infty\}$ be the projective line and $z$ its inhomogeneous coordinate. Let $A=\{1,2,\dotsc,N,\infty\}$ and let us fix a set $p_A=(p_a)_{a\in A}$ of $N+1$ distinct points $p_a\in\mathbb{P}^1\,(a\in A)$ with $p_\infty=\infty$. We write $\xi_a=z-p_a\,(a\not=\infty)$ and $\xi_\infty=z$, respectively.
We denote by $H^0(\mathbb{P}^1,\Omega^{1-k}(\ast p_A))$ the vector space of global meromorphic $(1-k)$-differentials whose poles are located only at $p_a\,(a\in A)$. Set $H(V,\ast p_A)^{(1)}=\bigoplus_{k=0}^\infty V_k\otimes H^0(\mathbb{P}^1,\Omega^{1-k}(\ast p_A))$ and $H(V,\ast p_A)^{(0)}=\bigoplus_{k=0}^\infty V_k\otimes H^0(\mathbb{P}^1,\Omega^{-k}(\ast p_A))$. Define the linear map $\nabla:H(V,\ast p_A)^{(0)}\to H(V,\ast p_A)^{(1)}$ by \begin{equation} a\otimes f(z)(dz)^{-k}\mapsto L_{-1}a\otimes f(z)(dz)^{-k}+a\otimes \frac{df(z)}{dz}(dz)^{1-k}\qquad(a\in V_k). \end{equation} We set \begin{equation} \mathfrak{g}(\mathbb{P}^1,\ast p_A)=H(V,\ast p_A)^{(1)}/\nabla H(V,\ast p_A)^{(0)}. \end{equation} It is shown (cf. \cite[Proposition 5.1.1]{NT}) that the vector space $\mathfrak{g}(\mathbb{P}^1,\ast p_A)$ is a Lie algebra with the bracket \begin{multline}
\bigl[a\otimes f(z)(dz)^{1-|a|}, b\otimes g(z)(dz)^{1-|b|}\bigr]\\
=\sum_{m=0}^\infty\frac{1}{m!}a_{(m)}b\otimes \frac{d^mf(z)}{dz^m}g(z)(dz)^{2-|a|-|b|+m}. \end{multline}
For each $a\in A$ we define the linear map \begin{equation} i_a:H^0(\mathbb{P}^1,\Omega^k(\ast p_A))\to \begin{cases} \mathbb{C}((\xi_a))(d\xi_a)^k, & a\in A\backslash\{\infty\}\\ &\\ \mathbb{C}((\xi_\infty^{-1}))(d\xi_\infty)^k, & a=\infty \end{cases} \end{equation} by taking the Laurent expansion at $z=p_a$ in terms of the coordinate $\xi_a$. We denote $i_a f(z)(dz)^k$ by $f_a(\xi_a)(d\xi_a)^k$.
For any $a\in A\backslash\{\infty\}$, we define the linear map $j_a:\mathfrak{g}(\mathbb{P}^1,\ast p_A)\to\mathfrak{g}$ by $j_a(a\otimes f(z)(dz)^{1-k})=a\otimes f_a(\xi_a)(d\xi_a)^{1-k}$ and the linear map $j_\infty:\mathfrak{g}(\mathbb{P}^1,\ast p_A)\to\mathfrak{g}$ by $j_\infty(a\otimes f(z)(dz)^{1-k})=-\theta(a\otimes f_\infty(\xi_\infty)(d\xi_\infty)^{1-k})$. Then the linear map $j_\infty$ is well-defined since \begin{equation} j_\infty(a\otimes f(z)(dz)^{1-k})=-\sum_{n\le n_0}f_n\theta(J_n(a)),\; \theta(J_n(a))=(-1)^kJ_{-n}(e^{L_1}a), \end{equation} where $f_\infty(\xi_\infty)=\sum_{n\le n_0}f_n \xi_\infty^{n+k-1}$ (see \cite{NT}). The following proposition is fundamental.
\begin{proposition}[\cite{NT}, Proposition 5.1.3]\label{prop-Lie} For any $a\in A$, the linear map $j_a:\mathfrak{g}(\mathbb{P}^1,\ast p_A)\to\mathfrak{g}$ is a Lie algebra homomorphism. \end{proposition}
Let $M^a\,(a\in A)$ be $V$-modules. We set $M_A=\bigotimes_{a\in A}M^a$ and $\mathfrak{g}_A=\mathfrak{g}^{\oplus|A|}$. Let $\rho_a:\mathfrak{g}\to\End(M^a)$ be the representation defined by \eqref{eq-action} for $a\in A$. Then the linear map $\rho_A:\mathfrak{g}_A\to\End(M_A)$ defined by $\rho_A=\oplus_{a\in A}\rho_a$ is a representation of the Lie algebra $\mathfrak{g}_A$ on $M_A$. We denote the image of the Lie algebra homomorphism $j_A=\sum_{a\in A}j_a$ by $\mathfrak{g}_{p_A}^{out}$, which acts on $M_A$ via $\rho_A$. The following definition is given by \cite{NT}.
\begin{definition} The vector space $C^*(M_A, p_A)=\Hom_\mathbb{C}(M_A/\mathfrak{g}_{p_A}^{out}M_A,\mathbb{C})$ is called the space of conformal blocks at $p_A$. \end{definition}
\section{Logarithmic intertwining operators} In this section, we recall the notion of logarithmic intertwining operators and their properties according to \cite{M1}. \subsection{Definition}
\begin{definition}[\cite{M1}] Let $M^1,\,M^2$ and $M^3$ be weak $V$-modules. A {\it logarithmic intertwining operator of the type $\fusion{M^1}{M^2}{M^3}$} is a linear map \begin{align} &I(-,z):M^1\to\Hom_\mathbb{C}(M^2,M^3)\{z\}[\log z]\\ &I(u,z)=\sum_{n=0}^d\sum_{\alpha\in\mathbb{C}}u_{(\alpha)}^nz^{-\alpha-1}(\log z)^n \end{align} with the following properties:\\ \noindent i)\,(Truncation condition) For any $u_1\in M^1$, $u_2\in M^2$ and $0\le k\le d$, \begin{equation} (u_1)_{(\alpha)}^ku_2=0 \end{equation} for sufficiently large $\Real(\alpha)$.\\ \noindent ii)\,($L_{-1}$-derivative property) For any $u_1\in M^1$, \begin{equation} I(L_{-1}u_1,z)=\frac{d}{dz}I(u_1,z). \end{equation} \noindent iii)\, For all $a\in V$, $u_1\in M^1$, $u_2\in M^2$, $\alpha\in\mathbb{C}$, $0\le n\le d$ and $p,q\in\mathbb{Z}$, we have \begin{equation}\label{eq-borcherds} \begin{split} &\sum_{i=0}^\infty\binom{p}{i} (a_{(q+i)}u_1)_{(\alpha+p-i)}^n\\ &\qquad\qquad=\sum_{i=0}^\infty(-1)^i\binom{q}{i}(a_{(p+q-i)}(u_1)^n_{(\alpha+i)}-(-1)^q(u_1)_{(\alpha+q-i)}^na_{(p+i)}). \end{split} \end{equation}
We denote the space of logarithmic intertwining operators of the type $\fusion{M^1}{M^2}{M^3}$ by $I\fusion{M^1}{M^2}{M^3}$, that is, we use the same notation as usual intertwining operators. \end{definition}
Setting $q=0$ and $p=0$ in \eqref{eq-borcherds}, respectively, we have \begin{align} &[a_{(p)},(u_1)_{(\alpha)}^n]=\sum_{i=0}^\infty \binom{p}{i}(a_{(i)}u_1)_{(\alpha+p-i)}^n,\label{eq-com}\\ &(a_{(q)}u_1)_{(\alpha)}^n=\sum_{i=0}^\infty(-1)^i\binom{q}{i}\label{eq-ass} \{a_{(q-i)}(u_1)_{(\alpha+i)}^n-(-1)^q(u_1)_{(\alpha+q-i)}^na_{(i)}\} \end{align} and we call these, by abuse of terminology, the {\it commutator formula} and {\it associativity formula}, respectively. By the commutator formula, we have \begin{equation} [L_{-1}, u_{(\alpha)}^n]=(L_{-1}u)_{(\alpha)}^n\label{eq-derivation} \end{equation} for any $u\in M^1$ and $0\le n\le d$. By the associativity formula, \eqref{eq-derivation} and $L_{-1}$-derivative property, we have \begin{equation} (L_0u)_{(\alpha)}^n= \begin{cases} [L_0, (u)_{(\alpha)}^n]+(\alpha+1)(u)_{(\alpha)}^n-(n+1) (u)_{(\alpha)}^{n+1} & 0\le n\le d-1,\\ &\\ [L_0, (u)_{(\alpha)}^n]+(\alpha+1)(u)_{(\alpha)}^n & n=d\\ \end{cases} \label{eq-fund} \end{equation} for any $u\in M^1$.
\subsection{Properties for logarithmic intertwining operators}
Let $M^i=\bigoplus_{n=0}^\infty M^i_{(h_i+n)}\,(i=1,2,3)$ be objects in $\mathscr{C}_{k_i}$ for nonnegative integers $k_i \,(i=1,2,3)$ and complex numbers $h_i\,(i=1,2,3)$. Suppose that a logarithmic intertwining operator $I(-,z)$ of the type $\fusion{M^1}{M^2}{M^3}$ is of the form \begin{equation} I(u_1,z)=\sum_{n=0}^d\sum_{\alpha\in\mathbb{C}}(u_1)_{(\alpha)}^nz^{-\alpha-1}(\log z)^n. \end{equation} For any homogeneous element $u_i\in M^i\,(i=1,2)$, we introduce notations \begin{align}
x_1(u_1)_{(\alpha)}^nu_2&=((L_0-|u_1|)u_1)_{(\alpha)}^nu_2,\\
x_2(u_1)_{(\alpha)}^nu_2&=(u_1)_{(\alpha)}^n(L_0-|u_2|)u_2,\\
x_3(u_1)_{(\alpha)}^nu_2&=(L_0+\alpha+1-|u_1|-|u_2|)(u_1)_{(\alpha)}^nu_2. \end{align} Note that these operations $x_1$ and $x_2$ are mutually commutative (see \cite{M1}). By using these operations we get:
\begin{lemma}[\cite{HLZ}, Lemma 3.8]\label{lemma-fund} Let \begin{equation} I(-,z)=\sum_{n=0}^d\sum_{\alpha\in\mathbb{C}}(-)_{(\alpha)}^nz^{-\alpha-1}(\log z)^n \end{equation} be a logarithmic intertwining operator of the type $\fusion{M^1}{M^2}{M^3}$ and let $p, q$ be integers such that $p\ge0$ and $0\le q\le d$. Then \[ x_3^p(u_1)_{(\alpha)}^qu_2=\sum_{\ell=0}^N\binom{p}{\ell}\frac{(q+\ell)!}{q!} (x_1+x_2)^{p-\ell}(u_1)_{(\alpha)}^{q+\ell}u_2 \] for homogeneous $u_1\in M^1$ and $u_2\in M^2$ where $N=\min\{p,\,d-q\}$. \end{lemma}
The following proposition is proved in \cite{M1} by using differential equations and in \cite[Proposition 3.9]{HLZ} by using Lemma \ref{lemma-fund}.
\begin{proposition}[\cite{M1}, Proposition 1.10]\label{prop-eigen} Suppose that $M^i\in\mathscr{C}_{k_i}\,(i=1,2,3)$ for nonnegative integers $k_i$ and that $M^i=\bigoplus_{n=0}^\infty M^i_{(h_i+n)}$ for complex numbers $h_i\,(i=1,2,3)$. Let $I(-,z)\in I\fusion{M^1}{M^2}{M^3}$ be a logarithmic intertwining operator such that \begin{equation} I(u_1,z)=\sum_{n=0}^d\sum_{\alpha\in\mathbb{C}}(u_1)_{(\alpha)}^n z^{-\alpha-1}(\log z)^n \qquad (u_1\in M^1). \end{equation} \noindent {\rm(1)}\,For any homogeneous $u_i\in M^i\,(i=1,2)$ we have
$|(u_1)_{(\alpha)}^nu_2|=|u_1|+|u_2|-1-\alpha$ for all $0\le n\le d$.\\ \noindent {\rm(2)}\, For any $u_i\in M^i\,(i=1,2)$ we have \[ I(u_1,z)u_2\in\sum_{n=0}^{k_1+k_2+k_3}M^3((z))z^{h_3-h_1-h_2}(\log z)^n. \] \end{proposition}
\section{The space of $3$-point conformal blocks and logarithmic intertwining operators} In this section we focus on $3$-point conformal blocks in conformal field theories over the projective line. We prove that the space of $3$-point conformal blocks over $\mathbb{P}^1$ is isomorphic to the space of logarithmic intertwining operators. Almost the same result is found in \cite{Z1}; however, our categories of modules and the ones in \cite{Z1} are slightly different. \subsection{Main theorem} Set $A=\{1,2,\infty\}$ and let $p_A=\{0,1,\infty\}$ be the set of points on $\mathbb{P}^1$. Let $z$ be the inhomogeneous coordinate of $\mathbb{P}^1$. Then $\xi_0=z$, $\xi_1=z-1$ and $\xi_\infty=1/z$ are local coordinates of $\mathbb{P}^1$ at $0,1,$ and $\infty$, respectively. Take $V$-modules $M^1,M^2$ and $M^3$. We assume that there exist complex numbers $h_i\in\mathbb{C} \,(i=1,2,3)$ such that $M^i=\bigoplus_{n=0}^\infty M^i_{(h_i+n)}$ and that $M^i\in\mathscr{C}_{k_i}\,(i=1,2,3)$ for nonnegative integers $k_i \,(i=1,2,3)$. Let us set $M_A=M^1\otimes M^2\otimes M^3$. We denote the space of conformal blocks at $p_A=\{0,1,\infty\}$ by $C^*(M_A,p_A)$. We can now state the main theorem of the paper, which is a generalization of Zhu's result \cite[Proposition 7.4]{Z1}.
\begin{theorem}\label{thm-1} Let $M^i (i=1,2,3)$ be $V$-modules with $M^i=\bigoplus_{n=0}^\infty M^i_{(h_i+n)}$ and
$M^i\in\mathscr{C}_{k_i}\,(i=1,2,3)$ for nonnegative integers $k_i \,(i=1,2,3)$. The space of conformal blocks $C^*(M_A,p_A)$ at $p_A=\{0,1,\infty\}$ is isomorphic to the space of logarithmic intertwining operators of the type $\fusion{M^2}{M^1}{D(M^3)}$. \end{theorem}
Let $C_2(V)$ be the vector subspace of $V$ spanned by vectors of the form $a_{(2)}b\,(a,b\in V)$. If $\dim V/C_2(V)<\infty$ we say that $V$ satisfies {\it Zhu's finiteness condition} which is introduced in \cite{Z2}.
By combining \cite[Theorem 5.8.1]{NT} and the theorem we get: \begin{corollary} If $V$ satisfies Zhu's finiteness condition then the space of intertwining operators is finite-dimensional. \end{corollary}
\subsection{Proof of Theorem \ref{thm-1}} For any logarithmic intertwining operator $I(-,z)$ of the type $\fusion{M^2}{M^1}{D(M^3)}$, we define $F\in\Hom_\mathbb{C}(M_A,\mathbb{C})$ by \begin{equation}\label{eq-main-def-1} \left\langle F, u_1\otimes u_2\otimes u_3\right\rangle=\left\langle I(u_2,1)u_1,u_3\right\rangle \end{equation} for any $u_1\in M^1$, $u_2\in M^2$ and $u_3\in M^3$. For any $V$-module $M\in\mathscr{C}_{k}$ we define the operator $z^{L_0}:M\to M\{z\}[\log z]$ by \begin{equation}
z^{L_0}u=\sum_{j=0}^{k}\frac{1}{j!}(L_0-|u|)^jz^{|u|}(\log z)^j. \end{equation} For any $x\in C^*(M_A,p_A)$, we define $I_x(-,z)\in\Hom_\mathbb{C}(M^1, D(M^3))\{z\}[\log z]$ by \begin{multline}\label{eq-main-def-2} \left\langle I_x(u_2,z)u_1,u_3\right\rangle\\ =\left\langle x, z^{-L_0}u_1\otimes z^{-L_0}u_2\otimes z^{L_0}u_3\right\rangle \text{ for all }u_i\in M^i\,(i=1,2,3). \end{multline}
We are going to give a proof of the theorem by dividing it into three steps. In step 1 we prove that $F$ belongs to $C^*(M_A,p_A)$ and show that $I_x$ is a logarithmic intertwining operator among $V$-modules in step 2. The final step is devoted to the proof that the correspondence between $F$ and $I_x$ is one-to-one.
\noindent {\bf (Step 1)} In order to prove that $F$ belongs to $C^*(M_A,p_A)$, by the definition of the space of conformal blocks, it is sufficient to prove that \begin{equation}\label{eq-main-0} \begin{split} &\left\langle F, j_0(a\otimes f(z)(dz)^{1-k})u_1\otimes u_2\otimes u_3\right\rangle\\ &\quad\qquad\qquad+\left\langle F, u_1\otimes j_1(a\otimes f(z)(dz)^{1-k})u_2\otimes u_3\right\rangle\\ &\qquad\qquad\qquad\qquad\qquad+\left\langle F, u_1\otimes u_2\otimes j_\infty(a\otimes f(z)(dz)^{1-k})u_3\right\rangle =0 \end{split} \end{equation}
for all $a\in V_k$ and $f(z)(dz)^{1-k}\in H^0(\mathbb{P}^1,\Omega^{1-k}(\ast p_A))$. It is well known that $\{z^p(z-1)^q(dz)^{1-k}\,|\,p,q\in\mathbb{Z}\}$ is a topological basis of $H^0(\mathbb{P}^1,\Omega^{1-k}(\ast p_A))$. Therefore, it is enough to show \eqref{eq-main-0} for $f(z)=z^p(z-1)^q\,(p,q\in\mathbb{Z})$. First of all we have \begin{equation}\label{eq-main-1} \begin{split} j_0(a\otimes z^p(z-1)^q(dz)^{1-k})u_1&=\Bigl(\sum_{i=0}^\infty(-1)^{q-i}\binom{q}{i}a\otimes \xi_0^{p+i}(d\xi_0)^{1-k}\Bigr)u_1\\ &=\sum_{i=0}^\infty(-1)^{q-i}\binom{q}{i}J_{p+i-k+1}(a)u_1\\ &=\sum_{i=0}^\infty(-1)^{q-i}\binom{q}{i}a_{(p+i)}u_1 \end{split} \end{equation} and secondly \begin{equation}\label{eq-main-2} \begin{split} j_1(a\otimes z^p(z-1)^q(dz)^{1-k})u_2&=\Bigl(\sum_{i=0}^\infty\binom{p}{i}a\otimes \xi_1^{q+i}(d\xi_1)^{1-k}\Bigr)u_2\\ &=\sum_{i=0}^\infty\binom{p}{i}J_{q+i-k+1}(a)u_2\\ &=\sum_{i=0}^\infty\binom{p}{i}a_{(q+i)}u_2 \end{split} \end{equation} and finally \begin{equation}\label{eq-main-3} \begin{split} j_\infty(a\otimes z^p(z-1)^q(dz)^{1-k})u_3&=-\theta\Bigl(\sum_{i=0}^\infty(-1)^i\binom{q}{i}a\otimes \xi_\infty^{p+q-i}(d\xi_\infty)^{1-k}\Bigr)u_3\\ &=-\sum_{i=0}^\infty(-1)^i\binom{q}{i}\theta(J_{p+q-i-k+1}(a))u_3 \end{split} \end{equation} for all $a\in V_k$ and $p,q\in\mathbb{Z}$.
By \eqref{eq-main-1}--\eqref{eq-main-3}, the definition of the functional $F$, Proposition \ref{prop-dual} and Proposition \ref{prop-eigen}, the left-hand side of \eqref{eq-main-0} is equal to \begin{equation} \begin{split} &\sum_{i=0}^\infty(-1)^{q-i}\binom{q}{i}\left\langle (u_2)^0_{(\alpha+q-i)}a_{(p+i)}u_1, u_3\right\rangle\\ &\quad\qquad\qquad+\sum_{i=0}^\infty\binom{p}{i}\left\langle(a_{(q+i)}u_2)_{(\alpha+p-i)}^0u_1,u_3\right\rangle\\ &\qquad\qquad\qquad\qquad\qquad-\sum_{i=0}^\infty(-1)^i\binom{q}{i}\left\langle a_{(p+q-i)}(u_2)_{(\alpha-i)}^0u_1, u_3 \right\rangle \end{split} \end{equation}
where $\alpha=|u_1|+|u_2|-|u_3|+k-2-p-q$, which vanishes by $\eqref{eq-borcherds}$. Hence \eqref{eq-main-0} is proved.
\noindent {\bf(Step 2)} We now prove that $I_x(-,z)\in I\fusion{M^2}{M^1}{D(M^3)}$.
Since $M^i=\bigoplus_{n=0}^\infty M^i_{(h_i+n)}\in\mathscr{C}_{k_i}\,(i=1,2,3)$ we have \begin{equation}\label{eq-main-4} \begin{split} &z^{-L_0}u_1\otimes z^{-L_0}u_2\otimes z^{L_0}u_3= \sum_{n_1=0}^{k_1}\sum_{n_2=0}^{k_2}\sum_{n_3=0}^{k_3} \frac{(-1)^{n_1+n_2}}{n_1!n_2!n_3!}\\
&\qquad\qquad\qquad\qquad\times(L_0-|u_1|)^{n_1}u_1\otimes(L_0-|u_2|)^{n_2}u_2\otimes
(L_0-|u_3|)^{n_3}u_3\\
&\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\times z^{|u_3|-|u_1|-|u_2|}(\log z)^{n_1+n_2+n_3} \end{split} \end{equation} for homogeneous $u_1\in M^1$, $u_2\in M^2$ and $u_3\in M^3$. Then the left-hand side of \eqref{eq-main-def-2} is an element in $\mathbb{C}[z,z^{-1}]z^{-h_1-h_2+h_3}[\log z]$. Therefore $\left\langle I_x(u_2,z)u_1,-\right\rangle=\left\langle x, z^{-L_0}u_1\otimes z^{-L_0}u_2\otimes z^{L_0}-\right\rangle$ gives an element of the space \begin{equation} \Hom_\mathbb{C}(M_3,\mathbb{C})[[z,z^{-1}]]z^{-h_1-h_2+h_3}[\log z], \end{equation} which shows $I_x(u_2,z)\in \Hom_\mathbb{C}(M_1, D(M_3))[[z,z^{-1}]]z^{-h_1-h_2+h_3}[\log z]$. Therefore we can write \begin{equation} I_x(u_2,z)=\sum_{n=0}^d\sum_{\alpha\in\mathbb{Z}+h_1+h_2-h_3}(u_2)_{(\alpha)}^nz^{-\alpha-1}(\log z)^n. \end{equation}
For fixed $u_1\in M^1_{(h_1+\ell_1)}$ and $u_2\in M^2_{(h_2+\ell_2)}$ with nonnegative integers $\ell_1$ and $\ell_2$, we have by \eqref{eq-main-4} \begin{equation}\label{eq-main-10} \langle I_x(u_2,z)u_1, u_3\rangle=\sum_{n=0}^{k_1+k_2+k_3}\sum_{\ell=0}^\infty c_\ell^n z^{h_3-h_1-h_2-\ell_1-\ell_2+\ell} (\log z)^n \end{equation} where $c_\ell^n$ are complex numbers. The \eqref{eq-main-10} implies that $(u_2)_{(\alpha)}^nu_1=0$ for $\alpha>h_3-h_1-h_2+\ell_1+\ell_2-1$. Hence $I_x(-,z)$ satisfies the truncation condition.
In order to prove $L_{-1}$-derivative property, we first note that \begin{equation}\label{eq-main-11} \begin{split} &\left\langle x, j_0(\omega\otimes z(dz)^{-1})z^{-L_0}u_1\otimes z^{-L_0}u_2\otimes z^{L_0}u_3\right\rangle\\ &\quad\qquad+\left\langle x, z^{-L_0}u_1\otimes j_1(\omega\otimes z(dz)^{-1})z^{-L_0}u_2\otimes z^{L_0}u_3 \right\rangle\\ &\quad\qquad\qquad+\left\langle x, z^{-L_0}u_1\otimes z^{-L_0}u_2\otimes j_\infty(\omega\otimes z(dz)^{-1})z^{L_0}u_3 \right\rangle=0. \end{split} \end{equation} The left-hand side of \eqref{eq-main-11} turns to be \begin{equation}\label{eq-deri} \begin{split} &\left\langle x,L_0z^{-L_0}u_1\otimes z^{-L_0}u_2\otimes z^{L_0}u_3\right\rangle\\ &\quad\qquad\qquad\qquad+\left\langle x, z^{-L_0}u_1\otimes (L_0+L_{-1})z^{-L_0}u_2\otimes z^{L_0}u_3 \right\rangle\\ &\quad\qquad\qquad\qquad\qquad\qquad\qquad-\left\langle x, z^{-L_0}u_1\otimes z^{-L_0}u_2\otimes L_0z^{L_0}u_3 \right\rangle. \end{split} \end{equation} From now on each term in \eqref{eq-deri} is simplified. Let us consider the second term of \eqref{eq-deri}. Since $[L_{-1},L_0]=-L_{-1}$, we have \begin{equation} \left\langle x, z^{-L_0}u_1\otimes L_{-1}z^{-L_0}u_2\otimes z^{L_0}u_3 \right\rangle =z\left\langle I_x(L_{-1}u_2,z)u_1, u_3\right\rangle, \end{equation} which shows \begin{equation}\label{eq-deri1} \begin{split} z\left\langle I_x(u_2,z)u_1,u_3\right\rangle =&-\left\langle x,L_0z^{-L_0}u_1\otimes z^{-L_0}u_2\otimes z^{L_0}u_3\right\rangle\\ &-\left\langle x, z^{-L_0}u_1\otimes L_0z^{-L_0}u_2\otimes z^{L_0}u_3 \right\rangle\\ &+\left\langle x, z^{-L_0}u_1\otimes z^{-L_0}u_2\otimes L_0z^{L_0}u_3 \right\rangle. \end{split} \end{equation} The first term of \eqref{eq-deri1} can be calculated to be \begin{equation}\label{eq-deri2} \begin{split} &\left\langle x,L_0z^{-L_0}u_1\otimes z^{-L_0}u_2\otimes z^{L_0}u_3\right\rangle\\ &=\sum_{n_1=0}^{k_1-1}\sum_{n_2=0}^{k_2}\sum_{n_3=0}^{k_3} \frac{(-1)^{n_1+n_2}}{n_1!n_2!n_3!}
z^{-|u_1|-|u_2|+|u_3|}(\log z)^{n_1+n_2+n_3}\\
&\quad\times\left\langle x,(L_0-|u_1|)^{n_1+1}u_1\otimes (L_0-|u_2|)^{n_2}u_2\otimes (L_0-|u_3|)^{n_3}u_3\right\rangle\\
&\qquad\qquad+|u_1|\left\langle x, z^{-L_0}u_1\otimes z^{-L_0}u_2\otimes z^{L_0}u_3\right\rangle. \end{split} \end{equation} Similarly, the second term of \eqref{eq-deri1} becomes \begin{equation}\label{eq-deri3} \begin{split} &\left\langle x,z^{-L_0}u_1\otimes L_0z^{-L_0}u_2\otimes z^{L_0}u_3\right\rangle\\ &=\sum_{n_1=0}^{k_1}\sum_{n_2=0}^{k_2-1}\sum_{n_3=0}^{k_3}
\frac{(-1)^{n_1+n_2}}{n_1!n_2!n_3!}z^{-|u_1|-|u_2|+|u_3|}(\log z)^{n_1+n_2+n_3}\\
&\quad\times\left\langle x,(L_0-|u_1|)^{n_1}u_1\otimes (L_0-|u_2|)^{n_2+1}u_2\otimes (L_0-|u_3|)^{n_3}u_3\right\rangle\\
&\qquad\qquad+|u_2|\left\langle x, z^{-L_0}u_1\otimes z^{-L_0}u_2\otimes z^{L_0}u_3\right\rangle. \end{split} \end{equation} Finally, the third term is \begin{equation}\label{eq-deri4} \begin{split} &\left\langle x,z^{-L_0}u_1\otimes z^{-L_0}u_2\otimes L_0z^{L_0}u_3\right\rangle\\ &=\sum_{n_1=0}^{k_1}\sum_{n_2=0}^{k_2}\sum_{n_3=0}^{k_3-1}
\frac{(-1)^{n_1+n_2}}{n_1!n_2!n_3!}z^{-|u_1|-|u_2|+|u_3|}(\log z)^{n_1+n_2+n_3}\\
&\quad\times\langle x,(L_0-|u_1|)^{n_1}u_1\otimes (L_0-|u_2|)^{n_2}u_2\otimes (L_0-|u_3|)^{n_3+1}u_3\rangle\\
&\qquad\qquad-|u_3|\left\langle x, z^{-L_0}u_1\otimes z^{-L_0}u_2\otimes z^{L_0}u_3\right\rangle. \end{split} \end{equation} In all of the calculations given above we have used the fact that each $M^i$ is an object in $\mathscr{C}_{k_i}$.
By using \eqref{eq-deri1}--\eqref{eq-deri4} we obtain \begin{align} &\frac{d}{dz}\left\langle x, z^{-L_0}u_1\otimes z^{-L_0}u_2\otimes z^{L_0}u_3\right\rangle\\ &\qquad =-z^{-1}\left\langle x,L_0z^{-L_0}u_1\otimes z^{-L_0}u_2\otimes z^{L_0}u_3\right\rangle\notag\\ &\qquad\qquad -z^{-1}\left\langle x, z^{-L_0}u_1\otimes (L_0)z^{-L_0}u_2\otimes z^{L_0}u_3\right\rangle\notag\\ &\qquad\qquad\qquad+z^{-1}\left\langle x, z^{-L_0}u_1\otimes z^{-L_0}u_2\otimes L_0z^{L_0}u_3\right\rangle,\notag \end{align} which shows \begin{equation} \frac{d}{dz}\left\langle x, z^{-L_0}u_1\otimes z^{-L_0}u_2\otimes z^{L_0}u_3\right\rangle =\frac{d}{dz}\left\langle I_x(u_2,z)u_1,u_3\right\rangle. \end{equation} Hence we have proved the $L_{-1}$-derivative property \begin{equation} I_x(L_{-1}u_2,z)u_1=\dfrac{d}{dz}I_x(u_2,z)u_1. \end{equation}
Finally we will show \eqref{eq-borcherds}. Since $x$ is a conformal block, for any $p,\,q\in\mathbb{Z}$ and $a\in V_k$, we have
\begin{equation} \begin{split} &\langle x, j_0(a\otimes z^p(z-1)^q(dz)^{1-k})z^{-L_0}u_1\otimes z^{-L_0}u_2\otimes z^{L_0}u_3\rangle\\ &\quad+\langle x, z^{-L_0}u_1\otimes j_1(a\otimes z^p(z-1)^q(dz)^{1-k})z^{-L_0}u_2\otimes z^{L_0}u_3\rangle\\ &\qquad+\langle x, z^{-L_0}u_1\otimes z^{-L_0}u_2\otimes j_\infty(a\otimes z^p(z-1)^q(dz)^{1-k})z^{L_0}u_3\rangle=0. \end{split} \end{equation} By \eqref{eq-main-1}--\eqref{eq-main-3}, \eqref{eq-involution} and $[L_0, a_{(n)}]=(k-n-1)a_{(n)}$ we have \begin{equation}\label{eq-borcherds-prove} \begin{split} &\sum_{i=1}^\infty(-1)^{q+i}\binom{q}{i}z^{-p-i+k-1}\langle I_x(u_2,z)a_{(p+i)}u_1, u_3\rangle\\ &\quad+\sum_{i=1}^\infty\binom{p}{i}z^{-q-i+k-1}\langle I_x(a_{(q+i)}u_2,z)u_1, u_3\rangle\\ &\qquad-\sum_{i=0}^\infty(-1)^i\binom{q}{i}z^{-p-q+i+k-1}\langle a_{(p+q-i)}I_x(u_2, z)u_1, u_3\rangle =0. \end{split} \end{equation} Recall that $I_x(u_2,z)u_1=\sum_{n=0}^d\sum_{\alpha\in\mathbb{C}}(u_2)_{(\alpha)}^nu_1z^{-\alpha-1}(\log z)^n$. Then taking the coefficient of $z^{-\alpha-p-q+k-2}(\log z)^n$ in \eqref{eq-borcherds-prove} gives \eqref{eq-borcherds}.
\noindent {\bf (Step 3)} We will show that $F=x$ for any $x\in C^*(M_A,p_A)$ and that $I_{F}(-,z)=I(-,z)$ for $I(-,z)\in I\fusion{M^2}{M^1}{D(M^3)}$.
Suppose that $I_x(u_2,z)=\sum_{n=0}^d\sum_{\alpha\in\mathbb{C}}(u_2)_{(\alpha)}^nz^{-\alpha-1}(\log z)^n$. By \eqref{eq-main-4}, we have \begin{equation} \begin{split} \langle F, u_1\otimes u_2\otimes u_3\rangle&=\langle I_x(u_2,1)u_1, u_3\rangle\\
&=\langle (u_2)_{(|u_1|+|u_2|-|u_3|-1)}^0u_1, u_3 \rangle\\ &=\langle x, u_1\otimes u_2\otimes u_3\rangle \end{split} \end{equation} for any homogeneous $u_i\in M^i\,(i=1,2,3)$, which implies $F=x$.
Conversely, we see that \begin{equation}\label{eq-425} \begin{split} &\langle I_{F}(u_2,z)u_1, u_3\rangle\\ &=\langle F, z^{-L_0}u_1\otimes z^{-L_0}u_2\otimes z^{L_0}u_3\rangle\\ &=\sum_{n_1=0}^{k_1}\sum_{n_2=0}^{k_2}\sum_{n_3=0}^{k_3} \frac{(-1)^{n_1+n_2}}{n_1!n_2!n_3!}\\
&\quad\times\langle F, (L_0-|u_1|)^{n_1}u_1\otimes (L_0-|u_2|)^{n_2}u_2
\otimes (L_0-|u_3|)^{n_3}u_3\rangle\\
& \quad\times z^{-|u_1|-|u_2|+|u_3|}(\log z)^{n_1+n_2+n_3}\\ &=\sum_{n_1=0}^{k_1}\sum_{n_2=0}^{k_2}\sum_{n_3=0}^{k_3} \frac{(-1)^{n_1+n_2}}{n_1!n_2!n_3!}\\
&\times\langle ((L_0-|u_2|)^{n_2}u_2)_{(\alpha)}^0(L_0-|u_1|)^{n_1}u_1, (L_0-|u_3|)^{n_3}u_3\rangle\\
&\quad\times z^{-|u_1|-|u_2|+|u_3|}(\log z)^{n_1+n_2+n_3}\\ &=\sum_{n_1=0}^{k_1}\sum_{n_2=0}^{k_2}\sum_{n_3=0}^{k_3} \frac{(-1)^{n_1+n_2}}{n_1!n_2!n_3!}\\
&\times\langle (L_0-|u_3|)^{n_3} ((L_0-|u_2|)^{n_2}u_2)_{(\alpha)}^0(L_0-|u_1|)^{n_1}u_1, u_3\rangle\\
&\quad\times z^{-|u_1|-|u_2|+|u_3|}(\log z)^{n_1+n_2+n_3} \end{split} \end{equation}
where $\alpha=|u_1|+|u_2|-|u_3|-1$. On the other hand, by Lemma \ref{lemma-fund}, we have \begin{equation}\label{eq-426} \begin{split} &\sum_{n_1+n_2+n_3=k}
\frac{(-1)^{n_1+n_2}}{n_1!n_2!n_3!} (L_0-|u_3|)^{n_3} ((L_0-|u_2|)^{n_2}u_2)_{(\alpha)}^0(L_0-|u_1|)^{n_1}u_1\\ &=\sum_{n_1+n_2+n_3=k} \frac{(-1)^{n_1+n_2}}{n_1!n_2!n_3!} \sum_{\ell=0}^{n_3}\binom{n_3}{\ell}\ell!(x_1+x_2)^{n_3-\ell}x_1^{n_1}x_2^{n_2} (u_2)_{(\alpha)}^{\ell}u_1\\ &=\sum_{n_1+n_2+n_3=k} \frac{(-1)^{n_1+n_2}}{n_1!n_2!n_3!} \sum_{\ell=0}^{n_3}\binom{n_3}{\ell}\ell!(x_1+x_2)^{n_3-\ell}x_1^{n_1}x_2^{n_2} (u_2)_{(\alpha)}^{\ell}u_1\\ &=\sum_{\ell=0}^{k}\sum_{n_3=\ell}^{k}\Bigl(\sum_{n_1+n_2=k-n_3}\frac{(-1)^{n_1+n_2}}{n_1!n_2!(n_3-\ell)!} (x_1+x_2)^{n_3-\ell}x_1^{n_1}x_2^{n_2} (u_2)_{(\alpha)}^{\ell}u_1\Bigr)\\ &=\sum_{\ell=0}^{k}\sum_{n_3=0}^{k-\ell}\Bigl(\sum_{n_1+n_2=k-\ell-n_3}\frac{(-1)^{n_1+n_2}}{n_1!n_2!n_3!} (x_1+x_2)^{n_3}x_1^{n_1}x_2^{n_2} (u_2)_{(\alpha)}^{\ell}u_1\Bigr)\\ &=\sum_{\ell=0}^{k}\Bigl(\sum_{n_1+n_2+n_3=k-\ell}\frac{(-1)^{n_1+n_2}}{n_1!n_2!n_3!} (x_1+x_2)^{n_3}x_1^{n_1}x_2^{n_2} (u_2)_{(\alpha)}^{\ell}u_1\Bigr)\\ &=\sum_{\ell=0}^k \frac{1}{(k-\ell)!}(-x_1-x_2+x_1+x_2)^{k-\ell} (u_2)_{(\alpha)}^{\ell}u_1\\ &=(u_2)_{(\alpha)}^{k}u_1. \end{split} \end{equation} Therefore, by combining \eqref{eq-425} and \eqref{eq-426} we obtain \begin{equation} \langle I_{F_I}(u_2,z)u_1, u_3\rangle=\langle I(u_2,z)u_1, u_3\rangle \end{equation} for homogeneous $u_i\in M^i\,(i=1,2,3)$. The theorem is proved.
\end{document} |
\begin{document}
\title{Domination Cover Pebbling: Structural Results}
\author{ Nathaniel G. Watson\\ Department of Mathematics\\ Washington University at St.~Louis\and Carl R.~Yerger\\ Department of Mathematics\\ Georgia Institute of Technology}
\maketitle
\begin{abstract}This paper continues the results of ``Domination Cover Pebbling: Graph Families.'' An almost sharp bound for the domination cover pebbling (DCP) number, $\psi(G)$, for graphs $G$ with specified diameter has been computed. For graphs of diameter two, a bound for the ratio between $\lambda(G)$, the cover pebbling number of $G$, and $\psi(G)$ has been computed. A variant of domination cover pebbling, called subversion DCP is introduced, and preliminary results are discussed. \end{abstract}
\section{Introduction} \phantom{space } Given a graph $G$ we distribute a finite number of indistinguishable markers
called \emph{pebbles} on its vertices. Such an arrangement of pebbles, which can also be thought of as a function from $V(G)$ to $\mathbb{N} \cup \{0\},$ is called a \emph{configuration}. A \emph{pebbling move} on a graph is defined as taking two pebbles off one vertex, throwing one away, and moving the other to an adjacent vertex. Most research in pebbling has focused on a quantity known as the \emph{pebbling number} $\pi(G)$ of a graph, introduced by F. Chung in \cite{Chung}, which is defined to be the smallest integer $n$ such that for every configuration of $n$ pebbles on the graph and for any vertex $v \in G,$ there exists a sequence of pebbling moves starting at this configuration and ending in a configuration in which there is at least one pebble on $v$. A new variant of this concept, introduced by Crull et al.\ in \cite{Crull}, is the \emph{cover pebbling number} $\lambda(G)$, defined as the minimum number $m$ such that for any initial configuration of at least $m$ pebbles on $G$ it is possible to make a sequence of pebbling moves after which there is at least one pebble on every vertex of $G$.
In a recent paper (\cite{VNIDCP1}) the authors, along with Gardner, Godbole, Teguia, and Vuong, have introduced a concept called domination cover pebbling and have presented some preliminary results.
Given a graph $G,$ and a configuration $c,$ we call a vertex $v \in G$ \emph{dominated} if it is covered (occupied by a pebble) or adjacent to a covered vertex. We call a configuration $c'$ \emph{domination cover pebbling solvable}, or simply \emph{solvable}, if there is a sequence of pebbling moves starting at $c'$ after which every vertex of $G$ is dominated. We define the \emph{domination cover pebbling number} $\psi(G)$ to be the minimum number $n$ such that any initial configuration of $n$ pebbles on $G$ is domination cover pebbling solvable.
The set of covered vertices in the final configuration depends, in general, on the initial configuration---in particular, it need not be a minimum dominating set. For instance, consider the configurations of pebbles on $P_4$, the path on four vertices, as shown in Figure~\ref{ex2}: \begin{figure}
\caption{An example where two different initial configurations produce two different domination cover solutions.}
\label{ex2}
\end{figure}
For the graph on the left, we make pebbling moves so that the first and third vertices (from left to right) form the vertices of the dominating set. However, for the graph on the right, we make pebbling moves so that the second and fourth vertices are selected to be the vertices of the dominating set. In some cases, moreover, it takes more vertices than are in a minimum dominating set to form the domination cover solution. For example, in Figure~\ref{ex1} we consider the case of the binary tree with height two, where the minimum dominating set has two vertices, but the minimal dominating set possible for a domination cover solution has three vertices. This corresponds to several possible starting configurations, for example the configuration pictured, the configuration with a pebble at the leftmost bottom vertex and 4 pebbles at the root, and the configuration with 1 and 10 pebbles at the leftmost and rightmost bottom level vertices respectively. \begin{figure}
\caption{A reachable minimal configuration of pebbles on $B_2$ that forces a domination cover solution.}
\label{ex1}
\end{figure}
The above two facts constitute the main reason why domination cover pebbling is nontrivial. We refer the reader to \cite{haynes} for additional exposition on domination in graphs, and to \cite{VNIDCP1} for some further explanation of the domination cover pebbling number, including the computation of the domination cover pebbling number for some families of graphs.
One way to understand the size of the numbers $\pi(G), \lambda(G),$ and $\psi(G)$ is to find a bound for the size of these numbers given the diameter of $G$ and the number of vertices. This has been done for $\pi(G)$ for graphs of diameter two in \cite{Clarke} and for graphs of diameter three in \cite{Bukh}. A theorem proven in \cite{jonas} and \cite{stacking} gives as a corollary a sharp bound for graphs of all diameters, which was originally established by other means in \cite{firstpaper}. In this paper, we prove that for graphs of diameter two with $n$ vertices, $\psi(G) \leq n-1$. For graphs of diameter $d,$ we show $\psi(G) \leq 2^{d-2}(n-2)+1$. We also compute that the ratio $\lambda(G) / \psi(G) \geq 3$ for graphs of diameter two.
Another way to extend cover pebbling is called subversion domination cover pebbling. A parameter $\omega$ used in calculating the vertex neighbor integrity of a graph $G$ counts the size of the largest undominated connected subset of $G$. When $\omega = 0$, this corresponds to domination cover pebbling. To conclude this paper, we provide some preliminary results for this generalized parameter.
\section{Diameter Two Graphs} In the next few sections, we will present structural domination cover pebbling results. \begin{thm} \label{dia2} For all graphs $G$ of order $n$ with maximum diameter two, $\psi(G) \leq n-1$. \end{thm}
\begin{proof} First, we show this bound is sharp by exhibiting a graph $G$ such that $\psi(G) > n-2$. Consider the star graph on $n$ vertices, and place a pebble on all of the outer vertices except one. This configuration of pebbles does not dominate the last outer vertex. Hence, $\psi(G) > n-2$.
To prove the theorem, we will show that, given a graph $G$ of diameter two on $n$ vertices, any configuration $c$ of $n-1$ pebbles on $G$ is solvable.
Given such a graph configuration $c$, let $S_1$ be the set of vertices $v \in G$ such that $c(v)
> 0$. Let $S_2$ be the set of vertices $w \in G$ such that $c(w)= 0$ and $w$ is adjacent to some vertex of $S_1,$ and let $S_3$ be the rest of the vertices, the ones that are neither in $S_1$ nor adjacent to a vertex of $S_1$. Let $a := |S_2|$, and $b := |S_3|$. Given a configuration $c'$, define the \emph{pairing number} $P(c')$ to be $\sum_{v \in G} $ $\max{\{0, \frac{c'(v)-1}{2}\}}$. It can easily be checked that $P(c)= \frac{a+b-1}{2}.$ Note that if $P(c') = k$ then $c'$ contains at least $\lceil k \rceil$ disjoint pairs of pebbles, which means that we can make at least $\lceil k \rceil$ pebbling moves. Also, note that every vertex in $G$ is at distance at most two from some vertex in $S_1.$ This ensures that every vertex in $S_3$ is adjacent to a vertex in $S_2.$ Also, if some vertex in $S_1$ is not adjacent to a vertex of $S_2$, it must be adjacent only to vertices in $S_1$. Since this vertex has distance at most two from any other vertex on the graph, we conclude that every vertex of the graph is either in $S_1$ or adjacent to a vertex of $S_1,$ meaning that $G$ is already dominated by covered vertices, as desired. Therefore, it suffices to consider the case in which $S_2$ is a dominating set of $G.$
First, suppose that $a \leq b$. In this case, $P(c) \geq \frac{2a-1}{2}$. Hence, there are at least $a$ disjoint pairs of pebbles that can be moved from elements in $S_1$ to $S_2$. For each uncovered vertex $v \in S_2$, if possible, move a pair of pebbles from an adjacent element of $S_1$ to put a pebble on $v$. After this is done for as many vertices of $S_2$ as possible, let $L$ be the set vertices in $S_2$ which are still uncovered. Note that these vertices are necessarily at distance $2$ from all remaining pairs of pebbles. Furthermore, since $S_1$ initially had at least $a$ disjoint pairs of pebbles, there remain at least as many pairs as there are vertices in $L.$ If this number is $0,$ the dominating set $S_2$ is covered and we are done. Otherwise, we nonetheless now know $S_3$ is dominated because if there were some vertex $y$ that were adjacent to only those elements of $S_2$ which are also in $L,$
then the minimum distance between $y$ and a vertex in $S_1$ with a pair of pebbles is $3$, which is impossible. However, it may be the case for some $z \in L$ that the vertex in $S_1$ that $z$ was adjacent to lost its pebbles, and if this is the case, move a pair of pebbles from $S_1$ so that $z$ is dominated (this is always possible since our graph has diameter two). With the $|L|$ pairs of pebbles we have, we can ensure each vertex of $L$ is dominated. After this is done, $G$ will be completely dominated by covered vertices.
Now consider the case $a > b$. We know that $P(c) \geq \frac{2b-1}{2}$ and so there are at least $b$ pairs of pebbles available. Given any vertex $v$ in $S_3$ and a pair of pebbles on a vertex $w \in S_1$, we can use this pair to move to a vertex between $v$ and $w,$ which is clearly in $S_2.$ We now do this whenever necessary for each vertex of $S_3,$ first using those pairs which can be removed from vertices having at least 3 pebbles. Let $m$ be the number of moves that have been made. Then we know that $m$ vertices in $S_2$ now have pebbles on them. Furthermore we know $m \leq b,$ and since some of our moves may dominate multiple vertices of $S_3,$ thus making some other moves unnecessary, it is indeed possible that $m<b.$ In any case, after the moves are made, every vertex in $S_3 \cup S_1$ is dominated. If every vertex we have removed pebbles from is still covered, then the vertices of $S_2$ are still dominated and we are done.
Otherwise, we have removed pebbles from some vertex which had exactly two pebbles on it. Thus, these first $m$ pebbling moves subtract at most $\frac{2m-1}{2}$ from $P(c)$, leaving a pairing number of $\frac{a+b-2m}{2}\geq \frac{a-m}{2}$ for the configuration after these moves. At this point, since we were forced to use pebbles from a vertex that had only two pebbles, we know that every vertex that contributes to the pairing number has exactly two pebbles on it. Thus there are at least $a-m$ vertices in $S_1$ with two pebbles on them. We can use these pairs to dominate the $a-m$ vertices of $S_2$ which are not covered. This leaves $G$ dominated by covered vertices and therefore $\psi(G) \leq n-1$. \end{proof}
We can apply this theorem to prove a result about the ratio between the cover pebbling number and the domination cover pebbling number of a graph. We conjecture that this ratio holds for all graphs, but it does not seem that this can be directly proven using the structural bounds in this paper. \begin{thm} For all graphs $G$ of order $n$ with diameter two, $\lambda(G) / \psi(G) \geq 3$. \end{thm} \begin{proof} First, suppose that the minimum degree of a vertex of $G$ is less than or equal to $\lceil \frac{n-1}{2} \rceil$. By the previous theorem, we know that the maximum value of $\psi(G)$ is $n-1$. We now construct a configuration of pebbles on $G$ such that $\lambda(G) \geq 3n - 3$. Place $3n-3$ pebbles on any vertex $v$ that has a degree less than $\lceil \frac{n-1}{2} \rceil$. It takes $2$ pebbles to cover solve each vertex adjacent to $v$, at most $\lceil \frac{n-1}{2} \rceil$, and all the remaining vertices require $4$ pebbles. Since there are at least as many vertices a distance of $2$ away from $v$ as there are a distance of $1$ away from $G$, $3n-3$ pebbles or more are required to cover pebble all of the vertices except for $v$. Thus for this class of graphs, $\lambda(G) > 3n -3 \geq 3 \psi(G)$.
Now suppose that the minimum degree $k$ of a vertex in $G$ is greater than $\lceil \frac{n-1}{2} \rceil$. By a similar argument as the previous paragraph, notice that $\lambda(G)$ for any diameter two graph is at least $4n - 2m - 3$, where $m$ is the minimum degree of a vertex of $G$. Since $\lambda(G) \geq 4n - 2m - 3$, it suffices to show we can always solve a configuration $c$ of $\lfloor \frac{4n - 2m - 3}{3} \rfloor = k$ pebbles on $G$. Given a particular value for $m$ between $\lceil \frac{n + 1}{2} \rceil$ and $n -1$, we will construct a domination cover solution.
As long as there exist vertices of $G$ that have at least three pebbles and are adjacent to an unoccupied vertex, we haphazardly make moves from such vertices to adjacent unoccupied vertices. We claim that the resulting configuration has the desired property that the set of occupied vertices are a dominating set of $G$. First suppose that the algorithm is forced to terminate while there remains some vertex $v$ having at least three pebbles. Then this vertex must be adjacent only to occupied vertices of $G,$ and since the diameter of $G$ is two, these neighbors $v$ form a dominating set of $G$. Otherwise, if every vertex has less than three pebbles, it can easily be checked that the number of occupied vertices is now $\sum_{v \in G} \lceil \frac{c(v)}{2} \rceil \geq \lceil \frac{k}{2} \rceil.$ Since the minimum degree of a vertex in $G$ is $m,$ by the pigeonhole principle, if we now have $n - m$ or more vertices covered by a pebble, then every vertex of $G$ is dominated. So if $\lceil \frac{k}{2} \rceil \geq n-m$, we are finished. We see that \begin{equation*} \left\lceil \frac{\left\lfloor \frac{4n - 2m - 3}{3} \right\rfloor}{2} \right\rceil \geq \left\lceil \frac{ \frac{4n - 2m - 5}{3} }{2} \right\rceil = \left\lceil \frac{4n}{6} - \frac{m}{3} - \frac{5}{6} \right\rceil \end{equation*} Therefore, we are done if
\begin{equation*} \left\lceil \frac{4n}{6} - \frac{m}{3} - \frac{5}{6} \right\rceil \geq n-m, \end{equation*} which is equivalent to \begin{equation*} n \leq \left\lceil \frac{4n}{6} + \frac{2m}{3} - \frac{5}{6} \right\rceil.\end{equation*} This inequality holds for $m \geq \lceil \frac{n + 1}{2}\rceil$. Therefore, we have completed this case and have shown that for all graphs $G$ of diameter two, $\lambda(G) / \psi(G) \geq 3$.
\end{proof} \noindent We now prove a more general bound for graphs of diameter $d$. \section{Graphs of Diameter $d$}
\begin{thm}
Let $G$ be a graph of diameter $d \geq 3$ and order $n$. Then $\psi(G) \leq 2^{d-2}(n-2)+1.$ \end{thm}
Throughout the proof, we adopt the convention that if $G$ is a graph and $V$ and $W$ are subsets of $V(G)$ and $v \in V(G)$ then $d(v,W)= \min_{w \in W} d(v,w)$ and $d(V,W)= \min_{v \in V} d(v,W).$ Also, for any set $S \subseteq V(G)$ we of course let $S^C = V(G) \setminus S.$ \begin{proof} First, we define the \emph{clumping number} $\chi$ of a configuration $c'$ by $$\chi(c') := \sum_{v \in \, G} 2^{d-2}\max\left( \left\lfloor \frac{c'(v)-1}{2^{d-2}}\right\rfloor, \ 0 \right).$$ The clumping number counts the number of pebbles in a configuration which are part of disjoint ``clumps'' of size $2^{d-2}$ on a single vertex, with one pebble on each occupied vertex ignored.
Now let $c$ be a configuration on $G$ of size at least $2^{d-2}(n-2)+1.$ We will show that $c$ is solvable
by giving a recursively defined algorithm for solving $c$ through a sequence of pebbling moves. First, we make some definitions to begin the algorithm: \begin{itemize} \item $c_0=c$. \item $A_0 = \{ v \in G \ : \ c(v) > 0 \}$. \item $B_0 = \{ v \in G \ : \ c(v) \geq 2^{d-2}+1 \}$. \item $C_0 = V(G) - A_0$. \item $D_0 = \emptyset$. \end{itemize}
We will describe our algorithm by recursively defining a sequence of configurations $c_p$ and four sequences $ A_p, B_p, C_p, $ and $D_p$ of sets of vertices. At each step, we will need to make sure a few conditions hold, to ensure that the next step of the algorithm may be performed. For each $m$, we will insist that: \begin{enumerate}
\item For every $v \in C_m \cup D_m$, $c_m(v) = 0$ and for every $v \in A_m,$ $c_m(v) > 0$.
\item $\chi(c_m) \geq 2^{d-2}(|C_m| - 1)$.
\item $|C_m| \leq |C_0| - m$. \item $B_m = \{ v \in G \ : \ c_m(v) \geq 2^{d-2}+1 \}$. \item If both $B_m \not= \emptyset$ and $D_m \not= \emptyset,$ $d(B_m, D_m) = d$ ; If $D_m \not= \emptyset,$ there always exists some $v \in G$ such that $d(v, D_m) = d,$ even if $B_m = \emptyset$. \item $A_m, C_m,$ and $D_m$ are pairwise disjoint and $A_m \cup C_m \cup D_m = V(G)$. \item Every vertex of $D_m$ is dominated by $c_m$. \item There exists a sequence of pebbling moves transforming $c$ to $c_m$. \end{enumerate} Note by 1, 4, and 6, we will always have $B_m \subseteq A_m.$ Also, by 1, 6, and 7, every vertex of $G$ which is not dominated by $c_m$ is in $C_m.$
For $m=0$, only condition 2 is not immediately clear. To verify it, note that \begin{eqnarray*} \chi(c) &=& \sum_{v \in G} 2^{d-2} \max \left( \left\lfloor \frac{c(v)-1}{2^{d-2}}\right\rfloor, \ 0 \right) \\ & =& \sum_{v \in A_0} 2^{d-2} \left\lfloor \frac{c(v)-1}{2^{d-2}}\right\rfloor \\ & \geq & \sum_{v \in A_0} 2^{d-2}\left(\frac{c(v)}{2^{d-2}}-1\right). \end{eqnarray*} Using the fact that the size of $c$ is at least
$2^{d-2}(n-2)+1,$ and $|C_0|=n-|A_0|,$ we see
$$\chi(c) \geq (2^{d-2}(n-2)+1)-2^{d-2} |A_0|=2^{d-2}(|C_0|-2)+1.$$
From the definition of $\chi$, it is apparent that $2^{d-2} | \,
\chi(c).$ Thus, we indeed must have $$\chi(c)=\chi(c_0) \geq 2^{d-2}(|C_0|-1).$$
Suppose for some $p-1 \geq 0$ we have defined $c_{p-1}, A_{p-1}, B_{p-1}, C_{p-1},$ and $D_{p-1}$ and the above conditions hold when $m=p-1$. We shall assume that there is some vertex in $C_{p-1}$
which is not dominated by $c_{p-1},$ for otherwise, by conditions 6, 7 and 8, $c$ is solvable and we are done. Thus $|C_{p-1}| \geq 1.$
But suppose $|C_{p-1}| = 1.$ Call this single vertex $v.$ Since it is non-dominated, it is adjacent to only uncovered vertices. These vertices cannot be in $C_{p-1}$ for $|C_{p-1}|=1,$ and they are not in $A_{p-1},$ because every vertex in $A_{p-1}$ is covered by property 1. So every vertex adjacent to $v$ is in $D_{p-1}.$ Invoke property 5 to choose a $w \in G$ for which $d(w, D_{p-1}) = d.$ Any path from $w$ to $v$ passes through one of the vertices in $D_{p-1}$ which is adjacent to $v,$ and is thus of length at least $d+1,$ so
$d(w, v) \geq d+1,$ contradicting the assumption that $G$ has diameter $d.$ We have now shown that, if $C_{p-1}$ has a non-dominated vertex, then $|C_{p-1}| \geq 2.$ In this case, we will have $\chi(c_{p-1}) \geq 2^{d-2},$ ensuring the existence of some clump of size $2^{d-2},$ and thus that $B_{p-1}$ is non-empty. Therefore, we will always implicitly assume that $B_{p-1} \not= \emptyset$. \newline
\textbf{Case 1:} $d(B_{p-1}, C_{p-1}) \leq d-2$
In this case, we choose $v' \in B_{p-1}$ and $w' \in C_{p-1}$ for which $d(v',w') \leq d-2$ and move $2^{d(v',w')}$ pebbles from $v'$ to $w',$ leaving one pebble on $w'$ and at least one on $v'.$ We let $c_p$ be the configuration of pebbles resulting from this move. Let
$C_p = C_{p-1} \setminus \{w'\}.$ Thus $|C_p| = |C_{p-1}| - 1 \leq
|C_0|-(p-1)-1$ and we see that condition 3 holds when $m=p.$ Furthermore, we have used at most one clump of $2^{d-2}$ pebbles so
$$\chi(c_p) \geq \chi(c_{p-1}) - 2^{d-2} \geq 2^{d-2}(|C_{p-1}|-1)
-2^{d-2} = 2^{d-2}(|C_p|-1)$$ and therefore condition 2 holds for $p.$ Also, we let $A_p = A_{p-1} \cup \{w'\},$ let $C_p=C_{p-1} \setminus \{w'\},$ and $D_p=D_{p-1}$ (now, clearly condition 6 holds.) We again let $B_p = \{ v \in G \ : \ c_p(v) \geq 2^{d-2}+1 \},$ which simply means that we have possibly removed $v'$ from $B_{p-1}$ if $v'$ now has fewer than $2^{d-2}+1$ pebbles. Thus $B_{p} \subseteq B_{p-1},$ and now 1, 4, 5, 7, and 8 are all easily seen to hold for $m=p.$ \newline
\textbf{Case 2:} $d(B_{p-1}, C_{p-1}) \geq d-1.$
If every vertex in $C_{p-1}$ is dominated by $A_{p-1},$ we are done. Otherwise, let $w'$ be some non-dominated vertex in $C_{p-1}.$ Clearly,
$w'$ is at distance $d-1$ or $d$ from $B_{p-1}.$ Suppose $d(B_{p-1}, w') = d-1.$ Then $w'$ is adjacent to some (non-covered)
vertex $w''$ at distance $d-2$ from $B_{p-1}.$ By condition 1, every vertex of $G$ which is not covered by $c_{p-1}$ is in $C_{p-1} \cup D_{p-1}.$ But $d(B_{p-1}, C_{p-1}) \geq d-1$ and by 5, $d(B_{p-1}, D_{p-1}) = d$ so $w'' \notin C_{p-1} \cup D_{p-1}.$ This contradiction means that $d(w',B_{p-1}) \not= d-1$ and so $d(w',B_{p-1}) = d.$
Choose some vertex in $B_{p-1}$ and call it $v'.$ We know $d(v',w')=d$ so consider some path of length $d$ from $v'$ to $w'.$ Let $v^*$ be the unique point on this path for which $d(v^*,v') = d-2.$ Thus $v^* \notin C_{p-1} \cup D_{p-1}$ and so $v^* \in A_{p-1},$ and also $d(v^*, w') =2.$ Let $w''$ be some vertex which is adjacent to both $v^*$ and $w'$ so that $d(v',w'')=d-1.$ Then because $w''$ is uncovered (else $w'$ would be dominated), it must be in $C_{p-1}.$ This also means that $v^* \notin B_{p-1}$ by the assumption that $d(B_{p-1}, C_{p-1}) \geq d-1.$
We now move one clump of $2^{d-2}$ pebbles from $v'$ to $v^*,$ adding one pebble to $v^*,$ which now, by condition 1, has at least two pebbles. We then move two pebbles from $v^*$ and cover $w''$ with one pebble. We let $c_p$ be the configuration resulting from these moves. We let $D_p= D_{p-1} \cup \{ w' \}$ and we again let $B_p = \{ v \in G \ : \ c_p(v) \geq 2^{d-2}+1 \},$ which just means we have possibly removed $v'$ from $B_{p-1},$ so $B_p \subseteq B_{p-1}.$ If now $c_p(v^*) = 0,$ we let $A_p = (A_{p-1} \cup \{w''\}) \setminus \{v^*\}$ and $C_p = (C_{p-1} \cup \{v^*\}) \setminus \{w', w''\}.$ Otherwise, if $c_p(v^*) > 0$, let $A_p = A_{p-1} \cup \{w''\}$ and $C_p = C_{p-1} \setminus \{w', w''\}.$
This ensures that conditions 1 and 6 still hold for $m=p.$ Also, $|C_p| \leq
|C_{p-1}|-1 \leq |C_0|-(p-1)-1$ and so condition 3 holds for $m=p.$ Furthermore, we have used only one clump of $2^{d-2}$ pebbles, because $v^*
\notin B_{p-1}$ and so by using a pebble from $v^*,$ we could not have destroyed a clump. Thus $$\chi(c_p) = \chi(c_{p-1}) - 2^{d-2} \geq 2^{d-2}(|C_{p-1}|-1) -2^{d-2} \geq 2^{d-2}(|C_p|-1) $$ and therefore condition 2 holds for $p.$ Condition 5 also still holds for $m=p$ because $B_p \subseteq B_{p-1}$ and because we have added only the vertex $w'$ to $D_{p-1}$ and $d(B_{p-1}, w') =d,$ so $d(B_{p-1}, D_p) = d.$ To see condition 7 is still true, note that to get $D_p$ we have only added $w'$ to $D_{p-1},$ and certainly, $w'$ is adjacent to $w'',$ which is covered by $c_p,$ so $w'$ is dominated by $c_p.$ Also, the only previously covered vertex of $G$ which is now uncovered is (possibly) $v^*$ but $d(v^*, B_{p-1} )= d-2,$ and so $v^*$ is not adjacent to any vertex in $D_{p-1}$ for, by 5, $d(B_{p-1}, D_{p-1}) = d.$ Thus, by possibly uncovering $v^*,$ we did not cause any vertex in $D_{p-1}$ to become undominated, so 7 still holds for $m=p$. Finally, the fact that conditions 4 and 8 still hold for $m=p$ is easily seen. \newline
The algorithm continues as long as there is some non-dominated vertex in $C_p.$ By condition 3, it must terminate after at most
$|C_0|$ steps, with $ |C_k| = 0$ for some $k \leq |C_0|.$ The configuration $c_k$ clearly dominates every vertex of $G, $ and by property 8, $c_k$ is reachable from $c$ by pebbling moves, so $c$ is solvable. \end{proof}
For $d\geq 3,$ Figure~\ref{badgraph} shows a graph $G$ which is an example of a graph of diameter $d$ with $n = 2m+d-2$ vertices for which $\psi(G)$ comes close to the upper bound of $2^{d-2}(n-2)+1 = 2^{d-1}m + 2^{d-2}(d-2)+1.$ \begin{figure}
\caption{A graph with high DCP number. The box represents the fact that there is an edge between every pair of vertices inside, making the subgraph induced by $\{v_1, v_2, \ldots, v_m \}$ a complete graph on $m$ vertices. }
\label{badgraph}
\end{figure}
To dominate vertex $w_i,$ it is easy to see a pebble is needed on $w_i$ or $v_i.$ They each have distance not less than $d-1$ from $u_{d-1},$ and so it requires $2^{d-1}$ pebbles on $u_{d-1}$ to supply this pebble. This means at least $2^{d-1}m$ pebbles are needed on $u_{d-1}$ to dominate every $w_i,$ so $\psi(G) \geq 2^{d-1}m.$ Further, using the result of \cite{jonas} and \cite{stacking}, we can calculate $\lambda(G) = 3 \cdot 2^{d-1}m+ 2^d-1.$ Clearly, by making $m$ large we can make $\lambda(G) / \psi(G)$ arbitrarily close to 3. Also note that for the complete graph on 2 vertices, $\lambda(G)=3$ and $\psi(G)=1.$ We conjecture that it is not possible, however, for the ratio to be less than 3:
\begin{con} $\lambda(G) / \psi(G) \geq 3$ for all graphs $G$ with more than one vertex. \end{con}
\section{Subversion DCP}
There are several possible generalizations of domination cover pebbling which readily suggest themselves, and many of these are indeed interesting. For instance, we may ask what happens if we simply allow $n$ vertices to remain undominated, that is, if we say a graph has been solved if all but $n$ vertices are dominated by covered vertices. More interestingly, one may relax the requirement that a graph must be dominated by pebbled vertices in order to be solved to the condition that every vertex of a solved graph must have distance no more than $n$ from some pebbled vertex. On the other hand, we could tighten the condition that every vertex of a solved graph is either covered by pebbles or adjacent to a covered vertex by insisting that all vertices, covered or not, must be adjacent to some covered vertex.
However, these generalizations, while natural, may not be different enough from DCP to warrant extensive study. For instance, the problem of diameter bounds seems highly likely to be solvable in each case by an approach quite similar to that in Section 3. Furthermore, in each case, lower bounds which intuitively seem good can be derived from graphs quite similar to the one shown in Figure 3. Therefore, we introduce in this section a less obvious generalization of DCP which we feel makes the analogues to the questions answered in this paper more interesting than they are for the generalizations named above.
Given a graph $G$ and a subset $S \subseteq V(G)$, call the subgraph induced by the set of vertices which are neither in $S$ nor adjacent to a vertex of $S$ the $\emph{undominated subgraph}$ of $S$. Then we let the \emph{ $\omega$-subversion number} of $G,$ denoted $\Omega_{\omega}(G),$ be the minimum number of pebbles required such that regardless of their initial configuration it is always possible through a sequence of pebbling moves to cover some subset of $G$ that has an undominated subgraph in which there is no connected component of more than $\omega$ vertices.\footnote{This definition and the term ``subversion" are partly inspired by Cozzens and Wu \cite{shu-shih}. Specifically, our parameter $\omega$ matches with their use of $\omega$ for the order of the largest connected component of an undominated subgraph. } Notice that domination cover pebbling corresponds to the case when $\omega = 0$.
\section{Basic Results}
\begin{thm} For $\omega \geq 0$, $\Omega_{\omega}(K_n) = 1$. \end{thm} \begin{proof} When any pebble is placed on $K_n$, the entire graph is dominated. \end{proof} \begin{thm} For $ s_1 \geq s_2 \geq \cdots \geq s_r$, let $K_{s_1, s_2, \ldots, s_r}$ be the complete $r$-partite graph with $s_1,s_2,\ldots,s_r$ vertices in vertex classes $c_1, c_2, \ldots, c_r$
respectively. Then for $\omega \geq 1,$ $\Omega_{\omega}(K_{s_1, s_2, \ldots, s_r}) = 1$. \end{thm} \begin{proof} Place a pebble on any vertex in $c_i$. All the vertices in the other $c_i$'s are dominated. The other vertices in $c_1$ that are undominated are disjoint from each other. Thus, the result follows. \end{proof}
\begin{thm} For $\omega \geq 1$, $n \geq \omega + 3$, $\Omega_{\omega}(W_n) = n-2-\omega$, where $W_n$ denotes the wheel graph on $n$ vertices. \end{thm} \begin{proof} First, we will show that $\Omega_{\omega}(W_n) > n-3-\omega$. Place a single pebble on each of $n-3-\omega$ consecutive outer vertices so that all of the pebbled vertices form a path. This leaves a connected undominated set of size $\omega + 1$. Hence, $\Omega_{\omega}(W_n) > n-3-\omega$. Now, suppose that we place $n-2-\omega$ pebbles on $W_n$. If any vertices have a pair of pebbles on them, the entire graph can be dominated by moving a single pebble to the hub vertex. Hence, each vertex can contain only one pebble. Since every outer vertex is of degree $3$, if any vertex is undominated, at least $3$ vertices must be dominated but unpebbled. Hence, in order to obtain an undominated set of size $\omega+1$, there must be $\omega+4$ vertices that are unpebbled. By the pigeonhole principle, we obtain a contradiction because there are not enough vertices for this constraint to hold. Thus, for $\omega \geq 1$, $n \geq \omega + 3$, $\Omega_{\omega}(W_n) = n-2-\omega$. \end{proof} \section{Graphs of Diameter 2 and 3}
\begin{thm} Let $G$ be a graph of diameter two with $n$ vertices. For $\omega \geq 1$, $\Omega_{\omega}(G) \leq n - 1 - \omega$.
\end{thm} \begin{proof} To show that the bound is sharp, consider the graph $H_n$, defined to be a star graph of order $n$ with $\omega$ additional edges added to make the graph induced by one subset of $\omega+1$ outer vertices connected.
\begin{figure}
\caption{An example of the construction for $n = 9$, $\omega = 1$.}
\label{ex2}
\end{figure}
If we place a single pebble on each of the $n-2-\omega$ leaves of the star that are not connected to any other outer vertices, the remaining set of undominated vertices is connected and of size $\omega+1$. Hence, $\Omega_{\omega}(H_n) > n - 2 - \omega$.
Now, let $G$ be a graph of diameter two with $n$ vertices. Suppose there is an arbitrary configuration of pebbles $c(G)$ that contains exactly $n-1-\omega$ pebbles. We now show not only that this configuration can be solved to eliminate undominated connected components of order greater than $\omega,$ but can in fact be solved such that only at most $\omega$ vertices in total are left undominated.
Much as we did in the proof of Theorem \ref{dia2}, we let $T_1$ be the set of vertices $v \in G$ such that $c(v)
> 1$, let $T_2$ be the set vertices $w \in G$ such that $c(w)= 0$ and $w$ is adjacent to some vertex of $T_1,$ and let $T_3$ be the rest of the vertices, the ones that are neither in $T_1$ nor adjacent to a vertex of $T_1$. If $|T_3| \leq \omega$, we are done, because there are no more than $\omega$ undominated vertices and thus the largest undominated component has size at most $\omega.$ Otherwise, eliminate $\omega$ vertices in $T_2$ from the graph, and consider the induced subgraph $G'$ and the induced configuration $c'$. We know $G'$ has order $n' = n - \omega$ and $c'$ still has size at least $n - 1-\omega = n' -1$. Finally, let $T_1'= T_1,$ $T_2'=T_2$ and $T_3' =T_3 \cap V(G')$. The new graph $G'$ may no longer have diameter two, which prevents us from directly applying Theorem \ref{dia2}. Nevertheless, we notice that in $G',$ every vertex in $T_2'$ is still adjacent to a vertex in $T_1',$ and every vertex in $T_3'$ is still adjacent to one in $T_2'.$ Also, since in $G$ we know $d(T_1, T_3)=2,$ it follows that no path of length one or two between a vertex in $T_1$ and another vertex of $G$ can pass through $T_3,$ unless this vertex is the other endpoint. In particular, since the diameter of $G$ is 2, this implies that the shortest path between a vertex in $T_1$ and another vertex of $G$ cannot pass through a vertex of $T_3$ as an intermediate vertex, and so the length of the shortest path between a vertex in $T_1$ and another vertex in $G$ will be unaffected by removing a subset of $T_3$. This shows that in $G',$ if $s \in T_1'$ and $v \in G'$ then $d(s, v) \leq 2$.
We now note that since we have the right number of pebbles in $c'$ (at least $n' - 1$) we can apply the proof of Theorem \ref{dia2}. Following the proof, we see that we will have $S_1=T_1'$, $S_2=T_2'$ and $S_3=T_3'.$ Henceforth, the proof never uses the fact that two vertices of the graph have distance at most two from one another except when at least one of the vertices is in $S_1.$ Thus, the algorithm detailed in the proof can be applied \emph{mutatis mutandis} to $G'$, after which $G'$ is dominated by covered vertices. The same sequence of pebbling moves, if performed on $G,$ leaves all vertices except possibly the $\omega$ that were eliminated to get $G'$ dominated by covered vertices, thus solving $G$ as desired.
\end{proof}
In general, however, we believe that determining good diameter bounds for $\Omega_w$ will be harder than it is for $\psi.$ It is not even clear to the authors how to construct graphs which establish good lower bounds for large diameters. However, we conclude this section by conjecturing an analogous result for graphs of diameter $3$, along with a valid lower-bound construction for this conjecture.
\begin{con} Let $G$ be a graph of diameter 3 with $n$ vertices. For $\omega \geq 1$ , $\Omega_\omega(G) \leq \lfloor \frac{3}{2}(n - 2 - \omega) + 1 \rfloor$. \end{con}
To see that this result, if true, would give a sharp bound, we exhibit a graph $G$ on $n \geq \omega+3$ vertices such that $\Omega_\omega(G) > \lfloor \frac{3}{2}(n - 2 - \omega) \rfloor$. Take a $K_{\omega + 1}$ and attach each of its vertices to some other vertex $v$. Connect $v$ to each vertex of a $K_{\lceil \frac{n-\omega - 2}{2} \rceil}$, call it $H$. Connect each of the remaining $\lfloor \frac{n-\omega - 2}{2} \rfloor$ vertices to a vertex of $H$, so that each vertex in $H$ has at most one such vertex adjacent to it. Now, place three pebbles on each of the ``tendril" vertices attached to $H$, and if there is one vertex in $H$ without a tendril, place one pebble on it. This is a total of $3 \lfloor \frac{n-\omega - 2}{2} \rfloor$ ($+ 1$ if $n-\omega-2$ is odd) pebbles in this configuration, which is equivalent to $\lfloor \frac{3}{2}(n - 2 - \omega) \rfloor$. Since it is clearly not possible to dominate the vertices in the $K_{\omega +1}$, the graph still has an undominated component of order $\omega + 1$. Thus, $\Omega_\omega(G) > \lfloor \frac{3}{2}(n - 2 - \omega) \rfloor$.
\end{document} |
\begin{document}
\title{Unscrambling the Quantum Omelette}
\author{Karl Svozil} \affiliation{Institute for Theoretical Physics, Vienna
University of Technology, Wiedner Hauptstra\ss e 8-10/136, A-1040
Vienna, Austria} \email{svozil@tuwien.ac.at} \homepage[]{http://tph.tuwien.ac.at/~svozil}
\pacs{03.65.Ta, 03.65.Ud} \keywords{quantum measurement theory, mixed state, quantum probability}
\begin{abstract} Based on recent theorems about quantum value-indefiniteness it is conjectured that many issues of ``Born's quantum mechanics'' can be overcome by supposing that only a single pure state exists; and that the quantum evolution permutes this state. \end{abstract}
\maketitle
\section{Ingredients}
The following rather ``iconoclastic'' recasting of quantum mechanics applies to the quantum formalism as outlined by von Neumann \cite{v-neumann-49}. It will most likely survive this theory because the definitions, conventions and results presented apply to a reversible (indeed, bijective) state evolution, which amounts to permutations of elements in some state space. The title is taken from a passage of Jaynes \cite{jaynes-90}, presenting the current quantum mechanical formalism as {\em ``not purely epistemological; it is a peculiar mixture describing in part realities of Nature, in part incomplete human information about Nature -- all scrambled up by Heisenberg and Bohr into an omelette that nobody has seen how to unscramble.''}
What might be the ingredients of such a quantum omelette? First and foremost, we need to keep in mind that we are dealing with {\em intrinsic self-perception:} no observer has a ``direct, detached, objective, extrinsic'' viewpoint; all observers are ``embedded'' in the system they observe (``Cartesian prison'') \cite{bos1,toffoli:79,svozil-94}.
Second, all observations are based on {\em detector clicks}. Based on these clicks, and through {\em projections and conventions} of our mind we reconstruct what we consider the physical universe. Any inductive (re-)construction of a representation of a universe entirely from ``physical signals'' and, in particular, from detector clicks, is a subtle epistemic and physical task \cite{sum-3,wheeler-89} involving explicit and implicit conventions and assumptions. As we do not possess any direct access to the system other than these clicks we have to be careful in ascribing physical properties and existence to anything \cite{stace1}. Indeed, it must be expected that we are deceived by our preconceptions, implicit conventions, and subjective expectations and projections. Jaynes called this the ``Mind Projection Fallacy'' \cite{jaynes-89,jaynes-90}, pointing out that {\em ``we are all under an ego-driven temptation to project our private thoughts out onto the real world, by supposing that the creations of one's own imagination are real properties of Nature, or that one's own ignorance signifies some kind of indecision on the part of Nature.''} I believe that this ``over-interpretation of empirical data,'' in particular, of detector clicks, is at the heart of many misconceptions about quantized systems.
Let us, as a starter, mention some quantum examples of the Mind Projection Fallacy. First, consider the inclinations \cite{born-26-1} yielding claims \cite{zeil-05_nature_ofQuantum} of absolute, irreducible indeterminism and randomness, demanding the ``{\it ex nihilo} emergence of single bits (of information).'' In this orthodox line of thought, the apparent lack of prediction and control is not merely ``means-relative'' \cite{Myrvold2011237} but ``absolutely irreducible.'' In particular, the possibility of mere epistemic ignorance, originating from the limited capacities of intrinsic observers, resulting in ``pragmatic'' propositions that are true ``for all practical purposes'' (FAPP) \cite{bell-a} but strictly false, is denied.
Rigorously speaking, any belief in (in-)determinism is {\em provably unprovable} because, by reduction to recursion theoretic unknowables (e.g., the halting problem or the rule inference problem \cite{go-67,blum75blum,angluin:83,ad-91,li:92}), randomness as well as determinism turn out to be undecidable. That is, one may still be ``inclined to believe in (in-)determinism'' \cite{born-26-1}, and this belief might serve as a good, pragmatic working hypothesis for various tasks; alas, strictly speaking, any such ``evidence'' is no more compelling than, say, the belief in Santa Claus.
An algorithmic proof can be sketched as follows: For the sake of an argument against provable indeterminism, suppose Bob presents Alice a black box, thereby wrongly claiming that the box contains an oracle for indeterminism, or even absolute randomness. Alice's challenge is to ``verify'' that this is correct. As it turns out, Alice's verification task is impossible if she is bound by intrinsic algorithmic means, because every time Alice has made up her mind that no algorithm from a particular finite set of algorithms is generating the output of the box, by diagonalization Bob can construct a ``faker box algorithm'' which yields a different output than Alice's finite set of algorithms; thereby giving Alice the wrong illusion of randomness. With finite physical means the limit of ``all (i.e., a countable infinity of) algorithms'' is impossible to attain. But even for a finite number of algorithms, their output behavior is FAPP impossible to predict, since the halting time of a program of fixed length is of the order of the Busy Beaver function of that length, and therefore grows faster than any computable function thereof \cite{chaitin-bb}.
On the other hand, for the sake of an argument against provable determinism, suppose Bob claims that the box behaves deterministically. In this case, Alice can be deceived as well; because whenever she claims to know such an algorithm, by diagonalization Bob can fabricate another ``faker algorithm'' which behaves exactly as Alice's algorithm until she mentions her claim, and subsequently behaves differently. In that way, Alice will never be able to prove determinism.
Of course, the obvious ``solution'' would be to allow Alice to ``screw open Bob's box'' and see whether contained in it there is any ``paper-and-pencil Turing type machinery;'' alas this is not allowed in the intrinsic epistemology.
Other fallacies involve so-called {\em ``experimental proofs of the Kochen-Specker (KS) theorem''} -- because ``how can you measure a [proof by] contradiction?'' \cite{clifton}; as well as {\em ``experimental proofs of contextuality''} -- what is actually measured are violations of Boole-Bell type inequalities {\em via} successive measurements of counterfactual, complementary observables that are not co-measurable \cite{cabello:210401}. Although contextuality might be {\em sufficient} to render any experimental records (even beyond quantum correlations \cite{svozil-2011-enough}), these experiments fall short of any strict test of the {\em necessity} of contextuality.
Still another fallacy is the assumption of the {\em physical (co-)existence of counterfactuals} (Specker's ``Infuturabilien'' referring to scholastic debates); that is, hypothetical observables that one could have, but did not measure; instead some different, complementary, observable has been measured. We shall come back to this issue later. Finally let me mention the fallacy of supposing that there is some {\em space-time theater} in which events occur; rather than the ``operationalization'' of space-time {\em via} events \cite{svozil-1996-time,Knuth-Bahreyni}.
\section{Ontological single pure state conjecture}
So, in view of these epistemic limitations and pitfalls, how might we ``unscramble'' the quantum omelette? In what follows, the KS and related theorems will be used as a guiding principle. But first, we need to clarify what constitutes a pure quantum state. \begin{definition}[State] Informally, we shall assume that a {\em pure state} is characterized by the {\em maximal information} encodable into a physical system. This can, for instance, be realized by a generalized beam splitter configuration \cite{rzbb} with an array of detectors; of which only one clicks, the others remain silent. Formally, a pure quantum state can be represented by a {\em two-valued measure} either (i) on
an {\em orthonormal basis}; or (ii) on the spectral decomposition of a {\em maximal operator}, from which all commuting orthogonal projectors corresponding to (i) can be functionally derived (they occur in the spectrum); or (iii) on a {\em context, subalgebra} or {\em block}; or (iv) on the constituents of
a {\em unitary transformation} ``encoding'' the basis states (i) by, say, arranging the coordinates of the basis as either rows or columns in a matrix representation, and singling out one of the basis elements to ``be true.'' \end{definition}
The (strong) KS theorem is usually proved by taking a finite subset of interconnected (the dimension of the vector space must be three or higher for interconnectivity) contexts (or any similar encoding thereof, such as maximal observables, orthogonal bases, or unitary operators), and by demonstrating that no two-valued measure (interpretable as classical truth assignment) exists on those structures of observables if non-contextuality is required -- meaning that the measure is independent of the context. In a classical (non-contextual) sense, ``somewhere'' in these finite constructions any attempt to overlay a two-valued measure -- that is, any enumeration of truth assignments regarding the propositions about outcomes of conceivable measurements --
must break down due to inconsistencies. This also occurs, at least for some members of an ensemble, in Boole-Bell-type configurations \cite{peres222}. Other weak forms of the KS theorem allow two-valued measures, alas they may be too scarce to, for instance, be able to separate all observables; and to allow a homeomorphic embedding into Boolean algebras.
A formalism defining partial frame functions, similar to the one developed in Ref. \cite{2012-incomput-proofsCJ,2013-KstLip} (instead of the ``holistic'' frame function defined everywhere by Pitowsky's {\em logical indeterminacy principle} \cite{pitowsky:218,hru-pit-2003}) can, in a particular sense, be considered an ``improved'' version of the KS theorem which certifies ``breakdown of (non-contextual) value definiteness'' for any observable $\vert \textsf{\textbf{b}} \rangle \langle \textsf{\textbf{b}} \vert$ (associated with the vector $\vert \textsf{\textbf{b}} \rangle$; from now on, the vector and its associated projector will be used synonymously), if the quantum is prepared in a particular state such that the observable $\vert \textsf{\textbf{c}}\rangle$, which must be non-orthogonal and non-collinear to $\vert \textsf{\textbf{b}} \rangle$, occurs with certainty. More formally, by considering some finite construction of interconnected contexts $\Gamma (C_1,C_2,\ldots ,C_i)$, $i<\infty$, it turns out that both possible value assignments $v( \vert \textsf{\textbf{b}} \rangle )= 0$ as well as $v( \vert \textsf{\textbf{b}} \rangle )= 1$ are inconsistent with the value assignment $v( \vert \textsf{\textbf{c}} \rangle )= 1$ for any non-orthogonal and non-collinear $\vert \textsf{\textbf{b}} \rangle$. While, for proof technical reasons, the Abbott-Calude-Conder-Svozil theorem (ACCS) \cite{2012-incomput-proofsCJ} restricted the angles to $\sqrt{{5/14}} \le \vert \langle \textsf{\textbf{c}} \mid \textsf{\textbf{b}}\rangle \vert \le {3/\sqrt{14}}$, these boundaries have been extended in a recent paper by Abbott, Calude, and the author \cite{2013-KstLip}.
In what follows we shall argue that, by explicitly excluding certain {\em star-shaped configurations of contexts} characterized by an arbitrary number of orthogonal bases with one common element (cf. Fig.~\ref{2012-psiqm-v2}), it is possible to extend the ACCS theorem to the remaining ``counterfactual observables.'' \begin{figure}\label{2012-psiqm-v2}
\end{figure}
For the sake of demonstration, consider a configuration of three vectors $ \vert \textsf{\textbf{a}} \rangle \perp \vert \textsf{\textbf{c}} \rangle \not\perp \vert \textsf{\textbf{b}} \rangle$, and a two-valued state $v(\vert \textsf{\textbf{c}} \rangle )=1$. Note that $\vert \textsf{\textbf{a}} \rangle$ lies on the plane (through the origin) orthogonal to $\vert \textsf{\textbf{c}} \rangle$, whereas $\vert \textsf{\textbf{b}} \rangle$ lies outside of this orthogonal plane. In terms of Greechie orthogonality diagrams \cite{greechie:71}, $ \vert \textsf{\textbf{a}} \rangle $ as well as $ \vert \textsf{\textbf{c}} \rangle $ are contained in a star-shaped configuration of contexts characterized by the rays perpendicular to some ``true'' $\vert \textsf{\textbf{c}} \rangle$ with $v( \vert \textsf{\textbf{c}} \rangle )=1$; whereas $\vert \textsf{\textbf{b}} \rangle$ lies outside of ``$\vert \textsf{\textbf{c}} \rangle$'s star.'' For any such observable corresponding to $\vert \textsf{\textbf{b}} \rangle$ there is no consistent non-contextual two-valued state assignment whatsoever.
That is, if $\vert \textsf{\textbf{a}} \rangle$ is orthogonal to $\vert \textsf{\textbf{c}} \rangle$ the value assignment $v(\vert \textsf{\textbf{a}} \rangle)=0$ follows from $v(\vert \textsf{\textbf{c}} \rangle)=1$; but this latter assignment is inconsistent with either $v(\vert \textsf{\textbf{b}} \rangle)=0$ or $v(\vert \textsf{\textbf{b}} \rangle)=1$ for all $\vert \textsf{\textbf{b}} \rangle$ non-orthogonal and non-collinear to $\vert \textsf{\textbf{c}} \rangle$. This is also a consequence of Pitowsky's logical indeterminacy principle, which, given $v(\vert \textsf{\textbf{c}} \rangle)=1$, does not allow any globally defined two-valued state $v$ which acquires the values $v(\vert \textsf{\textbf{b}} \rangle)=0$ or $v(\vert \textsf{\textbf{b}} \rangle)=1$.
For a configuration $ \vert \textsf{\textbf{a}} \rangle \not\perp \vert \textsf{\textbf{c}} \rangle \not\perp \vert \textsf{\textbf{b}} \rangle$, both $ \vert \textsf{\textbf{a}} \rangle $ as well as $ \vert \textsf{\textbf{b}} \rangle $ lie outside of ``$\vert \textsf{\textbf{c}} \rangle$'s star,'' and are thus value indefinite. On the other hand, if we assume $ \vert \textsf{\textbf{a}} \rangle \perp \vert \textsf{\textbf{c}} \rangle \perp \vert \textsf{\textbf{b}} \rangle$ -- that is, both $\vert \textsf{\textbf{a}} \rangle$ as well as $\vert \textsf{\textbf{b}} \rangle$ are orthogonal to $\vert \textsf{\textbf{c}} \rangle$ (and thus ``in $\vert \textsf{\textbf{c}} \rangle$'s star'') -- $v(\vert \textsf{\textbf{a}} \rangle)=v(\vert \textsf{\textbf{b}} \rangle)=0$, even if they are non-orthogonal. Hence, given $v(\vert \textsf{\textbf{c}} \rangle)=1$, relative to the KS assumptions, the only consistent assignments may be made ``inside $\vert \textsf{\textbf{c}} \rangle$'s star.'' ``Outside of $\vert \textsf{\textbf{c}} \rangle$'s star'' all ``observables'' are value indefinite (relative to the KS assumptions, including non-contextuality).
How can one utilize these findings? One immediate possibility is the construction of a {\em quantum random number generator} ``certified by quantum value indefiniteness:'' prepare $\vert \textsf{\textbf{c}} \rangle$, measure $\vert \textsf{\textbf{b}} \rangle \langle \textsf{\textbf{b}}\vert$ \cite{2012-incomput-proofsCJ}.
Another intuitive speculation based on the very limited value-definiteness allowed by the KS assumptions (including non-contextuality) suggests a foundational principle. While extensions \cite{2013-KstLip} of the logical indeterminacy principle and the ACCS theorem might never be able to go beyond value indefiniteness of all but a ``star-shaped'' configuration of contexts depicted in Fig.~\ref{2012-psiqm-v2}, I suggest to ``get rid'' of even star-shaped configurations by denying the physical co-existence of all but one context -- the one in which the quantum has been ``prepared'' -- prior to measurement. \begin{conjecture}[Ontological single pure state conjecture]
A quantized system is in a state corresponding to a {\em two-valued measure on a single definite context (orthonormal basis, block, maximal observable, unitary operator). } In terms of observables, this translates into {\em ``ontologically there does not exist any observable beyond the observables representing a single definite context.''} \end{conjecture}
The ontological single pure state conjecture claims that a single quantum state is a {\em complete} theoretical representation of a physical system. Thereby it {\em abandons omni-existence and omniscience:} it states that all other (even hypothetically and consistently ``value definite'' yet counterfactual) observables different from the observables associated with the unique state, and possibly ascribed to such a system, are not value definite at all.
One should not be ``tricked'' into believing that such value indefinite observables are ``measurable'' just because their alleged ``measurement'' yields outcomes; that is, clicks in detectors that one is inclined to identify with (pre-existing) values. These {\em outcomes cannot reflect any value definite property of the object prior to measurement} because, according to the single pure state conjecture, such a value definite property simply does not exist. Rather the detector clicks associated with the ``measurement'' might be a very complex consequence of {\em ``the complete disposition of the apparatus''} \cite{bell-66}, as well as of the object, combined. In contradistinction, orthodox quantum mechanics treats {\em all potentially conceivable} observables on an {\em equal footing.}
We shall also introduce two other concepts: a {\em phantom context,} and {\em context translation:} Any context that is not the single context/state (in which the system is prepared) is a {\em phantom context}. And any mismatch between the preparation and the measurement may result in the {\em translation} of the original information encoded in a quantum system into the answer requested, whereby noise is introduced by the many degrees of freedom of a suitable ``quasi-classical, quasi-chaotic'' measurement apparatus (for a concrete model, see, for instance, Ref. \cite{Everitt20102809}).
Note that, for this epistemic uncertainty, the resulting stochasticity alone cannot account for greater-than-classical (sometimes referred to as ``nonlocal'') correlations; rather these reside in the quantum feature of {\em entanglement}, allowing one to encode information across multiple quanta without defining the (sub-)states of the individual quanta \cite{zeil-99}. Thereby, the holistic nature of the quantum entanglement of multipartite systems ``creates'' violations of classical bounds on probabilities and expectations (see Refs.~\cite{toner-bacon-03,svozil-2004-brainteaser} for non-local classical simulations of quantum and even stronger-than-quantum correlations).
For the sake of demonstration of the ontological single pure state conjecture, consider the rule that, under the KS assumptions (including non-contextuality), for Specker's ``bug'' configuration (Pitowsky's ``cat's cradle'' graph) of contexts as depicted in Fig.~\ref{2012-psiqm-v2-f2}, if a classical system is prepared in a two-valued state $v(\vert \textsf{\textbf{c}} \rangle )=1$ on the context $C_1$ (i.e. the detector corresponding to observable $\vert \textsf{\textbf{c}} \rangle $ clicks), and with $v(\vert \textsf{\textbf{a}} \rangle )=v(\vert \textsf{\textbf{d}} \rangle )=0$ (i.e. the detectors corresponding to observables $\vert \textsf{\textbf{a}} \rangle $ and $\vert \textsf{\textbf{d}} \rangle $ do not click), then the set of rays $\Gamma (C_1,C_2,\ldots ,C_7)$ allows only for $v(\vert \textsf{\textbf{b}} \rangle )=0$; that is, a detector corresponding to observable $\vert \textsf{\textbf{b}} \rangle $ will not click. [A rather simple proof by contradiction (wrongly) assumes that $v(\vert \textsf{\textbf{c}} \rangle )=1$ as well as $v(\vert \textsf{\textbf{b}} \rangle )=1$ can coexist consistently, thereby leading to a complete contradiction, since in this case the value assignments of both link observables for $C_3/C_5$ as well as $C_4/C_5$ have to be 1, alas these link observables belong to the same block $C_5$.] That quantum mechanics contradicts this prediction ``if $v(\vert \textsf{\textbf{c}} \rangle )=1$ then $v(\vert \textsf{\textbf{b}} \rangle )=0$'' is an immediate consequence of the fact that, because $\vert \textsf{\textbf{c}} \rangle $ and $\vert \textsf{\textbf{b}} \rangle $ are not in the same block, $\vert \textsf{\textbf{c}} \rangle $ cannot be orthogonal to $\vert \textsf{\textbf{b}} \rangle $, and hence $\langle \textsf{\textbf{c}} \mid \textsf{\textbf{b}} \rangle \neq 0$, implying a non-vanishing probability $\vert \langle \textsf{\textbf{c}} \mid \textsf{\textbf{b}} \rangle \vert^2 > 0$. 
For a concrete though not unique parametrization of the ``bug'' configuration, see Fig.~4.2 in Ref.~\cite{svozil-tkadlec}, in which preparation of $\vert \textsf{\textbf{c}} \rangle \equiv (1/\sqrt{3})\left(\sqrt{2},1,0\right)$ and measurement of $\vert \textsf{\textbf{b}} \rangle \equiv (1/\sqrt{3})\left(\sqrt{2},-1,0\right)$ implies a probability of observing $\vert \textsf{\textbf{b}} \rangle $, given $\vert \textsf{\textbf{c}} \rangle $, of $\vert (1/\sqrt{3})\left(\sqrt{2},1,0\right) \cdot (1/\sqrt{3})\left(\sqrt{2},-1,0\right)\vert^2 = 1/9$ (and not zero, as predicted from classical non-contextuality).
\begin{figure}\label{2012-psiqm-v2-f2}
\end{figure}
However, since according to the single pure state conjecture only $C_1$ exists, any argument based on the simultaneous co-existence of the counterfactual phantom contexts $C_2$--$C_7$, and, in particular, the assumption of a property associated with the counterfactual observable $\vert \textsf{\textbf{b}} \rangle \langle \textsf{\textbf{b}} \vert $, is inadequate for quantized systems.
\section{Persistent issues}
\subsection{Do measurements exist?}
Everett \cite{everett} and Wigner \cite{wigner:mb} observed that, if a unitary (bijective, one-to-one, reversible, Laplacian-type deterministic) quantum evolution were universally valid, then any distinction or cut between the observer and the measurement apparatus on the one side, and the quantum ``object'' on the other side, is not absolute or ontic, but epistemic, means-relative, subjective and conventional.
For suppose that one has defined a cut or difference between some quantum and a ``quasi-classical'' measurement device; one could then, at least in principle and if the unitary quantum evolution is universally valid, ``draw a larger perimeter.'' This ``enlargement'' could contain the entire previous combination, {\em including} the quantum, the cut, and the measurement device. If the quantum laws are universally valid, such a quantized system should also undergo a unitary quantum evolution. And thus, if quantum mechanics is universally valid, and if it is governed by unitary, reversible, one-to-one evolution, how could irreversibility possibly ``emerge'' from reversibility? FAPP, due to the limitations of the experimenter's capacities, irreversibility may be means-relative; alas, strictly speaking, it decays into ``thin air.''
Because suppose (wrongly) a hypothetical many-to-one function $h(x)=h(y)$ for $x\neq y$ exists which would somehow `emerge' from injective functions. Any such function would have to originate from the domain of one-to-one functions such that, for all functions $f$ of this class, $x\neq y$ implies $f(x)\neq f(y)$ -- or, equivalently, the contrapositive statement (provable by comparison of truth tables) $f(x) = f(y)$ implies $x = y$, a clear contradiction with the assumption.
Indeed, by {\em Cayley's theorem} the {\em unitary transformations} on some Hilbert space ${\mathfrak H}$ form a particular permutation group consisting of those permutations preserving the inner product. This is a subgroup of the {\em symmetric group} of all permutations on ${\mathfrak H}$. So, strictly speaking, any quantum mechanical state evolution amounts to permuting the state, and therefore leaves no room for ``measurement.''
\subsection{Quantum jellification}
Alas, as Schr\"odinger pointed out, without measurement, the quantum physicists should be troubled that, due to the coherent superposition resulting from the co-existence of classically mutually exclusive alternatives, their {\em ``surroundings rapidly turning into a quagmire, a sort of a featureless jelly or plasma, all contours becoming blurred, we ourselves probably becoming jelly fish''} \cite{schroedinger-interpretation}.
The single pure state conjecture and the context translation principle would resolve this conundrum by maintaining that there is only one state ``perceived'' from many epistemic perspectives \cite{DallaChiara-epistemic}; some of them causing noise which FAPP appears irreducibly random to intrinsic observers. In that sense, the measurement conundrum, with all its variants -- Schr\"odinger's cat and jellyfish metaphors, as well as the Everett-Wigner critique -- can be ``FAPP-resolved by means-relativity.''
\subsection{Analogues in classical statistical mechanics}
Just as Newtonian physics and electromagnetism appear to be reversible, the quantum measurement conundrum is characterized by the reversibility of the unitary quantum evolution. In this respect, the (ir-)reversibility of quantum measurements bears some resemblance to statistical mechanics: take, for example, {\em Loschmidt's reversibility paradox} -- that, for large isolated systems with reversible laws of motion, one should never observe irreversibility, and thus a decrease in entropy; or {\em Zermelo's recurrence objection} -- that, as an isolated system will infinitely often approach its initial state, its entropy will infinitely often approach the initial entropy and thus cannot constantly increase; or the challenge posed by the {\em Loschmidt-Maxwell demon} \cite{maxwell-demon2}. And just as in statistical mechanics, irreversibility appears to be means-relative \cite{Myrvold2011237} and FAPP, yet cannot strictly be true. Also, the ontic determinism exposed here, accompanied by the epistemic uncertainty induced by context translation, results in the fact that, at least conceptually and on the most fundamental level, there need not be any probabilistic description.
\subsection{The epistemic or ontic (non-)existence of mixed states}
From a purely formal point of view, it is impossible to obtain a mixed state from a pure one. Because again, any unitary operation amounts to a mere basis transformation or permutation, and this cannot give rise to any increase in stochasticity or ``ignorance.'' Since the generation of ``ontologically mixed states'' from pure ones would require a many-to-one functional mapping, we conclude that, just as irreversible measurements, genuine ``ontological mixed states'' originating from pure states cannot exist. Therefore, any ontological mixed state has to be either carried through from previously existing mixed states (if they exist), or be FAPP perceived as means-relative. I would like to challenge anyone with doubts to come up with a concrete experiment that would ``produce'' a mixed state from a pure one by purely quantum mechanical ``unitary'' means.
\section{Summary}
In summary I hold these conjectures to be true: a quantum state characterized by the maximal information encoded into a physical system must formally be represented by some orthonormal basis and a two-valued measure thereon, or anything encoding it, such as a maximal operator. At any given moment, a quantized system is in a unique, single such state. All other contexts are phantom contexts, which have no meaning because they are non-operational at best, and in general misleading. Randomness does not come about {\it ex nihilo} but by {\em context translation}, whereby the many degrees of freedom of the measurement apparatus contribute to yield means-relative, FAPP random outcomes. Finally, also mixed states are means-relative and exist FAPP, but not strictly.
\begin{acknowledgments} This research has been partly supported by FP7-PEOPLE-2010-IRSES-269151-RANPHYS. This contribution was done in part during a visiting honorary appointment at the University of Auckland, New Zealand, as well as at the University of Cagliari, Sardinia, Italy. Discussions during a {\em LARSIM/QuPa workshop on physics and computation} at the {\it Institut Henri Poincar\'e}, Paris, on June 28-29, 2012, the {\it Biennial IQSA Conference Quantum Structures 2012} in Cagliari, Sardinia, on July 23-27, 2012, as well as the conference {\em New Directions in the Foundations of Physics 2013}, in Washington, D.C., on May 10-12, 2013, where previous versions of this paper have been presented, are gratefully acknowledged. I also gratefully acknowledge stimulating discussions with and comments by many peers; in particular, Alastair Abbott, Jeffrey Bub, Cristian S. Calude, William Demopoulos, Christopher Fuchs, and Constantine Tsinakis. \end{acknowledgments}
\begin{thebibliography}{45} \makeatletter \providecommand \@ifxundefined [1]{
\@ifx{#1\undefined} } \providecommand \@ifnum [1]{
\ifnum #1\expandafter \@firstoftwo
\else \expandafter \@secondoftwo
\fi } \providecommand \@ifx [1]{
\ifx #1\expandafter \@firstoftwo
\else \expandafter \@secondoftwo
\fi } \providecommand \natexlab [1]{#1} \providecommand \enquote [1]{``#1''} \providecommand \bibnamefont [1]{#1} \providecommand \bibfnamefont [1]{#1} \providecommand \citenamefont [1]{#1} \providecommand \href@noop [0]{\@secondoftwo} \providecommand \href [0]{\begingroup \@sanitize@url \@href} \providecommand \@href[1]{\@@startlink{#1}\@@href} \providecommand \@@href[1]{\endgroup#1\@@endlink} \providecommand \@sanitize@url [0]{\catcode `\\12\catcode `\$12\catcode
`\&12\catcode `\#12\catcode `\^12\catcode `\_12\catcode `\%12\relax} \providecommand \@@startlink[1]{} \providecommand \@@endlink[0]{} \providecommand \url [0]{\begingroup\@sanitize@url \@url } \providecommand \@url [1]{\endgroup\@href {#1}{\urlprefix }} \providecommand \urlprefix [0]{URL } \providecommand \Eprint [0]{\href } \providecommand \doibase [0]{http://dx.doi.org/} \providecommand \selectlanguage [0]{\@gobble} \providecommand \bibinfo [0]{\@secondoftwo} \providecommand \bibfield [0]{\@secondoftwo} \providecommand \translation [1]{[#1]} \providecommand \BibitemOpen [0]{} \providecommand \bibitemStop [0]{} \providecommand \bibitemNoStop [0]{.\EOS\space} \providecommand \EOS [0]{\spacefactor3000\relax} \providecommand \BibitemShut [1]{\csname bibitem#1\endcsname} \let\auto@bib@innerbib\@empty
\bibitem [{\citenamefont {{von Neumann}}(1932)}]{v-neumann-49}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {John}\ \bibnamefont
{{von Neumann}}},\ }\href@noop {} {\emph {\bibinfo {title} {{M}athematische
{G}rundlagen der {Q}uantenmechanik}}}\ (\bibinfo {publisher} {Springer},\
\bibinfo {address} {Berlin},\ \bibinfo {year} {1932})\ \bibinfo {note}
{{E}nglish translation in Ref.~\cite{v-neumann-55}}\BibitemShut {NoStop} \bibitem [{\citenamefont {Jaynes}(1990)}]{jaynes-90}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Edwin~Thompson}\
\bibnamefont {Jaynes}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Probability in quantum theory},}\ }in\ \href
{http://bayes.wustl.edu/etj/articles/prob.in.qm.pdf} {\emph {\bibinfo
{booktitle} {Complexity, Entropy, and the Physics of Information: Proceedings
of the 1988 Workshop on Complexity, Entropy, and the Physics of Information,
held May - June, 1989, in Santa Fe, New Mexico}}},\ \bibinfo {editor} {edited
by\ \bibinfo {editor} {\bibfnamefont {Wojciech~Hubert}\ \bibnamefont
{Zurek}}}\ (\bibinfo {publisher} {Addison-Wesley},\ \bibinfo {address}
{Reading, MA},\ \bibinfo {year} {1990})\ pp.\ \bibinfo {pages}
{381--404}\BibitemShut {NoStop} \bibitem [{\citenamefont {Boskovich}(1966)}]{bos1}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Rudjer~Josif}\
\bibnamefont {Boskovich}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{De spacio et tempore, ut a nobis cognoscuntur},}\ }in\ \href
{http://www.archive.org/details/theoryofnaturalp00boscrich} {\emph {\bibinfo
{booktitle} {A Theory of Natural Philosophy}}},\ \bibinfo {editor} {edited
by\ \bibinfo {editor} {\bibfnamefont {J.~M.}\ \bibnamefont {Child}}}\
(\bibinfo {publisher} {Open Court (1922) and MIT Press},\ \bibinfo {address}
{Cambridge, MA},\ \bibinfo {year} {1966})\ pp.\ \bibinfo {pages}
{203--205}\BibitemShut {NoStop} \bibitem [{\citenamefont {Toffoli}(1978)}]{toffoli:79}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {T.}~\bibnamefont
{Toffoli}},\ }\bibfield {title} {\enquote {\bibinfo {title} {The role of the
observer in uniform systems},}\ }in\ \href@noop {} {\emph {\bibinfo
{booktitle} {Applied General Systems Research, Recent Developments and
Trends}}},\ \bibinfo {editor} {edited by\ \bibinfo {editor} {\bibfnamefont
{George~J.}\ \bibnamefont {Klir}}}\ (\bibinfo {publisher} {Plenum Press},\
\bibinfo {address} {New York, London},\ \bibinfo {year} {1978})\ pp.\
\bibinfo {pages} {395--400}\BibitemShut {NoStop} \bibitem [{\citenamefont {Svozil}(1994)}]{svozil-94}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Karl}\ \bibnamefont
{Svozil}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Extrinsic-intrinsic concept and complementarity},}\ }in\ \href {\doibase
10.1007/978-3-642-48647-0\_15} {\emph {\bibinfo {booktitle} {Inside versus
Outside}}},\ \bibinfo {series} {Springer Series in Synergetics},
Vol.~\bibinfo {volume} {63},\ \bibinfo {editor} {edited by\ \bibinfo {editor}
{\bibfnamefont {Harald}\ \bibnamefont {Atmanspacher}}\ and\ \bibinfo {editor}
{\bibfnamefont {Gerhard~J.}\ \bibnamefont {Dalenoort}}}\ (\bibinfo
{publisher} {Springer},\ \bibinfo {address} {Berlin Heidelberg},\ \bibinfo
{year} {1994})\ pp.\ \bibinfo {pages} {273--288}\BibitemShut {NoStop} \bibitem [{\citenamefont {Summhammer}(1989)}]{sum-3}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Johann}\ \bibnamefont
{Summhammer}},\ }\bibfield {title} {\enquote {\bibinfo {title} {The physical
quantities in the random data of neutron interferometry},}\ }in\ \href
{\doibase 10.1007/978-94-009-1175-8} {\emph {\bibinfo {booktitle} {The
Concept of Probability}}},\ \bibinfo {series} {Fundamental Theories of
Physics}, Vol.~\bibinfo {volume} {24},\ \bibinfo {editor} {edited by\
\bibinfo {editor} {\bibfnamefont {E.~I.}\ \bibnamefont {Bitsakis}}\ and\
\bibinfo {editor} {\bibfnamefont {C.~A.}\ \bibnamefont {Nicolaides}}}\
(\bibinfo {publisher} {Springer Netherlands},\ \bibinfo {address}
{Amsterdam},\ \bibinfo {year} {1989})\ pp.\ \bibinfo {pages}
{207--219}\BibitemShut {NoStop} \bibitem [{\citenamefont {Wheeler}(1990)}]{wheeler-89}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {John~Archibald}\
\bibnamefont {Wheeler}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Information, physics, quantum: The search for links},}\ }in\ \href
{http://jawarchive.files.wordpress.com/2012/03/informationquantumphysics.pdf}
{\emph {\bibinfo {booktitle} {Complexity, Entropy, and the Physics of
Information: Proceedings of the 1988 Workshop on Complexity, Entropy, and the
Physics of Information, held May - June, 1989, in Santa Fe, New Mexico}}},\
\bibinfo {editor} {edited by\ \bibinfo {editor} {\bibfnamefont
{Wojciech~Hubert}\ \bibnamefont {Zurek}}}\ (\bibinfo {publisher}
{Addison-Wesley},\ \bibinfo {address} {Reading, MA},\ \bibinfo {year}
{1990})\BibitemShut {NoStop} \bibitem [{\citenamefont {Stace}(1949)}]{stace1}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Walter~Terence}\
\bibnamefont {Stace}},\ }\bibfield {title} {\enquote {\bibinfo {title} {The
refutation of realism},}\ }in\ \href@noop {} {\emph {\bibinfo {booktitle}
{Readings in Philosophical Analysis}}},\ \bibinfo {editor} {edited by\
\bibinfo {editor} {\bibfnamefont {Herbert}\ \bibnamefont {Feigl}}\ and\
\bibinfo {editor} {\bibfnamefont {Wilfrid}\ \bibnamefont {Sellars}}}\
(\bibinfo {publisher} {Appleton-Century-Crofts},\ \bibinfo {address} {New
York},\ \bibinfo {year} {1949})\ pp.\ \bibinfo {pages} {364--372},\ \bibinfo
{note} {previously published in {\em Mind} {\bf 53}, 349-353
(1934)}\BibitemShut {NoStop} \bibitem [{\citenamefont {Jaynes}(1989)}]{jaynes-89}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Edwin~Thompson}\
\bibnamefont {Jaynes}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Clearing up mysteries - the original goal},}\ }in\ \href
{http://bayes.wustl.edu/etj/articles/cmystery.pdf} {\emph {\bibinfo
{booktitle} {Maximum-Entropy and Bayesian Methods: : Proceedings of the 8th
Maximum Entropy Workshop, held on August 1-5, 1988, in St. John's College,
Cambridge, England}}},\ \bibinfo {editor} {edited by\ \bibinfo {editor}
{\bibfnamefont {John}\ \bibnamefont {Skilling}}}\ (\bibinfo {publisher}
{Kluwer},\ \bibinfo {address} {Dordrecht},\ \bibinfo {year} {1989})\ pp.\
\bibinfo {pages} {1--28}\BibitemShut {NoStop} \bibitem [{\citenamefont {Born}(1926)}]{born-26-1}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Max}\ \bibnamefont
{Born}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Zur
{Q}uantenmechanik der {S}to{\ss}vorg{\"{a}}nge},}\ }\href {\doibase
10.1007/BF01397477} {\bibfield {journal} {\bibinfo {journal} {Zeitschrift
f{\"{u}}r Physik}\ }\textbf {\bibinfo {volume} {37}},\ \bibinfo {pages}
{863--867} (\bibinfo {year} {1926})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Zeilinger}(2005)}]{zeil-05_nature_ofQuantum}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Anton}\ \bibnamefont
{Zeilinger}},\ }\bibfield {title} {\enquote {\bibinfo {title} {The message
of the quantum},}\ }\href {\doibase 10.1038/438743a} {\bibfield {journal}
{\bibinfo {journal} {Nature}\ }\textbf {\bibinfo {volume} {438}},\ \bibinfo
{pages} {743} (\bibinfo {year} {2005})}\BibitemShut {NoStop} \bibitem [{Myr(2011)}]{Myrvold2011237}
\BibitemOpen
\bibfield {title} {\enquote {\bibinfo {title} {Statistical mechanics and
thermodynamics: A {M}axwellian view},}\ }\href {\doibase
10.1016/j.shpsb.2011.07.001} {\bibfield {journal} {\bibinfo {journal}
{Studies in History and Philosophy of Science Part B: Studies in History and
Philosophy of Modern Physics}\ }\textbf {\bibinfo {volume} {42}},\ \bibinfo
{pages} {237--243} (\bibinfo {year} {2011})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Bell}(1990)}]{bell-a}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {John~S.}\ \bibnamefont
{Bell}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Against
`measurement'},}\ }\href
{http://physicsworldarchive.iop.org/summary/pwa-xml/3/8/phwv3i8a26}
{\bibfield {journal} {\bibinfo {journal} {Physics World}\ }\textbf
{\bibinfo {volume} {3}},\ \bibinfo {pages} {33--41} (\bibinfo {year}
{1990})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Gold}(1967)}]{go-67}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Mark~E.}\ \bibnamefont
{Gold}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Language
identification in the limit},}\ }\href {\doibase
10.1016/S0019-9958(67)91165-5} {\bibfield {journal} {\bibinfo {journal}
{Information and Control}\ }\textbf {\bibinfo {volume} {10}},\ \bibinfo
{pages} {447--474} (\bibinfo {year} {1967})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Blum}\ and\ \citenamefont {Blum}(1975)}]{blum75blum}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Lenore}\ \bibnamefont
{Blum}}\ and\ \bibinfo {author} {\bibfnamefont {Manuel}\ \bibnamefont
{Blum}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Toward a
mathematical theory of inductive inference},}\ }\href {\doibase
10.1016/S0019-9958(75)90261-2} {\bibfield {journal} {\bibinfo {journal}
{Information and Control}\ }\textbf {\bibinfo {volume} {28}},\ \bibinfo
{pages} {125--155} (\bibinfo {year} {1975})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Angluin}\ and\ \citenamefont
{Smith}(1983)}]{angluin:83}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Dana}\ \bibnamefont
{Angluin}}\ and\ \bibinfo {author} {\bibfnamefont {Carl~H.}\ \bibnamefont
{Smith}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Inductive
inference: Theory and methods},}\ }\href {\doibase 10.1145/356914.356918}
{\bibfield {journal} {\bibinfo {journal} {ACM Computing Surveys}\ }\textbf
{\bibinfo {volume} {15}},\ \bibinfo {pages} {237--269} (\bibinfo {year}
{1983})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Adleman}\ and\ \citenamefont {Blum}(1991)}]{ad-91}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Leonard~M.}\
\bibnamefont {Adleman}}\ and\ \bibinfo {author} {\bibfnamefont
{M.}~\bibnamefont {Blum}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Inductive inference and unsolvability},}\ }\href {\doibase 10.2307/2275058}
{\bibfield {journal} {\bibinfo {journal} {Journal of Symbolic Logic}\
}\textbf {\bibinfo {volume} {56}},\ \bibinfo {pages} {891--900} (\bibinfo
{year} {1991})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Li}\ and\ \citenamefont
{Vit{\'{a}}nyi}(1992)}]{li:92}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont
{Li}}\ and\ \bibinfo {author} {\bibfnamefont {P.~M.~B.}\ \bibnamefont
{Vit{\'{a}}nyi}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Inductive
reasoning and {K}olmogorov complexity},}\ }\href {\doibase
10.1016/0022-0000(92)90026-F} {\bibfield {journal} {\bibinfo {journal}
{Journal of Computer and System Science}\ }\textbf {\bibinfo {volume} {44}},\
\bibinfo {pages} {343--384} (\bibinfo {year} {1992})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Chaitin}(1987)}]{chaitin-bb}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Gregory~J.}\
\bibnamefont {Chaitin}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Computing the busy beaver function},}\ }in\ \href@noop {} {\emph {\bibinfo
{booktitle} {Open Problems in Communication and Computation}}},\ \bibinfo
{editor} {edited by\ \bibinfo {editor} {\bibfnamefont {T.~M.}\ \bibnamefont
{Cover}}\ and\ \bibinfo {editor} {\bibfnamefont {B.}~\bibnamefont
{Gopinath}}}\ (\bibinfo {publisher} {Springer},\ \bibinfo {address} {New
York},\ \bibinfo {year} {1987})\ p.\ \bibinfo {pages} {108}\BibitemShut
{NoStop} \bibitem [{\citenamefont {Clifton}(1995)}]{clifton}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Robert~K.}\
\bibnamefont {Clifton}},\ }\href@noop {} {} (\bibinfo {year} {1995}),\
\bibinfo {note} {private communication}\BibitemShut {NoStop} \bibitem [{\citenamefont {Cabello}(2008)}]{cabello:210401}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Ad\'an}\ \bibnamefont
{Cabello}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Experimentally
testable state-independent quantum contextuality},}\ }\href {\doibase
10.1103/PhysRevLett.101.210401} {\bibfield {journal} {\bibinfo {journal}
{Physical Review Letters}\ }\textbf {\bibinfo {volume} {101}},\ \bibinfo
{eid} {210401} (\bibinfo {year} {2008})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Svozil}(2012)}]{svozil-2011-enough}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Karl}\ \bibnamefont
{Svozil}},\ }\bibfield {title} {\enquote {\bibinfo {title} {How much
contextuality?}}\ }\href {\doibase 10.1007/s11047-012-9318-9} {\bibfield
{journal} {\bibinfo {journal} {Natural Computing}\ }\textbf {\bibinfo
{volume} {11}},\ \bibinfo {pages} {261--265} (\bibinfo {year} {2012})},\
\Eprint {http://arxiv.org/abs/arXiv:1103.3980} {arXiv:1103.3980} \BibitemShut
{NoStop} \bibitem [{\citenamefont {Svozil}(1996)}]{svozil-1996-time}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Karl}\ \bibnamefont
{Svozil}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Time generated
by intrinsic observers},}\ }in\ \href
{http://tph.tuwien.ac.at/~svozil/publ/time1.htm} {\emph {\bibinfo {booktitle}
{Cybernetics and Systems '96. Proceedings of the 13th European Meeting on
Cybernetics and Systems Research}}},\ \bibinfo {editor} {edited by\ \bibinfo
{editor} {\bibfnamefont {Robert}\ \bibnamefont {Trappl}}}\ (\bibinfo
{publisher} {Austrian Society for Cybernetic Studies},\ \bibinfo {address}
{Vienna},\ \bibinfo {year} {1996})\ pp.\ \bibinfo {pages}
{162--166}\BibitemShut {NoStop} \bibitem [{\citenamefont {Knuth}\ and\ \citenamefont
{Bahreyni}(2012)}]{Knuth-Bahreyni}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {K.~H.}\ \bibnamefont
{Knuth}}\ and\ \bibinfo {author} {\bibfnamefont {N.}~\bibnamefont
{Bahreyni}},\ }\bibfield {title} {\enquote {\bibinfo {title} {{The Physics
of Events: A Potential Foundation for Emergent Space-Time}},}\ }\href
{http://arxiv.org/abs/1209.0881} {\bibfield {journal} {\bibinfo {journal}
{ArXiv e-prints}\ } (\bibinfo {year} {2012})},\ \Eprint
{http://arxiv.org/abs/arXiv:1209.0881} {arXiv:arXiv:1209.0881 [math-ph]}
\BibitemShut {NoStop} \bibitem [{\citenamefont {Reck}\ \emph {et~al.}(1994)\citenamefont {Reck},
\citenamefont {Zeilinger}, \citenamefont {Bernstein},\ and\ \citenamefont
{Bertani}}]{rzbb}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont
{Reck}}, \bibinfo {author} {\bibfnamefont {Anton}\ \bibnamefont {Zeilinger}},
\bibinfo {author} {\bibfnamefont {H.~J.}\ \bibnamefont {Bernstein}}, \ and\
\bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Bertani}},\ }\bibfield
{title} {\enquote {\bibinfo {title} {Experimental realization of any discrete
unitary operator},}\ }\href {\doibase 10.1103/PhysRevLett.73.58} {\bibfield
{journal} {\bibinfo {journal} {Physical Review Letters}\ }\textbf {\bibinfo
{volume} {73}},\ \bibinfo {pages} {58--61} (\bibinfo {year}
{1994})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Peres}(1978)}]{peres222}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Asher}\ \bibnamefont
{Peres}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Unperformed
experiments have no results},}\ }\href {\doibase 10.1119/1.11393} {\bibfield
{journal} {\bibinfo {journal} {American Journal of Physics}\ }\textbf
{\bibinfo {volume} {46}},\ \bibinfo {pages} {745--747} (\bibinfo {year}
{1978})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Abbott}\ \emph {et~al.}(2012)\citenamefont {Abbott},
\citenamefont {Calude}, \citenamefont {Conder},\ and\ \citenamefont
{Svozil}}]{2012-incomput-proofsCJ}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Alastair~A.}\
\bibnamefont {Abbott}}, \bibinfo {author} {\bibfnamefont {Cristian~S.}\
\bibnamefont {Calude}}, \bibinfo {author} {\bibfnamefont {Jonathan}\
\bibnamefont {Conder}}, \ and\ \bibinfo {author} {\bibfnamefont {Karl}\
\bibnamefont {Svozil}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Strong {K}ochen-{S}pecker theorem and incomputability of quantum
randomness},}\ }\href {\doibase 10.1103/PhysRevA.86.062109} {\bibfield
{journal} {\bibinfo {journal} {Physical Review A}\ }\textbf {\bibinfo
{volume} {86}},\ \bibinfo {pages} {062109} (\bibinfo {year} {2012})},\
\Eprint {http://arxiv.org/abs/arXiv:1207.2029} {arXiv:1207.2029} \BibitemShut
{NoStop} \bibitem [{\citenamefont {Abbott}\ \emph {et~al.}(2013)\citenamefont {Abbott},
\citenamefont {Calude},\ and\ \citenamefont {Svozil}}]{2013-KstLip}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Alastair~A.}\
\bibnamefont {Abbott}}, \bibinfo {author} {\bibfnamefont {Cristian~S.}\
\bibnamefont {Calude}}, \ and\ \bibinfo {author} {\bibfnamefont {Karl}\
\bibnamefont {Svozil}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Value indefiniteness is almost everywhere},}\ }\href
{http://arxiv.org/abs/1309.7188} {\ (\bibinfo {year} {2013})},\ \Eprint
{http://arxiv.org/abs/arXiv:1309.7188} {arXiv:1309.7188} \BibitemShut
{NoStop} \bibitem [{\citenamefont {Pitowsky}(1998)}]{pitowsky:218}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Itamar}\ \bibnamefont
{Pitowsky}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Infinite and
finite {G}leason's theorems and the logic of indeterminacy},}\ }\href
{\doibase 10.1063/1.532334} {\bibfield {journal} {\bibinfo {journal}
{Journal of Mathematical Physics}\ }\textbf {\bibinfo {volume} {39}},\
\bibinfo {pages} {218--228} (\bibinfo {year} {1998})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Hrushovski}\ and\ \citenamefont
{Pitowsky}(2004)}]{hru-pit-2003}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Ehud}\ \bibnamefont
{Hrushovski}}\ and\ \bibinfo {author} {\bibfnamefont {Itamar}\ \bibnamefont
{Pitowsky}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Generalizations of {K}ochen and {S}pecker's theorem and the effectiveness of
{G}leason's theorem},}\ }\href {\doibase 10.1016/j.shpsb.2003.10.002}
{\bibfield {journal} {\bibinfo {journal} {Studies in History and Philosophy
of Science Part B: Studies in History and Philosophy of Modern Physics}\
}\textbf {\bibinfo {volume} {35}},\ \bibinfo {pages} {177--194} (\bibinfo
{year} {2004})},\ \Eprint {http://arxiv.org/abs/quant-ph/0307139}
{quant-ph/0307139} \BibitemShut {NoStop} \bibitem [{\citenamefont {Greechie}(1971)}]{greechie:71}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {J.~R.}\ \bibnamefont
{Greechie}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Orthomodular
lattices admitting no states},}\ }\href {\doibase
10.1016/0097-3165(71)90015-X} {\bibfield {journal} {\bibinfo {journal}
{Journal of Combinatorial Theory}\ }\textbf {\bibinfo {volume} {10}},\
\bibinfo {pages} {119--132} (\bibinfo {year} {1971})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Bell}(1966)}]{bell-66}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {John~S.}\ \bibnamefont
{Bell}},\ }\bibfield {title} {\enquote {\bibinfo {title} {On the problem of
hidden variables in quantum mechanics},}\ }\href {\doibase
10.1103/RevModPhys.38.447} {\bibfield {journal} {\bibinfo {journal}
{Reviews of Modern Physics}\ }\textbf {\bibinfo {volume} {38}},\ \bibinfo
{pages} {447--452} (\bibinfo {year} {1966})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Everitt}\ \emph {et~al.}(2010)\citenamefont
{Everitt}, \citenamefont {Munro},\ and\ \citenamefont
{Spiller}}]{Everitt20102809}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {M.~J.}\ \bibnamefont
{Everitt}}, \bibinfo {author} {\bibfnamefont {W.~J.}\ \bibnamefont {Munro}},
\ and\ \bibinfo {author} {\bibfnamefont {T.~P.}\ \bibnamefont {Spiller}},\
}\bibfield {title} {\enquote {\bibinfo {title} {Quantum measurement with
chaotic apparatus},}\ }\href {\doibase 10.1016/j.physleta.2010.05.006}
{\bibfield {journal} {\bibinfo {journal} {Physics Letters A}\ }\textbf
{\bibinfo {volume} {374}},\ \bibinfo {pages} {2809--2815} (\bibinfo {year}
{2010})},\ \Eprint {http://arxiv.org/abs/arXiv:0905.1867} {arXiv:0905.1867}
\BibitemShut {NoStop} \bibitem [{\citenamefont {Zeilinger}(1999)}]{zeil-99}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Anton}\ \bibnamefont
{Zeilinger}},\ }\bibfield {title} {\enquote {\bibinfo {title} {A
foundational principle for quantum mechanics},}\ }\href {\doibase
10.1023/A:1018820410908} {\bibfield {journal} {\bibinfo {journal}
{Foundations of Physics}\ }\textbf {\bibinfo {volume} {29}},\ \bibinfo
{pages} {631--643} (\bibinfo {year} {1999})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Toner}\ and\ \citenamefont
{Bacon}(2003)}]{toner-bacon-03}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {B.~F.}\ \bibnamefont
{Toner}}\ and\ \bibinfo {author} {\bibfnamefont {D.}~\bibnamefont {Bacon}},\
}\bibfield {title} {\enquote {\bibinfo {title} {Communication cost of
simulating {B}ell correlations},}\ }\href {\doibase
10.1103/PhysRevLett.91.187904} {\bibfield {journal} {\bibinfo {journal}
{Physical Review Letters}\ }\textbf {\bibinfo {volume} {91}},\ \bibinfo
{pages} {187904} (\bibinfo {year} {2003})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Svozil}(2005)}]{svozil-2004-brainteaser}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Karl}\ \bibnamefont
{Svozil}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Communication
cost of breaking the {B}ell barrier},}\ }\href {\doibase
10.1103/PhysRevA.72.050302} {\bibfield {journal} {\bibinfo {journal}
{Physical Review A}\ }\textbf {\bibinfo {volume} {72}},\ \bibinfo {pages}
{050302(R)} (\bibinfo {year} {2005})},\ \Eprint
{http://arxiv.org/abs/physics/0510050} {physics/0510050} \BibitemShut
{NoStop} \bibitem [{\citenamefont {Svozil}\ and\ \citenamefont
{Tkadlec}(1996)}]{svozil-tkadlec}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Karl}\ \bibnamefont
{Svozil}}\ and\ \bibinfo {author} {\bibfnamefont {Josef}\ \bibnamefont
{Tkadlec}},\ }\bibfield {title} {\enquote {\bibinfo {title} {Greechie
diagrams, nonexistence of measures in quantum logics and {K}ochen--{S}pecker
type constructions},}\ }\href {\doibase 10.1063/1.531710} {\bibfield
{journal} {\bibinfo {journal} {Journal of Mathematical Physics}\ }\textbf
{\bibinfo {volume} {37}},\ \bibinfo {pages} {5380--5401} (\bibinfo {year}
{1996})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Specker}(1999)}]{Specker-priv}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Ernst}\ \bibnamefont
{Specker}},\ }\href@noop {} {} (\bibinfo {year} {1999}),\ \bibinfo {note}
{private communication to {K}. {S}vozil}\BibitemShut {NoStop} \bibitem [{\citenamefont {Kochen}\ and\ \citenamefont
{Specker}(1967)}]{kochen1}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Simon}\ \bibnamefont
{Kochen}}\ and\ \bibinfo {author} {\bibfnamefont {Ernst~P.}\ \bibnamefont
{Specker}},\ }\bibfield {title} {\enquote {\bibinfo {title} {The problem of
hidden variables in quantum mechanics},}\ }\href {\doibase
10.1512/iumj.1968.17.17004} {\bibfield {journal} {\bibinfo {journal}
{Journal of Mathematics and Mechanics (now Indiana University Mathematics
Journal)}\ }\textbf {\bibinfo {volume} {17}},\ \bibinfo {pages} {59--87}
(\bibinfo {year} {1967})}\BibitemShut {NoStop} \bibitem [{\citenamefont {{Everett III}}(1957)}]{everett}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Hugh}\ \bibnamefont
{{Everett III}}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{`{R}elative {S}tate' formulation of quantum mechanics},}\ }\href {\doibase
10.1103/RevModPhys.29.454} {\bibfield {journal} {\bibinfo {journal}
{Reviews of Modern Physics}\ }\textbf {\bibinfo {volume} {29}},\ \bibinfo
{pages} {454--462} (\bibinfo {year} {1957})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Wigner}(1961)}]{wigner:mb}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Eugene~P.}\
\bibnamefont {Wigner}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Remarks on the mind-body question},}\ }in\ \href
{http://www.phys.uu.nl/igg/jos/foundQM/wigner.pdf} {\emph {\bibinfo
{booktitle} {The Scientist Speculates}}},\ \bibinfo {editor} {edited by\
\bibinfo {editor} {\bibfnamefont {I.~J.}\ \bibnamefont {Good}}}\ (\bibinfo
{publisher} {Heinemann and Basic Books},\ \bibinfo {address} {London and New
York},\ \bibinfo {year} {1961})\ pp.\ \bibinfo {pages} {284--302}\BibitemShut
{NoStop} \bibitem [{\citenamefont
{Schr{\"{o}}dinger}(1995)}]{schroedinger-interpretation}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Erwin}\ \bibnamefont
{Schr{\"{o}}dinger}},\ }\href@noop {} {\emph {\bibinfo {title} {The
Interpretation of Quantum Mechanics. {D}ublin Seminars (1949-1955) and Other
Unpublished Essays}}}\ (\bibinfo {publisher} {Ox Bow Press},\ \bibinfo
{address} {Woodbridge, Connecticut},\ \bibinfo {year} {1995})\BibitemShut
{NoStop} \bibitem [{\citenamefont {Beltrametti}\ \emph {et~al.}(2012)\citenamefont
{Beltrametti}, \citenamefont {Chiara}, \citenamefont {Giuntini},
\citenamefont {Leporini},\ and\ \citenamefont
{Sergioli}}]{DallaChiara-epistemic}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Enrico}\ \bibnamefont
{Beltrametti}}, \bibinfo {author} {\bibfnamefont {Maria Luisa~Dalla}\
\bibnamefont {Chiara}}, \bibinfo {author} {\bibfnamefont {Roberto}\
\bibnamefont {Giuntini}}, \bibinfo {author} {\bibfnamefont {Roberto}\
\bibnamefont {Leporini}}, \ and\ \bibinfo {author} {\bibfnamefont {Giuseppe}\
\bibnamefont {Sergioli}},\ }\bibfield {title} {\enquote {\bibinfo {title}
{Epistemic quantum computational structures in a {H}ilbert-space
environment},}\ }\href {\doibase 10.3233/FI-2012-637} {\bibfield {journal}
{\bibinfo {journal} {Fundamenta Informaticae}\ }\textbf {\bibinfo {volume}
{115}},\ \bibinfo {pages} {1--14} (\bibinfo {year} {2012})}\BibitemShut
{NoStop} \bibitem [{\citenamefont {Leff}\ and\ \citenamefont
{Rex}(1990)}]{maxwell-demon2}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {Harvey~S.}\
\bibnamefont {Leff}}\ and\ \bibinfo {author} {\bibfnamefont {Andrew~F.}\
\bibnamefont {Rex}},\ }\href@noop {} {\emph {\bibinfo {title} {Maxwell's
Demon 2. Entropy, Classical and Quantum Information, Computing}}}\ (\bibinfo
{publisher} {Institute of Physics Publishing},\ \bibinfo {address} {Bristol
and Philadelphia},\ \bibinfo {year} {1990})\BibitemShut {NoStop} \bibitem [{\citenamefont {{von Neumann}}(1955)}]{v-neumann-55}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {John}\ \bibnamefont
{{von Neumann}}},\ }\href@noop {} {\emph {\bibinfo {title} {Mathematical
Foundations of Quantum Mechanics}}}\ (\bibinfo {publisher} {Princeton
University Press},\ \bibinfo {address} {Princeton, NJ},\ \bibinfo {year}
{1955})\BibitemShut {NoStop} \end{thebibliography}
\end{document} |
\begin{document}
\title{Quantum concepts in optical polarization}
\author{Aaron~Z.~Goldberg,\authormark{1,2} Pablo de la Hoz,\authormark{3,4} Gunnar~Bj\"{o}rk,\authormark{5} Andrei~B.~Klimov,\authormark{6}
Markus~Grassl,\authormark{7,2} Gerd~Leuchs,\authormark{2,8} and Luis~L.~S\'{a}nchez-Soto\authormark{2,4,*}}
\address{ \authormark{1} Department of Physics, University of Toronto, Toronto ON, M5S 1A7, Canada\\ \authormark{2} Max-Planck-Institut f\"ur die Physik des Lichts, 91058 Erlangen, Germany \\ \authormark{3} School of Physics and Astronomy, University of St Andrews, St Andrews KY16 9SS, UK \\ \authormark{4}Departamento de \'Optica, Facultad de F\'{\i}sica, Universidad Complutense, 28040~Madrid, Spain \\ \authormark{5} Department of Applied Physics, Royal Institute of Technology (KTH), SE106~91 Stockholm, Sweden\\ \authormark{6} Departamento de F\'{\i}sica, Universidad de Guadalajara, 44420~Guadalajara, Jalisco, Mexico\\ \authormark{7} International Centre for Theory of Quantum Technologies, University of Gda\'{n}sk, 80~308 Gda\'{n}sk, Poland \\ \authormark{8} Institute of Applied Physics, Russian Academy of Sciences, 603950 Nizhny Novgorod, Russia}
\email{\authormark{*}lsanchez@fis.ucm.es}
\begin{abstract} We comprehensively review the quantum theory of the polarization properties of light. In classical optics, these traits are characterized by the Stokes parameters, which can be geometrically interpreted using the Poincar\'e sphere. Remarkably, these Stokes parameters can also be applied to the quantum world, but then important differences emerge: now, because fluctuations in the number of photons are unavoidable, one is forced to work in the three-dimensional Poincar\'e space that can be regarded as a set of nested spheres. Additionally, higher-order moments of the Stokes variables might play a substantial role for quantum states, which is not the case for most classical Gaussian states. This brings about important differences between these two worlds that we review in detail. In particular, the classical degree of polarization produces unsatisfactory results in the quantum domain. We compare alternative quantum degrees and put forth that they order various states differently. Finally, intrinsically nonclassical states are explored and their potential applications in quantum technologies are discussed. \end{abstract}
\pagestyle{plain}
\section{Introduction}
Polarization, the vectorial aspect of light, is of paramount importance for a proper understanding of the physical world and continues to be the subject of much fundamental research today. Manipulating polarization is also crucial for applications: in many instances, it is a key measurement variable; whereas, in other cases, it is a source of noise whose control is imperative. The subject is so relevant that several monographs~\cite{Shurcliff:1962aa,Clarke:1971aa,Kliger:1990aa,Collett:1993aa,Azzam:1996aa,Huard:1997aa,Brosseau:1998aa,Pye:2001aa,Damask:2004aa,Collett:2005aa,Goldstein:2011aa,Ghatak:2012aa,Gil:2016aa} and review papers~\cite{McMaster:1961aa,Brosseau:2006aa,Gil:2007aa,Brosseau:2010aa,Brown:2011aa} are entirely devoted to it; the interested reader can find therein extensive information, including historical surveys of our understanding of polarized light.
Far from its source, any freely-propagating monochromatic electromagnetic field can be considered to a good approximation as a plane wave, with its electric field lying in a plane perpendicular to the direction of propagation. This simple observation is at the root of the notion of polarization: the endpoint of the electric field of such a wave traces in time a well-defined curve that is, in general, an ellipse.
The polarization ellipse is a simple amplitude description of polarized light, but it cannot be directly measured. In 1852, Stokes~\cite{Stokes:1852rt} pointed out that polarization can be specified by four intensity parameters~\cite{Wolf:1954aa,McMaster:1954ve,Walker:1954aa,Barakat:1987aa} that can be easily measured~\cite{Hecht:1970aa,Berry:1977aa,Boyer:1979aa,Schaefer:2007ly,Azzam:2016aa}. In addition, they lead in a natural way to the Poincar\'e sphere~\cite{Poincare:1889aa}, in which the polarization state is characterized by two angles directly related to the parameters of the polarization ellipse. This provides us with an elegant geometrical picture in which to analyze the effect of polarization transformations.
These arguments apply only to ideal plane waves. In practice, however, the fields with which one deals in optics exhibit some randomness. The chaotic nature of the light emission process requires then a statistical description. Actually, the rapid time fluctuations of the field cannot be discerned by any detector and one should consider instead the correlations of the field at different space-time points. From this viewpoint, polarization is closely related to coherence theory~\cite{Wolf:2007aa,Refregier:2007aa,Martinez-Herrero:2009aa}. In a naive picture, while a definite ellipse represents complete polarization, partial polarization arises by the rapid and random succession of different ellipses. In more quantitative terms, the Stokes parameters become random variables and one must deal with a probability distribution on the Poincar\'e sphere.
On the experimental side, polarization of light is a robust characteristic that can be efficiently manipulated using modest equipment without introducing more than marginal losses. It is thus not surprising that this is often the preferred degree of freedom for encoding information, as one can convince oneself by looking at some recent cutting-edge experiments, including quantum key distribution~\cite{Muller:1993aa}, quantum dense coding~\cite{Mattle:1996wd}, quantum teleportation~\cite{Bouwmeester:1997nx}, rotationally invariant states~\cite{Radmark:2009oq}, phase superresolution~\cite{Resch:2007kl}, and weak measurements~\cite{Dixon:2009hc}. This seems to call for a full theory of polarization in quantum optics.
The fact that the Stokes parameters can be immediately translated into the quantum realm was noticed in the seminal work of Fano~\cite{Fano:1949aa}, and discussions on the resulting Stokes operators can be found in old textbooks (see, e.g.,\cite{Jauch:1955aa,Akhiezer:1965aa}), including their connection with the spin of the photon~\cite{Falkoff:1951aa}. At this quantum level, no field state can have definite values of the three Stokes operators, for they do not commute and any sharp simultaneous measurement of these quantities is thus precluded. In physical terms, this means that there is no state with a well-defined polarization ellipse, much in the same way as one cannot assign a definite trajectory to a particle. The unavoidable fluctuations imply that the points on the Poincar\'e sphere lose their meaning. This establishes a first major difference with the classical description and is at the origin of many nonclassical features, the most tantalizing of which is perhaps polarization squeezing~\cite{Chirkin:1993dz,Korolkova:2002fu,Luis:2006ye,Mahler:2010fk,Chirkin:2015aa}.
On the other hand, classical polarization is often restricted to the mean values of the Stokes parameters. This is justified since most classical light has Gaussian statistics. However, non-Gaussian states are of utmost relevance in quantum optics, so that higher-order moments of the Stokes operators come into play. This opens the quantum world to polarization properties that have not been addressed in the classical domain. The classic degree of polarization cannot include these new phenomena, so it must be generalized to account for these higher-order polarization effects.
A number of results are dispersed in the literature (see~\cite{Luis:2016aa} for a recent review), but we think that a comprehensive account of polarization in quantum optics is missing. This is precisely the goal of this paper. To this end, and to be as self-contained as possible, we begin in Section~\ref{sec:classpol} with a short overview of the basic concepts of the classical theory. In Section~\ref{sec:quanpol} we extend those concepts into the quantum domain, introducing the basic tools of the SU(2) symmetry and underlining the differences with the classical case. Section~\ref{sec:phsp} exploits this symmetry to present the quantum formulation in phase space, which is nothing but the Poincar\'e sphere. This formulation is statistical in nature and offers logical connections between the quantum and classical descriptions, thus enabling a natural comparison between the two.
The advantages of encoding quantum information via polarization ultimately rely on the ability to create, manipulate, and measure polarization states. All of these tasks require a step-by-step verification in the experimental procedures; this is essentially the scope of polarization tomography, which is the subject of Section~\ref{sec:tomo}.
In Section~\ref{sec:desiderata}, we put forward criteria and desiderata for any measure of polarization. We examine several proposals and discuss their benefits and shortcomings, showing how they may be modified to avoid potential shortcomings. In particular, we apply the results obtained to various nonclassical states, whose description lies outside any classical framework.
Section~\ref{sec:complementarity} discusses the connection between quantum complementarity and the phenomenon of partial polarization. Actually, a proper quantum understanding of interference leads us to a new way of looking at optical polarization.
In Section~\ref{sec:unpol} we revisit the notion of unpolarized states. In classical optics, the field components of unpolarized light are modeled by zero-mean, uncorrelated, stationary Gaussian random processes~\cite{Barakat:1989fp}, which in geometrical terms means that they reduce to the origin of the Poincar\'e sphere. This is an incomplete characterization, for it overlooks higher-order moments~\cite{Singh:2013ab}. At the quantum level, the SU(2) invariance fixes once and for all the structure of the density matrix~\cite{Agarwal:1971zr,Prakash:1971fr,Karasev:1993aa,Soderholm:2001ay} and, as a result, all the moments of the Stokes variables. However, one can broaden the idea of unpolarized states up to a given order: a state that lacks polarization information up to that order will be called $M$th-order unpolarized. We explore these states and show how they motivate different levels of what is called hidden polarization~\cite{Klyshko:1992wd,Klyshko:1997yq,Klyshko:1998wd}. We also exhibit states with extremal higher-order fluctuations and their potential metrological applications. Finally, our conclusions are summarized in Section~\ref{sec:conclusions}.
\section{Polarized light in classical optics} \label{sec:classpol}
\subsection{Polarization ellipse}
To facilitate comparison with the quantum version, we first briefly survey the basic aspects of polarized light in classical optics. The topic is treated in any textbook~\cite{Born:1999yq} and in the more specific monographs already quoted~\cite{Shurcliff:1962aa,Clarke:1971aa,Kliger:1990aa,Collett:1993aa,Azzam:1996aa,Huard:1997aa,Brosseau:1998aa,Pye:2001aa,Damask:2004aa,Collett:2005aa,Goldstein:2011aa,Ghatak:2012aa,Gil:2016aa}.
Given a point in space, the state of polarization of a light beam that propagates in a fixed direction, say $z$, is given by the temporal evolution of the electric field of the wave, which lies in a plane perpendicular to the propagation direction. We shall be concerned with monochromatic plane waves of frequency $\omega$ and wave vector $\mathbf{k}$. Let $\mathbf{E}( z, t)$ be the electric field at a point $z$, at time $t$, of the wave; the components of the electric field are \begin{equation}
\label{eq:Efield}
E_{H} (z,t) = {E}_{0H}
\exp [ - i ( \omega t - kz + \delta_{H} ) ] \, ,
\qquad \qquad
E_{V} (z,t) ={E}_{0V}
\exp [ - i ( \omega t - kz + \delta_{V} ) ] \, . \end{equation} The subscripts $H$ (horizontal) and $V$ (vertical) refer to two Cartesian components transverse to $z$ and the coefficients $ {E}_{0H}$ and ${E}_{0V}$ denote the real amplitudes of the corresponding components with phases $\delta_{H}$ and $\delta_{V}$, respectively. Note that the measurable fields are given by the real parts of the complex expressions. It should also be remarked that the monochromatic plane wave used in the following discussions cannot be strictly realized in the experiment; the formalism, however, holds also for quasimonochromatic fields in the paraxial approximation, replacing $k$ and $\omega$ by their respective mean values $\bar{k}$ and $\bar{\omega}$. The theory cannot deal, though, with the polarization of multimode fields~\cite{Karassiov:2007gb}.
To obtain the curve that the tip of the electric field vector describes in time, we eliminate the time variable in Eq.~(\ref{eq:Efield}). After some direct calculations we get \begin{equation}
\label{eq:polelip}
\left ( \frac{E_{H}}{E_{0H}} \right )^{2} +
\left ( \frac{E_{V}}{E_{0V}} \right )^{2}
- 2 \left ( \frac{E_{H}}{E_{0H}} \right )
\left ( \frac{E_{V}}{E_{0V}} \right )
\cos \delta = \sin^{2} \delta \, , \end{equation} where $\delta = \delta_{V} - \delta_{H}$ is the relative phase between both oscillations and the real representation of the field is used. This is the equation of an ellipse, which degenerates into a straight line or a circle for some particular values of $\delta$. Although we have eliminated the temporal variable, the fields' components $E_{H}$ and $E_{V}$ continue to be time-space dependent, but for monochromatic radiation the amplitudes and phases are constant for all time. This means the polarization ellipse remains fixed as the polarized beam propagates in a linear medium.
In general, \eqref{eq:polelip} describes a rotated ellipse, with the semi-axes $a$ and $b$ ($a \ge b$). The angle of the semi-major axis, measured counter-clockwise from the positive horizontal axis, is the orientation angle $\psi$ ($0 \leq \psi \leq \pi$), as sketched in Fig.~\ref{fig:ellipse}. The degree to which the ellipse is oval is described by a shape parameter called ellipticity $\chi$ ($- \pi/4< \chi \le \pi/4$), defined as $\tan \chi = \mp b/a$, the sign distinguishing the two senses in which the ellipse may be described. These angles depend on the amplitude and relative phase: \begin{equation}
\tan (2 \psi) = \frac{2E_{0H}E_{0V}}{E_{0H}^2-E_{0V}^2} \, \cos \delta \, ,
\qquad \qquad
\sin (2 \chi ) = \frac{2E_{0H}E_{0V}}{E_{0H}^2+E_{0V}^2} \, \sin \delta \, .
\label{eq:nosval} \end{equation} The orientation angle $\psi$ is zero when $\delta$ is $\pi/2$ or $3\pi/2$: in these situations, \eqref{eq:polelip} describes an ellipse in its standard form. In terms of the amplitudes, the orientation is also zero if $E_{0H}$ ($E_{0V}$) is zero, and we have vertical (horizontal) linearly polarized light. For the extreme cases in which $b=0$ we have $\chi=0$ and the light is linearly polarized. In contrast, when $b=a$ we have $\chi=\pm \pi/4$ and the wave is circularly polarized.
\begin{figure}
\caption{ (Left) Polarization ellipse showing the orientation angle $\psi$ and the ellipticity $\chi$, which are functions of the semi-major and semi-minor axes, $a$ and $b$. (Right) Poincar\'e sphere and the parametrization of Stokes parameters for polarized light in the form of spherical coordinates.}
\label{fig:ellipse}
\end{figure}
\subsection{The polarization matrix and the Stokes parameters}
For our purposes in what follows, it will prove convenient to recast \eqref{eq:Efield} in the form \begin{equation}
\label{eq:Efield2}
E_{H} (z,t) = \mathcal{E}_{0} a_{H}
\exp [ - i ( \omega t - kz ) ] \, ,
\qquad \qquad
E_{V} (z,t) = \mathcal{E}_{0} \, a_{V}
\exp [ - i ( \omega t - kz ) ] \, , \end{equation} where we have absorbed the phases $\delta_{H}$ and $\delta_{V}$ in the dimensionless complex amplitudes $a_{H}$ and $a_{V}$, and we have used a common field amplitude $\mathcal{E}_{0}$ that can be identified as the \textit{electric field per photon} in the terminology of quantum optics~\cite{Scully:2012aa}.
The complete polarization information at any plane $z$ is thus conveyed by the amplitude \begin{equation}
\label{eq:Jones}
\mathbf{A}_{HV} =
\left(
\begin{array}{c}
a_{H} \\
a_{V}
\end{array}
\right ) \end{equation} which is usually called the Jones vector~\cite{Jones:1941aa}. The subscript $HV$ stresses the basis used to decompose the field amplitudes. Obviously, this vector can be expressed in any other polarization basis, which is obtained from the linear one $\{H, V\}$ by a unitary transformation. In particular, a convenient choice is the circularly polarized basis $\{ + , - \}$ such that \begin{equation}
{\mathbf{A}}_{\pm} = \frac{1}{\sqrt{2}} \left ( \begin{array}{cc} 1 & i \\ 1 & -i \end{array} \right )
{\mathbf{A}}_{HV} \, . \end{equation}
Since only bilinear quantities in the field amplitudes can be measured, it is advantageous to consider the so-called polarization (or coherence) matrix~\cite{Fano:1954aa,Wolf:1959aa,Parrent:1960aa,Barakat:1963aa,Barakat:1985aa} \begin{equation}
\matriz{J} _{\pm} = \mathbf{A}_{\pm} \otimes \mathbf{A}_{\pm}^{\dagger}
=
\left(
\begin{array}{cc}
a_{+}^{\ast} a_{+} & a_{+} a_{-}^{\ast} \\
a_{+}^{\ast} a_{-} & a_{-}^{\ast} a_{-}
\end{array}
\right ) \, , \end{equation} where $\dagger$ stands for the Hermitian conjugate and $\otimes$ represents the Kronecker product. The elements of the main diagonal of $\matriz{J}_{\pm}$ are real and nonnegative, for they are the intensities of the $+$ and $-$ polarization components (in units of $\mathcal{E}_{0}^{2}$). In consequence, its trace is equal to the average intensity of the wave. The nondiagonal elements are complex conjugate to each other and, thus, $\matriz{J}_{\pm}$ is a Hermitian matrix. By a direct application of the Cauchy-Schwarz inequality we can show that the determinant of $\matriz{J}_{\pm}$ is nonnegative. Actually, the nonnegativity of its eigenvalues constitutes a complete set of necessary and sufficient conditions for a Hermitian matrix $\matriz{J}_{\pm}$ to be a polarization matrix; i.e., to represent the state of polarization of a light field.
The polarization matrix can be expanded in a basis of the vector space of $2 \times 2$ complex matrices. A natural basis is the one constituted by the identity ($\sigma_{0}= \leavevmode\hbox{\small1\normalsize\kern-.33em1}$) plus the three Pauli matrices $\sigma_{i}$ (henceforth, the Latin indices run from 1 to 3, and Greek indices from 0 to 3). The Pauli matrices are Hermitian ${\sigma}_{i} = {\sigma}_i^{\dagger}$, trace orthogonal $\mathop{\mathrm{Tr}} \nolimits ( \sigma_{i} \, \sigma_{j} )= 2 \delta_{ij}$ and satisfy $\sigma_i^2= \leavevmode\hbox{\small1\normalsize\kern-.33em1}$. In addition, they are unitary and traceless. The corresponding expansion gives real coefficients~\cite{Fano:1954aa} \begin{equation}
S_{\mu} = \frac{1}{2} \mathop{\mathrm{Tr}} \nolimits ( \matriz{J}_{\pm} \, {\sigma}_{\mu} ) \, , \end{equation} which are known as the Stokes parameters. This can also be compactly expressed as
\begin{equation}
\label{eq:stokesalt}
S_{\mu} = \frac{1}{2} \mathbf{A}_{\pm}^{\dagger} \, \sigma_{\mu} \, \mathbf{A}_{\pm} \, . \end{equation} This relation can be inverted, so we can write the Stokes parameters in terms of the elements of the polarization matrix as \begin{eqnarray}
\label{eq:stokes}
& S_{0} = \frac{1}{2}
( a_{+}^{\ast} a_{+} + a_{-}^{\ast} a_{-} ) \, , & \nonumber\\
& & \\
& S_{1} =\frac{1}{2} ( a_{+}^{\ast} a_{-} + a_{+} a_{-}^{\ast}) \, ,
\qquad
S_{2} = \frac{i}{2} (a_{+} a_{-}^{\ast} - a_{+}^{\ast} a_{-} ) \, ,
\qquad
S_{3} = \frac{1}{2} (a_{+}^{\ast} a_{+} - a_{-}^{\ast} a_{-} ) \, . &
\nonumber \end{eqnarray}
A word of caution is in order here. Our definition (\ref{eq:stokes}) differs in two ways from the standard one in classical optics~\cite{Born:1999yq}. It contains an extra factor 1/2 and the parameters $S_{1}$ and $S_{3}$ are interchanged. Both modifications are unessential, as the overall structure remains invariant, but they smooth the way for a proper quantum definition.
The positivity of $\matriz{J}_{\pm}$ immediately implies that \begin{equation}
S_{0}^{2} \ge S_{1}^{2} + S_{2}^{2} + S_{3}^{2} \, ,
\label{eq:strel} \end{equation} and the equality holds when $\det \, \matriz{J}_{\pm} = 0$, which is exactly the case for monochromatic light. This suggests the introduction of the polarization state as a point on a spherical surface, called the Poincar\'e sphere~\cite{Poincare:1889aa}, with the coordinates $(S_{1}, S_{2}, S_{3})$. The position of the point on this sphere is characterized by the orientation and ellipticity angles $\psi$ ($0 \leq \psi \leq \pi$) and $\chi$ ($- \pi/4 \leq \chi \leq \pi/4$) such that (see Fig.~\ref{fig:ellipse}) \begin{equation}
\label{S1}
S_{1} = S_{0} \cos(2 \chi) \sin(2\psi), \qquad
S_{2} = S_{0} \cos(2 \chi) \cos (2\psi) \, , \qquad
S_{3} = S_{0} \sin(2 \chi) \, . \end{equation} The vector $ \mathbf{S} = (S_{1}, S_{2}, S_{3})^{\top}$ ($\top$ being the transpose) is known as the Stokes vector. With our choice of the circular basis, all linear polarization states lie on the equator of the Poincar\'e sphere, while the circular polarization states are at the north and south poles (circularly polarized to the right and the left, respectively). This agrees with the standard use~\cite{Jones:2016aa}. Elliptically polarized states are represented everywhere else on the surface of the sphere.
In practice, for quasimonochromatic light or fields whose components may fluctuate in time in a complicated manner, the amplitudes $a_{\pm}$ depend on time and one should treat them as random variables. One must then take time averages of the matrix elements of $\matriz{J}_{\pm}$, which turns out to be a $2 \times 2$ covariance matrix whose elements are the second-order moments of the field amplitudes. Under the assumption that they are stationary and ergodic, the time average can also be understood as an ensemble average over different realizations. In that case, one can also study the equivalent version of $\matriz{J}_{\pm}$ in the frequency domain, which is called the cross-spectral density matrix~\cite{Mandel:1995qy}.
To conclude, we mention that a generalization of the Stokes parameters of a random electromagnetic beam has been introduced by Ellis and Dogariu~\cite{Ellis:2004aa} in the space-time domain and has also been studied by Korotkova and Wolf~\cite{Korotkova:2005aa} in the space-frequency domain. Whereas the usual Stokes parameters depend on one spatial variable, one can naturally introduce two-point Stokes parameters~\cite{Tervo:2009aa}, which are defined in the space-frequency domain by the cross-spectral density matrix that characterizes the correlations at two points. The two-point Stokes parameters depend on two spatial variables and contain additional information about the coherence properties. Another relevant approach is the model of spatial-angular Stokes parameters~\cite{Luis:2005ab,Luis:2005aa}, which are introduced for generalized rays including spatial and angular dependence and allow their evolution during propagation to be considered.
\subsection{Polarization transformations} \label{subsec:poltrans}
As stressed before, the polarization of the field is determined by its Jones vector $\mathbf{A}$ (henceforth the circular basis is always assumed and, accordingly, we drop the corresponding subscript). Its Euclidean norm \begin{equation}
\mathfrak{I} = \Vert \mathbf{A} \Vert^{2} =
\mathbf{A^\dagger} \mathbf{A}
= \lvert a_{+} \rvert^{2} + \lvert a_{-} \rvert^{2} = 2 S_{0}
\label{eq:Nclas} \end{equation} is the intensity, measured in units of $\mathcal{E}_{0}^{2}$.
If we rewrite the Stokes parameters in the equivalent form \begin{eqnarray}
\label{eq:stokesalt2}
& S_{0} = \frac{1}{2}
( \lvert a_{+} \rvert^{2} + \lvert a_{-} \rvert^{2} ) \, , &
\nonumber\\
& & \\
& S_{1} = \frac{1}{2} ( \lvert a_{45^{\circ}} \rvert^{2}
- \lvert a_{135^{\circ}} \rvert^{2} ) \, ,
\qquad
S_{2} = \frac{1}{2} ( \lvert a_{H} \rvert^{2} - \lvert a_{V} \rvert^{2} ) \, ,
\qquad
S_{3} = \frac{1}{2} ( \lvert a_{+} \rvert^{2} - \lvert a_{-} \rvert^{2} ) \, , &
\nonumber \end{eqnarray} we can attach a clear operational meaning to them. Apart from the factor $1/2$, the parameter $S_{0}$ is proportional to the total intensity, while $S_{3}$ represents the excess in intensity between the right- and left-handed circularly polarized components. The parameter $S_{1}$ has a similar interpretation with respect to linearly polarized components at $45^{\circ}$ and $135^{\circ}$. Finally, the parameter $S_{2}$ is equal to the intensity excess of horizontal over the vertical polarized components.
The assessment of the Stokes parameters can be performed with a variety of methods~\cite{Chen:2020aa} that can be roughly divided into two groups. One relies on a measurement of the complex amplitudes $a_{+}$ and $a_{-}$ (or, equivalently, $a_{H}$ and $a_{V}$) with a coherent dual polarization receiver. The other relies on the measurement of intensities, which can be performed with different configurations, two of the most popular being sketched in Fig.~\ref{fig:polar}. The first intensity scheme is based on a rotating wave plate and a polarizer, while the second requires splitting the incoming field into three beams, so that each beam is analyzed, obtaining in this way intensities in the bases $+/-$, $H/V$, and $45^{\circ}/135^{\circ}$.
\begin{figure}
\caption{Typical Stokes polarimetric setups. (Left) Rotating wave plate polarimeter consisting of a rotating quarter wave plate, a linear polarizer and a photodetector. (Right) Division-of-amplitude polarimeter, which splits the beam into three separate beams. Each of these beams is analyzed to provide a Stokes parameter. The brown boxes are polarizing beam splitters.}
\label{fig:polar}
\end{figure}
Let us now consider linear transformations of the field, which are represented by complex $2 \times2$ matrices $\matriz{T}$, such that \begin{equation} \label{eq:transT}
\mathbf{A}^\prime = \matriz{T} \, \mathbf{A} \, . \end{equation} For energy conserving, lossless transformations, the intensity is preserved; i.e., $\mathbf{A}^{\prime \dagger} \mathbf{A}^{\prime} = \mathfrak{I}$. This describes phase plates, which are ubiquitous in polarimetry. Accordingly, the matrix $\matriz{T}$ is unitary and will then be denoted as $\matriz{U}$. In fact, we impose that $\matriz{U} \in$ SU(2); i.e., the group of $2 \times 2$ complex unitary matrices with unit determinant~\cite{Cornwell:1984aa}. We can write any $\matriz{U} \in $ SU(2) in terms of a unit vector $\pmb{\mathfrak{n}} $, in the direction of the rotation axis [specified by the spherical angles $(\Theta, \Phi)$], and a rotation angle $\vartheta$, \begin{eqnarray} \matriz{U} (\pmb{\mathfrak{n}}, \vartheta ) & = & \exp(-i \vartheta \; \pmb{\mathfrak{n}} \cdot \pmb{\sigma}/2) \nonumber \\ & = & \left( \begin{array}{cc} \cos (\vartheta /2) - i \sin (\vartheta /2) \cos \Theta & - i \sin (\vartheta /2) \sin \Theta \; e^{-i \Phi} \\
- i \sin (\vartheta /2) \sin \Theta \; e^{i \Phi} &
\cos (\vartheta /2) + i \sin (\vartheta/2) \cos \Theta \end{array}
\right) \, . \label{eq:Ut} \end{eqnarray}
When the complex amplitudes undergo the unitary transformation \eqref{eq:transT} the Stokes vector $\mathbf{S} \in \mathbb{R}^{3} $ also transforms linearly; that is, \begin{equation}
\label{eq:transR}
\mathbf{S}^\prime = \matriz{R} (\matriz{U}) \, \mathbf{S} \, , \end{equation} where the $3\times 3$ matrix $\matriz{R}(\matriz{U}) \in$ SO(3) is a rotation. Actually, one can check that~\cite{Cornwell:1984aa} \begin{equation}
\matriz{U} \, (\mathbf{S} \cdot \pmb{\sigma} ) \, \matriz{U}^\dagger =
\left ( \matriz{R} (\matriz{U}) \; \mathbf{S} \right ) \cdot \pmb{\sigma} \, ,
\label{eq:doucov} \end{equation} where $\pmb{\sigma} = (\sigma_{1}, \sigma_{2}, \sigma_{3})^{\top}$. More explicitly, $\matriz{R} (\matriz{U})$ can be chosen as \begin{equation}
R(\matriz{U})_{jk} = \frac{1}{2} \, \mathop{\mathrm{Tr}} \nolimits ( \sigma_{j} \, \matriz{U} \, \sigma_{k} \,
\matriz{U}^{-1} ) \, .
\label{eq:cor23} \end{equation} The correspondence $\matriz{U} \mapsto \matriz{R}(\matriz{U})$ constitutes a representation of SU(2), and every rotation is an image of some element $\matriz{U}$. Note that both the matrices $\matriz{U}$ and $-\matriz{U}$ lead to the same rotation: $\matriz{R}(\matriz{U}) = \matriz{R}(- \matriz{U})$, so the above correspondence produces a double covering of SO(3) by SU(2).
An arbitrary SU(2) transformation can be generated by any pair of Pauli matrices; for example, $\sigma_{3}$ and $\sigma_{2}$. Using (\ref{eq:cor23}), one can immediately show that \begin{equation} \begin{array}{lll} \matriz{U}(\mathbf{e}_{3}, \phi) = e^{- i \phi \sigma_{3}/2} = \left ( \begin{array}{cc} e^{-i \phi /2} & 0 \\ 0 & e^{i \phi /2} \end{array} \right ) & \mapsto & \matriz{R} (\mathbf{e}_{3}, \phi) = \left ( \begin{array}{ccc} \cos \phi & \sin \phi & 0 \\ - \sin \phi & \cos \phi & 0 \\ 0 & 0 & 1 \end{array} \right ) \, , \\ & & \\ \matriz{U}(\mathbf{e}_{2}, \theta) = e^{- i \theta \sigma_{2}/2} = \left ( \begin{array}{cc} \cos (\theta/2) & \sin (\theta/2) \\ - \sin (\theta/2) & \cos (\theta/2) \end{array} \right ) & \mapsto & \matriz{R} (\mathbf{e}_{2}, \theta) = \left ( \begin{array}{ccc} \cos \theta & 0 & \sin \theta \\ 0 & 1 & 0 \\ - \sin \theta & 0 & \cos \theta \end{array} \right ) \, .\\ \end{array} \end{equation} Therefore, $\sigma_{3}$ generates differential phase shifts between the amplitudes, whereas $\sigma_{2}$ generates rotations around the direction of propagation. It then follows that any energy-conserving polarization transformation can be realized with linear optics: phase plates and rotators.
In many setups one also has to consider energy-nonconserving transformations. They are associated with dichroic devices, which attenuate the field components at different rates. This can be represented by the matrix \begin{eqnarray}
\matriz{H} (\pmb{\mathfrak{n}}, \eta ) & = & e^{-\rho} \exp( \eta \; \pmb{\mathfrak{n}} \cdot \pmb{\sigma}/2) \nonumber \\ & = & e^{-\rho} \left( \begin{array}{cc}
\cosh (\eta /2) + \sinh (\eta /2) \cos \Theta &
\sinh (\eta /2) \sin \Theta \; e^{-i \Phi} \\
\sinh (\eta /2) \sin \Theta \; e^{i \Phi} &
\cosh (\eta /2) - \sinh (\eta/2) \cos \Theta \end{array}
\right) \, ,
\end{eqnarray} where $e^{-\rho} = e^{-( \eta_1 + \eta_2 )/2}$ and $e^{\eta} = e^{(\eta_2 - \eta_1)}$ are the isotropic and the relative amplitude transmittances of the dichroic device, with $e^{-\eta_1}$ and $e^{-\eta_2}$ its principal transmittances, major and minor, respectively, and $\pmb{\mathfrak{n}}$ is the axis of diattenuation~\cite{Kim:2000aa}. Since the isotropic transmittance $e^{-\rho}$ reduces both components at the same rate and does not affect the polarization, it can be neglected as far as polarization is concerned. Therefore, $\matriz{H} (\pmb{\mathfrak{n}},\eta)$ is a Hermitian matrix of unit determinant; that is, it belongs to the group SL($2, \mathbb{C})$ of $2 \times 2$ complex matrices with unit determinant. In fact, it can be denoted by \begin{equation}
\matriz{H} ( \pmb{\mathfrak{n}},\eta ) = \matriz{U} \, e^{\eta \sigma_3/2} \, \matriz{U}^\dagger , \end{equation} where $\matriz{U}$ is the SU(2) matrix that rotates the $S_{3}$ axis onto the direction $\pmb{\mathfrak{n}}$, so any polarization transformation, whether or not it conserves energy, can thus be realized with phase plates, rotators, and a single diattenuator.
The matrix $\matriz{H}$ induces a linear transformation on the Stokes parameters~\cite{Han:1997aa,Moreva:2006fv,Savenkov:2009aa,Simon:2010aa}, which transform as a four-vector; namely, \begin{equation} \label{eq:Mueclas} S_\mu^\prime = \matriz{M}_{\mu \nu} (\matriz{H}) \, S_\nu \,,
\end{equation}
where the $4 \times 4$ matrix is known as the Mueller matrix~\cite{Soleillet:1929aa,Mueller:1948aa} and summation over repeated indices is understood henceforth. Following a similar procedure as before, it can be expressed as~\cite{Barut:1977}
\begin{equation}
\matriz{M}(\matriz{H})_{\mu\nu} = \frac{1}{2} \, \mathop{\mathrm{Tr}} \nolimits ( \sigma_{\mu} \, \matriz{H} \, \sigma_{\nu} \,
\matriz{H}^\dagger ) \, ,
\label{eq:Mueller from Jones} \end{equation} and it turns out that it is a boost along the axis $\pmb{\mathfrak{n}}$~\cite{Franssens:2015aa,Tudor:2016aa}. For example, $\matriz{H}_3\equiv \exp ( \eta\sigma_3/2 )$, which describes the attenuation of the $a_+$ and $a_-$ field amplitudes by $e^{- \rho} \, e^{\eta/2}$ and $e^{- \rho} \, e^{-\eta/2}$, respectively, corresponds to a boost along the $S_3$ axis with rapidity $\eta$. The most general polarization transformation can be expressed by the product \begin{equation} \matriz{T} = \matriz{U} \matriz{H} \, , \label{eq:SL2C polar decomp} \end{equation} which is called the polar decomposition~\cite{Halmos:1982aa}. Up to the scaling factor $t=\det(\matriz{T})$, general polarization transformations belong to SL($2, \mathbb{C}$) and describe the attenuation of a Stokes vector in addition to its rotations.
It was first noted by Barakat~\cite{Barakat:1963} that transformation matrices $\matriz{T}$ preserve the quadratic form \begin{equation}
M^{2} = S_0^2-S_1^2-S_2^2-S_3^2 , \end{equation} and are homomorphic to the proper orthochronous Lorentz group, of which SL($2, \mathbb{C}$) is the universal covering~\cite{Barakat:1981}. The invariant $M^{2}$ is similar to the mass of a particle, while the Stokes four-vector $S_{\mu}$ is the corresponding four-momentum. The correspondence $\matriz{T} \mapsto \matriz{M}$ constitutes a representation of SL(2,$\mathbb{C}$), and every Lorentz transformation is an image of some element $\matriz{T}$, up to a scaling factor.
A final note on nondeterministic polarization transformations is warranted. Following such transformations, the coherency matrix may not be rank-one, and thus it may contain more information than the Jones vector $\mathbf{A}$. Fortunately, nondeterministic polarization transformations can be represented by convex combinations of deterministic ones, through \cite{Kim:1987} \begin{equation}
\matriz{J}^\prime =
\matriz{T}_i \, \matriz{J} \, \matriz{T}_i^\dagger \, . \end{equation} This allows for a description of polarization transformations using only the linear optical elements mentioned above.
\subsection{Degree of polarization}
Before discussing the classical quantification of polarization, some clarification may be in order. There exist many definitions of the degree of polarization. They differ not only in their mathematical definitions, but also in their basic assumptions. One may treat light as a beam (possibly multimode)~\cite{Qasimi:2007aa}, and sometimes with additional assumptions about its form, such as the Gaussian-Schell model~\cite{Yao:2008aa}; as a scalar field \cite{Vahimaa:2004aa} or as a vectorial field, either two-dimensional (that is appropriate for the far field of a source), or three-dimensional (appropriate for the near field) \cite{Gil:2015aa}. One can define degrees of polarization for all these cases, but quite naturally, one cannot expect them to agree, or even quantify the same physical property. In what follows, we shall only discuss a two-mode model, describing far-field light that is indistinguishable in every respect except for its transverse-field degree of freedom. Thus, our two-mode description is equivalent to a two-dimensional, vectorial description of light and it is only such degrees of polarization we will discuss in the following.
If the relation between the field components $E_{+}$ and $E_{-}$ (or, equivalently, between the components $E_{H}$ and $E_{V}$) is completely deterministic, the field is fully polarized. For such a pure state (borrowing the terminology from quantum optics), the polarization matrix satisfies \begin{equation}
\matriz{J}_{\mathrm{pol}}^{2} = \matriz{J}_{\mathrm{pol}} \, . \end{equation} On the other hand, if the components of the field are fully uncorrelated, the off-diagonal elements are zero. If, in addition, the energy is distributed evenly between the horizontal and vertical components, the coherence matrix is proportional to the unit matrix: \begin{equation}
\matriz{J}_{\mathrm{unpol}} = \frac{1}{2} \mathfrak{I} \, \leavevmode\hbox{\small1\normalsize\kern-.33em1} \, , \end{equation} and we say that this field is unpolarized.
This leads to the important decomposition of an arbitrary $\matriz{J}$ into fully polarized and unpolarized parts~\cite{Wolf:1959aa,Parrent:1960aa} \begin{equation}
\label{eq:decpu}
\matriz{J} = (1 - \mathbb{P} ) \, \matriz{J}_{\mathrm{unpol}} +
\mathbb{P} \, \mathfrak{I} \,
\matriz{J}_{\mathrm{pol}} \, , \end{equation} where $\mathbb{P}$, called the degree of polarization, can be physically interpreted as the fraction of the total energy contained in the fully polarized part.
Alternatively, $\mathbb{P}$ can be written in a slightly different yet equivalent way~\cite{Born:1999yq} \begin{equation}
\label{eq:defPur}
\mathbb{P} = \sqrt{\frac{2 \mathop{\mathrm{Tr}} \nolimits ( \matriz{J}^{2} )}{\mathop{\mathrm{Tr}} \nolimits^2 (\matriz{J} )} - 1} = \sqrt{1 - \frac{4 \det (\matriz{J})}{\mathop{\mathrm{Tr}} \nolimits^2 (\matriz{J})}} \, , \end{equation} as can be checked by a direct calculation. In the first form, the degree of polarization seems to be intimately linked to $\mathop{\mathrm{Tr}} \nolimits (\matriz{J}^{2} )$, which, following again a quantum notation, is called the purity. In the second form, it can be immediately related to the eigenvalues of $\matriz{J}$: if we denote them by $\lambda_{+}$ and $\lambda_{-}$, ($\lambda_{+} > \lambda_{-}$), then $ \mathop{\mathrm{Tr}} \nolimits^{2}( \matriz{J}) = (\lambda_{+} + \lambda_{-})^{2} = \mathfrak{I}^2$ and $\det (\matriz{J}) = \lambda_{+} \lambda_{-}$, so that \begin{equation} \label{eq:scram}
\mathbb{P}= \frac{\lambda_{+} - \lambda_{-}}
{\lambda_{+} + \lambda_{-}} \, . \end{equation} The action of any unitary transformation on $\matriz{J}$ does not affect its trace. We can thus regard the intensity of a partially polarized field as the sum of two uncorrelated field components with intensities $\lambda_+$ and $\lambda_-$. If the light is thermal, the lack of correlation implies the statistical independence of both polarization components and the corresponding intensities.
Another equivalent definition of $\mathbb{P}$ is \begin{equation}
\label{eq:defPcl}
\mathbb{P}= \frac{\sqrt{S_{1}^{2} + S_{2}^{2} + S_{3}^{2}}}{S_{0}} \, , \end{equation} representing the intensity-normalized length of the Stokes vector. Stokes vectors on the surface of the Poincar\'e sphere represent totally polarized states (pure states) and Stokes vectors inside the sphere represent partially polarized states (mixed states). The maximally mixed state is at the origin and represents classical unpolarized light.
In the relativistic picture presented in Section~\ref{subsec:poltrans}, pure states correspond to $M=0$. As $M$ increases, the system becomes more and more mixed, until being completely random for $M=S_{0}$.
To conclude, it is opportune to recall that the concept of the von Neumann entropy $\mathcal{S}$ can be transferred in a direct way to electromagnetic waves by~\cite{Fano:1957aa,Barakat:1983aa,Gase:1994aa,Barakat:1996aa} \begin{equation}
\mathcal{S} = - \mathop{\mathrm{Tr}} \nolimits (\matriz{J} \, \ln \matriz{J} ) \, . \end{equation} This quantity is a measure of the difference in the amount of information between a pure state and a mixed state (both with the same intensity). Using the eigenvalues of $\matriz{J}$, this entropy can be written as~\cite{Barakat:1983aa} \begin{equation} \mathcal{S} = - \sum_{i= \pm} \lambda_{i} \, \ln \lambda_{i} \, , \end{equation} or, equivalently, as \begin{equation}
\mathcal{S} = - \left \{ \tfrac{1}{2} ( 1 + \mathbb{P} )
\ln \left [ \tfrac{1}{2} ( 1 + \mathbb{P} ) \right ]
+ \tfrac{1}{2} ( 1 - \mathbb{P} )
\ln \left [ \tfrac{1}{2} ( 1 - \mathbb{P} ) \right ] \right \} \, . \end{equation} Therefore, $\mathcal{S}$ is unequivocally characterized by $\mathbb{P}$ and decreases monotonically with $\mathbb{P}$. The maximum $\mathcal{S} = \ln 2$ corresponds to $ \mathbb{P} = 0$; whereas, the minimum $\mathcal{S} = 0$ is reached for $ \mathbb{P} = 1$.
\section{Polarized light in quantum optics} \label{sec:quanpol}
\subsection{Stokes operators and the polarization sector} \label{sec:polsec}
As heralded in the Introduction, the Stokes parameters are fitting analytical tools for treating polarization in the quantum domain because they can easily be translated into truly quantum observables. We again begin with a monochromatic plane wave, propagating in the $z$ direction. The quantum field is now characterized by two complex amplitude operators, denoted by $\hat{a}_{+}$ and $\hat{a}_{-}$ (we shall use carets to denote operators). They are the quantum equivalent of the amplitudes $a_{+}$ and $a_{-}$ in (\ref{eq:Efield2}) and obey the bosonic commutation rules (with $\hbar =1$ throughout) \begin{equation}
[ \hat{a}_{\mathfrak{s}}, \hat{a}_{\mathfrak{s}^{\prime}}^{\dagger} ] =
\delta_{\mathfrak{s} \mathfrak{s}^{\prime}} \leavevmode\hbox{\small1\normalsize\kern-.33em1} \, ,
\qquad \qquad
\mathfrak{s}, \mathfrak{s}^{\prime} \in \{+, -\} \, ,
\label{eq:bosonic commutation relations} \end{equation} which bring about the existence of unavoidable quantum noise precluding their sharp simultaneous measurement. The Stokes operators are then a direct extension of their classical counterparts~\cite{Collett:1970ys,Karassiov:1993lq,Shumovsky:1998aa,Luis:2000ys,Agarwal:2013aa} \begin{eqnarray}
\label{eq:Stokop}
& \hat{S}_{0} = \tfrac{1}{2}
( \hat{a}^{\dagger}_{+} \hat{a}_{+} +
\hat{a}^{\dagger}_{-} \hat{a}_{-} ) \, , & \nonumber \\
& & \\
& \hat{S}_{1} = \tfrac{1}{2}
( \hat{a}^{\dagger}_{+} \hat{a}_{-} +
\hat{a}_{+} \hat{a}^{\dagger}_{-} ) \, ,
\qquad
\hat{S}_{2} = \tfrac{i}{2}
( \hat{a}_{+} \hat{a}^{\dagger}_{-} -
\hat{a}^{\dagger}_{+} \hat{a}_{-} ) \, ,
\qquad
\hat{S}_{3} = \tfrac{1}{2}
( \hat{a}^{\dagger}_{+} \hat{a}_{+} -
\hat{a}^{\dagger}_{-} \hat{a}_{-} ) \, , & \nonumber \end{eqnarray} so that the components of the Stokes vector $\hat{\mathbf{S}} = (\hat{S}_{1}, \hat{S}_{2}, \hat{S}_{3})^{\top}$ satisfy the commutation relations of angular momentum \begin{equation} [\hat{S}_k,\hat{S}_\ell]= i \epsilon_{k\ell m} \hat{S}_m \, , \qquad \qquad [\hat{S}_{0}, \hat{\mathbf{S}} ] = \pmb{0} \, ,
\label{eq:CR} \end{equation} where $\epsilon_{k\ell m}$ is the Levi-Civita fully antisymmetric tensor; i.e., $\epsilon_{k\ell m}$ is $1$ if $(k, \ell, m)$ is an even permutation of $(1, 2, 3)$, $- 1$ if it is an odd permutation, and $0$ if any index is repeated. Note that $\hat{S}_{0} = \hat{N}/2$, with $\hat{N} = \hat{N}_{+} + \hat{N}_{-}$ being the operator for the total number of excitations. Mathematically, the operators (\ref{eq:Stokop}) are the Jordan-Schwinger representation~\cite{Jordan:1935aa,Schwinger:1965kx} of SU(2) in terms of bosonic amplitudes. This construction is by no means restricted to polarization, but encompasses many different instances of two-mode problems, such as, e.g., strongly correlated systems, Bose-Einstein condensates, and Gaussian-Schell beams, where the modes can even be spatially separated~\cite{Chaturvedi:2006vn}.
In classical optics, the total intensity is a nonfluctuating quantity, so the Poincar\'e sphere appears as a smooth surface with radius equal to the intensity. In contradistinction, in quantum optics we have that \begin{equation}
\hat{\mathbf{S}}^{2} =
\hat{S}_{1}^{2} + \hat{S}_{2}^{2} + \hat{S}_{3}^{2} =
S (S+1) \hat{\leavevmode\hbox{\small1\normalsize\kern-.33em1}} , \end{equation} with $S = S_{0} = N/2$. As fluctuations in the number of photons are unavoidable (leaving aside photon-number states), we are forced to work in a three-dimensional Poincar\'e space that can be regarded as a set of nested spheres with radii proportional to the different photon numbers that contribute to the state: they have been aptly termed as \emph{Fock layers}~\cite{Donati:2014aa}. One can also introduce \emph{normalized} Stokes operators~\cite{Zukowski:2017aa} for which the effects of intensity fluctuations are removed, making them more sensitive when detecting entanglement.
The second equation in~(\ref{eq:CR}) expresses in the quantum language that polarization and intensity are separate concepts: the form of the ellipse (polarization) does not depend on its size (intensity). This fact brings about remarkable simplifications. First, it means that we must handle each subspace with a fixed number of photons $N$ separately. In other words, in the previous onionlike picture of Fock layers, each shell has to be addressed independently. This can be highlighted if instead of the Fock states $\{ |n_{+}, n_{-} \rangle \}$, which are an orthonormal basis of the Hilbert space of these two-mode fields, we employ the relabeling \begin{equation}
| S, m \rangle \equiv |n_{+} = S+m,n_{-} =S-m \rangle \, .
\label{eq:Spinequiv} \end{equation} The relabeled states form the angular momentum basis of common eigenstates of $\{\hat{\mathbf{S}}^2,\hat{S}_3\}$. They span a $(2S + 1)$-dimensional subspace, $\mathcal{H}_{S}$, wherein they act in the standard way \begin{equation}
\hat{S}_{z} \, | S, m \rangle =
m | S, m \rangle \, ,
\qquad \qquad
\hat{S}_{\pm} \, | S, m \rangle =
\sqrt{ S (S+1 ) - m (m \pm 1) } \,
|S, m \pm 1 \rangle \, , \end{equation} with $\hat{S}_{\pm} = \hat{S}_{1} \pm i \hat{S}_{2}$ being the raising and lowering operators. The label $S$ always indicates the use of this basis.
Second, for any arbitrary function of the Stokes operators $f (\hat{\mathbf{S}} )$, we have $[ f (\hat{\mathbf{S}} ), \hat{N}] = 0$, so the matrix elements of the density matrix $\hat{\varrho}$ connecting subspaces with different photon numbers do not contribute to $\left\langle f (\hat{\mathbf{S}} )\right\rangle$. Then, it is clear that the moments of any energy-preserving observable (such as $\hat{S}_\mathbf{n}$) do not depend on the coherences between different subspaces. The only accessible information from any state described by the density matrix $\hat{\varrho}$ is thus its block-diagonal form \begin{equation}
\hat{\varrho}_{\mathrm{pol}} = \bigoplus_S w_{S} \, \hat{\varrho}^{(S)} =
\sum_{S=0}^{\infty} \sum_{m,m'=-S}^S w_{S} \, \hat{\varrho}^{(S)}_{m m'} \;
| S,m \rangle \langle S,m^{\prime } | \, ;
\label{eq:PolSec} \end{equation} the blocks off of the diagonal are zero matrices, and the diagonal block matrices are given by $\hat{\varrho}^{(S)}$, which are the density matrices in the $S$th subspaces ($2S$ runs over all the possible photon numbers, i.e. $S = \tfrac{1}{2}, 1, \tfrac{3}{2}, 2, \dots$). We have included the factor $w_{S}$, which is the photon-number distribution, so that all the density matrices $\hat{\varrho}^{(S)}$ are normalized to unit trace. The form $\hat{\varrho}_{\mathrm{pol}}$ is called the polarization sector~\cite{Raymer:2000zt,Marquardt:2007bh,Muller:2012ys} and also the polarization density matrix~\cite{Karassiov:2006hq,Karassiov:2007aa}. Since any $\hat{\varrho}$ and its associated block diagonal form $\hat{\varrho}_{\mathrm{pol}}$ cannot be distinguished in polarization measurements, we henceforth drop the subscript pol.
Finally, note that the SU(2) transformations are represented in the subspace $\mathcal{H}_{S}$ by the operator $\hat{U} (\pmb{\mathfrak{n}},\vartheta) = \exp (- i \vartheta \hat{\mathbf{S}} \cdot \pmb{\mathfrak{n}})$. As we have seen before, the action of this unitary operator induces a rotation, as indicated in Eq.~(\ref{eq:doucov}).
\subsection{Uncertainty relations and polarization squeezing} \label{sec:uncrel}
The Stokes operators satisfy the standard uncertainty relations of the $\mathfrak{su}(2)$ algebra; viz., \begin{equation}
\label{eq:polsquez1}
\var{\hat{S}_{k}} \, \var{\hat{S}_{\ell}} \ge
\tfrac{1}{4} \, \lvert \epsilon_{k \ell m} \; \langle \hat{S}_{m} \rangle \rvert^{2} \, , \end{equation} where $\var{\hat{X}} = \langle \hat{X}^{2} \rangle - \langle \hat{X} \rangle^{2}$ stands for the variance. The noncommutability of these operators precludes the simultaneous sharp measurement of the physical quantities they represent. Note that the lower bound in Eq.~(\ref{eq:polsquez1}) is state dependent, and, in particular, some of the uncertainty relations may become trivial; all three variance bounds vanish simultaneously when $\langle \hat{\mathbf{S}} \rangle = 0$. To bypass this problem it is often convenient to use uncertainty relations in terms of sum of variances~\cite{Maccone:2014aa,Zheng:2020aa}, which in our case reads \begin{equation}
\var{\hat{\mathbf{S}}} =
\var{\hat{S}_{1}} + \var{\hat{S}_{2}} + \var{\hat{S}_{3}}
\geq \langle \hat{S}_0 \rangle \, . \end{equation} The states satisfying the equality $\var{\hat{\mathbf{S}}} = \langle \hat{S}_0 \rangle$ are precisely the SU(2) coherent states~\cite{Arecchi:1972zr,Perelomov:1986ly,Gazeau:2009aa} (see Appendix~\ref{sec:cs} for a brief account of their properties), so they can be rightly considered as the most classical states allowed by the quantum theory. They live in the subspace $\mathcal{H}_{S}$ and are defined in the standard angular momentum basis by \begin{equation}
|S, \mathbf{n} \rangle = \sum_{m=-S}^{S}
c_{m} (\mathbf{n}) \, |S,m \rangle \, , \end{equation} where the coefficients $c_{m} (\mathbf{n} )$ follow a binomial distribution peaked around the direction given by the unit vector $\mathbf{n}$ of spherical angles $(\theta, \phi)$: \begin{equation}
c_{m} ( \mathbf{n} )=
\binom{2S}{S+m}^{1/2}
[ \sin ( \theta / 2 ) ]^{S-m} \,
[ \cos ( \theta/2)]^{S+m} \, \exp [ - i ( S+m ) \phi] \, .
\label{eq:coeff} \end{equation}
Another issue with the relations (\ref{eq:polsquez1}) is that they are not explicitly SU(2) invariant, which can lead to confusing conclusions. A way of attaining the desirable SU(2) invariance is by using specific components of the Stokes operators. To this end, we first define the mean-polarization direction by (assuming $\langle \hat{\mathbf{S}} \rangle \neq 0$) \begin{equation} \mathbf{n}_{\parallel} = \frac{\langle \hat{\mathbf{S}} \rangle}
{\lvert \langle \hat{\mathbf{S}}\rangle \rvert} \, , \end{equation} and two other orthogonal vectors $\{ \mathbf{n}_{\perp 1}, \mathbf{n}_{\perp 2} \}$ that, together with $ \mathbf{n}_{\parallel}$, define an orthonormal reference frame. If we denote by $ \hat{S}_{\mathbf{n}} = \hat{\mathbf{S}} \cdot \mathbf{n}$ the projection of the Stokes vector onto the direction $\mathbf{n}$, the commutation relations (\ref{eq:CR}) then read $ [ \hat{S}_{\perp 1}, \hat{S}_{\perp 2} ] = i \hat{S}_{\parallel}$, which gives only one nontrivial uncertainty relation, namely \begin{equation}
\label{eq:URpp}
\var{\hat{S}_{\perp 1}} \; \var{\hat{S}_{\perp 2}} \ge
\tfrac{1}{4} \, \lvert \langle \hat{S}_{\parallel} \rangle \rvert^{2} \, , \end{equation} and two trivial ones $\var{\hat{S}_{\perp 1}} \; \var{\hat{S}_{\parallel}} \ge 0$ and $\var{\hat{S}_{\perp 2}} \; \var{\hat{S}_{\parallel}} \ge 0$. The equality in Eq.~(\ref{eq:URpp}) is reached by the eigenvectors of $\hat{S}_{\perp 1} + i \kappa \hat{S}_{\perp 2}$, for any real $\kappa$; these are the so-called intelligent states~\cite{Aragone:1974aa, Aragone:1976aa}. The SU(2) coherent states are the only states satisfying the three equalities simultaneously, as $\var{\hat{S}_{\parallel}} = 0$ for them.
Another way of ensuring SU(2) invariance is to use the real symmetric $3 \times 3$ covariance matrix for the Stokes variables~\cite{Barakat:1989fp,Rivas:2008ys,Bjork:2012zr}, defined as \begin{equation}
\Lambda_{k \ell} =
\tfrac{1}{2} \langle \{ \hat{S}_{k}, \hat{S}_{\ell} \} \rangle -
\langle \hat{S}_{k} \rangle \langle \hat{S}_{\ell} \rangle \, , \label{eq:gamma} \end{equation} where $\{ \cdot , \cdot \}$ is the anticommutator. Note that while the Stokes operators are all Hermitian, the noncommutability makes mixed, nonsymmetric products (such as $\hat{S}_{k} \hat{S}_{\ell}$) non-Hermitian, also precluding their direct measurement. The symmetrization included in the definition (\ref{eq:gamma}) prevents this problem. In terms of the matrix $\Lambda$, we have \begin{equation}
\var{\hat{S}_{\mathbf{n}}} =
\mathbf{n}^{\top} \, \Lambda \, \mathbf{n} \, . \end{equation} By construction, $\Lambda_{k \ell} = \Lambda_{\ell k}$, so $\var{\hat{S}_{\mathbf{n}}}$ is a symmetric quadratic form in $\mathbf{n}$. In consequence, the minimum of $ \var{\hat{S}_{\mathbf{n}}}$ with respect to the direction $\mathbf{n}$ exists and is unique. If we incorporate the constraint $\mathbf{n}^{\top} \cdot \mathbf{n} =1$ as a Lagrange multiplier $\lambda$, this minimum is given by \begin{equation} \Lambda \mathbf{n} = \lambda \, \mathbf{n} \, . \end{equation} The admissible values of $\lambda$ are thus the eigenvalues of $\Lambda$ (which are real and non-negative) and the directions minimizing $ \var{\hat{S}_{\mathbf{n}}}$ are the corresponding eigenvectors.
As for any second-rank tensor, we can readily define three invariants: the determinant, the sum of the principal minors, and the trace. In terms of the eigenvalues $\lambda_{i}$, $i \in \{1, 2, 3 \}$, we can form state-dependent uncertainty relations; viz., \begin{align} 0 & \leq \lambda_{1} \lambda_{2} \lambda_{3} \leq \tfrac{1}{27} \langle \hat{S}_{0}^{3} (\hat{S}_{0} + 2)^{3} \rangle \, , \nonumber \\ \langle \hat{S}_{0} \rangle^{2} & \leq \lambda_{1} \lambda_{2} + \lambda_{2}\lambda_{3} + \lambda_{3}\lambda_{1} \leq \tfrac{1}{3} \langle \hat{S}_{0}^{3} (\hat{S}_{0} + 2)^{3} \rangle \, ,\\ 2 \langle \hat{S}_{0} \rangle & \leq \lambda_{1} + \lambda_{2} + \lambda_{3} \leq \langle \hat{S}_{0} (\hat{S}_{0} + 2) \rangle \, . \nonumber \end{align} Reference~\cite{Shabbir:2016aa} discusses in detail the structure of these relations and their possible saturation.
There is an alternative approach focusing on the sum of the variances of two Stokes operators. The resulting relations \begin{equation}
\label{eq:ursu}
\var{\hat{S}_{k}} + \var{\hat{S}_{\ell}} \geq C \end{equation} are referred to as planar uncertainty relations~\cite{He:2011aa,Puentes:2013aa}. The lower bound $C$ is state dependent, but it can be explicitly calculated for $S= 1/2$, $1$, and $3/2$: the results are $1/4$, $7/16$, and $0.600933$, respectively. For large photon numbers, numerical calculations suggest that $C \simeq \tfrac{3}{2} \langle \hat{S}_{0} \rangle^{2/3}$~\cite{Dammeier:2015aa}.
The concept of squeezing is closely linked to the uncertainty relations above. Squeezing occurs whenever the fluctuations of one of the Stokes operators are below the shot-noise level, which is fixed by SU(2) coherent states. But, unlike in the bosonic case, in which the coherent state variances are equal in any direction, in the case of an SU(2) coherent state the variances of the Stokes operators depend on the direction $\mathbf{n}$. Actually, the parallel component satisfies $\var{\hat{S}_{\parallel}} =0$, so squeezing is primarily determined by the fluctuations of the orthogonal components and alternative squeezing criteria depend on the particular functions used.
For an SU(2) coherent state $\var{\hat{S}_{\bf{n_\bot}}} = S/2$. It is thus sensible to establish that squeezing takes place when the variance of $\hat{S}_{\bf{n_\bot}}$ is less than $S/2$, and the associated squeezing parameter is~\cite{Kitagawa:1993aa} \begin{equation}
\xi_{S}^2= \frac{2}{S} \inf_{\bf{n_\bot}} \;
\var{\hat{S}_{\bf{n_\bot}}} \, .
\label{eq:spsqKU} \end{equation} Obviously, $\xi_{S}^2=1$ for the SU(2) coherent states, whereas we may have $\xi_{S}^2<1$; that is, the fluctuation in one direction may be reduced.
In the context of interferometry, a suitable degree of squeezing is the ratio of the phase sensitivity of a general state to that of the SU(2) coherent states~\cite{Wineland:1992aa,Wineland:1994aa,Hillery:1993aa,Agarwal:1994aa,Brif:1996aa}. For an SU(2) coherent state, the phase sensitivity is $1/N$ and a direct calculation gives \begin{equation}
\xi_{R}^2= \frac{S} {2 \lvert \langle \hat{S}_{\parallel}\rangle \rvert^2} \inf_{\bf{n_\bot}} \var{\hat{S}_{\bf{n_\bot}}} \, .
\label{eq:spsqW} \end{equation} There are a few other squeezing parameters that are discussed in great detail in a recent comprehensive review~\cite{Ma:2011aa}.
The idea of polarization squeezing can be extended to the simultaneous fluctuations of two Stokes components, say $k$ and $\ell$, as \begin{equation}
\var{\hat{S}_{k}} + \var{\hat{S}_{\ell}} <
\sqrt{\langle \hat{S}_{k} \rangle^{2} + \langle \hat{S}_{\ell}
\rangle^{2}} \, , \end{equation} which is referred to as planar squeezing~\cite{He:2011aa,Puentes:2013aa}. Alternative results concerning the simultaneous squeezing of two or three~\cite{Prakash:2011aa} Stokes operators have been obtained.
Finally, we mention that uncertainty relations can be assessed using measures of uncertainty other than variance; the most popular alternatives are entropic measures~\cite{Wehner:2010aa}. This leads to the idea of entropic spin squeezing, which has been considered by several authors~\cite{Abdel-Aty:2002aa,Civitarese:2013aa}.
When a state spans several photon numbers, we are forced to scrutinize multiple Fock layers. When this happens, we bring to bear an averaged Stokes vector \begin{equation} \langle\mathbf{\hat{S}}\rangle = \sum_{S=0}^\infty w_{S} \; \mathop{\mathrm{Tr}} \nolimits [ \hat{\varrho}^{(S)}\mathbf{\hat{S}} ] \, . \end{equation} As a result of this parsing, the squeezing of the state can be much smaller than the corresponding squeezing in the individual Fock layers~\cite{Muller:2016aa}.
Polarization squeezing has been observed in numerous experiments~\cite{Bowen:2002kx,Heersink:2003aa,Glockl:2003aa,Heersink:2005ul,Dong:2007fu,Corney:2008uq,Shalm:2009mi,Iskhakov:2009pi,Andersen:2016aa}. The Kerr effect in fibers is probably one of the most efficient and will be discussed with more detail below in Sec.~\ref{sec:Kerr}. The squeezing can be achieved by a single pass of optical pump pulses on the two polarization axes of a polarization maintaining optical fiber. When compensating for the birefringence inside the fiber the two orthogonal polarized squeezed beams interfere at the output of the fiber. The resulting polarization squeezing can then be determined by a Stokes measurement, as described in Sec.~\ref{sec:tomo}. The simplicity of the setup and very good spatial and spectral overlap of the two interfering beams led to a measured squeezing of around 7 dB.
\subsection{The dark plane}
It is always possible to establish a basis in which only one of the Stokes operators (\ref{eq:Stokop}) has a nonzero expectation value, say \mbox{$\langle \hat{S}_{k} \rangle = \langle \hat{S}_{\ell}\rangle=0$} and $\langle \hat{S}_{m} \rangle \neq 0$. The only uncertainty inequality thus reads $\var{\hat{S}_{k}} \, \var{\hat{S}_{\ell}} \geq \lvert \langle\hat{S}_{m} \rangle \rvert^{2}$. Polarization squeezing can then be defined as~\cite{Korolkova:2002fu,Bowen:2002kx,Schnabel:2003vn} \begin{equation}
\var{\hat{S}_{k}} < \lvert \langle\hat{S}_{m} \rangle \rvert
< \var{\hat{S}_{\ell}} \, .
\label{eq:polsq1} \end{equation} The choice of the conjugate operators $\{\hat{S}_{k},\hat{S}_{\ell} \}$ is by no means unique; there exists an infinite set of operators $\{ \hat{S}_\perp(\theta), \hat{S}_\perp(\theta+\pi/2) \}$ that are perpendicular to the state's classical excitation direction $\langle \hat{S}_{m} \rangle$, for which $\langle \hat{S}_\perp(\theta) \rangle = 0$ for all $\theta$. All these pairs exist in the $S_{k}$-$S_{\ell}$ plane, which is called the \emph{dark plane} because it is the plane of zero mean intensity. We can express a generic $\hat{S}_{\perp}(\theta)$ as $\hat{S}_{\perp}(\theta) = \hat{S}_{k} \, \cos \theta + \hat{S}_{\ell} \, \sin \theta$, $\theta$ being an angle defined relative to $\hat{S}_{k}$. Condition (\ref{eq:polsq1}) is then equivalent to \begin{equation}
\var{\hat{S}_\perp (\theta_{\mathrm{sq}})} < \lvert \langle \hat{S}_{0} \rangle \rvert <
\var{\hat{S}_\perp ( \theta_{\mathrm{sq}} + \pi/2 )} \, ,
\label{eq:polsq2} \end{equation} where $\hat{S}_\perp(\theta_{\mathrm{sq}} )$ is the squeezed parameter and $\hat{S}_\perp( \theta_{\mathrm{sq}} + \pi/2 )$ the antisqueezed parameter.
Many experiments use circularly polarized light, which fulfills $\langle \hat{S}_{1} \rangle = \langle \hat{S}_{2} \rangle = 0$, $\langle \hat{S}_{3} \rangle = \alpha^{2}$. In this case the dark plane is exactly the $S_1$-$S_2$ plane and $\langle \hat{a}_{+} \rangle = \alpha$ and $\langle \hat{a}_{-} \rangle = 0$. Expressing the fluctuations of $\hat{\mathbf{S}}$ in terms of the noise of the circularly polarized modes $\delta \hat{a}_{\pm}$ and assuming $ \lvert \langle \delta \hat{a}_{\pm} \rangle \rvert \ll \alpha$, we find~\cite{Corney:2008uq} \begin{equation}
\delta \hat{S}_\perp (\theta) = \alpha \, \delta\hat{X}_{-} (\theta) =
\alpha [ \delta \hat{X}_{H} (\theta ) +
\delta \hat{X}_{V} (\theta + \pi/2) ] \, , \label{eq_polsq_darkmode} \end{equation} where $\hat{X}_{H} = (\hat{a}_{H} e^{-i \theta} + \hat{a}_{H}^{\dagger} e^{{i \theta}} )/\sqrt{2}$ is the rotated quadrature for the $H$ mode, and an analogous expression for the $V$ mode. On the other hand, since $\delta \hat{N} = \alpha ( \delta\hat{a}_{+} + \delta \hat{a}^\dagger_{+} ) = \alpha \, \delta\hat{X}_{+}$, the intensity exhibits no dependence on the dark mode. In consequence, the condition (\ref{eq:polsq2}) can be recast as \begin{equation}
\var{\hat{X}_{-} (\theta )} < 1 \, ;
\label{eq_sq_equivalence} \end{equation} that is, polarization squeezing is equivalent to vacuum quadrature squeezing in the orthogonal polarization mode. This is also seen by considering that the sphere can locally be replaced by its tangent plane since $S \simeq \alpha^{2}$; i.e., for bright states, the Poincar\'e sphere has a large enough radius such that the curvature is locally negligible and the projection in the $S_1$-$S_2$ dark plane is equivalent to a rescaled canonical $x$-$p$ quadrature phase space.
\subsection{Higher-order fluctuations}
Squeezing refers to the behavior of the second-order moments of the Stokes operators. As indicated before, higher-order fluctuations play a crucial role in the quantum domain. To deal with them, it is convenient to use the so called irreducible tensorial sets~\cite{Fano:1959ly,Silver:1976aa,Blum:1981ya,Varshalovich:1988ct,Manakov:2002aa}, a basic concept in the quantum theory of angular momentum. For a fixed spin $S$, these operators (also called polarization operators) are defined as \begin{equation}
\label{Tensor}
\hat{T}_{Kq}^{(S)} = \sqrt{\frac{2 K +1}{2 S +1}}
\sum_{m, m^{\prime}= -S}^{S} C_{Sm, Kq}^{Sm^{\prime}} \,
| S , m^\prime \rangle \langle S, m | \, , \end{equation} with $C_{S_{1}m_{1}, S_{2} m_{2}}^{Sm}$ denoting the Clebsch-Gordan coefficients~\cite{Varshalovich:1988ct} that couple a spin $S_{1}$ and a spin $S_{2}$ to a total spin $S$ and vanish unless the usual angular momentum coupling rules are satisfied: $m_1+m_2=m$, $ 0 \leq K \leq 2S$, and $ -K\leq q \leq K$.
According to the properties of the Clebsch-Gordan coefficients, $K$ takes the values $0, 1,2,\dots, 2S$, giving rise to $(2S+1)^2$ polarization operators that constitute a basis for the space of linear operators acting on $\mathcal{H}_{S}$. This is guaranteed by the property \begin{equation}
\mathop{\mathrm{Tr}} \nolimits \, [ \hat{T}_{Kq}^{(S)}\hat{T}_{K'q'}^{(S)\dagger} ] =
\delta_{K K'}\delta_{q q^{\prime}} \, .
\label{eq:Tortho} \end{equation} Polarization operators are, in general, non-Hermitian. But, due to symmetry properties, for every fixed $S$ they satisfy the relation \begin{equation}
\hat{T}_{Kq}^{(S)\dagger} = (-1)^q \; \hat{T}_{K-q}^{(S)} \, .
\label{eq:Therm} \end{equation} Most importantly, they have the correct transformation properties under SU(2) transformations~\cite{Varshalovich:1988ct}.
Although the definition of $\hat{T}_{Kq}^{(S)}$ might look a bit unfriendly, the essential observation for what follows is that the operators $\hat{T}_{Kq}^{(S)}$ are proportional to the $K$th powers of the generators of SU(2), so they are intimately linked to the moments of the Stokes variables. Actually, one can recast Eq.~(\ref{Tensor}) as \begin{equation}
\label{eq:multi}
\begin{array}{lll}
\hat{T}_{00}^{( S )} = \displaystyle
\frac{1}{\sqrt{2 S + 1}} \hat{\leavevmode\hbox{\small1\normalsize\kern-.33em1}} \, , & & \\
& & \\
\hat{T}_{10}^{( S )} = \displaystyle
\frac{\sqrt{3}}{\sqrt{( 2 S + 1 ) (S+1) S}} \, \hat{S}_{z} \, ,
\qquad \qquad &
\hat{T}_{1\mp 1}^{( S )} = \displaystyle
\frac{\sqrt{3}}{\sqrt{( 2 S + 1 ) (S+1) S}} \,
\hat{S}_{\pm} \, , \\
& & \\
\hat{T}_{20}^{{(S)}} = \textstyle{\sqrt{\frac{C}{6}}}
(3\hat{S}_{z}^{2}- \hat{S}^{2}) \, , &
\hat{T}_{2\mp 1}^{( S )} = \textstyle{\sqrt{\frac{C}{2}}} \,
\{ \hat{S}_{z}, \hat{S}_{\pm} \} \, , &
\hat{T}_{2 \mp 2}^{( S )} = \textstyle{\sqrt{\frac{C}{2}}} \,
\hat{S}_{\pm}^{2} \, ,
\end{array} \end{equation} with $C=30/[(2S + 3)(2 S + 1) (2S-1) (S+1)S]$, and so on.
The expansion of the density matrix $\varrho^{(S)}$ in polarization operators reads \begin{equation}
\label{rho1}
\hat{\varrho}^{(S)} = \sum_{K= 0}^{2S} \sum_{q=-K}^{K}
\varrho_{Kq}^{(S)} \, \hat{T}_{Kq}^{(S)} \, , \end{equation} where the corresponding expansion coefficients \begin{equation}
\varrho_{Kq}^{(S)}=\mathop{\mathrm{Tr}} \nolimits [
\hat{\varrho}^{(S)}\hat{T}_{Kq}^{(S)\dagger} ]
\label{eq:Multipoles} \end{equation} are known as state multipoles and contain all the information about the state, but arranged in a manifestly SU(2)-invariant form. Apart from their theoretical relevance, we will show in Sect.~\ref{sec:dv} that they can be experimentally determined using simple measurements.
In order to represent a physical state, the density operator must have unit trace, be Hermitian, and be positive semidefinite. These conditions impose some restrictions on the expansion coefficients. The unit trace fixes the value of the monopole, the only spherical tensor that is not traceless: \begin{equation}
\varrho_{00}^{(S)} = \frac{1}{\sqrt{2S+1}} \, ,
\label{eq:monop} \end{equation} so, in a way, the monopole is trivial. Hermiticity imposes the symmetry $\varrho_{K-q}^{(S)} = ( -1)^{q} \, \varrho_{Kq}^{(S) \ast}$. The positive semidefiniteness of $\hat{\varrho}^{(S)}$ forces constraints on the multipoles, which can be expressed as \begin{equation}
\sum_{q=- K}^{K} \lvert \varrho_{Kq}^{(S)} \rvert^{2} \leq C_{K}^{(S)} \, .
\label{eq:cons} \end{equation} For the simplest case of $S=1/2$, we have $C_1^{(1/2)}=1/2$, and for $S=1$, we have $C_{1}^{(1)}=1$ and $C_{2}^{(1)}=2/3$. The general structure of the allowed ranges of the multipoles is quite complicated and can be seen in Refs.~\cite{Band:1971aa} and \cite{Kryszewski:2006aa}.
The dipole $\varrho^{(S)}_{1q}$ is the first-order moment of $\hat{\mathbf{S}}$ and thus corresponds to the classical picture of polarization, in which the state is represented by the average value of $\hat{\mathbf{S}}$. A complete characterization of the state demands the knowledge of all the multipoles to all orders. This implies measuring the probability distribution of $\hat{\mathbf{S}}$ in all directions, and then performing an integral inversion, which turns out to be a hard task. However, in most realistic cases, only a finite number of multipoles are needed and then the reconstruction of the $K$th multipole entails measuring along only $(2K + 1) \ll (2S+1)^2$ independent directions, as we shall see in Section~\ref{sec:tomo}.
\subsection{Quantum polarization transformations}
The Stokes operators obey the same transformation rules as their classical counterparts in \eqref{eq:Mueclas} \begin{equation}
\hat{S}_\mu^\prime= \matriz{M}_{\mu\nu} \, \hat{S}_\nu \, , \end{equation} where we have assumed again summation over repeated indices. SU(2) transformations leave $\hat{S}_0$ unchanged while rotating the vector of Stokes operators $\hat{\mathbf{S}}$, and are immediately seen to describe Mueller matrices corresponding to phase shifters and wave plates. To describe other optical elements, such as polarizers, transformations beyond SU(2) are required~\cite{Goldberg:2019}.
Deterministic polarization transformations that, unlike SU(2) transformations, do not conserve energy must include loss. Consider the Lorentz boost $\matriz{H}_3$ describing diattenuation of the two classical field amplitudes $a_\pm$. A quantum description of this process $\hat{a}_{\pm} \mapsto e^{-\rho\pm\eta/2}\hat{a}_{\pm}$ does not preserve the commutation relations listed in \eqref{eq:bosonic commutation relations}, and so does not represent a valid trace-preserving quantum transformation. Rather, one must introduce loss channels $\hat{b}_\pm$ into which some of the light from modes $\hat{a}_\pm$ can be coupled, per \begin{equation}
\hat{a}_{\pm} \mapsto e^{-\rho\pm\eta/2}\hat{a}_{\pm}+\sqrt{1-e^{-2\rho\pm\eta}}\hat{b}_\pm. \end{equation} Combining this transformation with the act of ignoring the initially-unpopulated loss modes $\hat{b}_\pm$ leads to the desired transformation of the polarization modes $\hat{a}_\pm$. A left-circular polarizer, for example, corresponds to the boost $\eta \mapsto\infty$ that maintains $\rho=\eta/2$. The entirety of the $-$ polarization component is transmitted into an inaccessible mode beyond the polarization Hilbert space.
This analysis is readily extended to a general SL($2, \mathbb{C}$) transformation \begin{equation}
\left(
\begin{array}{c}
\hat{a}_{+} \\
\hat{a}_{-}
\end{array}
\right ) \mapsto \matriz{T}
\left(
\begin{array}{c}
\hat{a}_{+} \\
\hat{a}_{-}
\end{array}
\right ),
\label{eq:SL2C on operators} \end{equation} where $\matriz{T}$ is given by \eqref{eq:SL2C polar decomp}, since all SL($2,\mathbb{C}$) polarization transformations can be realized using rotation operations supplemented by a single diattenuation operation. More insight is gained by considering these general transformations in an enlarged Hilbert space. Again considering a vacuum mode $\hat{b}$, the unitary transformation [$\matriz{U}\in$SU(3)] \begin{equation}
\left(
\begin{array}{c}
\hat{a}_{+} \\
\hat{a}_{-} \\
\hat{b}
\end{array}
\right ) \mapsto \matriz{U}\left(
\begin{array}{c}
\hat{a}_{+} \\
\hat{a}_{-} \\
\hat{b}
\end{array}
\right ) \, , \end{equation} achieves \eqref{eq:SL2C on operators} while maintaining global conservation of energy. The $2 \times 2$ matrices representing SL($2, \mathbb{C}$) correspond to a projection of a $3\times 3$ matrix representation of SU(3), and the study of such projections from the perspective of random matrices~\cite{Zyczkowski:2000} may provide insight into the statistics of quantum polarization transformations. All deterministic polarization transformations can thus be realized as photon-number-conserving operations $\matriz{U}$ on an enlarged Hilbert space.
Convex combinations of deterministic quantum polarization transformations suffice to represent all classical polarization transformations. Depolarization, for example, which corresponds to a loss in the degree of polarization while maintaining $\langle \hat{S}_{0} \rangle$, can be characterized by a weighted sum of SU(2) operations acting on $\hat{\varrho}$~\cite{Rivas:2013}. It can be cast into an SU(2)-invariant master equation~\cite{Klimov:2008depol}, and leads to the decay of the higher-order multipoles at a rate that increases quadratically with multipole rank $K$~\cite{Rivas:2013}. All other quantum channels lead to polarization transformations that are more sophisticated than their classical counterparts.
\section{Phase-space representation of polarization states}
\label{sec:phsp}
\subsection{The Husimi $Q$-function}
In the conventional formulation of quantum optics, a system is described in the language of Hilbert space. However, for many purposes it proves advantageous to use a phase-space formulation, which is surveyed in a number of books~\cite{Schleich:2001aa,QMPS:2005aa,Schroek:1996aa} and review papers~\cite{Hillery:1984aa,Hansen:1984aa,Lee:1995aa,Ozorio:1998aa,Polkovnikov:2010aa,Weinbub:2018aa}. The idea is to exploit the Weyl correspondence between ordinary $c$-number functions in phase space and quantum operators in Hilbert space. The SU(2) symmetry inherent to the polarization structure greatly simplifies the task of finding this correspondence. Actually, Stratonovich~\cite{Stratonovich:1956qc} and Berezin~\cite{Berezin:1975mw} worked out quasiprobability distributions on the sphere satisfying all the pertinent requirements; this construction was later generalized by others~\cite{Agarwal:1981bh,Brif:1998if,Heiss:2000kc, Klimov:2000zv,Klimov:2008yb} and has proved to be extremely useful in visualizing the properties of spinlike systems~\cite{Dowling:1994sw, Atakishiyev:1998pr,Chumakov:1999sj,Chumakov:2000le,Klimov:2002cr}.
We do not need this complete machinery (a brief account can be found in Appendix~\ref{app:phsp}); for our goals it is enough if we concentrate on the Husimi $Q$-function~\cite{Husimi:1940aa}, defined in complete analogy to its counterpart for continuous variables, namely~\cite{Agarwal:1981bh} \begin{equation}
\label{eq:defQS}
Q ( S, \mathbf{n} ) = \langle S, \mathbf{n} | \hat{\varrho}^{(S)} |
S, \mathbf{n} \rangle \, , \end{equation}
where $\hat{\varrho}^{(S)}$ is the density matrix in the $S$th subspace of the polarization sector (\ref{eq:PolSec}). This $Q$-function is only defined in a subspace with fixed spin $S$. Since the SU(2) coherent states $|S, \mathbf{n} \rangle$ are the only states saturating the uncertainty relation (\ref{eq:polsquez1}), the definition of $Q ( S, \mathbf{n} )$ is quite appealing, for it comprises the projection onto the states that have the most definite polarization allowed by the quantum theory. The function $Q(S, \mathbf{n} )$ is everywhere nonnegative and properly normalized \begin{equation} \frac{2S+1}{4 \pi} \int d\mathbf{n} \; Q( S, \mathbf{n}) = 1 \, , \end{equation} with $d \mathbf{n} = \sin \theta d\theta d\phi$ being the invariant differential element of solid angle. In consequence, it can be interpreted as a genuine probability distribution over the $S$th Fock layer.
Most states require the full polarization sector as in \eqref{eq:PolSec}. For the total polarization matrix $\hat{\varrho}$, the $Q$-function can be obtained by summing over all the Fock layers (with the proper normalization)~\cite{Sanchez-Soto:2013cr} \begin{equation}
\label{eq:Q12}
Q ( \mathbf{n})=
\sum_{S=0}^{\infty} (2S + 1) \;
Q (S, \mathbf{n} ) \, , \end{equation} which is normalized according to \begin{equation}
\frac{1}{4 \pi}\int_{\mathcal{S}_{2}} d\mathbf{n} \,
Q ( \mathbf{n} ) = 1 \, , \end{equation} the integral now being extended to the unit sphere $\mathcal{S}_{2}$.
A point to be stressed is that (\ref{eq:Q12}) involves only diagonal elements between states with the same number of excitations. Because of the lack of off-diagonal contributions of the form $\langle S, \mathbf{n} | \hat{\varrho} | S^\prime, \mathbf{n} \rangle$ with $S \neq S^\prime$, the total $Q$-function is an average of the $Q$-functions over the Fock layers. The role of the sum over $S$ is to remove the total intensity from the description of the state~\cite{Klimov:2006aa}.
\begin{figure}
\caption{Density plots of the Husimi $Q$-function for: (left) an SU(2)
coherent state centered in the north pole, (right) a NOON
state. In both cases $S=4$. In all the density plots in the paper we use the same colormap that ranges from dark blue (corresponding to the numerical value 0) to bright red (corresponding to the numerical value 1).}
\label{fig:cohNoon}
\end{figure}
For the distinguished case of pure states, which can be expanded in the angular momentum basis with coefficients $\Psi_{Sm}$ (i.e., $ | \Psi \rangle = \sum_{S=0}^\infty \sum_{m=-S}^S \Psi_{S m}\, | S , m \rangle$), the Husimi $Q$-function takes the form \begin{equation}
Q_{\Psi} ( \mathbf{n} ) = \sum_{S=0}^\infty (2S+1) \;
\lvert \langle S, \mathbf{n} | \Psi \rangle \rvert^{2} =
\sum_{S=0}^\infty (2S+1) \; \left |
\sum_{m=-S}^S c_m( \mathbf{n} ) \, \Psi_{Sm} \right |^2 \, ,
\label{eq:Qpsgen} \end{equation} where the coefficients $c_{m} ( \mathbf{n}) $ are given in Eq.~(\ref{eq:coeff}).
Let us examine a few illustrative examples. We first consider states with fixed $S$. For an SU(2) coherent state $|S, \mathbf{n}_{0} \rangle$ one immediately finds \begin{equation}
Q_{\mathbf{n}_{0}} ( \mathbf{n} ) =
\left [
\tfrac{1}{2} (1 + \mathbf{n} \cdot \mathbf{n}_{0} )
\right ]^{2S} \, ,
\label{eq:QCS} \end{equation} which, as expected, is a distribution strongly peaked around the direction $\mathbf{n}_{0}$.
The second example is a NOON state~\cite{Lee:2002aa,Dowling:2008aa} \begin{equation}
|\mathrm{NOON} \rangle = \frac{1}{\sqrt{2}}
(|N,0\rangle - |0,N\rangle) = \frac{1}{\sqrt{2}}
(|S,S\rangle - |S,-S\rangle) \, ,
\label{Eq: NOON} \end{equation} expressed first in the Fock and then in the angular momentum basis. We have now \begin{equation}
Q_{\mathrm{NOON}} ( \mathbf{n} ) = \tfrac{1}{2} \left [ \sin^{4S} (\theta/2) + \cos^{4S} (\theta/2) -
2 \sin^{2S} (\theta/2) \cos^{2S} (\theta/2) \cos(2 S \phi) \right ] \, . \end{equation} This exhibits $2S$ minima equidistantly placed around the equator of the Poincar\'{e} sphere, as we can see in Fig.~\ref{fig:cohNoon}.
Let us consider two further examples of states spanning the whole polarization sector. The first one is a quadrature coherent state in both modes
$ |\alpha_{+}, \alpha_{-} \rangle$. Without loss of generality we take the state to be $|\alpha_{+}, 0_{-} \rangle$, for any other state of this family can be generated from this one via an SU(2) transformation. The decomposition into invariant subspaces $\mathcal{H}_{S}$ reads \begin{equation} \label{eq:2mcsdec}
|\alpha_{+}, 0_{-} \rangle = e^{- \lvert \alpha \rvert^{2}/2}
\sum_{S=0}^{\infty} \frac{\lvert \alpha \rvert^{2S}}
{\sqrt{(2S)!}} e^{2 i S \delta} \; |S, S \rangle \, . \end{equation} The final result is \begin{equation}
Q_{ \alpha_{+}, 0_{-}} ( \mathbf{n} ) =
[ 1 + \lvert \alpha \rvert^2 \cos^2 (\theta/2) ] \;
\exp [ - \lvert \alpha \rvert^2 \sin^2 (\theta/2) ]\, .
\label{eq:twomodecs} \end{equation}
The last example is a two-mode squeezed vacuum state~\cite{jmo:1987aa,josab:1987aa} \begin{align} \label{eq:TMSVdec}
|\mathrm{TMSV} \rangle & = \hat{S} ( r ) \; |0_{+}, 0_{-} \rangle
\nonumber \\
& = \frac{1}{\cosh r} \sum_{N} (-1)^{N} (\tanh r)^{N} |N, N \rangle =
\frac{1}{\cosh r} \sum_{S} (-1)^{S} (\tanh r)^{S} |S, 0 \rangle \, , \end{align} again expressed in both Fock and angular momentum bases. The two-mode squeezing operator is $ \hat{S} (r) = \exp [ r ( \hat{a}_{+} \hat{a}_{-} - \hat{a}_{+}^\dagger \hat{a}_{-}^\dagger )/2 ]$ and the squeezing parameter $r$ has been chosen to be real without loss of generality. The resulting Husimi $Q$- function is \begin{equation}
\label{Wtwom}
Q_{ \mathrm{TMSV}} ( \mathbf{n} ) = \frac{1}{\cosh^{2} r}
\frac{1 - \tanh^{2} r}{[1 + \tanh^{2} r + 2 \tanh r \,
\cos (2 \theta) ] ^{3/2}} \, . \end{equation}
\begin{figure}
\caption{Density plots of the Husimi $Q$-function for: (left) a two-mode
coherent state in which only the $+$ mode is excited, (right) a two-mode squeezed vacuum state with squeezing parameter $r = 0.9$.}
\label{fig:Wcohsqu}
\end{figure}
In Fig.~\ref{fig:Wcohsqu} we have plotted the $Q$-functions for these last two states. For the state $|\alpha_{+}, 0_{-} \rangle$, the $Q$-function has a caplike structure highly concentrated around the north pole, as the classical intuition suggests. For the two-mode squeezed vacuum (we take $r = 0.9$), we see the presence of a Gaussian peak centered at the equator ($\theta = \pi/2$) and independent of $\phi$. This means that we can see this state as arising from a superposition of states with equal amplitudes in each polarization mode, but all possible relative phases $\phi$. These contributing states are concentrated around the equator, and therefore we see a napkin ring there.
To conclude, we recall that polarization can also be understood as arising from the superposition of two perpendicular harmonic modes of the same frequency. To translate this picture into the quantum realm, we assume that a state can be characterized by the standard two-mode Husimi $Q$-function \begin{equation}
\label{prodw1}
Q ( \alpha_{+}, \alpha_{-} ) = \langle \alpha_{+}, \alpha_{-} | \:
\hat{\varrho} \:| \alpha_{+}, \alpha_{-} \rangle \, . \end{equation} These two pictures can easily be related, because the polarization ellipse needs only three independent quantities to be fully characterized: the amplitudes of each mode and the relative phase between them. We therefore introduce the parametrization \begin{equation} \label{eq:Hopffib}
\alpha_{+} = r e^{i \zeta} \; e^{i \phi/2} \cos (\theta/2) \, ,
\qquad \qquad
\alpha_{-} = r e^{i \zeta} \; e^{- i \phi/2} \sin (\theta /2) , \end{equation} where $\zeta$ is a global (irrelevant) phase, the radial variable $r^2 = \lvert \alpha_{+} \rvert^2 + \lvert \alpha_{-} \rvert^2$ represents the total intensity (here considered as a continuous variable), and the parameters $\theta$ and $\phi$ can be interpreted as the polar and azimuthal angles, respectively, on the Poincar\'e sphere: $\theta$ describes the relative amount of intensity carried by each mode and $\phi$ is the relative phase between them. In mathematical terms, (\ref{eq:Hopffib}) is an example of a Hopf fibration~\cite{Hopf:1931aa}, a remarkable nontrivial principal fiber bundle that occurs in different situations in theoretical physics in various guises~\cite{Urbantke:2003aa}.
Both Husimi functions $Q (\alpha_{+}, \alpha_{-} )$ and $Q(\mathbf{n})$ should be closely related. In fact, the latter can be understood as a marginal of the former, as has been worked out~\cite{Klimov:2006aa,Luis:2005ab}. To this end, it is essential to realize that the two-mode quadrature coherent states are expressed in terms of the SU(2) coherent states by~\cite{Atkins:1971aa} \begin{equation}
|\alpha_{+}, \alpha_{-} \rangle = e^{- r^{2}/2}
\sum_{S=0}^{\infty} \frac{r^{2S} e^{2 i S \zeta }}{\sqrt{(2S)!}}
|S, \mathbf{n} \rangle \,.
\label{eq:quadang} \end{equation} By integrating $Q(\alpha_+,\alpha_-)$ over the intensity variable $r$ we get the same result as in Eq.~(\ref{eq:Q12}). We will later discuss experimental methods for measuring the Husimi $Q$-function.
As is clear from Eqs.~\eqref{eq:2mcsdec} and \eqref{eq:TMSVdec}, the two-mode coherent states $| \alpha_{+}, \alpha_{-} \rangle $ are separable in the basis $|S,m \rangle$; whereas, the two-mode squeezed vacuum $| \mathrm{TMSV} \rangle $ is nonseparable (or entangled). Despite their classical flavor, SU(2) coherent states are typically entangled in our chosen modal decomposition. Naturally, entanglement properties are closely related to the Hilbert space structure and the nature of observables chosen~\cite{Zanardi:2004aa,Harshman:2007aa,Sanchidrian:2018aa}. It is worth recalling that similar entanglement structures exist in polarization optics, also at the classical level~\cite{Simon:2010ab,Qian:2011kx,Kagalwala:2013aa,Toppel:2014aa,Korolkova:2019aa}.
\subsection{Husimi $Q$-function and higher-order fluctuations}
The Husimi $Q$-function completely encompasses the information that can be obtained from a quantum state; knowledge thereof is tantamount to tabulating the values of all the multipoles $\varrho_{Kq}^{(S)}$. This can be stressed if we rewrite $Q(S, \mathbf{n})$ as~\cite{Klimov:2002cr} (see also Appendix~\ref{app:phsp}) \begin{equation}
\label{eq:QSU2rj}
Q (S, \mathbf{n} ) = \sqrt{\frac{4 \pi}{2S+1}}
\sum_{K=0}^{2S} \sum_{q=-K}^{K} C_{SS,K0}^{SS} \, \varrho_{Kq}^{(S)} \,
Y_{Kq}^{\ast} ( \mathbf{n} ) \, , \end{equation} where $ Y_{Kq}^{\ast} ( \mathbf{n})$ are the spherical harmonics. The Clebsch-Gordan coefficient $C_{SS,K0}^{SS}$ has a simple analytical form~\cite{Varshalovich:1988ct} \begin{equation}
\label{eq:Cesp}
C_{SS,K0}^{SS} = \frac{\sqrt{2S+1} \; (2S)!}
{\sqrt{(2S-K)! \, (2S+1 + K)!}} \, . \end{equation}
When a state lives in a complete polarization sector, then, by substituting Eq.~(\ref{eq:QSU2rj}) into the general definition Eq.~(\ref{eq:Q12}), the total $Q$-function appears as a sum \begin{equation}
\label{eq:QsumK}
Q ( \mathbf{n} ) = \sum_{K=0}^{\infty}
Q_{K} ( \mathbf{n} ) \, , \end{equation} where each partial multipole component is \begin{equation}
\label{eq:QSU2K}
Q_{K} ( \mathbf{n} ) =
\sum_{S=\lfloor K/2 \rfloor}^{\infty} \sqrt{4 \pi (2S+1)}
\sum_{q=-K}^{K} C_{SS,K0}^{SS} \, \varrho_{Kq}^{(S)} \;
Y_{Kq}^{\ast} ( \mathbf{n} ) \, . \end{equation} Here, the floor function $\lfloor x \rfloor$ is the largest integer less than or equal to $x$. The partial components $Q_K$ inherit the properties of $Q$, but they exclusively contain information about the $K$th moments of the Stokes variables. In this way, Eq.~(\ref{eq:QsumK}) is the appropriate tool for arranging the successive moments.
We illustrate this viewpoint with the simple example of the state $| 1_{\mathrm{+}}, 1_{\mathrm{-}} \rangle$. This represents the photon pairs generated in type-II optical parametric down-conversion~\cite{Rubin:1994aa} and is generally viewed as a highly nonclassical state. In the angular momentum basis, the state is $|1, 0 \rangle$ and its $Q$-function can immediately be calculated: \begin{equation}
\label{eq:Qt11}
Q ( \mathbf{n} ) = \sin^{2} \theta \, . \end{equation} The $Q$-function does not depend on $\phi$ and its shape has an equatorial bulge, revealing that the state is highly delocalized on the Poincar\'e sphere, in agreement with its nonclassical character. The partial components, according to (\ref{eq:QSU2K}), are \begin{equation}
\label{eq:Qp11}
Q_{0} ( \mathbf{n} ) = \frac{1}{3} \, ,
\qquad \qquad
Q_{1} ( \mathbf{n} ) = 0 \, ,
\qquad \qquad
Q_{2} ( \mathbf{n} ) = \left ( \frac{2}{3} - \cos^{2} \theta \right ) \, . \end{equation} The sum of these three terms gives, of course, the result Eq.~(\ref{eq:Qt11}), but there is more information encoded in Eq.~(\ref{eq:Qp11}): the dipolar contribution is absent, which means that this state conveys no first-order information. This is the reason why this was the first state in which hidden polarization was detected~\cite{Usachev:2001ve,Bushev:2001xq}. Figure~\ref{Fig:Qpartial} shows the $Q$-function for this state.
\begin{figure}
\caption{Density plot of the Husimi $Q$-function for the state $|1, 0 \rangle$. Here, the dipolar component ($K=1$) vanishes, so this state lacks first-order information. The features of the Husimi function are then due to the quadrupolar ($K=2$) components of the state.}
\label{Fig:Qpartial}
\end{figure}
The expansion \eqref{eq:QSU2rj} can be inverted using the orthonormality of the spherical harmonics. In this way, the multipoles appear as~\cite{Agarwal:1998aa,Bouchard:2017aa} \begin{equation}
\varrho_{Kq}^{(S)} = \sqrt{\frac{4\pi}{2S+1}}\frac{1}{C_{SS,K0}^{SS}}
\int_{\mathcal{S}_2} d\mathbf{n} \; Q( \mathbf{n}) \;
{Y}_{Kq} (\mathbf{n}) \, .
\end{equation} When expressed in the Cartesian basis these multipoles appear in a very transparent way. For example, the three dipole $(\varrho_{1q})$ and the five quadrupole $(\varrho_{2q})$ terms are given, respectively, by \begin{equation}
\wp_{q} = \langle n_{q} \rangle \, , \qquad \qquad \mathcal{Q}_{qq^{\prime}}=\langle 3 n_{q} n_{q^{\prime}} - \delta_{q q^{\prime}} \rangle \, ,
\end{equation} where the expectation values of a function $f (\mathbf{n})$ are calculated with respect to the $Q$-function \begin{equation}
\langle f ( \mathbf{n} )\rangle= \frac{\int_{\mathcal{S}_2}\; d\mathbf{n} \; f(\mathbf{n}) \; Q( S, \mathbf{n})} {\int_{\mathcal{S}_2} d\mathbf{n} \; Q(S, \mathbf{n})} \, . \end{equation} Therefore, the state multipoles appear as the standard ones in electrostatics, but with charge density replaced by $Q(S, \mathbf{n})$ and distances by directions~\cite{Jackson:1999aa}. They are the $K$th directional moments of the state and, therefore, the multipoles resolve progressively finer angular features. The extension to the complete polarization sector is direct.
\subsection{Propagation in a Kerr medium} \label{sec:Kerr}
The optical Kerr effect refers to the intensity-dependent phase shift that light experiences during its propagation through a third-order nonlinear medium. This leads to a remarkable non-Gaussian operation that has attracted considerable interest due to potential applications in a variety of areas, such as quantum nondemolition measurements~\cite{Braginskii:1968aa,Unruh:1979aa,Milburn:1983aa,Imoto:1985aa,Grangier:1998aa,Sanders:1989aa,Xiao:2008aa}, generation of quantum superpositions~\cite{Milburn:1986aa,Yurke:1986aa,Tombesi:1987aa,Gantsog:1991aa,Wilson-Gordon:1991aa,Tara:1993aa,Luis:1995aa,Chumakov:1999aa,Korolkova:2001aa}, and quantum logic~\cite{Turchette:1995aa,Semiao:2005aa,Matsuda:2007aa,You:2012aa}.
Special mention must be made of the role that this cubic nonlinearity has played in the generation of squeezed light, which is precisely our interest here. Optical fibers are the paradigm for that purpose~\cite{Levenson:1985aa,Schmitt:1998aa}, although, due to the typically small values of the nonlinearity in silica glass, one needs long propagation distances and high powers to observe nonlinear effects, which brings other unwanted results~\cite{Shelby:1985aa,Elser:2006aa}.
Let us consider the following Hamiltonian \begin{equation} \hat{H} = \upchi \hat{a}_{+}^{\dag} \hat{a}_{+} \, \hat{a}_{-}^{\dag} \hat{a}_{-} \, , \label{Hkerr} \end{equation} where $\upchi$ is an effective coupling constant that depends on the third-order nonlinear susceptibility. This describes the cross-Kerr effect in which a nonlinear phase shift of an optical polarization mode (say, $+$) is induced by the other mode ($-$)~\cite{Agrawal:2001aa}.
For any state described by the density operator $\hat{\varrho}$, the evolution can be formally written as $\hat{\varrho} (t) = \exp (-i t \hat{H} ) \, \hat{\varrho} (0) \, \exp ( i t \hat{H} )$. By expanding this equation in the two-mode Fock basis, the evolution may, in principle, be tracked. Taking the example of an initially pure, two-mode coherent state $| \Psi (0) \rangle = | \alpha_{+}, \alpha_{-} \rangle$, the resulting time-evolved state is~\cite{Agarwal:1989aa} \begin{equation}
\label{eq:evolexaCohst}
|\Psi (t) \rangle
= \exp [ - ( |\alpha_{+}|^2 + |\alpha_{-}|^2)/2 ]
\sum_{n_{+}, n_{-} =0}^\infty
\frac{\alpha_{+}^{n_{+}} \alpha_{-}^{n_{-}} }{\sqrt{n_{+}!\, n_{-}!}}
\, \exp (- i \upchi t n_{+} n_{-} ) |n_{+}, n_{-} \rangle \, . \end{equation} The term $\exp (- i \upchi t n_{+} n_{-} )$ arises because of the coupling between the modes and prevents the state from being factorized into single-mode states; i.e., the state becomes entangled. This exact expression is only of practical use for few-photon states.
Phase-space methods are especially adapted to this problem. If we employ the two-mode Husimi function $ Q(\alpha_{+}, \alpha_{-})$ and the basic techniques outlined, e.g., in Ref.~\cite{Gardiner:2004aa}, the quantum dynamics can be mapped to the following second-order differential equation, \begin{eqnarray}
i \partial_{t} Q &=&\upchi ( |\alpha_{-} |^{2}+1 ) \left(\alpha_{+}^{\ast} \frac{\partial Q}{\partial \alpha_{+}^{\ast}} - \alpha_{+} \frac{\partial Q}{\partial \alpha_{+}}\right) +
\upchi ( |\alpha_{+}|^{2}+1 ) \left( \alpha_{-}^{\ast} \frac{\partial Q}{\partial \alpha_{-}^{\ast}} - \alpha_{-} \frac{\partial Q}{\partial \alpha_{-} }\right) \nonumber \\ &-& \upchi \left( \alpha_{+}^{\ast} \alpha_{-}^{\ast} \frac{\partial }{\partial \alpha_{-}^{\ast}} \frac{\partial }{\partial \alpha_{+}^{\ast}} - \alpha_{+} \alpha_{-} \frac{\partial }{\partial \alpha_{-}} \frac{\partial }{\partial \alpha_{+} }\right) Q \, .
\label{Q2E} \end{eqnarray}
\begin{figure}
\caption{ Schematic representation of the effect of a Kerr medium in the phase
space of a single mode, assuming
an initially-coherent state.}
\label{fig:Squeezing}
\end{figure}
When both modes are initially in strongly-excited coherent states, which defines the semiclassical limit, the $Q$-function dynamics is approximately described by the first two terms in Eq.~(\ref{Q2E}) (at least, for times $\upchi t \ll 1$). If we introduce polar coordinates for each amplitude $\alpha_{\pm} = \sqrt{\mathcal{I}_{\pm}} e^{i \varphi_{\pm}}$, where $\varphi_{\pm}$ is the polar angle in phase space, and $\mathcal{I}_{\pm}$ is the mode intensity, we find \begin{equation} \partial_{t} Q = \upchi ( \mathcal{I}_{-} + 1 ) \frac{\partial Q}{\partial \varphi_{+}} + \upchi ( \mathcal{I}_{+} +1 ) \frac{\partial Q}{\partial \varphi_{-} } \, . \label{eq:W_p} \end{equation} Since the operator $\partial/\partial \varphi_{\pm}$ generates rotations in phase space, Eq.~(\ref{eq:W_p}) reflects that the amplitudes in each mode experience different rotations, with angles proportional to the intensity components of the other mode~\cite{Heersink:2005ul,Rigas:2013aa}. The result is schematized in Fig.~\ref{fig:Squeezing}: the shaded area indicates the region in phase space occupied by the state. For an initial coherent state this area is a circle; the top of the circle corresponds to higher intensity and therefore is more phase shifted than the bottom, resulting in an elliptical noise distribution.
Equation~(\ref{eq:W_p}) can be readily solved: \begin{eqnarray}
\label{eq:Wigner-T}
Q (\mathcal{I}_{+},\varphi_{+} ; \mathcal{I}_{-} ,\varphi_{-} |t) =
Q (\mathcal{I}_{+}, \varphi_{+} + (\mathcal{I}_{-} + 1) \upchi t;
\mathcal{I}_{-} , \varphi_{-} + (\mathcal{I}_{+}+ 1) \upchi t|0) \, . \end{eqnarray}
The cross-dependence of the phases on the amplitudes of the other field leads to the mode correlation. These intermodal correlations can be assessed, e.g., in terms of the linear entropy~\cite{Rigas:2013aa}. For an initial two-mode coherent state $|\sqrt{\mathcal{I}_{0+}}, \sqrt{\mathcal{I}_{0-}} \rangle$ the $Q$-function acquires the form \begin{equation}
Q (\mathcal{I}_{+}, \varphi_{+} ; \mathcal{I}_{-} ,\varphi_{-} |t)= \exp \left [ - \lvert \mathcal{I}_{+} e^{i \varphi_{+} + i (\mathcal{I}_{-} +1) \upchi t} - \mathcal{I}_{0+} \rvert^{2}\right ] \exp \left [ - \lvert \mathcal{I}_{-} e^{i \varphi_{-} + i (\mathcal{I}_{+} +1) \upchi t} - \mathcal{I}_{0-} \rvert^{2}\right ] \, . \label{Wt_0} \end{equation} At $ t = 0$ the $Q$-function is made of two independent Gaussians, while as time goes by the induced mode correlations lead to a non-Gaussian state.
The problem can also be treated in terms of the SU(2) $Q$-function, since in a Fock layer with fixed $S$, the Hamiltonian \eqref{Hkerr} reduces to~\cite{Corney:2008uq} \begin{equation} H = \upchi S_{z}^{2} \, , \end{equation} apart from an unessential constant. The evolution equation for the SU(2) $Q$-function takes the form~\cite{Klimov:2002cr} \begin{equation} \partial _{t} Q = - \upchi ( 2 S \cos \theta \, \partial_{\phi} - \sin \theta \, \partial_{\phi} \partial_{\theta}) Q \, , \label{Qee} \end{equation} This equation can be exactly solved by expanding in the basis of the harmonic functions $Y_{Kq}(\theta ,\phi )$. Nevertheless, in case of large photon number $S \gg 1$, we can perform the semiclassical expansion and find an approximate solution of (\ref{Qee}) by simply neglecting the term with the second derivative; i.e., reducing (\ref{Qee}) to the form \begin{equation} \partial_{t} Q \simeq - 2 \upchi S \cos \theta \, \partial_{\phi} Q \, , \end{equation} whose solution is \begin{equation}
Q(\theta ,\phi |t) = Q (\theta ,\phi - 2 \upchi S t\cos \theta ), \end{equation} which describes the evolution of every point of the initial distribution along the classical trajectory. Note, however, that this SU(2) approach is valid only for initial states with a fixed number of photons in each mode.
\section{Polarization tomography} \label{sec:tomo}
Quantum tomography is the attempt to infer an unknown quantum state from the distinct outcomes of a collection of measurements performed on a finite set of identical copies of the system~\cite{lnp:2004uq,Teo:2015aa}. What makes polarization special is that the density operator contains much more than polarization information; reconstruction of the polarization sector suffices for polarization tomography.
\begin{figure}
\caption{Typical setup for an efficient Stokes tomography. The
modes are combined on a polarizing beam splitter (PBS)
and interference between the modes can be adjusted with the
combination of a half-wave plate ($\lambda/2, \theta$) and
a quarter-wave plate ($\lambda/4, \phi$). The polarization
states are spatially separated into orthogonal
components that can be processed independently.}
\label{fig:Stokessetup}
\end{figure}
A basic setup for any polarization tomography is sketched in Fig.~\ref{fig:Stokessetup}. The state to be characterized is analyzed using a general polarization measurement apparatus consisting of a half-wave plate $(\lambda /2 ,\theta)$ followed by a quarter-wave plate ($\lambda /4, \phi$) and a polarizing beam splitter (PBS). In physical terms, the wave plates transform the input polarization, performing the operation \begin{equation} \label{eq:dissph} \hat{D}(\mathbf{n}) = e^{- i \phi \hat{S}_{3}} e^{- i \theta \hat{S}_{2}} = \exp \left [ \tfrac{1}{2} \theta (\hat{S}_{+} e^{-i \phi} - \hat{S}_{-} e^{i \phi} )\right] \, , \end{equation}
which represents a displacement over the sphere by the unit vector $\mathbf{n}$. This allows the measurement of different Stokes parameters by projecting onto the basis $| S, m \rangle$. The sum of the two output photocurrents directly gives the eigenvalue of $\hat{S}_{0}$, while their difference gives the observable $ \hat{S}_{\mathbf{n}}$~\cite{Marquardt:2007bh}. From a practical viewpoint there are two very different tomographic regimes.
\subsection{Discrete-variable regime} \label{sec:dv}
In the discrete-variable regime of single, or few, photons one is interested in two-mode states, which for many purposes can be regarded as spin systems. Consequently, the polarization states can be determined from correlation functions of different orders~\cite{White:1999fk,Kwiat:2000rw,James:2001vn,Thew:2002pd,Barbieri:2003ij,Bogdanov:2004bs,Moreva:2006fv,Barbieri:2007uq,Adamson:2010ys,Sansoni:2010zr,Altepeter:2011ly}. Given the small dimensionality of the Hilbert space involved, the state reconstruction can be readily performed.
Altogether, the setup yields the probability distribution for $\hat{S}_{\mathbf{n}}$, from which we can infer the moments \begin{equation} \mathfrak{m}_{\ell}^{(S)} (\mathbf{n} ) = \mathop{\mathrm{Tr}} \nolimits [ \hat{S}_{\mathbf{n}}^{\ell} \, \hat{\varrho}^{(S)} ] \, . \end{equation} For simplicity, we restrict ourselves to a single Fock layer with a fixed number of photons $S$, but everything can be smoothly extended to the whole polarization sector. The moments can be expressed in terms of the multipoles as \begin{equation}
\label{eq:lqh}
\mathfrak{m}_{\ell}^{(S)} (\mathbf{n} ) =
\mathop{\mathrm{Tr}} \nolimits [ \hat{S}_{3}^{\ell} \, \hat{D} (\mathbf{n} ) \,
\hat{\varrho}^{(S)} \, \hat{D}^\dagger (\mathbf{n}) ] =
\mathop{\mathrm{Tr}} \nolimits \left [ \hat{S}_{3}^{\ell} \, \sum_{K=0}^{2S} \sum_{q, q^{\prime}=-K}^{K}
\varrho_{Kq}^{(S)} \, D_{qq^\prime}^{K} (\mathbf{n} ) \,
\hat{T}_{Kq}^{(S)} \right ] \, , \end{equation} where $D_{qq^{\prime}}^{K} (\mathbf{n} ) $ is a Wigner rotation matrix. This trace can be computed using the machinery of angular momentum, and then the moments connect to the multipoles in quite an elegant way \begin{equation}
\label{eq:mrho}
\mathfrak{m}_{\ell}^{(S)} (\mathbf{n} ) = \sqrt{\frac{4\pi }{2 S + 1}}
\sum_{K=0}^{\ell} \sum_{q=-K}^{K} \varrho_{Kq}^{(S)} \,
f_{K\ell}^{(S)} \, Y_{Kq} ( \mathbf{n} ) \, , \end{equation} where $f_{K\ell}^{(S)} = \sum_{m} m^{\ell} C_{Sm,K0}^{Sm}$ ($K \leq \ell$). Given the orthonormality of $Y_{Kq} (\mathbf{n} )$, we can invert Eq.~(\ref{eq:mrho}) to obtain \begin{equation}
\varrho_{Kq}^{(S)} = \sqrt{\frac{2 S +1}{4\pi}}
\frac{1}{ f_{K\ell}^{(S)}} \int_{\mathcal{S}^{2}} d\mathbf{n} \; \mathfrak{m}_{\ell}^{(S)} (\mathbf{n} ) \; Y_{Kq}^{\ast} (\mathbf{n}) \, . \end{equation} The reconstruction of the polarization state thus requires the knowledge of \textit{all} the multipoles; this implies measuring \textit{all} the moments in \textit{all} directions, which proves to be very demanding~\cite{Muller:2012ys}.
\begin{figure}
\caption{Experimental scheme for the multipole reconstruction of a three-photon state. The detection of a single photon at detector D1 heralds the state. Fourfold coincidences with detectors D1, D2, D3, and D4 allow for the required projection measurements. The scheme also works for states with higher photon number.}
\label{fig:4fold}
\end{figure}
Nonetheless, one can approach the problem in a much more economical way. To determine the $K$th multipole, it is enough to perform a Stokes measurement in $2K+1$ independent directions. The proposal proceeds recursively: first, we measure the first-order moments along the three coordinate axes (or other equivalent ones) and reconstruct $ \varrho_{1q}^{(S)}$ from a linear inversion. The measurement of the second moments gives us \begin{equation}
\mathfrak{m}_{2} (\mathbf{n} ) = \frac{1}{2 S + 1} f_{02}^{(S)} + \sqrt{\frac{4\pi }{2S+1}} f^{(S)}_{22} \sum_{q=-2}^{2}
\varrho_{2q}^{(S)} \: Y_{2q} (\mathbf{n} ) \, , \end{equation} with $ f_{02}^{(S)} = \tfrac{1}{3}S(S+1)(2S+1)$, $f_{22}^{(S)} = \tfrac{4}{5!} (2S+1) \sqrt{S(2S-1)(S+1)(2S+3)}$, and $f_{12}^{(S)} =0$. We need to fix five optimal directions to invert that system. For example, we can choose the directions as those that maximize the minimum angle between the lines and thus in some sense spread the measurements as much as possible over the Poincar\'{e} sphere~\cite{Conway:1996ys}. The system can then be solved, yielding $ \varrho_{2q}^{(S)}$, and thus all the information needed to characterize the polarization to second order is known.
The process can be continued in this way up to any desired order. Choosing the appropriate directions is, in general, a tricky question if one wants to ensure linear independence, but it has been thoroughly studied~\cite{Hofmann:2004aa,Filippov:2010kx}. In practice, methods such as maximum likelihood estimation are much more efficient for performing that inversion~\cite{lnp:2004uq}. This strategy has been experimentally verified for photon pairs generated in spontaneous parametric down-conversion; i.e., the states $|1_{H},1_{V} \rangle$~\cite{Bjork:2012zr}. A sketch of the experimental setup is shown in Fig.~\ref{fig:4fold}.
A similar scheme has been discussed in Ref.~\cite{Schilling:2010aa}, but instead of Stokes moments, one measures $N$th-order intensity moments. In this way, an optimal measurement of arbitrary-order coherences between the two orthogonally polarized amplitudes can be achieved.
\begin{figure}
\caption{(Top left) Experimental setup for the characterization of two-mode fields. The polarization states are separated into orthogonal components followed by homodyne tomography. (Top right) Measured polarization sector (black blocks) for the case of a polarization squeezed state with coherent amplitude $\alpha = 1.13$ and squeezing parameter $r= 0.41$. (Bottom) Reconstructed Husimi functions $Q(S, \mathbf{n})$ of the Fock layers indicated in the insets for the same state.}
\label{fig:parsing}
\end{figure}
When a state spans a whole polarization sector, one should parse it into Fock layers. The most convenient way is to perform two-mode tomography by characterizing each mode after the polarizing beam splitter with homodyne tomography~\cite{Lvovsky:2009ys}. In this case, it is more convenient to work in the $\{ H, V\}$ basis, so the state to be characterized is of the form $| \Psi \rangle = | \psi_{H}\rangle \otimes | \psi_{V} \rangle$. In Ref.~\cite{Muller:2016aa} this parsing has been done for the case of $| \psi_{H} \rangle = \hat{S} (r ) \hat{D} (\alpha_{H}) | 0_{H}\rangle$ and $| \psi_{V} \rangle = \hat{S} (r ) | 0_{V} \rangle$, with the single-mode squeezing operator $\hat{S}(r)=\exp\left[r\left(\hat{a}^{\dagger2}-\hat{a}^2\right)/2\right]$ and displacement operator $\hat{D}(\alpha)=\exp (\alpha \hat{a}^\dagger-\alpha^{\ast} \hat{a})$. The results, shown in Fig.~\ref{fig:parsing}, confirm in a crystal-clear manner that the Husimi $Q$-function can be experimentally sampled. While these measurements are well known in the context of cold atoms~\cite{Evrard:2019aa}, their use in quantum optics is not widespread.
A similar method was recently proposed in Ref.~\cite{Bayraktar:2016aa}. This method is based on two polarization concepts: measurement projections onto the Stokes operator eigenvectors and division of the polarization multipole orders, characterized by $S$. The method assumes photon-number-resolving detectors, and measures not only the Stokes parameters for every $S$, but also the correlations between them.
\subsection{Continuous-variable regime}
The photodetection in the typical setup described in Fig.~\ref{fig:Stokessetup} can be modeled by the projection operators $\hat{\Pi}_{m}^{(S)} = |S, m \rangle \langle S, m |$ so that for each direction $\mathbf{n}$ we detect the tomographic probabilities \begin{equation}
\label{cojo}
w^{(S)}_{m} (\mathbf{n} ) =
\mathop{\mathrm{Tr}} \nolimits [ \hat{\varrho} \, \hat{\Pi}_m^{(S)} (\mathbf{n} ) ] =
\mathop{\mathrm{Tr}} \nolimits [ \hat{\varrho} \; \hat{D} (\mathbf{n} ) \, \hat{\Pi}_m^{(S)}
\, \hat{D}^\dagger ( \mathbf{n} ) ] \, , \end{equation} which correspond to the probabilities of simultaneously detecting $n_H = S+m$ photons in the horizontal mode and $n_V= S-m$ photons in the vertical one for each direction $\mathbf{n}$. When the total number of photons is not measured and only the difference $m$ is observed, the available projections reduce to
$ \hat{\Pi}_{m} = \sum_{S = |m|}^\infty |S, m \rangle \langle S, m |$.
The reconstruction in each invariant subspace $\mathcal{H}_{S}$ can now be carried out exactly since each subspace is essentially equivalent to a spin $S$~\cite{Brif:1999kx,Amiet:1999vn,DAriano:2003ys,Klimov:2002zr,Karassiov:2004xw,Karassiov:2005ss}. One can proceed in a variety of ways, but perhaps the simplest one is to look for an integral representation of the tomograms~\cite{Marquardt:2007bh} \begin{equation}
\label{eq:intreptom}
w_{m}^{(S)} (\mathbf{n} ) = \frac{1}{2 \pi} \int_{0}^{2\pi}
d\omega \, \mathop{\mathrm{Tr}} \nolimits [ \hat{\varrho}^{(S)} \,
\exp ( i \omega \hat{\mathbf{S}}_{\mathbf{n}} ) ] \, e^{- i m \omega} \, . \end{equation} The probabilities appear as the Fourier transform of the characteristic function for the observable $\hat{\mathbf{S}}_{\mathbf{n}}$. After some manipulations, we find that \begin{equation}
\label{uf}
\hat{\varrho}^{(S)} = \frac{1}{4 \pi} \sum_{m=-S}^{S}
\int_{\mathcal{S}_2} d \mathbf{n}^\prime \
w_m^{(S)} (\mathbf{n}^\prime) \,
\mathcal{K} ( \hat{\mathbf{S}}_{\mathbf{n}^\prime} - m ) \, , \end{equation} where the kernel $\mathcal{K} (x) $ is \begin{equation}
\label{kersing}
\mathcal{K} ( x ) = \frac{ 2 S +1}{4 \pi^2} \int_{0}^{2\pi} d \omega
\; \sin^2 (\omega / 2 ) \, e^{-i \omega x} \, . \end{equation} Although Eq.~(\ref{uf}) is a formal solution, it is handier to map this density matrix onto the corresponding Husimi $Q$-function. For that purpose, we only need to calculate the matrix elements of the kernel $\mathcal{K} (x)$. The most direct way to proceed is to note that \begin{equation}
\label{evalK}
\langle S, \mathbf{n} |
\mathcal{K} ( \hat{\mathbf{S}}_{\mathbf{n}^\prime} - m ) | S, \mathbf{n} \rangle = \frac{2S+1}{4 \pi^2} \int_{0}^{2\pi} d\omega \;
\sin^2 ( \omega/2 ) \; e^{i m \omega}
\left [ \cos (\omega /2 ) - i \sin ( \omega / 2 ) \cos \mathcal{X} \right ]^{2S} \, , \end{equation} where $\cos \mathcal{X} = \mathbf{n} \cdot \mathbf{n}^\prime$. In the continuous-variable regime we take $S \gg~1$, so the integral in Eq.~(\ref{evalK}) reduces to $d^2 \delta (x) /dx^2$ evaluated at $x = S \, \mathbf{n} \cdot \mathbf{n}^\prime - m$. Now, $m$ can be taken as a quasicontinuous variable, and we integrate by parts to obtain \begin{equation}
Q (S, \mathbf{n} ) = \frac{2S+1}{4\pi^2} \int_{-\infty }^{\infty} \!
dm \int_{\mathcal{S}_2} \! d\mathbf{n}^\prime \, \frac{d^2 w_{m}^{(S)}
( \mathbf{n})}{dm^2} \, \delta ( S \, \mathbf{n} \cdot
\mathbf{n}^\prime - m ) \, . \end{equation} This means that, in the limit of large photon numbers, the inversion reduces to an inverse Radon transform~\cite{Deans:1983aa} of the measured tomograms, which greatly simplifies the computation of $Q(S,\mathbf{n})$.
In Fig.~\ref{fig:pol3D} we show an isocontour surface of $Q(S,\mathbf{n} )= \mathrm{constant}$ in the Poincaré space having $S_1$, $S_2$ and $S_3$ as the orthogonal axes for bright squeezed states, as described in Ref.~\cite{Marquardt:2007bh}. The ellipsoidal shape of the state is clearly visible. The antisqueezed direction of the ellipsoid is dominated by excess noise. We also sketch density plots of the projections on the coordinate planes of the previous Husimi $Q$-function. The projections on the planes $S_1$-$S_2$ and $S_2$-$S_3$ show an additional spreading of the squeezed state in the $S_3$ direction caused by the imperfect polarization contrast in the measurement setup that mixes some of the antisqueezing in the $S_3$ direction. By summing over all the values of $S$ we obtain the total $Q (\mathbf{n})$ function, which is a probability distribution over the Poincar\'e unit sphere and is properly normalized.
\begin{figure}
\caption{(Left) Isocontour surface (of the level $1/e$ from the
maximum) of the Husimi function $Q (S, \mathbf{n} )$ for a
bright squeezed state. We also show cross sections of the $Q$
function through the three coordinate planes. In blue we show the
isotropic section for a coherent state, which we scale to unity for
all the plots. (Right) Reconstructed total Husimi function
$Q (\mathbf{n})$ obtained by summing over all the (continuous) Fock layers.}
\label{fig:pol3D}
\end{figure}
The Radon reconstruction requires a large set of measured data to get a reasonably accurate representation of the state. There are two main reasons for this: integrals are approximated by finite sums and the kernel (\ref{kersing}) is singular, so some \emph{ad hoc} filtering of the raw data is needed. Acquiring such large data sets may be unwise, for they demand long measurement times. Ensuring the proper stability of the setup is thus essential and might be difficult depending on the quantum state being measured. This limitation may be circumvented by adopting a statistically motivated method, such as maximum likelihood estimation~\cite{lnp:2004uq}.
For a broad class of states, the registered tomograms have Gaussian statistics, which seems to call for a Gaussian maximum likelihood reconstruction~\cite{Rehacek:2009aa}. The Gaussianity is used as prior information about the signal, which helps to drastically reduce the number of free parameters, as experimentally verified in Ref.~\cite{Muller:2012ys}.
\section{Polarization measures} \label{sec:desiderata}
In classical optics, the Stokes parameters and the classical degree of polarization, as defined in Eq.~(\ref{eq:defPcl}), are sufficient for the characterization of most beams, as they are Gaussian states of light. Such a description in terms of first-order moments of Stokes variables can be naively extended to the quantum domain through \begin{equation}
\mathbb{P}_{s} = \frac{\lvert \langle \hat{\mathbf{S}} \rangle \rvert}
{\langle\hat{S}_0 \rangle} =
\frac{\sqrt{\langle\hat{S}_1 \rangle^{2}+ \langle\hat{S}_2\rangle^{2}+
\langle\hat{S}_3 \rangle^{2}}}{\langle\hat{S}_0\rangle} \, .
\label{eq:Ps} \end{equation} We refer to this definition as the semiclassical degree of polarization. It has the advantage that it can be measured with a traditional polarimeter, such as those described in Sect.~\ref{subsec:poltrans}. Another commonly used method is the scrambling method, in which a polarization scrambler is placed in front of a polarizer and a detector. The difference between the maximum and minimum power levels $P_{\mathrm{max}} - P_{\mathrm{min}}$ is precisely the total polarized portion. The unpolarized portion, on the other hand, is unaffected by the scrambler, except for a global reduction by a factor $1/2$. Because the contribution of the polarized portion is zero at $P_{\mathrm{min}}$, then $P_{\mathrm{min}} = P_{\mathrm{unpol}}/2$ and thus \begin{equation} \mathbb{P}_{s} = \frac{P_{\mathrm{max}} - P_{\mathrm{min}}} {P_{\mathrm{max}} + P_{\mathrm{min}}} \, . \end{equation} Therefore, $ \mathbb{P}_{s}$ can be determined by simply measuring the maximum and minimum power levels at the detector.
However, we might expect $ \mathbb{P}_{s}$ to be incomplete. This is confirmed when examining the two extreme situations $ \mathbb{P}_{s} = 1$ and $\mathbb{P}_{s} = 0$, representing fully polarized and fully unpolarized light, respectively. For example, all SU(2) coherent states have $ \mathbb{P}_{s} = 1$ and this holds true for any superposition of them in different polarization sectors, such as, e.g., the Glauber coherent states $|\alpha_{+}, \alpha_{-} \rangle$, as well as convex combinations thereof \cite{Goldberg:2017}. But this means that $ \mathbb{P}_{s} = 1$ for states arbitrarily close to the two-mode vacuum state $|0_{+},0_{-}\rangle$, which is a strange result.
On the other hand, there are states with $ \mathbb{P}_{s} = 0$ that can hardly be regarded as unpolarized. This gives rise to the phenomenon of hidden polarization. As heralded before, one example of hidden polarization is the state $|1_{H}, 1_{V} \rangle $, which is $|1,0\rangle$ in the angular momentum basis. A rotation by $45^{\circ}$ around its axis of propagation transforms this state into $[(\sqrt{2}+i)|1,-1\rangle+(\sqrt{2}-i)|1,1\rangle]/\sqrt{6}$, which is orthogonal to $|1,0\rangle$, and thus perfectly distinguishable from the unrotated state. However, according to the semiclassical degree of polarization, this state is unpolarized, which implies an invariance when undergoing rotations. This is due to the fact that the rotated state cannot be distinguished from the unrotated state by any linear combination of the Stokes operators, as this requires higher-order field correlation measurements. For this reason, perhaps it would be better to say that such states have higher-order polarization.
\subsection{\emph{Desiderata} for polarization measures}
From the previous discussion one can conclude that an appropriate measure of polarization assigning a number $\mathbb{P} (\hat{\varrho})$ to the density operator $\hat{\varrho}$ must satisfy some requirements that capture the properties of the Stokes operators and the polarization transformations. Before proceeding any further, we stress that a physically meaningful reference for any degree of polarization is provided by unpolarized light. Indeed, fully unpolarized light states can be suitably defined as the states invariant under any polarization transformation. This demands that the whole probability distribution be SU(2) invariant~\cite{Prakash:1971fr,Agarwal:1971zr}; that is, \begin{equation}
\label{eq:2}
[\hat{\varrho}_{\mathrm{unpol}}, \hat{\mathbf{S}} ] = 0 \, , \end{equation} wherefrom it follows that in every Fock layer the state is maximally mixed~\cite{Soderholm:2001ay} \begin{equation}
\label{eq:unp}
\hat{\varrho}_{\mathrm{unpol}}^{(S)} =\frac{1}{2S+1} \,
\hat{\leavevmode\hbox{\small1\normalsize\kern-.33em1}}_{2S+1} \, . \end{equation} We thus require~\cite{Bjork:2010rt}
\begin{enumerate}[label={C\arabic*.--}] \item $\mathbb{P}(\hat{\varrho})=0$ iff $\hat{\varrho}$ is
unpolarized.
\item
$\mathbb{P}(\hat{\varrho})= \mathbb{P}(\hat{U} \,
\hat{\varrho} \, \hat{U}^\dagger)$ for any unitary polarization
transformation $\hat{U}$.
\item $\mathbb{P}(\hat{\varrho})$ should not depend on the coherences
between different Fock layers. \end{enumerate}
Condition C1 rules out various possibilities such as the semiclassical degree of polarization, for this degree considers as unpolarized states that do have higher-order polarization correlations, including states that are fully polarized in different directions in each Fock layer~\cite{Goldberg:2017}. It similarly rules out the proposed second-order measure~\cite{AlodjantsArakelian1999} \begin{equation}
\mathbb{P}_{s,2} =
\frac{\lvert \langle \hat{\mathbf{S}} \rangle \rvert}
{\sqrt{\langle\hat{\mathbf{S}}^2 \rangle}} =
\sqrt{1 - \frac{\var{\hat{\mathbf{S}}}}{\langle\hat{\mathbf{S}}^2 \rangle}} \, . \end{equation} This condition also precludes the definition of a degree of polarization solely in terms of the purity of a state $\mathcal{P}=\mathop{\mathrm{Tr}} \nolimits (\hat{\varrho}^2)$, as unpolarized quantum states span the whole purity scale---there are examples of unpolarized states among the pure states (including but not limited to the two-mode vacuum), and there are also unpolarized states that are partially or maximally mixed.
Requirement C2 is a statement of SU(2) invariance, which is a desirable characteristic of any \emph{bona fide} measure. The second-order measure does not fulfill C2 either; it can be made SU(2)-invariant by~\cite{Klimov:2010uq} \begin{equation}
\mathbb{P}_{s,2}^{\prime} = \sqrt{1- 3 \inf_{\mathbf{n}}
\frac{\var{\hat{\mathbf{S}}\cdot\mathbf{n}}}{\langle\hat{\mathbf{S}}^2 \rangle}} \, , \end{equation} but this still does not capture higher-order polarization correlations.
The C3 requirement is also well motivated, since polarization transformations do not produce coherences between Fock layers; the Stokes operators are photon-number preserving, so any measurement of a linear combination of these operators will be independent of any coherences between Fock layers. We can rephrase C3 in a more quantitative way. To this end, let us introduce the quantum channel \begin{equation}
\mathcal{R} [ \hat{\varrho} ] = \sum_{S=0}^\infty
\hat{\leavevmode\hbox{\small1\normalsize\kern-.33em1}}_S \, \hat{\varrho} \, \hat{\leavevmode\hbox{\small1\normalsize\kern-.33em1}}_S =
\hat{\varrho}_{\mathrm{pol}} \, ,
\label{eq:chan} \end{equation} which can be viewed as a randomization of the phases between superpositions of states in different Fock layers. The states $\hat{\varrho}$ and $\hat{\varrho}_{\mathrm{pol}}$ cannot be distinguished from each other in polarization measurements, as discussed in Sec.~\ref{sec:polsec}. So, we can reformulate C3 in the equivalent form
\indent C3$^{\prime}.$-- $\mathbb{P}(\hat{\varrho}) = \mathbb{P}(\mathcal{R} [\hat{\varrho}]) = \mathbb{P} ( \hat{\varrho}_{\mathrm{pol}})$.
A conventional condition is also that $0\leq \mathbb{P}(\hat{\varrho})\leq 1$. However, some candidate polarization measures, such as the entropy, are only positive definite ($0 \leq \mathcal{S} < \infty$). In these cases, the ordering of the states is usually more important than the numerical value of the measure. For this reason, a remedy is the normalization $\mathbb{P}=\mathcal{S}/(1+\mathcal{S})$, which is a rescaling that keeps the ordering of the states intact.
Apart from the basic conditions C1--C3, the measure should be operational, easily measurable, and easy to compute. These conditions are, however, difficult to meet at the same time.
In this section, we explore several proposals for a quantum degree of polarization. It is important to stress, though, that these measures induce different orderings between the states, as they stem from different concepts.
\subsection{Distance-based measures} \label{sec:distances}
Quantum polarization can be quantified in terms of distance measures. The main idea is to define a degree of polarization as the shortest distance between a state and the set $\mathcal{U}$ of unpolarized states, as given in Eq.~(\ref{eq:unp}). Other notions such as nonclassicality~\cite{Hillery:1987aa,Dodonov:2000aa,Marian:2002aa}, entanglement~\cite{Vedral:1997aa}, localization~\cite{Maassen:1988cr,Anderson:1993nx,Gnutzmann:2001aa}, and quantum information~\cite{Schumacher:1995aa,Brukner:1999aa,Schack:1999aa,Childs:2000aa, Gilchrist:2005aa} have been systematically formulated in terms of distances to a given set of states. In a way, the distance determines the distinguishability of a state with respect to that set.
Therefore, it seems sensible to quantify the degree of polarization by \begin{equation}
\label{DoP}
\mathbb{P} (\hat{\varrho}) \propto
\inf_{\hat{\sigma} \in \mathcal{U}}
D(\hat{\varrho} \mid \hat{\sigma} ) \, , \end{equation} where $D(\hat{\varrho} \mid \hat{\sigma} )$ is any measure of distance (not necessarily a metric) between the density matrices $\hat{\varrho}$ and $\hat{\sigma}$ and $\mathcal{U}$ is the set of unpolarized states, such that $\mathbb{P} (\hat{\varrho})$ satisfies the requirements C1--C3.
There are numerous nontrivial choices for $D(\hat{\varrho} \mid \hat{\sigma})$ (by nontrivial we mean that the choice is not a simple scale transformation of any other distance measure). None of them could be said to be more important than any other \textit{a priori}; the significance of each candidate must be evaluated based on its physical implications in the particular context. For the case of polarization several distances may be considered~\cite{Klimov:2005aa,Sanchez-Soto:2006}, such as the Hilbert-Schmidt, trace, Bures, and Chernoff~\cite{Ghiu:2010aa} distances: \begin{equation} \begin{array}{lll}
& \displaystyle
\mathbb{P}_{\mathrm{HS}}(\hat{\varrho}) =
\inf_{\hat{\sigma}\in \mathcal{U}} \mathop{\mathrm{Tr}} \nolimits[ (\hat{\varrho}-\hat{\sigma})^2]\, ,
\qquad \qquad
& \displaystyle
\mathbb{P}_{\mathrm{T}}(\hat{\varrho}) =
\inf_{\hat{\sigma}\in \mathcal{U}} \mathop{\mathrm{Tr}} \nolimits[ \lvert \hat{\varrho}-\hat{\sigma} \rvert] \, , \\
& & \\
& \displaystyle
\mathbb{P}_{\mathrm{B}}(\hat{\varrho} ) = 1-\sup_{\hat{\sigma} \in \mathcal{U}}\sqrt{F(\hat{\varrho} \mid \hat{\sigma})}\, ,
\qquad \qquad
& \displaystyle
\mathbb{P}_{\mathrm{C}}(\hat{\varrho}) =
1-\sup_{\hat{\sigma} \in \mathcal{U}}\left[\inf_{t\in[0,1]}\mathop{\mathrm{Tr}} \nolimits(\hat{\varrho}^t\hat{\sigma}^{1-t})\right] \, , \end{array} \end{equation} where the infimum in the Chernoff distance is taken over a function that is continuous with respect to $t$, and the fidelity $F$ in the Bures distance is~\cite{Uhlmann:1976aa,Jozsa:1994aa,Alberti:2003aa} \begin{equation}
F(\hat{\varrho} \mid \hat{\sigma})=
\{ \mathop{\mathrm{Tr}} \nolimits[(\hat{\sigma}^{1/2}\hat{\varrho}\hat{\sigma}^{1/2})^{1/2}]\}^2 \, .
\label{eq:fidel} \end{equation} As they stand, these degrees do not satisfy the requirement C3; i.e., these measures are sensitive to coherences between different Fock layers. One can bypass this drawback by simply defining the distance not to the state, but to its block diagonal form or polarization sector; that is, $\mathbb{P}_{X}(\hat{\varrho}) = \mathbb{P}_X ( \mathcal{R} [\hat{\varrho}])$ for $ X \in \{\mathrm{HS,T,B,C}\}$.
Since $\mathcal{R} [\hat{\varrho}]$ and $\hat{\sigma}$ commute, we find the following general expressions \begin{equation} \begin{array}{lll}
& \displaystyle
\mathbb{P}_{\mathrm{HS}}(\hat{\varrho}) =
\sum_{S=0}^\infty w_{S}^{2}
\left(\xi_S^{(2)}-\frac{1}{2S+1}\right) \, ,
\qquad
& \displaystyle
\mathbb{P}_{\mathrm{T}}(\hat{\varrho}) =
\sum_{S=0}^\infty w_{S} \left(\sum_{n=0}^{M_{S}}\lambda_{S,n}-
\frac{M_{S}+1}{2S+1}\right)
\, , \\
& & \\
& \displaystyle
\mathbb{P}_{\mathrm{B}}(\hat{\varrho}) =
1-\left[\sum_{S=0}^\infty \frac{w_S}{2S+1}
\left(\xi_S^{(1/2)}\right)^2\right]^{1/2}
\, ,
\qquad
& \displaystyle
\mathbb{P}_{\mathrm{C}} (\hat{\varrho}) = 1
-\inf_{t\in [0,1]}\left[\sum_{S=0}^\infty w_{S} \;
(2S+1)^{1-1/t} \left (\xi_S^{(t)} \right )^{1/t}\right]^{t} \, ,
\label{eq:Pdismeas} \end{array} \end{equation} where $w_{S}$ encompasses the photon statistics, $\lambda_{S,n}$ are the eigenvalues of $\hat{\varrho}^{(S)}$ taken in decreasing order, $\xi_S^{(t)}=\sum_{n=0}^{2S} \lambda_{S,n}^{t}$, and $M_{S}$ is the largest integer satisfying $\lambda_{S,M_{S}}\geq 1/(2S+1)$. As we can see, all of them require the knowledge of the complete polarization sector.
For states living in the Fock layer with spin $S$, the maximum polarization is reached for pure states $| \Psi^{(S)}\rangle$, with values \begin{equation}
\mathbb{P}_{\mathrm{HS}}( |\Psi^{(S)}\rangle ) =
\mathbb{P}_{\mathrm{T}}( |\Psi^{(S)}\rangle ) =
\mathbb{P}_{\mathrm{C}}( |\Psi^{(S)}\rangle ) =
\frac{S}{S+1/2} \, ,
\qquad
\mathbb{P}_{\mathrm{B}}( |\Psi^{(S)}\rangle ) =
1- \frac{1}{\sqrt{2 S+1}} \, , \end{equation} so all of them tend to unity when $S$ is sufficiently large. We note, in passing, that $\mathbb{P}_{\mathrm{B}}(\hat{\varrho}) \leq \mathbb{P}_{\mathrm{C}}(\hat{\varrho})$.
Likewise, for quadrature coherent states $| \alpha_{+} , \alpha_{-}\rangle$, with average number of photons $\bar{N}$, we have \begin{equation}
\mathbb{P}_{\mathrm{HS}} = 1 -
\frac{I_{1} (2 \bar{N})}{\bar{N}}
e^{-2 \bar{N}} \simeq
1- \frac{1}{2\sqrt{\pi } \bar{N}^{3/2}} \, , \end{equation} where $I_{n} (x)$ is the modified Bessel function~\cite{NIST:DLMF} and the last expression holds when $\bar{N} \gg 1$. This again tends to unity, but with a scaling of the form $\bar{N}^{-3/2}$. The same scaling can be found for the Bures and Chernoff degrees of polarization.
It is possible to find the states that, for a given average number of photons, reach the maximal degree of polarization. Using a numerical optimization procedure, one discovers that such an optimal value is~\cite{Sanchez-Soto:2007aa} \begin{equation}
\label{pq2}
\mathbb{P}_{\mathrm{HS}}^{\mathrm{opt}} = 1 -
\frac{3}{(2 \bar{N} +1 ) ( 2 \bar{N} + 3)}
\sim 1 - \frac{3}{4 \bar{N}^2} \, , \end{equation} with similar scalings for the other degrees. To an excellent approximation, a highly squeezed vacuum can be taken as maximally polarized.
\subsection{Phase-space measures}
The ideas discussed in the previous subsection can be straightforwardly translated into the phase-space picture. The degree of polarization of a state can be defined as the distance between its Husimi $Q$-function and the corresponding one for unpolarized light. In this context, unpolarized light is defined by a uniform distribution~\cite{Luis:2007aa} \begin{equation}
Q_{\mathrm{unpol}} (\mathbf{n} ) = 1 \, , \end{equation} which agrees with (\ref{eq:unp}). One can then define the distance~\cite{Luis:2002ul,Luis:2007kx} \begin{equation}
D_{Q}(\hat{\varrho}) = \frac{1}{4\pi}
\int_{\mathcal{S}_{2}} d\mathbf{n} \; [ Q(\mathbf{n} ) - 1 ]^2 =
\frac{1}{4\pi} \left [ \int_{\mathcal{S}_{2}} d\mathbf{n} \;
Q^2(\mathbf{n}) \right ] - 1 \, .
\label{eq:DQ} \end{equation} The relevant term on the right-hand side of (\ref{eq:DQ}) can be expressed in terms of the multipoles as \begin{equation}
\int_{\mathcal{S}_{2}} d\mathbf{n} \;
Q^2(\mathbf{n}) = 4\pi \sum_{S=0}^\infty
(2S+1)^2 \sum_{K=0}^{2S} \lvert C_{SS,K0}^{SS} \rvert^2
\sum_{q=-K}^K \lvert \varrho_{Kq}^{(S)} \rvert^2 \, ,
\label{eq:QsquaredMultipoles} \end{equation} and regarded as a particular instance of a general class of measures of localization~\cite{Maassen:1988cr,Anderson:1993nx,Gnutzmann:2001aa} \begin{equation} M_{r} = \left [
\int_{\mathcal{S}_{2}} d\mathbf{n} \; Q^{1+r} ( \mathbf{n} ) \right ]^{1/r}\, , \end{equation} whose mathematical properties have been studied in great detail~\cite{Hardy:1952aa}. Since \begin{equation} \lim_{r \rightarrow 0} M_r =
\int_{\mathcal{S}_{2}} d\mathbf{n} \; Q ( \mathbf{n} ) \, \ln Q ( \mathbf{n} ) \, , \end{equation} they include the Wehrl entropy~\cite{Wehrl:1978aa,Wehrl:1991aa} as a limiting case.
In physical terms, the spread of the $Q$-function gives an indication of the polarization properties of the state. For states that are highly spread over the unit sphere the degree of polarization is small, as in the case of unpolarized states. For states whose $Q$-functions are highly peaked around some point in the sphere, the degree of polarization is expected to be high. Note that, as per Eq.~(\ref{eq:QsquaredMultipoles}), $D_{Q} (\hat{\varrho})$ involves \emph{all} the multipoles and its experimental determination thus requires a full tomography.
We briefly mention that the Wigner function has been used as a measure of the area occupied by a quantum state in the case of continuous variables~\cite{Heller:1987dq}. However, in the case of the sphere, the integral of $W^{2} (\mathbf{n})$ simply gives the purity~\cite{Varilly:1989ud}. This is one of the compelling reasons to use the Husimi $Q$-function instead of the Wigner function throughout this review when analyzing polarization.
Since $D_{Q}$ ranges from 0 to $\infty$, the associated degree of polarization, according to our previous discussion, is \begin{equation}
\mathbb{P}_{Q} (\hat{\varrho}) =\frac{D_Q (\hat{\varrho})}
{D_Q(\hat{\varrho})+1}\, .
\label{eq:DQnor} \end{equation} The only states with $\mathbb{P}_{Q} = 0$ are the fully unpolarized states. In contrast to the classical definition, there are states with $\langle \hat{\mathbf{S}} \rangle = 0$ and $\mathbb{P}_Q\neq 0$. This occurs because $\mathbb{P}_Q$ is a function of all the moments of the Stokes operators and not only of the first ones. In addition, the definition of $\mathbb{P}_Q$ is invariant under SU(2) transformations applied to the field state. This means that the degree of polarization depends on the form of the $Q$-function but not on its orientation on the Poincar\'e sphere.
For the SU(2) coherent state $|S, \mathbf{n} \rangle$ the $Q$-function is given in Eq.~(\ref{eq:QCS}). If, for definiteness, we take $\mathbf{n}$ to point to the north pole, the associated degree is \begin{equation}
\mathbb{P}_{Q} (|S, \mathbf{n} \rangle) = \left (
\frac{2S}{2S+1} \right )^2 {\xrightarrow[S \gg 1]{}} 1 \, . \end{equation} In fact, the maximally polarized states according to this degree of polarization are the SU(2) coherent states. This is because these states have their $Q$-functions highly peaked at some point of the sphere, making them minimum-uncertainty polarization states and states that are maximally different from those with a uniformly-distributed $Q$-function.
\subsection{Operational approaches}
It is clear that a state that is not invariant under all possible linear polarization transformations has a finite degree of quantum polarization. Therefore, we can use the distinguishability of a state under all possible polarization transformations as a degree of polarization. If we take $\mathop{\mathrm{Tr}} \nolimits(\hat{\varrho}_1 \hat{\varrho}_2)$ as the distinguishability for mixed states, one could define~\cite{Bjork:2000aa,Bjork:2002aa} \begin{equation}
\mathbb{P}_\mathrm{d} (\hat{\varrho})=
\left [1- \inf_{\hat{U} \in \mathrm{SU(2)}} \;\;
\sum_{S=0}^\infty w_{S} \; \mathop{\mathrm{Tr}} \nolimits \left (\hat{\varrho}^{(S)} \;
\hat{U}\, \hat{\varrho}^{(S)} \, \hat{U}^\dagger \right )
\right]^{1/2} \,.
\label{eq:dis} \end{equation} This is the minimal averaged overlap between a state and all of its SU(2) transformed partners. Hence, it gives the maximum visibility one can achieve by using a polarization interferometer~\cite{Bjork:2014aa}. The drawback of this definition is that it is not trivial to determine the degree of polarization for a given state, since there is, in general, no obvious way to find the $\hat{U}$ maximizing the polarimetric visibility. This measure of polarization is different from the previous ones because it assigns degree of polarization unity to states with a finite number of excitations. It has been proven~\cite{Sehat:2005wd} that all pure states with an odd number of photons reach degree of polarization unity under this definition. In particular, for single-photon states the definition implies that any pure state is fully polarized, since a transformation $\hat{U}$ can transform the state to an orthogonal state, located diametrically opposite on the sphere. One can conjecture that pure states with an even number of excitations, except the two-mode vacuum, are also fully polarized according to this measure of polarization, but there is still no proof for this.
The last speculation makes it tempting to define a degree of polarization such that the pure states with a given number of excitations are maximally polarized; this is a degree of polarization in terms of purities in every excitation manifold~\cite{Bjork:2010rt} \begin{equation}
\mathbb{P}_\mathrm{p} ( \hat{\varrho})=
\sum_{S= \frac{1}{2}}^\infty \frac{1}{2S}
[(2S+1) \, \mathop{\mathrm{Tr}} \nolimits( \hat{\varrho}^{(S)\; 2}) -1 ] \, ,
\end{equation}
with the additional definition $\mathbb{P}_\mathrm{p}\left(|0,0\rangle\right)=0$. Again, the maximally polarized states are the pure states in every excitation manifold. The only property of the Stokes operators used in this measure is their definition as a direct sum over the excitation manifolds with distinct number of photons. As a consequence, this measure quantifies a distinguishability under general energy-preserving unitary transformations rather than the distinguishability under the most concrete case of polarization transformations. This measure also critically relies on the conjecture that the pure states in every excitation manifold are fully polarized. Like many of the other polarization degrees, an experimental determination of $\mathbb{P}_\mathrm{p}$ is difficult, as purities require polarization tomography to be assessed.
\subsection{Higher-order degrees of polarization}
At this stage, it should be clear that many of the difficulties in defining a proper degree of polarization can be traced to classical polarization being built on first-order moments of the Stokes variables; whereas, higher-order moments can play a major role for quantum fields. A full understanding of the subtle polarization effects arising in the quantum realm requires a characterization of higher-order polarization fluctuations, as is done in coherence theory, where one needs, in general, a hierarchy of correlation functions~\cite{Mandel:1995qy}.
As we have seen, multipoles contain the higher-order fluctuations information, sorted in the proper way. This suggests looking at the cumulative distributions~\cite{Hoz:2013om} \begin{equation}
\label{eq:cum}
\mathcal{A}^{(S)}_{M} = \sum_{K = 1}^{M} \sum_{q=-K}^{K}
\lvert \varrho_{Kq}^{(S)} \rvert^{2} \, , \end{equation} which convey all of the information up to order $M$. Its experimental determination requires then reconstructing the corresponding multipoles, which can be accomplished using the methods discussed in Sec.~\ref{sec:tomo}.
\begin{figure}
\caption{(Color online) Degree of polarization $\mathbb{P}_{M}$ as a
function of the multipole order $M$ for the state $|S, 0 \rangle$
(left) and a quadrature coherent state $| \alpha_{+}, \alpha_{-}
\rangle$ with average number of photons $\bar{N} = \lvert \alpha_{+} \rvert^{2} + \lvert \alpha_{-} \rvert^{2}$ (right).
}
\label{fig:PK}
\end{figure}
Note that the monopole $\varrho_{00}^{(S)}$, being a trivial constant, is not included in the sum. We know from probability theory that cumulative distributions have remarkable properties~\cite{Jaynes:2003uq}. Moreover, our previous discussion in Section~\ref{sec:tomo} provides clear evidence that to obtain the $K$th multipole, one needs to determine all of the previous moments. As with any cumulative distribution, $ \mathcal{A}^{(S)}_{M} $ is a monotonic, nondecreasing function of the multipole order, with $\mathcal{A}^{(S)}_{2S}$ being the state purity minus the monopolar contribution.
For SU(2) coherent states, we get \begin{equation}
\label{eq:Aksu2}
\mathcal{A}^{(S)}_{M,\mathrm{SU(2)}} = \frac{2S}{2S +1} -
\frac{[\Gamma (2S + 1)]^{2}}{\Gamma (2S-M) \Gamma (2S + M +2)} \, , \end{equation} and it has been proven~\cite{Bjork:2015aa} that this value is indeed maximal for every $M$ for all states in the subspace $\mathcal{H}_{S}$. This strongly suggests the definition of a hierarchy of degrees of polarization that quantify the polarization information when measuring the Stokes operators up to order $M$: \begin{equation}
\label{eq:PK}
\mathbb{P}_{M} =
\sum_{S} w_{S} \; \left (
\frac{\mathcal{A}^{(S)}_{M}}{\mathcal{A}^{(S)}_{M,\mathrm{SU(2)}}} \right )^{1/2} \, . \end{equation} According to (\ref{eq:PK}), $\mathbb{P}_{M}= 1$, regardless of $M$, for SU(2) coherent states, which is compatible with the idea that they are the most localized states over the sphere.
For the significant case of the first-order degree ($M=1$), Eq.~(\ref{eq:PK}) gives \begin{equation}
\mathbb{P}_{1}= \sum_{S} w_{S}
\frac{ \sqrt{\langle \hat{S}_{1} \rangle^2+ \langle \hat{S}_{2} \rangle^2 +
\langle \hat{S}_{3} \rangle^2}}{\langle \hat{S}_{0} \rangle} \, , \end{equation} and the average values are calculated in every Fock layer $S$. Interestingly, this definition has recently been proposed as a way to circumvent the shortcomings of the standard degree of polarization~\cite{Kothe:2013fk}; in our approach, it emerges quite naturally.
For quadrature coherent states the result reads \begin{equation}
\mathbb{P}_{M} (|\alpha_{+}, \alpha_{-} \rangle ) = \sum _{S=M/2}^{\infty }
\frac{e^{-\bar{N}}\bar{N}^{2S}}{(2S)!} \simeq
\frac{1}{2} \text{erfc}
\left(\frac{M-\bar{N}}{\sqrt{2 \bar{N}}} \right ) \, , \end{equation}
where $\bar{N}$ is the average number of photons and the approximation in terms of the complementary error function is valid for $\bar{N} \gg 1$. From the properties of this function, we can estimate that the multipoles that contribute effectively are, roughly speaking, from 1 to $\bar{N}$. In Fig.~\ref{fig:PK} we plot $\mathbb{P}_{M}$ for the states $|S, 0 \rangle$ and $| \alpha_{+}, \alpha_{-} \rangle$.
For $M=2$, this gives \begin{equation}
\mathbb{P}_{2} (|\alpha_{+}, \alpha_{-} \rangle ) =
1 - (1+ \bar{N}) \exp (- \bar{N}) \, , \end{equation} which tends to unity when the average number of photons $\bar{N}$ becomes large enough, in agreement with previous second-order approaches~\cite{Klimov:2010uq,Singh:2013aa}.
In principle, one would naively expect that all the approaches discussed in this Section should be compatible with classical polarization, much in the spirit of the correspondence principle. Nevertheless, potential applications of quantum polarization seem to involve states without classical analog. In addition, inconsistencies and difficulties can be expected when characterizing something as complicated as quantum polarization by a single number. From this viewpoint, we believe that the hierarchy (\ref{eq:PK}) is the most proper and sensible way to deal with the problem.
\section{Polarized light and quantum complementarity} \label{sec:complementarity}
\subsection{SU(2) complementarity}
In the classical domain, the relative phase between the modes determines the shape of the polarization ellipse. It is thus natural to try a description of quantum polarization in terms of such a variable. A glance at the Stokes operators suggests that this information is encoded in $\hat{S}_{\pm}$, so we can perform a polar decomposition~\cite{Luis:1993aa,Sanchez-Soto:1994aa} \begin{equation} \hat{S}_{-} = \hat{E} \, \sqrt{\hat{S}_{+} \hat{S}_{-}} \, , \end{equation} where $\hat{S}_{+} \hat{S}_{-}$ is a Hermitian positive operator and the unitary operator $\hat{E}$ represents the exponential of the relative phase.
One can verify that $[\hat{E}, \hat{N}] = 0$, so we have $\hat{E} = \sum_{S} \hat{E}^{(S)}$ and we can work out the solution in $\mathcal{H}_{S}$. In each of these Fock layers, there are $2S+1$ orthonormal states $| \delta_{r}^{(S)} \rangle$ ($r = -S, \ldots, S $), given by \begin{equation} \label{eq:comES}
| \delta_{r}^{(S)} \rangle = \frac{1}{\sqrt{2S+1}} \sum_{m=-S}^{S}
e^{- i m \delta_{r}^{(S)}} |S, m \rangle \, , \qquad \qquad \delta_{r}^{(S)} = \frac{2\pi}{2S+1} r \, , \end{equation} such that \begin{equation}
\hat{E}^{(S)} | \delta_{r}^{(S)} \rangle = e^{ i \delta_{r}^{(S)}}
| \delta_{r}^{(S)} \rangle \, , \end{equation} so this is a good quantum description of the relative phase.
From Eq.~\eqref{eq:comES} we see that the eigenstates of $\hat{E}$ are related to the angular momentum basis $|S,m \rangle$ by a finite Fourier transform~\cite{Levy:1976aa,Guise:2012aa}. This means that the relative phase $\hat{E}$ and the number difference $\hat{S}_{3}$ are complementary observables and, in consequence, the corresponding eigenvectors $| \delta_{r}^{(S)} \rangle $ and $|S,m \rangle $ are mutually unbiased~\cite{Wootters:1989aa,Durt:2010aa}. This purely quantum effect has been experimentally tested~\cite{Tsegaye:2000aa,Usachev:2001aa}, confirming in a crystal-clear manner the differences between classical and quantum polarization properties.
On the other hand, since this complementary pair $\hat{E}$-$\hat{S}_{3}$ has discrete spectra, one could imagine representing polarization as a discrete grid~\cite{Vourdas:2004aa,Vourdas:2007aa,Bjork:2008aa}. This approach fulfills many of the desirable properties and the resulting discrete analogs of the $Q$-function can be experimentally determined by so-called weak measurements~\cite{Salvail:2013aa}.
\subsection{Degree of polarization and complementarity}
The superposition principle for quantum states is the very bedrock of quantum theory. Indeed, Young's double slit experiment was central to the discussions that laid the foundations~\cite{Feynman:2006aa} of this discipline. It incorporates in a natural way Bohr's principle of complementarity~\cite{Bohr:1928aa}, which can be concisely stated by saying that a quantum system possesses real but mutually exclusive properties.
The most popular formulation of this principle is probably the time-honored wave--particle duality, which restricts the coexistence of wave and particle qualities of quantum objects~\cite{Wootters:1979aa,Scully:1991aa,Zeilinger:1999aa}, in the sense that interferometric \emph{welcher Weg} information is complementary to the visibility of intensity fringes~\cite{Jaeger:1993aa,Jaeger:1995aa, Englert:1996aa}. This was later completed~\cite{Jakob:2010aa} by showing that concurrence $\mathcal{C}$~\cite{Wootters:1998aa}, a well-known measure of entanglement, naturally emerges as a quantity complementary to single-partite properties in a bipartite qubit system. The single-partite properties consist of two local but mutually exclusive realities given by the visibility $\mathcal{V}$ and predictability $\mathcal{P}$, forming what is commonly termed as wave--particle duality. Thus, the complementarity relation for bipartite qubits naturally contains three mutually exclusive quantities, namely \begin{equation} \mathcal{C}^{2} + \mathcal{V}^{2} + \mathcal{P}^{2} = 1 \, . \end{equation}
In the same vein, Mandel examined the question of whether the degree of coherence can be understood in terms of quantum complementarity and he established a quantitative relationship between the degree of coherence and the \emph{welcher Weg} information~\cite{Mandel:1991aa}; a convincing experimental demonstration was also presented~\cite{Zou:1991aa}.
Since the classical explanations of partial coherence and partial polarization are very similar in structure~\cite{Wolf:2007aa}, one might ask whether partial polarization can also be connected to quantum complementarity. This question was addressed recently~\cite{Lahiri:2011aa,Zela:2014aa,Eberly:2017aa,Lahiri:2017aa,Qian:2018aa,Kanseri:2019aa,Sanchez:2019aa,Qian:2020aa}, where much of this work has focused on single photons. This is somewhat special: for single photons, the semiclassical degree of polarization is equivalent to the purity of the state. Alternatively, the density matrix of a single-photon state contains only the first-order moment (i.e., the dipole), so, from the perspective of polarization, it represents a classical state. However, the results have been appropriately extended to arbitrary quantum states~\cite{Norrman:2020aa}; viz., \begin{equation} \mathbb{D}^{2} + \mathbb{V}^{2} = \mathbb{P}^{2} \, , \end{equation} where the intensity distinguishability $\mathbb{D}$ and the Stokes visibility $\mathbb{V}$ are given by \begin{equation} \mathbb{D} = \frac{\lvert \langle \hat{N}_{+} \rangle - \langle \hat{N}_{-} \rangle \rvert}{\langle \hat{N}_{+} \rangle + \langle \hat{N}_{-} \rangle} \,, \qquad \qquad \mathbb{V} = \frac{2 \sqrt{\langle \hat{N}_{+} \rangle \langle \hat{N}_{-} \rangle}}{\langle \hat{N}_{+} \rangle + \langle \hat{N}_{-} \rangle} \lvert g_{\pm} \rvert \, , \end{equation} and the associated degree of polarization here turns out to be \begin{equation} \mathbb{P} = \left [ 1 - \frac{4 \langle \hat{N}_{+} \rangle \langle \hat{N}_{-} \rangle (1 - \lvert g_{\pm} \rvert^{2} )}{( \langle \hat{N}_{+} \rangle + \langle \hat{N}_{-} \rangle)^{2}} \right ]^{1/2} \, . \end{equation} In these expressions $g_{\pm}$ is a mode correlation coefficient defined as \begin{equation} g_{\pm} = \frac{\langle \hat{a}_{+}^{\dagger} \hat{a}_{-} \rangle}{\sqrt{\langle \hat{N}_{+} \rangle \langle \hat{N}_{-} \rangle}} \, . 
\end{equation} This shows that all perfectly polarized quantum states~\cite{Goldberg:2017}; i.e., states with $\mathbb{P}= 1$, obey strong complementarity. If the intensity distinguishability (or path predictability for a single photon) is zero, we obtain $\mathbb{V} = \mathbb{P}$; that is, the Stokes visibility is exactly determined by the degree of polarization. This result unifies the interpretations for the degree of polarization established in classical two-way interferometry, where the degree of polarization has been connected to intensity visibility and to polarization modulation~\cite{Leppanen:2014aa}. Therefore, in addition to its role as a complementarity measure, the degree of polarization can be viewed as an intrinsic quantity that characterizes the ability of light to exhibit intensity and polarization-state variation.
\section{Unpolarized light} \label{sec:unpol}
\subsection{Higher-order unpolarized states}
The constitution of unpolarized light has been investigated since the dawn of modern optics. Indeed, Verdet~\cite{Verdet:1869ve} already offered a lucid characterization of what was known as \emph{natural} light by using the projections of the intensity onto the axes of a rotated Cartesian coordinate system. Unpolarized states are those that remain invariant under any rotation of that coordinate system and under any phase shift between its rectangular components.
In classical optics, the field components of unpolarized light are modeled by zero-mean, uncorrelated, stationary Gaussian random processes~\cite{Kampen:2007qr}. The previous invariance conditions thus determine the entire probabilistic structure of the projected intensities~\cite{Barakat:1989fp}. In contrast, the standard theory is limited to first-order moments, presenting unpolarized light as having a zero-mean Stokes vector, which in geometrical terms means that the Stokes vector reduces to the origin of the Poincar\'e sphere.
At the quantum level, the invariance requirement fixes once and for all the structure of the density matrix, so it specifies the probability distribution for the Stokes variables and, as a result, all of their moments, as we have already discussed. However, one could think of extending this notion: when all of the multipoles up to a given order (say $M$) vanish, the state lacks polarization information up to that order and hence will be called \emph{$M$th-order unpolarized}. The classical picture matches the first-order theory, corresponding to a certain set of quantum states that are only invariant to first-order \cite{Goldberg:2017}; whereas, the quantum condition implies that all the multipoles are identically null.
In consequence, using the previous notion of higher-order polarization degrees, a state is $M$th-order unpolarized when $\mathbb{P}_{M}^{(S)}=0$, which obviously implies $\mathcal{A}_M^{(S)}=0$; i.e., all the multipoles up to order $M$ vanish. We will denote these states as $\hat{\varrho}_{\mathrm{unpol},M}^{(S)}$. In more physical terms, the condition of $M$th-order unpolarization amounts to imposing that the moments $\langle \hat{\mathbf{S}}_{\mathbf{n}}^{\ell} \rangle$ be independent of the direction $\mathbf{n}$ for $\ell = 1, \ldots, M$ (i.e., that they be isotropic). Therefore, all of the moments up to order $M$ do not show any directional structure, while higher-order moments do.
Because of this, $M$th-order unpolarized states do carry polarization information when one inspects higher-order moments. As we have said, this is referred to as \emph{hidden} polarization, according to the terminology coined by Klyshko~\cite{Klyshko:1992wd,Klyshko:1997yq,Klyshko:1998wd}, although we will say that such states display higher-order polarization~\cite{Gupta:2011lq}.
We illustrate this point with a few examples, starting with single-photon states ($S=1/2$). The multipole expansion of an arbitrary single-photon state reads \begin{equation}
\hat{\varrho}^{(1/2)} = \varrho_{00}^{(1/2)} \, \hat{T}_{00}^{(1/2)} +
\sum_{q} \varrho_{1q}^{(1/2)} \, \hat{T}_{1q}^{(1/2)} \, . \end{equation} Since the state only has dipolar components, the quantum and classical descriptions coincide and these states can only be first-order unpolarized. Positivity constrains the possible values of the dipole to the range $ 0 \leq \mathcal{A}_{1}^{(1/2)} \leq 1/2$. The condition $\mathcal{A}_{1}^{(1/2)} =0$ immediately fixes the unpolarized states; viz., \begin{equation}
\varrho_{\mathrm{unpol},1}^{(1/2)} = \tfrac{1}{2} \; \leavevmode\hbox{\small1\normalsize\kern-.33em1}_{2} \, . \end{equation} We stress, though, that like all quantum objects, these states can only be considered as elements of an ensemble~\cite{Peres:2002oz}.
\begin{figure}
\caption{Second-order degree of polarization as a function of
purity, for the first-order unpolarized states \eqref{eq:diag2}.}
\label{fig:Pvspur}
\end{figure}
For two-photon states, there are first-order (classical) and second-order (quantum) unpolarized states. The general condition for first-order unpolarization is that the dipolar term vanishes; i.e., \begin{equation}
\hat{\varrho}_{\mathrm{unpol},1}^{(1) } =
\varrho_{00}^{(1)} \, \hat{T}_{00}^{(1)}
+\sum_{q} \varrho_{2 q}^{(1)} \, \hat{T}_{2 q}^{(1)} \, ,
\label{eq:un2ph} \end{equation} with the extra constraint of positivity. Let us assume that the density matrix can be diagonalized via an SU(2) transformation (which is true for a broad class of axially symmetric states~\cite{Blum:1981rb}), so that it can be expressed as \begin{equation}
\hat{\varrho}_{\mathrm{diag}}^{(1)} = \mathrm{diag}(\lambda_{1}, \lambda_{2}, \lambda_{3}) = \frac{1}{\sqrt{3}} \hat{T}_{00}^{(1)} +
\frac{\lambda_{1}-\lambda_{3}}{\sqrt{2}} \; \hat{T}_{10}^{(1)} +
\frac{1-3\lambda_{2}}{\sqrt{6}} \; \hat{T}_{20}^{(1)} \, ,
\label{eq:diag} \end{equation} where $\mathrm{diag} (d_{1}, \ldots, d_{n})$ represents a diagonal matrix whose diagonal elements are those in the argument. The state is first-order unpolarized when $\lambda_{1} =\lambda_{3}$. Since $\mathop{\mathrm{Tr}} \nolimits (\hat{\varrho}_{\mathrm{diag}}^{(1)}) = 1$, we can write \begin{equation}
\hat{\varrho}^{(1)}_{\mathrm{unpol},1} =
\mathrm{diag} ( \lambda, 1-2\lambda, \lambda )
\label{eq:diag2} \end{equation} and positivity enforces $ 0 \leq \lambda \leq 1/2$; i.e., $0 \leq \mathcal{A}_2^{(1)} \leq 2/3$. Both the purity $\mathcal{P} = \mathop{\mathrm{Tr}} \nolimits [ (\hat{\varrho}_{\mathrm{unpol},1}^{(1)})^{2} ]$ and the second-order degree $ \mathbb{P}_{2}$ depend on $\lambda$ \begin{equation}
\mathcal{P}^{(1)} = 6 \lambda^{2} - 4 \lambda + 1 \, ,
\qquad \qquad \qquad
\mathbb{P}_{2}^{(1)} = \sqrt{(3 \lambda -1)^2} \, , \end{equation} while $\mathbb{P}_{1}^{(1)} =0$, as anticipated. This can be concisely recast as \begin{equation}
\mathbb{P}_{2}^{(1)} = \sqrt{\tfrac{1}{2} [ 3 \mathcal{P}^{(1)} -1 ]} \, , \end{equation} which is plotted in Fig.~\ref{fig:Pvspur}. The maximum degree $\mathbb{P}_{2}^{(1)}$ is attained for the pure states \begin{equation}
\label{eq:pSon}
|\Psi_{\mathrm{unpol},1}^{(1)} \rangle = \frac{1}{\sqrt{2}}
\sin \beta \left ( e^{i\alpha} |1, 1 \rangle - e^{-i\alpha}
|1,-1\rangle \right ) + \cos \beta |1,0\rangle \, , \end{equation}
which are transformed versions of the state $|1,0 \rangle$ under SU(2) transformations. These states have served as the conduit to experimentally verify the existence of hidden polarization~\cite{Usachev:2001ve,Sehat:2005wd}; for a more detailed analysis of these states, the reader is referred to Ref.~\cite{Hoz:2014kq}.
\subsection{Majorana representation} \label{sec:Majorana}
The SU(2) coherent states correspond as nearly as possible to a classical spin vector pointing in a particular direction. It is irresistible to ask which pure states are, in a sense, \emph{the opposite} of SU(2) coherent states and therefore the most quantum ones. This idea has been recently pursued~\cite{Zimba:2006fk,Crann:2010qd,Bannai:2011pi} as those states that \emph{point nowhere}; i.e., the average Stokes vector vanishes and the fluctuations up to order $M$ are isotropic: they have been dubbed \emph{anticoherent states}.
To investigate this point it is advantageous to use the Majorana stellar representation~\cite{Majorana:1932ul}, which allows us to uniquely depict a spin state living in $\mathcal{H}_{S}$ by $2S$ points on the unit sphere~\cite{Bengtsson:2006aa}. Several decades after its conception, this representation has recently attracted a great deal of attention in several fields~\cite{Hannay:1998aa,Hannay:1998ab,Ribeiro:2007aa,Makela:2010aa,Lamacraft:2010aa,Bruno:2012aa,Lian:2012aa,Devi:2012aa,Cui:2013aa,Yang:2015aa,Liu:2016aa,Chryssomalakos:2018aa,Goldberg:2018aa,Chabaud:2020aa}.
The Majorana representation is a direct generalization of the Bloch sphere from spin-$1/2$ particles to spin-$S$ particles, making use of the coherent-state representation of the wave function $\Psi (\mathbf{n} ) = \langle \mathbf{n} | \Psi \rangle$ (to lighten notation, we will skip the label $S$, henceforth restricting to a fixed $S$). Once we insert the identity in terms of the states $|S,m\rangle $ and take into account the overlap $\langle \mathbf{n} | S, m\rangle $, the wavefunction can be written as \begin{equation}
\Psi( \mathbf{n} ) = \frac{1}{(1 + \lvert \zeta \rvert^{2})^{S}}
\sum_{m=-S}^S \sqrt{\frac{(2S)!}{(S-m)!(S+m)!}} \Psi_m \, \zeta^{S+m}\, ,
\label{eq:Majpol} \end{equation}
where $\zeta = \tan (\theta/2) e^{i \phi}$ is the stereographic projection mapping a point on the unit sphere with angle $(\theta, \phi)$ to the point $\zeta \in \mathbb{C}$. Apart from an unessential positive factor, this wave function is a polynomial of order $2S$; thus, $| \Psi \rangle$ is determined by the set $\{ \zeta_{i} \}$ of the $2S$ complex zeros of $\Psi ( \mathbf{n} )$~\cite{Bengtsson:2006aa} \begin{equation}
\Psi ( \mathbf{n} ) = \frac{Z^{\ast}_{2S}}{(1 + | \zeta |^{2})^{S}} (\zeta - \zeta_{1}) \ldots (\zeta - \zeta_{2S})\, , \end{equation}
with the resulting set suitably completed by points at infinity if the degree of $\Psi( \mathbf{n} )$ is less than $2S$. Here $Z^{\ast}_{2S}$ is the final component of the vector of coefficients of $| \Psi \rangle $ in the angular momentum basis. The corresponding configuration of points on the unit sphere is called the Majorana constellation associated with the state $|\Psi\rangle$.
There is a complementary way to look at the Majorana polynomial that helps us gain further insights. Any pure state $|\Psi \rangle \in \mathcal{H}_{S}$ can be factorized in terms of the bosonic operators $\hat{a}_{\pm}$ as \begin{equation}
\label{Eq: Majorana}
| \Psi \rangle = \frac{1}{\sqrt{\cal{N}}} \prod_{m=1}^{2S}
[ \cos ( {\theta_m}/{2} ) \hat{a}_+^\dagger +
e^{i \phi_m} \sin ( {\theta_m}/{2} ) \hat{a}_-^\dagger ]
|0_{+}, 0_{- } \rangle \, , \end{equation}
where $\cal{N}$ is a normalization factor, $|0_{+}, 0_{- } \rangle$ is the two-mode vacuum and the angles $\theta_m$ and $\phi_m$ satisfy the natural constraints $0 \leq \theta_m \leq \pi$ and $0 \leq \phi_m < 2 \pi$. Thus, each factor in \eqref{Eq: Majorana} can be visualized as a point on the unit sphere. Since the operators $\hat{a}^{\dagger}_{+}$ and $\hat{a}^{\dagger}_{-}$ create an excitation in right- and left-hand circularly polarized modes, respectively, each of the factors in (\ref{Eq: Majorana}) can also be naively thought of as creating an \emph{excitation component} of a polarization state corresponding to its position on the sphere~\cite{Bengtsson:2006aa}.
An SU(2) rotation simply corresponds to a rigid rotation of the Majorana constellation and, consequently, it does not affect the degree of polarization: states with the same constellation, irrespective of their relative orientations, have the same polarization invariants.
For SU(2) coherent states, the Majorana constellation collapses to a single point on the unit sphere. Intuitively, one would guess that states with the most isotropic polarization moments would have the most symmetric constellations that are possible. We further develop this idea in the subsequent section.
\subsection{Extremal polarization states}
To formalize the idea of the most quantum states we will use the cumulative distribution $\mathcal{A}_{M}$. The idea is to identify which states minimize $\mathcal{A}^{(S)}_{M}$ for each order $M$. We shall be considering only pure states, which we expand as $|\Psi \rangle =\sum_{m=-S}^{S} \Psi_{m} \, |S,m\rangle$. We then have that \begin{equation}
\label{eq:AMS}
\mathcal{A}_{M} =\sum_{K=1}^{M} \sum_{q=-K}^{K}
\frac{2K+1}{2S+1}
\left | \sum_{m,m^{\prime }=-S}^{S} C_{Sm,Kq}^{Sm^{\prime} }
\Psi_{m^{\prime}} \Psi_{m}^{\ast } \right |^{2} \, . \end{equation} As we have discussed, SU(2) coherent states maximize $\mathcal{A}_{M}$ for all orders $M$.
The strategy to minimize $\mathcal{A}_{M}$ is simple to state: starting from a set of unknown normalized state amplitudes in Eq.~(\ref{eq:AMS}), which we write as $\Psi_{m} = a_{m}+i b_{m}$ ($a_{m}, b_{m} \in \mathbb{R}$), we try to obtain $\mathcal{A}_{M} =0$ for the highest possible $M$. This yields a system of polynomial equations of degree two for $a_{m}$ and $b_{m}$, which we solve using Gr{\"o}bner bases~\cite{Adams:1994ru} implemented in the computer algebra system \textsc{magma}~\cite{Bosma:1997xp}. Since the orientation of the constellation is irrelevant, one can reduce the number of variables by fixing one of the points to be at, say, the north pole and another to lie in the $S_2$-$S_3$ plane. In this way, we get exact algebraic expressions and we can detect when there is no feasible solution. Table~\ref{table1} lists the resulting states (which, in some cases, are not unique) for selected values of $S$. We also indicate the solutions' associated Majorana constellations. For completeness, in Fig.~\ref{fig:Qfunc} we plot the constellations as well as the $Q$-functions for some of these states. A complete list can be found in \cite{Markus:2015pr} and a more detailed discussion can be found in \cite{Bjork:2015ab}. The resulting states systematize the notion of anticoherent states (of note is the fierce objection of Roy Glauber, the father of coherent states, to such a term); they have been called the Kings of Quantumness to avoid a disputable denomination.
\begin{table}[t]
\caption{States for which $\mathcal{A}_{M}$ vanishes for the
indicated values of $S$. In the second column, we indicate the
order $M$, which we conjecture is the highest possible. We give
the nonzero state components $\Psi_{m}$ ($m=-S, \ldots, S$) and
describe the corresponding Majorana constellation.}
\label{table1}
\centering
\begin{tabular}{ccll}
\hline
$S$ & $M$ & State & Constellation \\
\hline
1 & 1 & $\Psi_{0} = 1$ & radial line \\
$\frac{3}{2}$ & 1 &
$\Psi_{\scriptsize{\pm \frac{3}{2}}} =
\scriptsize{\frac{1}{\sqrt{2}}}$ &
equatorial triangle \\
2 & 2 & $\Psi_{-1} = \scriptsize{\sqrt{\frac{2}{3}}} ,
\quad \Psi_{2} = \scriptsize{\sqrt{\frac{1}{3}}} $ &
tetrahedron \\
$\frac{5}{2}$ & 1 & $\Psi_{\scriptsize{\pm\frac{3}{2}}} =
\scriptsize{\frac{1}{\sqrt{2}}}$ &
equatorial triangle + poles \\
3 & 3 & $\Psi_{\pm 2} = \scriptsize{\frac{1}{\sqrt{2}}}$ &
octahedron \\
$\frac{7}{2}$ & 2 & $ \Psi_{- \scriptsize{\frac{5}{2}}}=
\Psi_{\scriptsize{\frac{1}{2}}} =
\scriptsize{\sqrt{\frac{7}{18}}},
\quad \Psi_{ \scriptsize{\frac{7}{2}}} =
\scriptsize{\sqrt{\frac{2}{9}}} $ & two triangles + pole \\
4 & 3 & $\Psi_{\pm 4} = \scriptsize{\sqrt{\frac{5}{24}}}, \quad
\Psi_{0} = \scriptsize{\sqrt{\frac{7}{12}}} $ & cube \\
$\frac{9}{2} $ & 2 & $\Psi_{\pm \scriptsize{\frac{9}{2}}} =
\scriptsize{\frac{1}{\sqrt{6}}} , \quad
\Psi_{\pm \scriptsize{\frac{3}{2}}}
= \scriptsize{\frac{1}{\sqrt{3}}} $ & three triangles \\
5 & 3 & $\Psi_{\pm 5}= \scriptsize{\frac{1}{\sqrt{5}}} ,
\quad \Psi_{0} = \scriptsize{\sqrt{\frac{3}{5}}}$ &
pentagonal prism \\
$\frac{11}{2} $ & 3 &
$\Psi_{\pm \scriptsize{\frac{11}{2}}} =
\scriptsize{\frac{\sqrt{17}}{12}} ,
\quad \Psi_{\pm \scriptsize{\frac{5}{2}}} =
i \scriptsize{\frac{\sqrt{55}}{12}}$ &
pentagon + two triangles \\
6 & 5 &$\Psi_{\pm 5}= \pm \scriptsize{\frac{\sqrt{7}}{5}},
\quad \Psi_{0} =- \scriptsize{\frac{\sqrt{11}}{5}} $ &
icosahedron \\
7 & 4 & $\Psi_{\pm 6}= \scriptsize{\sqrt{\frac{854}{3645}}}, \quad
\Psi_{\pm 3} = \scriptsize{\sqrt{\frac{637}{13420}} + i
\sqrt{\frac{512603}{9783180}}} $ &
three squares + poles \\
& & $\Psi_{0} = \scriptsize{\sqrt{\frac{12561757}{163053000}}} -
\scriptsize{i \sqrt{\frac{512603}{2013000}}}$ & \\
10 & 5 & $\Psi_{\pm 10}= \scriptsize{\sqrt{\frac{187}{1875}}} , \quad
\Psi_{\pm 5}= \pm \scriptsize{\sqrt{\frac{209}{625}}} , \quad
\Psi_{0} =\scriptsize{\sqrt{\frac{247}{1875}}}$ &
deformed dodecahedron
\end{tabular} \end{table}
From a physical perspective one would expect these constellations to have their points distributed as symmetrically as possible on the unit sphere. For some values of $S$, such as 4, 6, 8, 12 and 20, one can guess a maximally unpolarized constellation, in each case corresponding to the vertices of a Platonic solid. For other numbers it is not easy to guess an optimal, exact constellation, but solving the system of polynomial equations, as described before, yields exact algebraic expressions for the coefficients $\Psi_{m}$, from which one can easily compute the points of the Majorana constellation with arbitrary numerical precision.
The problem of distributing $N$ points on a sphere in the \emph{most symmetric} fashion has a long history and has many different solutions depending on the cost function one tries to optimize~\cite{Saff:1997aa,Brauchart:2015aa}. Here, we shall only discuss a few of the formulations: spherical $t$-designs~\cite{Delsarte:1977dn}, the Thomson problem~\cite{Thomson:1904qp,Ashby:1986bk, Edmundson:1992uf,Melnyk:1977gm} and the Queens of quantumness~\cite{Giraud:2010db}.
Spherical $t$-designs are configurations of $N$ points on a sphere such that the average value of any polynomial of degree at most $t$ is the same when evaluated over the $N$ points as over the entire sphere. Thus, the $N$ points can be seen to give a representative average value of all polynomials up to degree $t$. It has been conjectured that a state is unpolarized to order $t$ if and only if its Majorana constellation is a spherical $t$-design~\cite{Crann:2010qd}. However, while the statement is true for some $t$-designs, such as those represented by the Platonic solids, the general conjecture has been disproven~\cite{Bannai:2011pi}.
\begin{figure*}
\caption{Density plots of the SU(2) $Q$-functions for
the optimal states in Table I for the cases
$S = 5/2, 3, 7/2, 9/2, 5$, and $7$ (from left to right, blue
indicates the zero values and red maximal ones). On top, we sketch
the Majorana constellation for each of them.}
\label{fig:Qfunc}
\end{figure*}
It is clear that there should be some connection between the number of points $N$ and the maximal degree $t$ for which a spherical $t$-design exists. The configurations that maximize $t$ for a given $N$ are called optimal designs, and we use $t$ to denote the degree of an optimal $N$-point design in the following. No analytical relationship is known between $N$ and $t$: it is known that for a $t$-design, the number of points $N$ is at least proportional to $t^2$; whereas, for some orders $t$ the only known constructions have $N$ scaling proportionally with $t^3$. As a function of $N$, the order $t$ is non-monotonic. The results for $1 \leq N \leq 100$ are summarized in~\cite{Hardin:1996bv}.
From the numerical data thus far one can conclude that the maximum values of $M$ and $t$ coincide. We therefore conjecture that if an optimal spherical design of order $t$ exists for some $N$, then one can find an $M$th-order unpolarized $N$-photon state with $M=t$.
The next thing one can note is that an optimal $t$-design does not necessarily give a $t$th-order unpolarized state. Quite often the configurations are similar, e.g. regular polygons with their surface normals along the polar axis, but displaced from each other along the axis by certain distances. However, these distances often need to be fine-tuned for an optimal $t$-design to become a maximally-unpolarized state. The Platonic solids are exceptions to this observation. That the optimal configurations for $t$-designs and maximally unpolarized states do not always coincide underscores the mystery that the optimal $t$ and maximal $M$ always seem to be equal for any $N$.
The Thomson problem consists of arranging $N$ identical point charges on the surface of a sphere so that the electrostatic potential energy of the entire configuration is minimized. The problem can be generalized to potential energies that are proportional to $r^{-d}$, where $r$ is the Euclidean distance between the charges. The choice $d=1$ is the Thomson problem, corresponding to the usual Coulomb potential; the case $d \rightarrow \infty$ is called Tammes problem~\cite{Tammes:1930rc}. For small $S$, up to 3, the Thomson configurations are identical to the optimal spherical $t$-design and to the maximally unpolarized states. For larger $S$, they differ in general and the degree of unpolarization of the Thomson states is lower than the maximum. Different from the two previous cases, the solution to the Thomson problem appears to be unique for every $S$~\cite{Erber:1991lq}.
The Queens of quantumness are the states that maximize the Hilbert-Schmidt distance to the closest point of the convex hull of the mixed SU(2) coherent states~\cite{Giraud:2010db}. This convex hull defines the subspace of classical states. Therefore, the states maximizing the distance to the nearest point on this hull can be thought of as having maximally quantum characteristics. In Ref.~\cite{Giraud:2010db} it is claimed that the Queens can be seen as the least classical (or most quantum) of all states given this metric. Although we have used another figure of merit, our approach and that in \cite{Giraud:2010db} share the view that the states \emph{most different} from SU(2) coherent states are the most quantum.
When we interpret our subspace $\mathcal{H}_{S}$ as the symmetric subspace of a system of $S$ qubits, the Kings seem to also be closely linked to other intriguing problems, such as maximally entangled symmetric states~\cite{Aulbach:2010jw,Giraud:2015oj} and $k$-maximally mixed states~\cite{Arnaud:2013hm,Goyeneche:2014so}.
\subsection{Metrological applications}
The main goal of quantum metrology is to measure a physical magnitude with surprising precision by exploiting quantum resources~\cite{Giovannetti:2011aa}. In particular, tailoring polarization states to better detect SU(2) rotations is quite a relevant problem with direct applications to magnetometry~\cite{Wasilewski:2010aa,Sewell:2012aa,Muessel:2014aa}, polarimetry~\cite{Meyer:2001aa,DAmbrosio:2013aa}, and metrology in general~\cite{Rozema:2014aa}.
The salient feature of maximally unpolarized states is their ability to detect small, but arbitrary, SU(2) transformations with optimal resolution. This was already anticipated in~\cite{Kolenderski:2008mo}, where the authors specifically found that, for photon numbers 4, 6, 8, 12 and 20, the states corresponding to regular polyhedron Majorana constellations best signaled misalignments between two Cartesian reference frames. To understand this, it is instructive to look at related states, namely the NOON states. Such NOON states are known to have the highest sensitivity to small rotations about the $\hat{S}_3$-axis among all states with a fixed excitation $S$~\cite{Bollinger:1996am}. This can be easily understood by looking at their Majorana constellation, which is apparent from Fig.~\ref{fig:cohNoon} and consists of $2S$ equidistantly placed points around the equator. A rotation around the $\hat{S}_3$-axis is described by the unitary operator $\hat{U}(\phi)=\exp(- i \phi \hat{S}_3)$; therefore, for $\phi=\pi/(2 S)$ the states $|\mathrm{NOON}\rangle$ and $\hat{U}(\phi )|\mathrm{NOON}\rangle$ are orthogonal, while for $\phi = q\pi/S$ they are parallel, where $q$ is an integer. Thus, it should not come as a surprise that NOON states are optimal for detecting small rotations around the $\hat{S}_3$-axis, in the interval $0 \leq \phi \leq \pi/(2 S)$. If the rotation axis lies in the equatorial plane, then a rotation of $\pi$ is needed to obtain a parallel state, irrespective of $S$. When $S$ is a half integer this only happens when the axis intersects one of the Majorana points, and when $S$ is an integer this happens if the axis either intersects a point or is the intersector between two points. Thus, the rotation resolution is highly directional for a NOON state.
This is precisely the advantage of maximally unpolarized states: having a high degree of spherical symmetry, they resolve rotations around any axis approximately equally well. It may not be obvious from their appearance that they have high sensitivity to small rotations about an arbitrary axis. To substantiate this claim, recall that the action $\tau$ needed to make a state $|\Psi\rangle$ evolve so that $|\langle\Psi| \exp(i \hat{A} \tau) |\Psi\rangle|^2 = 1-\epsilon$, where $\epsilon$ is a small, positive, real number, and $\hat{A}$ is Hermitian, is inversely proportional to the variance $\var{\hat{A}}$~\cite{Mandelshtam:1945aa}. The relation connecting the evolution speed $d \epsilon/ d \tau$ and the variance is sometimes called the quantum speed limit~\cite{Taddei:2013aa,Campo:2013aa}. A NOON state in the $\hat{S}_3$ basis has maximal variance $\langle \var{\hat{S}_3} \rangle=S^2$ for a fixed $S$ and is thus the state with maximal sensitivity to a rotation around the $\hat{S}_3$ axis. However, the variances $\var{\hat{S}_1}$ and $\var{\hat{S}_2}$ are only $S/2$ and thus the state is rather insensitive to rotations around those axes (or around any rotation axis in the $\hat{S}_1$-$\hat{S}_2$ plane). In contrast, all the Kings of Quantumness have isotropic variances equal to $S(S+1)/3$; namely, close to the maximum possible variance. The quantum speed limit theorem thus asserts that, having a large and isotropic variance of the Stokes operator, these states are rather sensitive to rotations around any axis $\hat{S}_{{\bf n}}$~\cite{Chryssomalakos:2017aa,Martin:2020aa}.
Another way of explaining the sensitivity to a rotation around an arbitrary axis is to observe that, since these states have maximal spherical symmetry, they become parallel, or almost parallel, for relatively small rotations around several axes. To quantify this statement one could use the Fisher information and the Cram\'{e}r-Rao bound to assess the uncertainty in estimating the rotation direction and the rotation angle~\cite{Taddei:2013aa,Campo:2013aa}.
\section{Conclusions} \label{sec:conclusions}
Quantum polarization provides an excellent landscape to flaunt nonclassical features. This is because the polarization degree of freedom is easily accessible both theoretically and experimentally. Moreover, we have shown that this variable finds parallels with many different topics of quantum physics.
As polarization is the manifestation of photon spin, it in turn contributes, together with the orbital angular momentum (OAM), to the total angular momentum of light. It should come as no surprise that the methods developed for polarization characterization lend themselves, virtually without modification, to the analysis of OAM. In fact, since it is at present experimentally easier to generate specific OAM states than the corresponding polarization states (which are, in general, highly entangled), some of the theory developed for polarization has actually been tested with OAM as the model system.
Light has been an excellent laboratory for research in quantum theory. We hope that the results summarized in this contribution show how this is particularly true for light polarization.
\noindent \textbf{Acknowledgments.} Over the years, the ideas in this paper have been further developed and expanded with questions, suggestions, criticism, and advice from many colleagues. Particular thanks for help in various ways goes to G. S. Agarwal, F. Bouchard, R. W. Boyd, A. S. Chirkin, J. H. Eberly, A. T. Friberg, R. J. Glauber, D. F. V. James, V. P. Karassiov, Y. H. Kim, E. Karimi, N. Korolkova, Ch. Marquardt, C. M\"{u}ller, A. Normann, \L. Rudnicki, Ch. Silberhorn, W. P. Schleich, and J. S\"{o}derholm.
\noindent \textbf{Funding.} Ministerio de Ciencia e Innovaci\'on (PGC2018-099183-B-I00); Consejo Nacional de Ciencia y Tecnologia de Mexico (254127); Fundacja na rzecz Nauki Polskiej (2018/MAB/5); Canada National Sciences and Engineering Research Council; Walter C. Sumner Foundation; Lachlan Gilchrist Fellowship Fund; Michael Smith Foreign Study Supplement; Mitacs Globalink; Ministry of Education and Science of the Russian Federation (Mega Grant 14.W03.31.0032).
\noindent\textbf{Disclosures.} The authors declare no conflicts of interest. \appendix
\section{Coherent states} \label{sec:cs}
The coherent-state approach is not just a convenient mathematical tool, but it also helps one to understand how physical properties of a system are reflected by the geometrical structure of the related phase space. The interested reader can find more details in the pertinent monographs~\cite{Gazeau:2009aa,Perelomov:1986ly}.
Let $G$ be a Lie group (connected and simply connected, with finite dimension $n$), which is the dynamical group of a given quantum system. Let $T$ be a unitary irreducible representation (irrep) of $G$ acting on the Hilbert space $\mathcal{H}$: \begin{equation}
|\Psi_g\rangle= \hat{T} (g)| \Psi_0\rangle \, , \qquad \qquad
g \in G\, ,
\label{eq:defCS} \end{equation}
where $|\Psi_0\rangle$ is a fixed vector in $\mathcal{H}$ of the representation $\hat{T}(g)$. The subgroup $H \subset G$ such that \begin{equation}
\hat{T} (h)| \Psi_0\rangle = e^{i \alpha (h)} | \Psi_{0} \rangle \, ,
\qquad \qquad \forall h \in H\, ,
\label{eq:defIso} \end{equation}
is called the isotropy group (or little group) of the state $| \Psi_{0} \rangle$. Since the states of a quantum system are defined up to multiplication by a global phase, any element of the orbit $ | \Psi_{g} \rangle$ of the state $| \Psi_{0} \rangle$ can be put in correspondence with an element of the coset space $X = G/H$. We thus define the set of coherent states for the group $G$ with respect to the fiducial vector $| \Psi_{0} \rangle $ by \begin{equation}
| \Omega \rangle = \hat{T} (\Omega) | \Psi_{0} \rangle \, , \end{equation} where $\Omega \in X$; that is, $g=\Omega h$.
An essential property is that the coherent states form an (overcomplete) basis in the state space of the system \begin{equation}
\int_X d\mu (\Omega) \,
|\Omega \rangle \langle\Omega| = \hat{\leavevmode\hbox{\small1\normalsize\kern-.33em1}}
\label{eq:resolId} \, , \end{equation} where $d\mu(\Omega)$ is the invariant integration measure on $X$.
In principle, coherent states can be generated from any state $| \Psi_{0} \rangle$. However, the states $| \Psi_{0} \rangle$ having the largest isotropy groups generate coherent states closest to the classical states in the sense that they minimize the uncertainty relations.
An important class of coherent states corresponds to the coset spaces $X$ that are homogeneous K\"{a}hler manifolds. Then, a natural symplectic structure can be introduced on $X$, so that it can be considered the phase space of a classical dynamical system~\cite{Klimov:2009aa}.
Let us first consider the canonical example of the Heisenberg-Weyl group $W_{1}$, which is the dynamical symmetry group for a mode of the quantized radiation field. The associated Lie algebra $\mathfrak{w}_{1}$ is defined by the canonical commutation relations \begin{equation} [\hat{a}, \hat{a}^\dagger]=\hat{\leavevmode\hbox{\small1\normalsize\kern-.33em1}} \, .
\label{eq:HW1} \end{equation} A general element of this algebra has the form $t \hat{\leavevmode\hbox{\small1\normalsize\kern-.33em1}} + i (\alpha^{\ast} \hat{a} - \alpha \hat{a}^\dagger)$, where $t \in \mathbb{R}$ and $\alpha \in \mathbb{C}$. Therefore, the elements of the group are obtained via the exponential map \begin{equation}
\hat{T}(t,\alpha)= e^{it} \hat{D}(\alpha) \, ,
\qquad \qquad
\hat{D}(\alpha)=\exp (\alpha \hat{a}^\dagger-\alpha^*\hat{a} )
\label{eq:disp} \, . \end{equation}
An element of this group is specified by $g= (t, \alpha)$, with $t \in \mathbb{R}$ and $\alpha\in \mathbb{C}$, and the operators $\hat{T}(g)= \hat{T}(t,\alpha)$ form an irrep on the Hilbert space of Fock states $\{ | n\rangle \}$. The phase space is the complex plane $\mathbb{C} = W_1/U(1)$, and the (quadrature) coherent states are: \begin{equation}
|\alpha\rangle= \hat{D} (\alpha) | 0 \rangle,
\label{eq:CScan} \end{equation}
with the vacuum $|0 \rangle$ being the fiducial state.
The expansion in the Fock basis can be easily obtained by disentangling the displacement operator $\hat{D} (\alpha)$; the result is \begin{equation}
|\alpha\rangle= e^{-|\alpha|^2/2 }
\sum_{n=0}^{\infty} \frac{\alpha^n}{\sqrt{n!}}|n\rangle
\label{eq:CSStat} \, , \end{equation} which exhibits the well-known Poissonian statistics of a radiation field with average value $\bar{N} = \lvert \alpha \rvert^{2}$.
If we consider the transformation of the operators of the algebra $\mathfrak{w}_1$ by the elements of the group $W_1$, it is not difficult to show that: \begin{equation}
\label{a1}
\hat{D}(\alpha) \, \hat{a} \, \hat{D}^\dagger(\alpha) =
\hat{a} - \alpha \; \hat{\leavevmode\hbox{\small1\normalsize\kern-.33em1}} \, , \end{equation}
and by applying \eqref{a1} to the coherent states $|\alpha\rangle$ we see \begin{equation}
\hat{a} | \alpha \rangle= \alpha | \alpha\rangle
\label{eq:nose} , \end{equation} which is another equivalent definition of the coherent states.
The resolution of unity in terms of $|\alpha \rangle $ is \begin{equation}
\int_{\mathbb{C}} d\mu (\alpha ) \;
|\alpha\rangle\langle\alpha| =\hat{\leavevmode\hbox{\small1\normalsize\kern-.33em1}},
\qquad \qquad
d\mu(\alpha)=\frac{1}{\pi}\, d\alpha \, ,
\label{eq:eu} \end{equation} and, as a consequence, we can expand an arbitrary state using \eqref{eq:eu}: \begin{equation}
| \Psi \rangle= \int_{\mathbb{C}} d\mu (\alpha) \;
\Psi (\alpha^{\ast}) | \alpha\rangle,
\qquad \qquad
\Psi (\alpha^{\ast}) = \langle\alpha | \Psi \rangle \, .
\label{eq:GSexp} \end{equation}
The function $\Psi (\alpha^{\ast})$ is the coherent-state wave function of the state $|\Psi\rangle$.
The coherent states saturate the Heisenberg uncertainty relation \begin{equation}
\var_{\alpha}{\hat{x}} \; \var_{\alpha}{\hat{p}} \geq 1/2
\label{eq:MUS} \end{equation} where the canonical position-momentum operators are implicitly defined by $\hat{a} = (\hat{x} + i \hat{p})/\sqrt{2}$. For this reason, coherent states are often considered the \emph{most classical} states of the harmonic oscillator.
Next, we consider the SU(2) symmetry, whose Lie algebra $\mathfrak{su}(2)$ is spanned by the operators $\{\hat{S}_1,\hat{S}_2,\hat{S}_3\}$, with commutation relations \begin{equation} [\hat{S}_{k}, \hat{S}_{\ell} ]=i \epsilon_{klm} \hat{S}_{m} \, .
\label{eq:SU2CR} \end{equation}
The irreps are labeled by the index $S$ $(S=0,\tfrac{1}{2},1,\dots)$ and the carrier space $\mathcal{H}_S$ is spanned by the standard basis $\{ |S,m\rangle \mid m=S, \dots,-S \}$. The highest weight state is $|S,S\rangle$ and is annihilated by the ladder operator $\hat{S}_{+}$. The isotropy subgroup for this state consists of all the elements of the form $\exp( i \psi \hat{S}_{3})$, so it is isomorphic to U(1). The coset space is then SU(2)/U(1), which is the unit sphere $\mathcal{S}_{2}$, and it is exactly the same as the classical phase space, the natural arena to describe the dynamics.
Since the irreps can be written in terms of the Euler angles as \begin{equation} \hat{T}^{(S)} (\phi, \theta, \psi) = \exp(-i\phi \hat{S}_3)\exp(-i\theta \hat{S}_2)\exp(-i\psi \hat{S}_3) \,, \end{equation} it is clear that the elements of SU(2)/U(1) can be represented as in \eqref{eq:dissph}; i.e., \begin{equation} \hat{D}(\mathbf{n}) = e^{- i \phi \hat{S}_{3}} e^{- i \theta \hat{S}_{2}} = \exp \left [ \tfrac{1}{2} \theta (\hat{S}_{+} e^{-i \phi} - \hat{S}_{-} e^{i \phi} )\right] \, , \end{equation} so they depend on the spherical angles $(\theta, \phi)$ determining the unit vector $\mathbf{n}$. The system of coherent states is thus \begin{equation}
|S,\mathbf{n} \rangle= \hat{D} (\mathbf{n}) |S,S \rangle \, .
\label{eq:CSSU2} \end{equation}
One can write the operator $\hat{D}(\mathbf{n} )$ in a disentangled form to obtain another useful parametrization of the SU(2) coherent states: \begin{equation}
|S, \zeta \rangle = (1 + | \zeta|^{2})^{-S}
\exp( \zeta\hat{S}_{+} ) |S, -S \rangle \, , \end{equation} with $\zeta = \tan (\theta/2) e^{i \phi}$ being the stereographic projection from the north pole of the sphere $\mathcal{S}^2$ onto the complex plane $\mathbb{C}$, tangent to the south pole.
Expanding the exponential, we obtain the expression for the SU(2)
coherent states in terms of the angular momentum basis $|S,m\rangle$ \begin{equation}
|S, \mathbf{n} \rangle = \sum_{m=-S}^S \binom{2S}{S+m}^{1/2}
[ \sin ( \theta / 2 ) ]^{S-m} \,
[ \cos ( \theta/2)]^{S+m} \, \exp [ - i ( S+m ) \phi] |S,m\rangle \, . \end{equation}
SU(2) coherent states are not orthogonal; their overlap is \begin{equation}
\lvert \langle \mathbf{n}_1 | \mathbf{n}_2 \rangle \rvert^{2} = \left [
\tfrac{1}{2} (1 + \mathbf{n}_{1} \cdot \mathbf{n}_{2} )
\right ]^{2S} \, , \end{equation} which has been used in \eqref{eq:QCS}.
The resolution of the identity now reads \begin{equation}
\int_{\mathcal{S}_{2}} d\mu ( \mathbf{n}) \,
|S, \mathbf{n} \rangle \langle S, \mathbf{n} | =
\hat{\leavevmode\hbox{\small1\normalsize\kern-.33em1}},
\qquad \qquad d\mu (\mathbf{n} ) = \frac{2S+1}{4\pi}
\sin\theta d\theta d\phi \, ,
\label{eq:SU2resun} \end{equation} which allows for an expansion \begin{equation}
|\Psi\rangle = \int d\mu(\mathbf{n}) \; \Psi(\mathbf{n} ) |S, \mathbf{n} \rangle \, ,
\label{eq:SU2CSExp} \end{equation} where $\Psi (\mathbf{n} )$ is the coherent-state wave function.
Finally, as discussed in Section~\ref{sec:uncrel}, the SU(2) coherent states minimize the Heisenberg uncertainty relation.
\section{Quasiprobability distributions on the sphere} \label{app:phsp}
Presenting quantum mechanics as a statistical theory on a classical phase space has attracted a great deal of attention since the very early days of this discipline.
The main ingredient for any successful phase-space method is a \emph{bona fide} mapping that relates operators with functions defined on a smooth manifold $\mathcal{M}$, the phase space of the system, endowed with a very precise mathematical structure~\cite{Kirillov:2004aa}. This mapping, first suggested by Weyl~\cite{Weyl:1928aa}, is not unique. In fact, a whole family of $s$-parametrized functions can be assigned to each operator and the choice of a particular element of the family depends on its convenience for each problem. In particular, the time-honored quasiprobability distributions are the functions connected with the density operator. The most common choices of $s$ are $+1$, 0, and $-1$, which correspond to the $P$ (Glauber-Sudarshan)~\cite{Glauber:1963aa,Sudarshan:1963aa}, $W$~(Wigner)~\cite{Wigner:1932uq}, and $Q$~(Husimi)~\cite{Husimi:1940aa} functions, respectively.
For the relevant case of the SU(2) dynamical symmetry, the $s$-parametrized Weyl-Stratonovich map \begin{equation}
\hat{A} \mapsto W_{A}^{(s)} (\mathbf{n} ) =
\mathop{\mathrm{Tr}} \nolimits [ \hat{A} \, \hat{w}^{(s)}(\mathbf{n} ) ] ,
\label{map} \end{equation} puts in one-to-one correspondence each operator $\hat{A}$ invariantly acting on $\mathcal{H}_{S}$ with a function on the sphere $\mathcal{S}_{2}$. The corresponding kernels $\hat{w}^{(s)}$ are defined as~\cite{Berezin:1975mw,Agarwal:1981bd,Varilly:1989ud,Klimov:2002cr} \begin{equation}
\hat{w}^{(s)}(\mathbf{n} )= \sqrt{\frac{4\pi}{2S+1}}
\sum_{K=0}^{2S} \sum_{q=-K}^{K}
( C_{SS,K0}^{SS} )^{-s} \; Y_{Kq}^{\ast}( \mathbf{n} ) \,
\hat{T}_{Kq}^{S}\,,
\label{ker} \end{equation} where $Y_{Kq}(\mathbf{n})$ are the spherical harmonics, $C_{S_{1}m_{1},S_{2}m_{2}}^{Sm}$ the Clebsch-Gordan coefficients and $\hat{T}_{Kq}^{S}$ the irreducible tensor operators \begin{equation}
\hat{T}_{Kq}^{S} = \sqrt{\frac{2K+1}{2S+1}}
\sum_{m,m^{\prime}=-S}^{S} C_{Sm,Kq}^{Sm^{\prime}} \,
|S,m^{\prime}\rangle \langle S,m| \, . \end{equation} As expected, the kernels are properly normalized \begin{equation}
\mathop{\mathrm{Tr}} \nolimits [ \hat{w}^{(s)} ( \mathbf{n} ) ] = 1 \, ,
\qquad
\frac{2S+1} {4\pi}\int_{\mathcal{S}_{2}}
d\mathbf{n}\; \hat{w}^{(s)} (\mathbf{n})=\leavevmode\hbox{\small1\normalsize\kern-.33em1} \,, \end{equation} with $d\mathbf{n} =\sin \theta\ d\theta\ d\phi $ the invariant measure on the sphere.
Consequently, the symbol of $\hat{A}$ can be concisely expressed as \begin{equation}
W_{A}^{(s)} ( \mathbf{n} ) = \sqrt{\frac{4\pi}{2S+1}}
\sum_{K=0}^{2S} \sum_{q=-K}^{K} ( C_{SS,K0}^{SS} )^{-s}
A_{Kq} \; Y_{Kq}^{\ast} (\mathbf{n} ) \, , \label{eq:symb} \end{equation} where $A_{Kq}=\mathop{\mathrm{Tr}} \nolimits ( \hat{A} \hat{T}_{Kq}^{S \dagger} )$.
The traditional SU(2) quasiprobability distributions are simply the $s$-symbols of the density operator $\hat{\varrho}$. The value $s=0$ corresponds to the standard Wigner function, while $s=\pm 1$ leads to the $P$ and $Q$-functions respectively, defined as dual coefficients in the basis of spin coherent states $|S, \mathbf{n} \rangle$~\cite{Perelomov:1986ly}, according to \begin{equation}
Q(\mathbf{n} ) = \langle \mathbf{n} |\hat{\varrho}|\mathbf{n} \rangle \, ,
\qquad \qquad
\hat{\varrho} =\frac{2S+1}{4\pi}\int_{\mathcal{S}_{2}}d\mathbf{n} \;
P(\mathbf{n}) \; |\mathbf{n}\rangle \langle \mathbf{n} | \, . \end{equation} The symbols $W_{A}^{(s)} (\mathbf{n} )$ are covariant under SU(2) transformations and provide the overlap relation \begin{equation}
\mathop{\mathrm{Tr}} \nolimits (\hat{\varrho} \hat{A}) = \frac{2S+1}{4\pi}
\int_{\mathcal{S}_{2}} d\mathbf{n} \, W_{\varrho}^{(s)}(\mathbf{n} )
\,W_{A}^{(-s)}(\mathbf{n}) \, . \end{equation}
\end{document}
\begin{document}
\title{\textbf{ Quantitative Local and Global A Priori Estimates \\ for Fractional Nonlinear Diffusion Equations}\\[7mm]}
\author{\Large Matteo Bonforte$^{\,a,\,b}$ ~and~ Juan Luis V\'azquez$^{\,a,\,c}$\\} \date{}
\maketitle
\
\begin{abstract}
We establish quantitative estimates for solutions $u(t,x)$ to the fractional nonlinear diffusion equation, $\partial_t u +(-\Delta)^s (u^m)=0$ \ in the whole range of exponents $m>0$, $0<s<1$. The equation is posed in the whole space $x\in\mathbb{R}^d$. We first obtain weighted global integral estimates that allow to establish existence of solutions for classes of large data. In the core of the paper we obtain quantitative pointwise lower estimates of the positivity of the solutions, depending only on the norm of the initial data in a certain ball. The estimates take a different form in three exponent ranges: slow diffusion, good range of fast diffusion, and very fast diffusion. Finally, we show existence and uniqueness of initial traces. \end{abstract}
\vskip 1cm
\noindent {\bf Keywords.} Nonlinear diffusion equation, Fractional Laplacian, Weighted global estimates, Existence for large data, Positivity estimates, Initial trace.\\[.5cm] {\sc Mathematics Subject Classification}. 35B45, 35B65, 35K55, 35K65.\\
\begin{itemize} \item[(a)] Departamento de Matem\'{a}ticas, Universidad Aut\'{o}noma de Madrid,\\ Campus de Cantoblanco, 28049 Madrid, Spain \item[(b)] e-mail address:~\texttt{matteo.bonforte@uam.es }\\ web-page:~\texttt{http://www.uam.es/matteo.bonforte} \item[(c)] e-mail address:~\texttt{juanluis.vazquez@uam.es }\\ web-page:~\texttt{http://www.uam.es/juanluis.vazquez} \end{itemize}
\tableofcontents
\section{Introduction}
We consider the class of nonnegative weak solutions of the fractional diffusion equation \begin{equation}\label{FDE.eq} \partial_t u +(-\Delta)^s (u^m)=0\quad \; \mbox{in }(0,T)\times\mathbb{R}^d\,, \end{equation} where $m>0$, $0<s<1$, $d\ge 1$, and $T>0$. The precise definition of the fractional Laplacian is given in Appendix \ref{ssec.app1}. We are mainly interested in values $m\ne1$, since the linear case is rather well known. For $s=1$ we recover the classical porous medium/fast diffusion equation (that will be shortened as PME/FDE respectively), whose theory is well-known, cf. \cite{VazBook}. We will call the case $s=1$ the standard diffusion case. \normalcolor We also assume that we are given initial data \begin{equation}\label{FDE.id} u(0,x)=u_0(x)\,, \end{equation} where in principle $u_0\in \mathrm{L}^1(\mathbb{R}^d)$ and $u_0\ge 0$. However, larger classes of initial data are sometimes considered, like changing sign solutions or non integrable data. The equation has recently attracted some attention in mathematical analysis. Such an interest has been motivated by its appearance as a model for anomalous diffusion in different applied contexts. For the reader's convenience we have listed in Appendix II (Section \ref{App.Motiv}) the most relevant sources to the applications that we know of. \normalcolor
We refer to \cite{DPQRV1,DPQRV2} for the basic theory of existence and uniqueness of weak solutions for the Cauchy problem \eqref{FDE.eq}$-$\eqref{FDE.id}. These papers also describe the main results on $L^q$ boundedness and $C^\alpha$ regularity, they show that nonnegative solutions are indeed positive everywhere, as well as some other basic properties of the nonlinear semigroup generated by the problem. Recently, the existence and properties of Barenblatt solutions for the Cauchy Problem was established in \cite{Vaz2012}. Related literature is also mentioned in these papers. \normalcolor
The main purpose of the present paper is obtaining quantitative a priori estimates of a local type for the solutions of the problem. Such estimates were obtained for the standard PME by Aronson-Caffarelli \cite{ArCaff} and by the authors for the standard FDE \cite{BV, BV3, BV-ADV}\,. This is not always possible for the present model due to the nonlocal character of the diffusion operator, but then global estimates occur in weighted spaces. The use of suitable weight functions allows to prove crucial $\mathrm{L}^1$-weighted estimates that enter substantially into the derivation of the main results. The results take different forms according to the value of the exponent $m$, a fact that is to be expected since it happens for standard diffusion. The list of bounds is as follows: weighted $\mathrm{L}^1$ estimates, half-Harnack parabolic estimates (i.\,e., quantitative pointwise lower estimates), and tail estimates (i.\,e., asymptotic spatial behaviour). As a first consequence of these estimates, an existence result for very weak solutions with non-integrable data in some weighted $\mathrm{L}^1_{\varphi}$ space is obtained. In particular, bounded initial data or data with slow growth at infinity are allowed.
We remind that Harnack inequalities are a standard tool used to develop further theory, see in that respect the work of Di Benedetto for quasilinear equations \cite{DBbook, DGVbook}. Let us quote four examples of application of the line of results of this paper that are already available: the existence and uniqueness of initial traces, which we do in Section 7; understanding the asymptotic behaviour of the fractional KPP equation \cite{SV2013}; dealing with nonlocal symmetrization problems \cite{VV2013}; or uniqueness issues for the fractional PME with variable density \cite{PT2}.\normalcolor
\textbf{Outline of the paper and main results. }
First, some preliminary information on case divisions. The case $m>1$ is called the {\sl (fractional) porous medium case} : contrary to the standard porous medium equation, it does not have the property of finite propagation, an important difference established in \cite{DPQRV1,DPQRV2}\,. The range of exponents $m\in (0,1)$ is called the {\sl (fractional) fast diffusion equation}, and it has special properties when $(d-2s)/d=: m_c<m<1$\,, which we call the {\sl good fast diffusion range}. When $0<m<m_c$ it is known that some solutions extinguish in finite time, which is a clear manifestation of the change of character of the equation, since solutions of the Cauchy problem exist globally in time and are positive everywhere in $Q=(0,+\infty)\times\mathbb{R}^d$ if $m\ge m_c$\,.\normalcolor
In Section \ref{sect.2}, we derive integral bounds in form of weighted $\mathrm{L}^1$ estimates, valid for nonnegative solutions of the Cauchy problem in the whole fast diffusion range $0<m<1$. Actually, they are valid for the difference of two ordered solutions, the precise statement is given in Theorem \ref{prop.HP.s}. Contrary to the purely local $\mathrm{L}^1$ estimates known in the standard fast diffusion case, cf. \cite{HP}, the estimates for $s<1$ are valid in weighted $\mathrm{L}^1$-spaces and the weight must decay at infinity with a certain decay rate, not too fast, not too slow. This is again a manifestation of the nonlocal properties of the fractional Laplacian. The estimates will be important as a priori bounds for solutions, or families of solutions, through the rest of the paper.
In Section \ref{sect.exist.large} we use the estimates of Section \ref{sect.2} to construct solutions for initial data that belong to weighted $\mathrm{L}^1$-spaces, in particular for data $0\le u_0\in \mathrm{L}^1_{\rm loc}(\mathbb{R}^d)$ such that $u_0(x)$ grows less than $O(|x|^{2s/m})$ as $|x|\to\infty$, in particular for all bounded data. These solutions can be uniquely identified as minimal solutions in a precise sense and satisfy many of the properties of the known class of bounded and integrable weak solutions.
Section \ref{sect.3} studies the actual positivity of nonnegative solutions via quantitative lower estimates for the good fast diffusion equation. Precise local lower bounds are contained in Theorem \ref{thm.lower}. The behaviour as $|x|\to\infty$ (so-called tail behaviour) is studied in Section \ref{ssec.gfd.tail}, and global spatial lower bounds are derived as a consequence in Section \ref{ssec.global.GFDE}. The merit of the estimates is that they are quantitative and most of the exponents are sharp. The lower estimates of this section can be adapted for the exponent $m=m_c$ separating both fast diffusion subranges, but only when $u_0\in \mathrm{L}^p_{loc}$ for some $p>1$. However, we refrain from doing this particular case in the present paper since the proof uses some other techniques that would lengthen the text.
The very fast diffusion range $0<m<m_c$ is studied in Section \ref{sect.4}. The weighted $\mathrm{L}^1$ estimates of Theorem \ref{prop.HP.s} continue to hold, but this does not allow to obtain the same type of quantitative lower bounds since the technique used in the good fast diffusion range does not work anymore. There are two problems: on the one hand the $L^1$--$L^\infty$ smoothing effect does not hold for general $\mathrm{L}^1$ initial data, on the other hand the presence of the extinction phenomenon makes things more complicated, and the extinction time enters directly the estimates of Theorem \ref{thm.lower.subcrit.}. These difficulties have already appeared in the standard FDE, $s=1$, and were treated in our paper \cite{BV-ADV}. However the technique used in that paper does not extend to $0<s<1$ and we present here a technique that is based on the careful use of weight factors, and in the limit $s=1$ gives a simpler proof of the result of \cite{BV-ADV}. We also study the problem of characterizing the finite time extinction in terms of the initial data; thus, we determine a class of initial data that produces solutions that extinguish in finite time, see Proposition \ref{prop.ext}, as well as a roughly complementary class of initial data for which the solution exists and is positive globally in time, see Corollary \ref{cor.not.ext.}\,.
Section \ref{sect.PME} is devoted to study similar questions for the porous medium case. Theorem \ref{thm.lower.pme} establishes local lower bounds of the Aronson-Caffarelli type for all $0<s\le 1$. The question of optimal decay as $|x|\to\infty$ is an open problem; for selfsimilar solutions it is solved in \cite{Vaz2012}.
In Section \ref{sect.traces} we address a different question that complements our previous results, i.\,e., the question of existence and uniqueness of an initial trace for nonnegative weak solutions defined in a strip $Q_T=(0,T]\times\mathbb{R}^d$. The main results are stated in Theorems \ref{thm.init.trace.m<1} and \ref{thm.init.trace.m>1}. This result can be combined in the reverse direction with the existence of solutions with initial data a nonnegative Radon measure, Theorem 4.1 of \cite{Vaz2012}.
In Appendix I we collect the definitions of weak, very weak and strong solutions, together with a number of technical results. As already mentioned above, Appendix II discusses applications.
\noindent {\sc About the linear equation.} Our estimates have counterparts for the linear fractional heat equation, case $m=1$, that are worth commenting. Thus, the lower bound of Section \ref{sect.PME} for $m>1$ passes to the limit $m\downarrow 1$, and this coincides with the limit $m\uparrow1$ of a part of the estimate for $m<1$ obtained in Section \ref{sect.3} for $m<1$. See Proposition \ref{thm.lower.m1}. Finally, we prove the existence and uniqueness of an initial trace also when $m=1$, cf. Theorem \ref{thm.init.trace.m=1}. This is an interesting result that is not present in the literature to our knowledge and complements the uniqueness results of \cite{BPSV2013}. \normalcolor
\noindent{\sc Notations.} Throughout the paper, we fix $m_c=(d-2s)/d$, $m_1=d/(d+2s)$, $p_c=d(1-m)/2$, and $\vartheta:=1/[2s-d(1-m)]$\,, which is positive if $m>m_c$\,. We will call $s$-Laplacian of $f$ the function $-(-\Delta)^{s}f$. This is consistent with the use in the standard case $s=1$.\normalcolor
\section{Weighted $\mathrm{L}^1$ estimates in the fast diffusion range}\label{sect.2}
We will derive weighted $\mathrm{L}^1$ estimates which also hold for the standard FDE (i.e., the limit case $s=1$). When $s<1$ the equation is nonlocal, therefore we cannot expect purely local estimates to hold. Indeed we will obtain estimates in weighted spaces if the weight satisfies certain decay conditions at infinity. We present first a technical lemma which will be used several times in the rest of the paper.
\begin{lem}\label{Lem.phi}
Let $\varphi\in C^2(\mathbb{R}^d)$ be a positive real function that is radially symmetric and decreasing in $|x|\ge 1$. Assume also that $\varphi(x)\le |x|^{-\alpha}$ and that $|D^2\varphi(x)| \le c_0 |x|^{-\alpha-2}$\,, for some positive constant $\alpha$ and for $|x|$ large enough. Then, for all $|x|\ge |x_0|>>1$ we have \begin{equation}\label{Delta.s.phi}
|(-\Delta)^s\varphi(x)|\le \left\{\begin{array}{lll}
\dfrac{c_1}{|x|^{\alpha+2s}}\,, & \mbox{if $\alpha<d$}\,,\\[5mm]
\dfrac{c_2\log|x|}{|x|^{d+2s}}\,, & \mbox{if $\alpha=d$}\,,\\[5mm]
\dfrac{c_3}{|x|^{d+2s}}\,, & \mbox{if $\alpha>d$}\,,\\[5mm] \end{array}\right. \end{equation}
with positive constants $c_1,c_2,c_3>0$ that depend only on $\alpha,s,d$ and $\|\varphi\|_{C^2(\mathbb{R}^d)}$. For $\alpha>d$ the reverse estimate holds from below if $\varphi\ge0$: \ $|(-\Delta)^s\varphi(x)|\ge c_4 |x|^{-(d+2s)}$ for all $|x|\ge |x_0|>>1$\,. \end{lem}
The proof is easy but technical, and is given in Appendix \ref{sec.A1} for the reader's convenience. We point out that the large-decay case $\alpha>d$ is what makes the estimate in the fractional Laplacian case very different from the usual Laplacian case. In particular, the $s$-Laplacian of a nonnegative smooth function with compact support is strictly positive outside of the support and has a certain decay at infinity, indeed the minimal decay $|x|^{-(d+2s)}$ is obtained for the $(-\Delta)^{s}\varphi$ when $\varphi\ge 0$ is compactly supported, cf. \cite{DPQRV2}. A suitable particular choice is the function $\varphi$ defined for $\alpha>0$ as $\varphi(x)=1$ for $|x|\le 1$ and \begin{equation}\label{phi} \varphi(x)=
\dfrac{1}{\left(1+(|x|^2-1)^4\right)^{\alpha/8}}\,, \qquad\mbox{if } |x|\ge 1\,. \end{equation}
We are now ready to present the weighted estimates.
\begin{thm}[Weighted $\mathrm{L}^1$ estimates]\label{prop.HP.s}
Let $u\ge v$ be two ordered solutions to the equation \eqref{FDE.eq}, with $0<m<1$. Let $\varphi_R(x)=\varphi(x/R)$ where $R>0$ and $\varphi$ is as in the previous lemma with $0\le \varphi(x)\le |x|^{-\alpha}$ for $|x|>>1$ and $$ d-\frac{2s}{1-m}<\alpha< d+\frac{2s}{m}\,. $$ Then, for all $0\le \tau,t <\infty$ we have \begin{equation}\label{HP.s} \left(\int_{\mathbb{R}^d}\big(u(t,x)- v(t,x)\big)\varphi_R(x)\,{\rm d}x\right)^{1-m}\le \left(\int_{\mathbb{R}^d}\big(u(\tau,x)- v(\tau,x)\big)\varphi_R(x)\,{\rm d}x\right)^{1-m}
+ \frac{C_1 \,|t-\tau|}{R^{2s-d(1-m)}} \end{equation} with $C_1>0$ that depends only on $\alpha,m,d$\,. \end{thm}
It is remarkable that the estimate holds for (very) weak solutions, maybe changing sign. Also, it is worth pointing out that the estimate holds both for $\tau<t$ and for $\tau>t$. In the limit $s\to 1$ we recover the well known $\mathrm{L}^1$ local estimates for the standard FDE.
\noindent {\sl Proof.~} \noindent$\bullet~$\textsc{Step 1. }\textit{A differential inequality for the weighted $\mathrm{L}^1$-norm. }If $\psi$ is a smooth and sufficiently decaying function we have \[ \begin{split}
\left|\frac{{\rm d}}{\,{\rm d}t}\int_{\mathbb{R}^d}\big(u(t,x)-v(t,x)\big)\psi(x)\,{\rm d}x\right|
&=\left|\int_{\mathbb{R}^d}\left((-\Delta)^s u^m-(-\Delta)^s v^m\right)\psi\,{\rm d}x\right|\\
&=_{(a)}\left|\int_{\mathbb{R}^d}\left(u^m- v^m\right)(-\Delta)^s\psi\,{\rm d}x\right|\\
&\le_{(b)} 2^{1-m}\int_{\mathbb{R}^d}(u- v)^m\left|(-\Delta)^s\psi\right|\,{\rm d}x\\ &\le_{(c)} 2 \left(\int_{\mathbb{R}^d}(u- v)\psi\,{\rm d}x\right)^m\,
\left(\int_{\mathbb{R}^d}\frac{\left|(-\Delta)^s\psi\right|^{\frac{1}{1-m}}}{\psi^\frac{m}{1-m}}\,{\rm d}x\right)^{1-m}\,. \end{split} \]
Notice that in $(a)$ we have used the fact that $(-\Delta)^s$ is a symmetric operator, while in $(b)$ we have used that $\left(u^m- v^m\right)\le 2^{1-m}(u-v)^m$, where $u^m=|u|^{m-1}u$ as mentioned. In $(c)$ we have used H\"older inequality with conjugate exponents $1/m>1$ and $1/(1-m)$. If the last integral factor is bounded, then we get \[
\left|\frac{{\rm d}}{\,{\rm d}t}\int_{\mathbb{R}^d}\big(u(t,x)-v(t,x)\big)\psi(x)\,{\rm d}x\right|\le C_\psi^{1-m}\left(\int_{\mathbb{R}^d}\big(u(t,x)- v(t,x)\big)\psi(x)\,{\rm d}x\right)^m \] Integrating the above differential inequality on $(\tau,t)$ with $\tau,t\ge 0$ we obtain: \[ \left(\int_{\mathbb{R}^d}\big(u(t,x)- v(t,x)\big)\psi(x)\,{\rm d}x\right)^{1-m}-\left(\int_{\mathbb{R}^d}\big(u(\tau,x)- v(\tau,x)\big)\psi(x)\,{\rm d}x\right)^{1-m}
\le (1-m)C_\psi^{1-m}\,|t-\tau| \] which is \eqref{HP.s} once we estimate the constant $C_\psi$, for a convenient choice of test function.
\noindent$\bullet~$\textsc{Step 2. }\textit{Estimating the constant $C_\psi$. }Choose $\psi(x)=\varphi_R(x):=\varphi(x/R)=\varphi(y)$\,, with $\varphi$ as in Lemma \ref{Lem.phi} and $y=x/R$\,, so that $(-\Delta)^s\psi(x)=(-\Delta)^s\varphi_R(x)=R^{-2s}(-\Delta)^s\varphi(y)$ \[\begin{split} C_\psi
&=\int_{\mathbb{R}^d}\frac{\left|(-\Delta)^s\varphi_R(x)\right|^{\frac{1}{1-m}}}{\varphi_R(x)^\frac{m}{1-m}}\,{\rm d}x
=R^{d-\frac{2s}{1-m}}\int_{\mathbb{R}^d}\frac{\left|(-\Delta)^s\varphi(y)\right|^{\frac{1}{1-m}}}{\varphi(y)^\frac{m}{1-m}}\,{\rm d}y\\
&=R^{d-\frac{2s}{1-m}}\left[\int_{B_{2}}\frac{\left|(-\Delta)^s\varphi(y)\right|^{\frac{1}{1-m}}}{\varphi(y)^\frac{m}{1-m}}\,{\rm d}y
+\int_{B_{2}^{\,c}}\frac{\left|(-\Delta)^s\varphi(y)\right|^{\frac{1}{1-m}}}{\varphi(y)^\frac{m}{1-m}}\,{\rm d}y\right]
= k_1 R^{d-\frac{2s}{1-m}}\,,\\ \end{split}\]
where it is easy to check that the first integral is bounded, since $\varphi\ge k_2>0$ on $B_{2}$\,, and when $|y|>|x_0|$ with $|x_0|>>1$ we know by estimates \eqref{Delta.s.phi} that \begin{equation}\label{c.psi}
\frac{\left|(-\Delta)^s\varphi(y)\right|^{\frac{1}{1-m}}}{\varphi(y)^\frac{m}{1-m}}\le \left\{\begin{array}{lll}
\dfrac{k_3}{|y|^{\alpha+\frac{2s}{1-m}}}\,, & \mbox{if $\alpha<d$}\,,\\[5mm]
\dfrac{k_4\log|y|}{|y|^{d+\frac{2s}{1-m}}}\,, & \mbox{if $\alpha=d$}\,,\\[5mm]
\dfrac{k_5}{|y|^{\frac{d+2s-\alpha m}{1-m}}}\,, & \mbox{if $\alpha>d$}\,,\\[5mm] \end{array}\right. \end{equation} therefore $k_1$ is finite whenever $d-\frac{2s}{1-m}<\alpha< d+\frac{2s}{m}$\,. Note that all the constants $k_i$ depend only on $\alpha, m, d$\,.\qed
\noindent\textbf{Remark. }The estimate implies the conservation of mass when $(d-2s)/d=m_c<m<1$, by letting $R\to \infty$. On the other hand, when $0<m<m_c$ solutions corresponding to $u_0\in\mathrm{L}^1(\mathbb{R}^d)\cap\mathrm{L}^p(\mathbb{R}^d)$ with $p\ge d(1-m)/2s$\,, extinguish in finite time $T>0$\,, (see e.g. \cite{DPQRV2}); the above estimates provide a lower bound for the extinction time in such a case, just by letting $\tau=T$ and $t=0$ in the above estimates: \begin{equation}\label{HP.s.T} \frac{1}{C_1\,R^{d(1-m)-2s}}\left(\int_{\mathbb{R}^d}u_0\,\varphi_R\,{\rm d}x\right)^{1-m}\le T \end{equation} Moreover, if the initial datum $u_0$ is such that the limit as $R\to+\infty$ of the left-hand side diverges to $+\infty$, then the corresponding solution $u(t,x)$ exists (and is positive) globally in time, as explained in Corollary \ref{cor.not.ext.}\,.
\section{Existence of solutions in weighted $\mathrm{L}^1$-spaces}\label{sect.exist.large}
\begin{thm}\label{exist.large}
Let $0<m<1$ and let $u_0\in\mathrm{L}^1(\mathbb{R}^d, \varphi\,{\rm d}x)$, where $\varphi$ is as in Theorem $\ref{prop.HP.s}$ with decay at infinity $|x|^{-\alpha}$, $d-[2s/(1-m)]<\alpha<d+(2s/m)$. Then there exists a very weak solution $u(t,\cdot)\in\mathrm{L}^1(\mathbb{R}^d, \varphi\,{\rm d}x)$ to equation \eqref{FDE.eq} on $[0,T]\times \mathbb{R}^d$, in the sense that \[ \int_0^T\int_{\mathbb{R}^d}u(t,x)\psi_t(t,x)\,{\rm d}x\,{\rm d}t =\int_0^T\int_{\mathbb{R}^d}u^m(t,x)(-\Delta)^s\psi(t,x)\,{\rm d}x\,{\rm d}t\,,\qquad\mbox{for all $\psi\in C_c^\infty([0,T]\times\mathbb{R}^d)\,.$} \] This solution is continuous in the weighted space, $u\in C([0,T]:\mathrm{L}^1(\mathbb{R}^d, \varphi\,{\rm d}x))$\,. \end{thm}
\noindent {\sl Proof.~}Let $\varphi=\varphi_R$ be as in Theorem \ref{prop.HP.s} with the decay at infinity $|x|^{-\alpha}$. Let $0\le u_{0,n}\in \mathrm{L}^1(\mathbb{R}^d)\cap \mathrm{L}^\infty(\mathbb{R}^d)$ be a non-decreasing sequence of initial data $u_{0,n-1}\le u_{0,n}$, converging monotonically to $u_0\in \mathrm{L}^1(\mathbb{R}^d, \varphi\,{\rm d}x)$\,, i.\,e., such that $\int_{\mathbb{R}^d}(u_0- u_{n,0})\varphi\,{\rm d}x \to 0$ as $n\to \infty$. Consider the unique solutions $u_n(t,x)$ of equation \eqref{FDE.eq} with initial data $u_{0,n}$. By the comparison results of \cite{DPQRV2} we know that they form a monotone sequence. The weighted estimates \eqref{HP.s} show that the sequence is bounded in $\mathrm{L}^1(\mathbb{R}^d, \varphi\,{\rm d}x)$ uniformly in $t\in[0,T]$\,. By the monotone convergence theorem in $\mathrm{L}^1(\mathbb{R}^d, \varphi\,{\rm d}x)$, we know that the solutions $u_n(t,x)$ converge monotonically as $n\to \infty$ to a function $u(t,x)\in \mathrm{L}^\infty ((0,T): \mathrm{L}^1(\mathbb{R}^d, \varphi\,{\rm d}x))$. 
Indeed, the weighted estimates \eqref{HP.s} show that when $u_0\in \mathrm{L}^1(\mathbb{R}^d, \varphi\,{\rm d}x)$ then \begin{equation}\label{HP.s.3}\begin{split} \left(\int_{\mathbb{R}^d}u(t,x)\varphi(x)\,{\rm d}x\right)^{1-m} &=\lim_{n\to\infty}\left(\int_{\mathbb{R}^d}u_n(t,x)\varphi(x)\,{\rm d}x\right)^{1-m}\\ &\le \lim_{n\to\infty} \left(\int_{\mathbb{R}^d}\big(u_n(0,x)\big)\varphi(x)\,{\rm d}x\right)^{1-m} + C_1 R^{d(1-m)-2s}\,t\\ &=\left(\int_{\mathbb{R}^d}u_0(x)\varphi(x)\,{\rm d}x\right)^{1-m} + C_1 R^{d(1-m)-2s}\,t \end{split} \end{equation} At this point we need to show that the function $u(t,x)$ constructed as above is a very weak solution to equation \eqref{FDE.eq} on $[0,T]\times \mathbb{R}^d$\,, more precisely we have to show that for all $\psi\in C_c^\infty([0,T]\times\mathbb{R}^d)$ we have \begin{equation}\label{vw.sol} \int_0^T\int_{\mathbb{R}^d}u(t,x)\psi_t(t,x)\,{\rm d}x\,{\rm d}t =\int_0^T\int_{\mathbb{R}^d}u^m(t,x)(-\Delta)^s\psi(t,x)\,{\rm d}x\,{\rm d}t\,. \end{equation} By the results of \cite{DPQRV2} we know that each $u_n$ is a bounded strong solutions, since the initial data $u_0\in \mathrm{L}^1(\mathbb{R}^d)\cap\mathrm{L}^\infty(\mathbb{R}^d)$\,, therefore for all $\psi\in C_c^\infty([0,T]\times\mathbb{R}^d)$ we have \begin{equation}\label{sol.approx} \int_0^T\int_{\mathbb{R}^d}u_n(t,x)\psi_t(t,x)\,{\rm d}x\,{\rm d}t =\int_0^T\int_{\mathbb{R}^d}u^m_n(t,x)(-\Delta)^s\psi(t,x)\,{\rm d}x\,{\rm d}t\,. \end{equation} Now, for any $\psi\in C_c^\infty([0,T]\times\mathbb{R}^d)$ we easily have that \[ \lim_{n\to\infty}\int_0^T\int_{\mathbb{R}^d}u_n(t,x)\psi_t(t,x)\,{\rm d}x=\int_0^T\int_{\mathbb{R}^d}u(t,x)\psi_t(t,x)\,{\rm d}x \] since $\psi$ is compactly supported and we already know that $u_n(t,x)\to u(t,x)$ in $L^1_{\rm loc}$. 
On the other hand, for any $\psi\in C_c^\infty([0,T]\times\mathbb{R}^d)$ we have that \[ \lim_{n\to\infty}\int_0^T\int_{\mathbb{R}^d}u^m_n(t,x)(-\Delta)^s\psi(t,x)\,{\rm d}x\,{\rm d}t=\int_0^T\int_{\mathbb{R}^d}u^m(t,x)(-\Delta)^s\psi(t,x)\,{\rm d}x\,{\rm d}t \] since $u_n\le u$ and \[\begin{split} 0\le & \int_0^T\int_{\mathbb{R}^d}(u^m(t,x)-u_n^m(t,x))(-\Delta)^s\psi(t,x)\,{\rm d}x\,{\rm d}t\\
&\le \int_0^T\int_{\mathbb{R}^d}|u(t,x)-u_n(t,x)|^m\,\varphi^m(x)\frac{|(-\Delta)^s\psi(t,x)|}{\varphi^m(x)}\,{\rm d}x\,{\rm d}t\\
&\le \int_0^T\left(\int_{\mathbb{R}^d}|u(t,x)-u_n(t,x)|\,\varphi(x)\,{\rm d}x\right)^m
\left(\int_{\mathbb{R}^d}\left|\frac{|(-\Delta)^s\psi(t,x)|}{\varphi(x)^m}\right|^{\frac{1}{1-m}}\,{\rm d}x\,{\rm d}t\right)^{1-m}\\ &\le C \int_0^T\int_{\mathbb{R}^d}(u(t,x)-u_n(t,x))\varphi\,{\rm d}x\,{\rm d}t\to 0 \end{split}\] where we have used H\"older inequality with conjugate exponents $1/m$ and $1/(1-m)$, and we notice that \[
\left(\int_{\mathbb{R}^d}\left|\frac{|(-\Delta)^s\psi(t,x)|}{\varphi(x)^m}\right|^{\frac{1}{1-m}}\,{\rm d}x\,{\rm d}t\right)^{1-m}\le C \]
since $\psi$ is compactly supported, therefore by Lemma \ref{Lem.phi} we know that $\left|(-\Delta)^s\psi(t,x)\right|\le c_3|x|^{-(d+2s)}$, and the quotient \[
\left|\frac{|(-\Delta)^s\psi(t,x)|}{\varphi(x)^m}\right|^{\frac{1}{1-m}}\le \frac{c_3}{|x|^{\frac{d+2s-m \alpha}{1-m}}} \] is integrable when $\frac{d+2s-m \alpha}{1-m}>d$ that is when $\alpha<d+(2s/m)$. In the last step we already know that $\int_{\mathbb{R}^d}(u(t,x)-u_n(t,x))\varphi\,{\rm d}x\to 0$ when $\varphi$ is as above, i.e. as in Theorem \ref{prop.HP.s}. Therefore we can let $n\to\infty$ in \eqref{sol.approx} and obtain \eqref{vw.sol}\,.
For the solutions constructed above, the weighted estimates \eqref{HP.s} show that $0\le u_0\in \mathrm{L}^1(\mathbb{R}^d, \varphi\,{\rm d}x)$ implies \begin{equation}\label{HP.s.44}
\left|\int_{\mathbb{R}^d}u(t,x)\varphi_R(x)\,{\rm d}x- \int_{\mathbb{R}^d}u(\tau,x)\varphi_R(x)\,{\rm d}x\right|\le 2^{\frac{1}{1-m}}C_1 R^{d-\frac{2s}{1-m}}\,|t-\tau|^{\frac{1}{1-m}} \end{equation} which gives the continuity in $\mathrm{L}^1(\mathbb{R}^d, \varphi\,{\rm d}x)$\,. Therefore, the initial trace of this solution is given by $u_0\in\mathrm{L}^1(\mathbb{R}^d, \varphi\,{\rm d}x)\,.$ \qed
\noindent\textbf{Remark. }The solutions constructed above only need to be integrable with respect to the weight $\varphi$, which has a tail of order less than $d+2s/m$. Therefore, we have proved existence of solutions corresponding to initial data $u_0$ that can grow at infinity as $|x|^{(2s/m)-\varepsilon}$ for any $\varepsilon >0$\,. Note that for the linear case $m=1$ this exponent is optimal in view of the representation of solutions in terms of the fundamental solution, but this does not seem to be the case for $m<1$.
\begin{thm}[Uniqueness] The solution constructed in Theorem $\ref{exist.large}$ by approximation from below is unique. We call it the minimal solution. In this class of solutions the standard comparison result holds, and also the estimates of Theorem $\ref{prop.HP.s}$\,. \end{thm}
\noindent {\sl Proof.~} We keep the notations of the proof of Theorem \ref{exist.large}. Assume that there exists another sequence $0\le v_{0,k}\in \mathrm{L}^1(\mathbb{R}^d)$ which is monotonically non-decreasing and converges monotonically to $u_0\in \mathrm{L}^1(\mathbb{R}^d, \varphi\,{\rm d}x)$\,. By the same considerations as in the proof of Theorem \ref{exist.large}, we can show that there exists a solution $v(t,x)\in C([0,T]:\mathrm{L}^1(\mathbb{R}^d, \varphi\,{\rm d}x))$. We want to show that $u=v$, where $u$ is the solution constructed in the same way from the sequence $u_{0,n}$. We will prove equality by proving that $v\le u$ and then that $u\le v$. To prove that $v\le u$ we use the estimates \begin{equation}\label{final.001} \int_{\mathbb{R}^d}\big[v_k(t,x)-u_n(t,x)\big]_+\,{\rm d}x\le \int_{\mathbb{R}^d}\big[v_k(0,x)-u_n(0,x)\big]_+\,{\rm d}x \end{equation} which hold for any $u_n(t,\cdot), v_k(t,\cdot)\in \mathrm{L}^1(\mathbb{R}^d)$, see Theorem 6.2 of \cite{DPQRV2} for a proof. Letting $n\to \infty$ we get that \[ \lim_{n\to\infty}\int_{\mathbb{R}^d}\big[v_k(t,x)-u_n(t,x)\big]_+\,{\rm d}x \le \lim_{n\to\infty}\int_{\mathbb{R}^d}\big[v_k(0,x)-u_n(0,x)\big]_+\,{\rm d}x =\int_{\mathbb{R}^d}\big[v_k(0,x)-u_0(x)\big]_+\,{\rm d}x=0 \] since $v_k(0,x)\le u_0$ by construction. Therefore also $v_k(t,x)\le u(t,x)$ for $t>0$, so that in the limit $k\to \infty$ we obtain $v(t,x)\le u(t,x)$\,. The inequality $u\le v$ can be obtained simply by switching the roles of $u_n$ and $v_k$\,. The validity of estimates of Theorem $\ref{prop.HP.s}$ is guaranteed by the above limiting process. The comparison holds by taking the limits in inequality \eqref{final.001}, as it has been done for $\mathrm{L}^1$-solutions in \cite{DPQRV2}.\qed
\section{Good fast diffusion range}\label{sect.3}
The first result of the section will be the existence of local lower bounds. In the proof we will use Lemma \ref{Lem.Opt}, which is a simple optimization lemma that we state in Appendix \ref{app.opt}\,. We recall that $m_c:=(d-2s)/d$ and $\vartheta:=1/[2s-d(1-m)]$ which is positive for $m>m_c$\,.
\begin{figure}
\caption{\noindent\textit{Black: Lower bounds in the two time ranges. Blue: Upper bounds (smoothing effects), which have the same behaviour when $t\ge t_*$\,.}}
\label{fig.1}
\end{figure}
\begin{thm}[Local lower bounds]\label{thm.lower}
Let $R_0>0$, $m_c<m<1$ and let $0\le u_0\in\mathrm{L}^1(\mathbb{R}^d, \varphi\,{\rm d}x)$, where $\varphi$ is as in Theorem $\ref{prop.HP.s}$ with decay at infinity $|x|^{-\alpha}$, $d-[2s/(1-m)]<\alpha<d+(2s/m)$. Let $u(t,\cdot)\in\mathrm{L}^1(\mathbb{R}^d, \varphi\,{\rm d}x)$ be a very weak solution to Equation \eqref{FDE.eq} corresponding to the initial datum $u_0$. Then there exists a time \begin{equation}\label{t*}
t_*:=C_* \,R_0^{2s-d(1-m)}\,\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{1-m} \end{equation} such that \begin{equation}\label{low.1.thm} \inf_{x\in B_{R_0/2}}u(t,x)\ge K_1\,R_0^{-\frac{2s}{1-m}}\,t^{\frac{1}{1-m}}\quad \mbox{ if } \ 0\le t\le t_*\,, \end{equation} and \begin{equation}
\inf_{x\in B_{R_0/2}}u(t,x)\ge K_2\dfrac{\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{2s\vartheta}}{t^{d\vartheta}}\quad \mbox{ if } \ t\ge t_*\,. \end{equation} The positive constants $C_*,K_1,K_2$ depend only on $m,s$ and $d\ge 1$. See Figure~\ref{fig.1}. \end{thm}
\noindent\textbf{Remarks. }(i) The lower estimate for small times is an absolute bound in the sense that it does not depend on the initial data (though $t_*$ does depend).\\ (ii) We obtain the following expressions for $K_1$ and $K_2$ and $C_*$: \begin{equation}\label{K.i}\begin{split} &K_1:= \dfrac{K_2}{\left[2^{\frac{2}{\vartheta}+1}s\vartheta\left(\omega_d\,I_\infty\right)^{\frac{1}{d\vartheta}}
\right]^{d\vartheta+\frac{1}{1-m}}}\,,\qquad\mbox{and}\qquad \\ &K_2:=\left[\left(\frac{2s}{d(1-m)}\right)^{\frac{1}{\vartheta}}-1\right]^{\frac{1}{1-m}} \left[\frac{d(1-m)}{2s} \frac{\left(2s\vartheta\,\right)^{d(1-m)\vartheta}-1}{\left(2s\vartheta\,\right)^{d(1-m)\vartheta}}\right]^{\frac{2s\vartheta}{1-m}} \frac{\alpha-d}{2(\alpha-d)+1}\,\frac{1}{\omega_d 4^d C_1^{d\vartheta}}\\ &C_* = 2s\vartheta\left(\omega_d\, 2^d\, I_\infty\right)^{\frac{1}{d\vartheta}} \end{split} \end{equation} where $C_1>0$ is the constant in the $\mathrm{L}^1$-weighted estimates of Proposition \ref{prop.HP.s} that depends on $\alpha,m,d$, with $d<\alpha< d+\frac{2s}{m}$, and $I_\infty>0$ is the constant in the smoothing effects \eqref{Smoothing.FFDE}, cf. Theorem 2.2 of \cite{DPQRV2}.\\ (iii) We can always choose $\alpha=d/m<d+2s/m$\,, since $2s>d(1-m)$.
\noindent {\sl Proof.~}The proof is divided in several steps.
\noindent$\bullet~$\textsc{Step 1. }\textit{Reduction. }By the comparison principle it is sufficient to prove lower bounds for solutions $u$ to the following reduced problem: \begin{equation}\label{FFDE.Prob.Red} \left\{ \begin{array}{lll} \partial_t u +(-\Delta)^s (u^m)=0\,,\; &\mbox{in }(0,\infty)\times\mathbb{R}^d\,,\\ u(0,\cdot)=u_0\chi_{B_{R_0}}=\underline{u_0}\,,\; &\mbox{in }\mathbb{R}^d\,, \end{array} \right. \end{equation}
where $m_c<m<1$\,, $0<s<1$\,, and $R_0>0$\,. We only assume that $0\le u_0\in\mathrm{L}^1(B_{R_0})$\,, which implies that $\underline{u_0}\in \mathrm{L}^1(\mathbb{R}^d)$ since $\mathrm{supp}(\underline{u_0})\subseteq B_{R_0}$ and also that $\|\underline{u_0}\|_{\mathrm{L}^1(\mathbb{R}^d)}=\|u_0\|_{\mathrm{L}^1(B_{R_0})}$\,. It is not restrictive to assume that the ball $B_{R_0}$ is centered at the origin.
\noindent$\bullet~$\textsc{Step 2. }\textit{Smoothing effects. }In \cite{DPQRV2} there are the global $\mathrm{L}^1-\mathrm{L}^\infty$ smoothing effects which provide global upper bounds for solutions to the Cauchy problem \eqref{FDE.eq}\,. We apply such smoothing effects to solutions to our reduced Problem \eqref{FFDE.Prob.Red} to get \begin{equation}\label{Smoothing.FFDE}
\|u(t)\|_{\mathrm{L}^\infty(\mathbb{R}^d)}\le \frac{I_\infty}{t^{d\vartheta}}\|\underline{u_0}\|_{\mathrm{L}^1(\mathbb{R}^d)}^{2s\vartheta}
=\frac{I_\infty}{t^{d\vartheta}}\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{2s\vartheta} \end{equation} where $\vartheta=1/[2s-d(1-m)]$ and the constant $I_\infty$ only depends on $d,s,m$\,.
\noindent$\bullet~$\textsc{Step 3. }\textit{Aleksandrov principle. }We recall Theorem 11.2 of \cite{Vaz2012}: we have that \[
u(t,0)\ge u(t,x)\,,\qquad\mbox{for all}\; t>0\;\mbox{and}\; |x|\ge 2R_0\,. \] Therefore one has that \begin{equation}\label{aleks.1}
\|u(t)\|_{\mathrm{L}^\infty(\mathbb{R}^d\setminus B_{2R_0})}=\sup_{x\in \mathbb{R}^d\setminus B_{2R_0}}u(t,x)\le u(t,0)\,. \end{equation}
\noindent$\bullet~$\textsc{Step 4. }\textit{Lower estimates for the $\mathrm{L}^\infty$-norm on an annulus. }We combine the $\mathrm{L}^1$-weighted estimates of Theorem \ref{prop.HP.s} with the smoothing effects of Step 2: estimates \eqref{HP.s} read in this context \begin{equation}\label{HP.s.step41} \left(\int_{B_{R_0}}u_0\,{\rm d}x\right)^{1-m}\le \left(\int_{\mathbb{R}^d}u_0\varphi_R(x)\,{\rm d}x\right)^{1-m}\le \left(\int_{\mathbb{R}^d}u(t,x)\varphi_R(x)\,{\rm d}x\right)^{1-m} + C_1 R^{d(1-m)-2s}\,t \end{equation}
Here we have chosen $R\ge 2 R_0>0$ and $\varphi_R(x)=\varphi(x/R)$ with $\varphi$ as in Lemma \ref{Lem.phi} (with the explicit form given in formula \eqref{phi})\,, so that $\varphi_R(x)=1$ on $B_R$ and $0\le \varphi_R(x)\le |x|^{-\alpha}$ for $|x|\gg R$ with $d-2s/(1-m)<\alpha< d+2s/m$\,, and we recall that $C_1>0$ depends only on $\alpha,m,d$\,. \begin{equation}\label{HP.s.2}\begin{split}
&\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{1-m} - C_1 R^{d(1-m)-2s}\,t
\le \left(\int_{\mathbb{R}^d}u(t,x)\varphi_R(x)\,{\rm d}x\right)^{1-m}\\ &\le \left(\int_{\mathbb{R}^d\setminus B_{2 R_0}}u(t,x)\varphi_R(x)\,{\rm d}x\right)^{1-m}
+ \left(\int_{B_{2 R_0}}u(t,x)\varphi_R(x)\,{\rm d}x\right)^{1-m}=(I)+(II)\,.\\ \end{split} \end{equation} We first estimate (I), to this end we observe that if we choose $d<\alpha< d+2s/m$ we have that \begin{equation}\label{ineq.phi.R} \begin{split} \int_{\mathbb{R}^d\setminus B_{2 R_0}}\varphi_R(x)\,{\rm d}x
&=\int_{\mathbb{R}^d\setminus B_{R}}\varphi_R(x)\,{\rm d}x +\int_{B_R\setminus B_{2 R_0}}\varphi_R(x)\,{\rm d}x
=\int_{\mathbb{R}^d\setminus B_{R}}\varphi_R(x)\,{\rm d}x +\int_{B_R\setminus B_{2 R_0}}1\,{\rm d}x\\
&\le\int_{\mathbb{R}^d\setminus B_{R}}\dfrac{1}{\big[1+(|x/R|^2-1)^4\big]^{\alpha/8}}\,{\rm d}x+\omega_d R^d\\
&=\omega_d R^d \int_{1}^{+\infty}\dfrac{r^{d-1}}{\big[1+(r^2-1)^4\big]^{\alpha/8}}{\rm d} r +\omega_d R^d\\
&=\omega_d R^d \left[\int_{1}^{4}\dfrac{r^{d-1}}{\big[1+(r^2-1)^4\big]^{\alpha/8}}{\rm d} r
+\int_{4}^{+\infty}\dfrac{r^{d-1}}{\big[1+(r^2-1)^4\big]^{\alpha/8}}{\rm d} r +1\right]\\
&\le_{(a)}\omega_d R^d \left[1+ 4^d+4^{\alpha/8}\int_{4}^{+\infty}r^{d-1-\alpha}{\rm d} r\right]
=\omega_d R^d \left[1+4^d+\frac{4^{\alpha/8}}{\alpha-d}\frac{1}{4^{\alpha-d}}\right]\\
&\le \omega_d 4^d \frac{2(\alpha-d)+1}{\alpha-d}R^d\\ \end{split} \end{equation} where we have used that $R\ge 2 R_0$ and in $(a)$ we have used the fact that $\varphi_R\le 1$ and that $1+\big(r^2-1\big)^4\ge r^8/4$\,, if $r\ge 4$\,. Therefore we have \[ \begin{split} (I)^{\frac{1}{1-m}} &=\int_{\mathbb{R}^d\setminus B_{2 R_0}}u(t,x)\varphi_R(x)\,{\rm d}x
\le \|u(t)\|_{\mathrm{L}^\infty(\mathbb{R}^d\setminus B_{2 R_0})}\int_{\mathbb{R}^d\setminus B_{2 R_0}}\varphi_R(x)\,{\rm d}x\\
&\le \omega_d 4^d \frac{2(\alpha-d)+1}{\alpha-d}R^d \|u(t)\|_{\mathrm{L}^\infty(\mathbb{R}^d\setminus B_{2 R_0})} \le \omega_d 4^d \frac{2(\alpha-d)+1}{\alpha-d}\,R^d\, u(t,0) \end{split} \] where in the last step we have used inequality \eqref{aleks.1} of Step 3, derived from Aleksandrov principle.
\noindent We now estimate $(II)$ as follows: \[\begin{split} (II)^{\frac{1}{1-m}}
&=\int_{B_{2 R_0}}u(t,x)\varphi_R(x)\,{\rm d}x\le \|u(t)\|_{\mathrm{L}^\infty(\mathbb{R}^d)}\int_{B_{2 R_0}}\varphi_R(x)\,{\rm d}x\\
&\le_{(a)} \omega_d\, 2^d \,R_0^d\, \|u(t)\|_{\mathrm{L}^\infty(\mathbb{R}^d)}
\le_{(b)} \omega_d\, 2^d\, R_0^d \,\frac{I_\infty}{t^{d\vartheta}}\,\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{2s\vartheta} \end{split}\]
where in $(a)$ we have used that $\varphi_R(x)=1$ on $B_R$, $2 R_0<R$ and $|B_R|=\omega_d R^d$\,. In $(b)$ we have used the smoothing effect \eqref{Smoothing.FFDE}\,. Plugging the above estimates into \eqref{HP.s.2} gives \begin{equation}\label{HP.s.33}\begin{split}
\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{1-m} - C_1 R^{d(1-m)-2s}\,t
&\le \,\frac{\left[\omega_d\, 2^d\, R_0^d I_\infty\,\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{2s\vartheta}\right]^{1-m}}{t^{d(1-m)\vartheta}}\,\\
&+\left[\omega_d 4^d \frac{2(\alpha-d)+1}{\alpha-d}\right]^{1-m}\,R^{d(1-m)}\, u^{1-m}(t,0) \,,\\ \end{split} \end{equation} or equivalently \begin{equation}\label{HP.s.4}\begin{split}
\left[ \|u_0\|_{\mathrm{L}^1(B_{R_0})}^{1-m}
-\,\frac{\left[\omega_d\, 2^d\, R_0^d I_\infty\,\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{2s\vartheta}\right]^{1-m}}{t^{d(1-m)\vartheta}}\right]&\frac{1}{R^{d(1-m)}}\,
- \frac{C_1\, t}{R^{2s}}\\
&\le \left[\omega_d 4^d \frac{2(\alpha-d)+1}{\alpha-d}\right]^{1-m}\, u^{1-m}(t,0)\,.\\ \end{split} \end{equation}
\noindent$\bullet~$\textsc{Step 5. }\textit{Optimization. }The previous estimate \eqref{HP.s.4} is useful only if we can make sure that the left-hand side has a positive lower bound. Let us write inequality \eqref{HP.s.4} as \begin{equation}\label{HP.s.5}\begin{split} F(t,R):=&\frac{A(t)}{R^{d(1-m)}}\,
- \frac{B\, t}{R^{2s}}
\le \left[\omega_d 4^d \frac{2(\alpha-d)+1}{\alpha-d}\right]^{1-m}\, u^{1-m}(t,0)\,,\\ \end{split} \end{equation} with \begin{equation}\label{F.A.B.C}\begin{split}
A(t)=\left[M-\,\frac{C}{t^{d(1-m)\vartheta}}\right]\,,\; M:=\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{1-m}\,,\; C:=\left[\omega_d\, 2^d\, R_0^d I_\infty\,\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{2s\vartheta}\right]^{1-m}\,,\; B=C_1 \end{split} \end{equation} where $C_1>0$ is the constant of $\mathrm{L}^1$-weighted estimates of Theorem \ref{prop.HP.s}, and $I_\infty>0$ is the constant of the smoothing effects \eqref{Smoothing.FFDE} of Step 2. We now optimize the function $F$ as in Lemma \ref{Lem.Opt} so that there exists \begin{equation}\label{t.min.step5} t_*:=2s\vartheta\left(\frac{C}{M}\right)^{\frac{1}{d(1-m)\vartheta}}
=2s\vartheta\left(\omega_d\, 2^d\, I_\infty\right)^{\frac{1}{d\vartheta}}\,R_0^{\frac{1}{\vartheta}}\,\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{1-m} \end{equation} and \begin{equation}\label{R.max.step5} \begin{split} \overline{R}(t) &=\left(\frac{2sBt}{d(1-m)A(t)}\right)^{\vartheta}
\ge\overline{R}(t_*)=\left[\frac{2s}{d(1-m)}\frac{(2s\vartheta)^{2s\vartheta}}{(2s\vartheta)^{d(1-m)}-1}\right]^{\vartheta}
\frac{B^\vartheta C^{\frac{1}{d(1-m)}}}{M^{\frac{2s\vartheta}{d(1-m)}}}\\ &=\left[\frac{2s}{d(1-m)}\frac{(2s\vartheta)^{2s\vartheta}}{(2s\vartheta)^{d(1-m)}-1}\right]^{\vartheta}
\omega_d^{\frac{1}{d}}\, 2\, R_0 I_\infty^{\frac{1}{d}}C_1^\vartheta \ge_{(a)}2 R_0\,, \end{split} \end{equation} where in $(a)$ we have used that the constants $I_\infty>0$ and $C_1>0$ are constants in the upper bounds \eqref{Smoothing.FFDE} and \eqref{HP.s.step41} respectively, so that we can choose them to be arbitrarily large to fulfill the condition $\overline{R}(t_*)\ge 2 R_0$. Therefore for all $t\ge t_*$ we have that \[\begin{split} \left[\omega_d 4^d \frac{2(\alpha-d)+1}{\alpha-d}\right]^{1-m}\, u^{1-m}(t,0) &\ge F(\overline{R}(t),t) =\left[\left(\frac{2s}{d(1-m)}\right)^{\frac{1}{\vartheta}}-1\right]\left[\frac{d(1-m)}{2s}\right]^{2s\vartheta} \frac{A(t)^{2s\vartheta}}{(Bt)^{d(1-m)\vartheta}}\\ &\ge \left[\left(\frac{2s}{d(1-m)}\right)^{\frac{1}{\vartheta}}-1\right]\left[\frac{d(1-m)}{2s}\right]^{2s\vartheta} \frac{A(t_*)^{2s\vartheta}}{C_1^{d(1-m)\vartheta}}\frac{1}{t^{d(1-m)\vartheta}} \end{split}\] since $A(t)\ge A(t_*)$ for all $t\ge t_*$, and it is easy to check that \[ A(t_*)=\left[1-\,\frac{1}{\left(2s\vartheta\,\right)^{d(1-m)\vartheta}}\right]
\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{1-m} =\frac{\left(2s\vartheta\,\right)^{d(1-m)\vartheta}-1}{\left(2s\vartheta\,\right)^{d(1-m)\vartheta}}
\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{1-m}>0\,,\; \] since we recall that $2s\vartheta>1$. Summing up we have obtained \begin{equation}\label{step5.1}
u(t,0)\ge K_2 \frac{\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{2s\vartheta}}{t^{d\vartheta}}\,, \end{equation} for all $t\ge t_*>0$\,, where $K_2$ depends only on $\alpha,m,s,d$ and takes the form \begin{equation}\label{step5.1.K} K_2:=\left[\left(\frac{2s}{d(1-m)}\right)^{\frac{1}{\vartheta}}-1\right]^{\frac{1}{1-m}} \left[\frac{d(1-m)}{2s} \frac{\left(2s\vartheta\,\right)^{d(1-m)\vartheta}-1}{\left(2s\vartheta\,\right)^{d(1-m)\vartheta}}\right]^{\frac{2s\vartheta}{1-m}} \frac{\alpha-d}{2(\alpha-d)+1}\,\frac{1}{\omega_d 4^d C_1^{d\vartheta}} \end{equation} Note that in the limit $m\to 1$ the constant $K_2\to 0$. By a standard argument it is easy to pass from the center to the infimum on $B_{R_0/2}(0)$ in the above estimates.
\noindent$\bullet~$\textsc{Step 6. }\textit{Positivity backward in time. }Using Benilan-Crandall estimates which depend only on the homogeneity of the equations, cf. \cite{BCr} \begin{equation}\label{BC.est} u_t\le \frac{u}{(1-m)t} \end{equation} we can prove positivity in the time interval $[0,t_*]$. These estimates in the fractional case have been proven in \cite{DPQRV2}, and imply that the function $u(t,x)t^{-1/(1-m)}$ is non-increasing in time, thus for any $t\in (0,t_*)$ and $x\in B_{R_0/2}(0)$\,, inequality \eqref{step5.1} gives \[
u(t,x)\geq \frac{u(t_*,x)}{t_*^{\frac{1}{1-m}}}t^{\frac{1}{1-m}}\ge K_2 \frac{\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{2s\vartheta}}{t_*^{d\vartheta+\frac{1}{1-m}}}t^{\frac{1}{1-m}} =\frac{K_2}
{\left[2^{\frac{2}{\vartheta}+1}s\vartheta\left(\omega_d\,I_\infty\right)^{\frac{1}{d\vartheta}}
\right]^{d\vartheta+\frac{1}{1-m}}}\left[\frac{t}{R_0^{2s}}\right]^{\frac{1}{1-m}} \] where $K_2>0$ is given in \eqref{step5.1.K}\,, and $t_*$ is given by \eqref{t.min.step5}, and it is easy to check that \begin{equation*}\label{step7.1}\begin{split}
\frac{\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{2s\vartheta}}{t_*^{d\vartheta+\frac{1}{1-m}}}
&=\frac{\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{2s\vartheta}}
{\left[2s\vartheta\left(\omega_d\, 2^d\,I_\infty\right)^{\frac{1}{d\vartheta}}\,(R_0)^{\frac{1}{\vartheta}}
\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{1-m}\right]^{d\vartheta+\frac{1}{1-m}}} =\frac{1}
{\left[2^{\frac{2}{\vartheta}+1}s\vartheta\left(\omega_d\,I_\infty\right)^{\frac{1}{d\vartheta}}
\right]^{d\vartheta+\frac{1}{1-m}}R_0^{\frac{2s}{1-m}}}\,. \end{split} \end{equation*} The proof is concluded.\qed
{\bf Remark.} This lower estimate holds in the limit $m\to 1$ and gives lower estimates for the linear fractional heat equation of the following form.
\begin{prop}\label{thm.lower.m=1} Let $u\ge 0$ be a weak solution to the Cauchy Problem \eqref{FDE.eq}, corresponding to $u_0\in \mathrm{L}^1(\mathbb{R}^d)$ and $m=1$. Then $\vartheta=1/2s>0$ and the estimate says that for given $R_0$ and $t_*:=C_1 \,R_0^{2s}$, then \begin{equation}
\inf_{x\in B_{R_0/2}}u(t,x)\ge K_2\dfrac{\|u_0\|_{\mathrm{L}^1(B_{R_0})}}{t^{d/2s}}\quad \mbox{ if } \ t\ge t_*\,. \end{equation} The positive constant $K_2$ depends only on $C_1$, $s$ and $d$. \end{prop}
The proof is easily obtained from the integral representation of the solution.
\subsection{Minimal space-like tail behaviour}\label{ssec.gfd.tail}
As a corollary of the previous lower bound, we obtain a quantitative bound from below for the space-like behaviour of any nonnegative solution. We consider a solution that has a certain initial mass $M$ in the ball of radius 1 and apply the result of Theorem \ref{thm.lower} after displacing the origin of space coordinates to a point $x_0$ with $|x_0|>>1$. We then consider the formula \eqref{t*} for the critical time with center $x_0$ and radius $R_0=|x_0|+2$, so that the ball $B_{R_0}(x_0)$ contains the mass $M$ mentioned above. As $R_0\to \infty$ also $t_*\to\infty$. We can therefore use the lower bound \eqref{low.1.thm} to get an estimate of the form \begin{equation}\label{low.bdd.20}
u(t,x_0)\ge G(u_0,t)\,|x_0|^{-2s/(1-m)}\,, \end{equation} where $G(u_0,t)$ is given in \eqref{low.1.thm}. According to the results of \cite{Vaz2012} the Barenblatt solutions have this precise spatial behaviour in the range $m_c<m<m_1$, with $m_1=d/(d+2s)$, therefore the asymptotic estimate is sharp in this range.
\noindent \subsection{Global spatial lower bounds in the case $m_1<m<1$}\label{ssec.global.GFDE}
We would like to prove that the solution can always be bounded from below by a Barenblatt solution, so the lower bound will be sharp. \begin{figure}
\caption{\noindent\textit{Lower bounds for the spatial decay rates of solutions. Recall that $m_c=(d-2s)/d$ and $m_1=d/(d+2s)$}}
\label{fig.2}
\end{figure} In the range $m_c<m<m_1$ the lower bound of Theorem $\ref{thm.lower}$, gives sharp lower bounds with the same tails as the Barenblatt solutions, as explained in Section \ref{ssec.gfd.tail}. In the range $m_1<m\le 1$ the lower bound given by \eqref{low.bdd.20} is not sharp and the following Theorem \ref{thm.lower.m1} (respectively Proposition $\ref{thm.lower.m=1}$ when $m=1$) proves that any solution with data in $\mathrm{L}^1(\mathbb{R}^d)$ can always be bounded from below by a Barenblatt solution (respectively by the fundamental solution when $m=1$). See Figure \ref{fig.2}.
\begin{thm}[Global Lower Bounds when $m_1<m<1$]\label{thm.lower.m1} Under the conditions of Theorem $\ref{thm.lower}$ we have in the range $m_1<m<1$ \begin{equation}
u(t,x)\ge \frac{C(t)}{|x|^{d+2s}}\qquad \mbox{ when $|x|>>1$.} \end{equation} valid for all $0<t<T$ with some bounded function $C>0$ that depends on $t,T$ and on the data. \end{thm}
\noindent {\sl Proof.~}The proof consists of several steps.
\noindent$\bullet~$\textsc{Step 1. } We begin under the extra assumption that $u_0(x)\ge 2c>0$ in a ball, that can be taken to be $B_1(0)$ by scaling. Therefore, there exists a $t_1$ such that
$u(t,x)\ge c$ for all $0\le t\le t_1$ and all $|x|\le 1$. We also assume that $u_0$ is continuous and goes to zero uniformly as $|x|\to\infty$.
Consider the function $u_{0,\varepsilon}(x)=u_0(x)+\varepsilon$\,, and let $u_\varepsilon(t,x)\ge u(t,x)$ be the corresponding solutions. By the usual theory, \cite{DPQRV2}, we know that $u_\varepsilon\ge \varepsilon$, $u_\varepsilon-\varepsilon\in \mathrm{L}^1(\mathbb{R}^d)$, since $u_0\in \mathrm{L}^1(\mathbb{R}^d)$. Moreover, it is proved in the theory that $u_\varepsilon\to \varepsilon$ as $|x|\to\infty$ for every $t>0$.
\noindent$\bullet~$\textsc{Step 2. }Let $\underline{u}=u^*(t+\tau,x)$, where $u^*$ is the Barenblatt solution with mass $M>0$. We refer to \cite{Vaz2012} for a complete discussion about Barenblatt solutions. By choosing the mass $M:=\int_{\mathbb{R}^d}\underline{u}\,{\rm d}x$ very small, we can find $\tau=\tau(\varepsilon)>0$ so that $\underline{u}(t,x)\le c/4$ for $|x|\ge \delta$ and $0<t<t_1$, and $\underline{u}\le \varepsilon/2$ for $|x|\ge 1$.
\noindent$\bullet~$\textsc{Step 3. }We compare both continuous solutions in the exterior domain $\Omega=\{x: |x|\ge 1\}$. At the first time where $\underline{u}$ touches $u_\varepsilon$ from below at a point $|x|> 1$, we have $\partial_t(u_\varepsilon-\underline{u})\le 0$. Let now $w=u_\varepsilon^m-\underline{u}^m$ to get \[\begin{split}
(-\Delta)^s w(x) &=k_{s,d}\int_{\mathbb{R}^d}\frac{w(x)-w(y)}{|x-y|^{d+2s}}\,{\rm d}y \\
&= k_{s,d}\int_{\{|y|\le 1\}}\frac{w(x)-w(y)}{|x-y|^{d+2s}}\,{\rm d}y
+k_{s,d}\int_{\{|y|\ge 1\}}\frac{w(x)-w(y)}{|x-y|^{d+2s}}\,{\rm d}y=I_1+I_2 \end{split} \]
We want to prove now that both $I_1< 0$ and $I_2< 0$, which leads to a contradiction. In this way we conclude that $\underline{u}<u_\varepsilon$ for all $0<t<t_1$ and all $|x|\ge 1$.
The fact that $I_2\le 0$ comes from the fact that $w(x)=0$ by our choice of $x$ and $w(y)\ge 0$ since $(x,t)$ is the first contact point. Due to the fact that $w(y)>0$ near $|y|=1$ and $w$ is continuous we get $I_2<0$.
As for $I_1$, the denominator is like a constant in that domain and we have to estimate $w(y)$. We know that for $\delta<|y|<1$ we have $u_\varepsilon \ge c$ and $\underline{u}\le c/4$, hence $w(y)\ge C\,c^{m}>0$ and this contributes to the integral something that is like $-C\,c^{m}$, which is not small. In the small ball $|y|\le \delta $ we use the worst case estimate $-w(y)\le \underline{u}^m$ and $\underline{u}(t_1,y)$ has mass at most $M$ which is small, this contributes at most a bad term of order $$
C\int_{|x|\le \delta} \underline{u}^m\,dx\le C M^m \,\delta^{d(1-m)}, $$ which is small if $\delta $ and $M$ are small (here we use $m<1$). Therefore $I_1<0$.
Moreover, one has to ensure that $\underline{u}(0,x)<u_\varepsilon(0,x)$ for $|x|\ge 1$. Since $u_\varepsilon\ge \varepsilon$ and \[
\underline{u}(0,x)=\tau^{-\alpha} F(|x|\,\tau^{-\beta})\le \tau^{-\alpha} F(\tau^{-\beta})=c \tau^{-\alpha+\beta(d+2s)}=c\tau^{2s\beta}\le \varepsilon \]
at least for sufficiently small $\tau$, recall Step 2. By the parabolic comparison theorem we conclude that $\underline{u}<u_\varepsilon$ for all $0<t<t_1$ and all $|x|\ge 1$.
\noindent$\bullet~$\textsc{Step 4. }We finally let $\varepsilon\to 0$ and also $\tau$ may go to zero, and we obtain that $u^*(x,t):=\lim_{\varepsilon\to 0}u_\varepsilon= u$, therefore we can conclude that $u(x,t)\ge c/|x|^{d+2s}$ when $|x|>>1$ and $t=t_1$.
\noindent$\bullet~$\textsc{Step 5. }Once we have obtained the spatial lower bound at times $t\le t_1$, then we can compare with a Barenblatt solution and continue the lower bound for all times, to finally get that the spatial tail of the solution $u$ can be bounded from below by $u\ge c/|x|^{d+2s}$ when $|x|>>1$.\qed
\section{Very fast diffusion range}\label{sect.4}
In the very fast diffusion range $0<m<m_c$, the weighted $\mathrm{L}^1$ estimates of Theorem \ref{prop.HP.s} continue to hold, but this does not allow us to obtain quantitative lower bounds since the technique used in the good fast diffusion range does not work anymore. One problem is that the smoothing effect does not hold for general $\mathrm{L}^1$ initial data, therefore the optimization of Lemma \ref{Lem.Opt} is no longer valid, since $2s<d(1-m)$ in this range. Hence the need for new weighted $\mathrm{L}^1$ estimates, in the form given in Step 3 of the proof of Theorem \ref{thm.lower.subcrit.} below. Another problem typical of this range of exponents is the presence of the extinction time, which enters directly in the estimates of Theorem \ref{thm.lower.subcrit.}. We present here a technique that is based on the careful use of weight factors.
\begin{thm}[Local lower bounds I]\label{thm.lower.subcrit.} Let $u$ be a weak solution to the equation \eqref{FDE.eq}, corresponding to $u_0\in \mathrm{L}^1(\mathbb{R}^d)\cap\mathrm{L}^{p_c}(\mathbb{R}^d)$ with $0<m<m_c=(d-2s)/d$, $0<s<1$ and let $p_c=d(1-m)/(2s)$. Let also $T=T(u_0)$ be the finite extinction time for $u$. Then for every $R_0>0$, there exists a time \begin{equation}\label{t*.subcrit}
t_*:= C_*\,R_0^{2s-d(1-m)}\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{1-m}\le T(u_0)\,, \end{equation} such that \begin{equation}\label{thm.pos.T.VFDE}
\inf_{x\in B_{R_0/2}}u(t,x)\ge K \frac{\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{\frac{1}{m}} }{R_0^{\frac{d-2s}{m}} } \frac{t^{\frac{1}{1-m}}}{T^{\frac{1}{m(1-m)}}} \quad \mbox{ if } \ 0\le t\le t_*\,, \end{equation} where $C_*$ and $K$ are explicit positive universal constants, that depend only on $m,s,d$. \end{thm} The expression of the constants is \begin{equation}\label{const.subcrit}\begin{split}
C_*&:=\frac{k_{s,d}\,\omega_d^m}{4^{d+1-2s}}\,,\qquad K :=\left(\frac{k_{s,d}}{4^{3d+1-2s}\,d}\right)^{\frac{1}{m}}\,,\\
\end{split}\end{equation}\normalcolor where $k_{s,d}$ is the constant of the representation formula $ \varphi(x)=k_{s,d}\int_{\mathbb{R}^d}\frac{\rho(y)}{|x-y|^{d-2s}}\,{\rm d}y$ and $\omega_d$ is the volume of the unit ball.
\noindent {\sl Proof. of Theorem \ref{thm.lower.subcrit.}~} It is divided into several steps as follows.
\noindent$\bullet~$\textsc{Step 1. }\textit{Reduction. }By the comparison principle it is sufficient to prove lower bounds for solutions $u$ to the following reduced problem: \begin{equation}\label{FDE.eq.Red.VFDE} \left\{ \begin{array}{lll} \partial_t u +(-\Delta)^s (u^m)=0\,,\; &\mbox{in }(0,\infty)\times\mathbb{R}^d\,,\\ u(0,\cdot)=u_0\chi_{B_{R_0}}=\underline{u_0}\,,\; &\mbox{in }\mathbb{R}^d\,, \end{array} \right. \end{equation}
where $0<m<m_c$\,, $0<s<1$\,, and $R_0>0$\,. We only assume that $0\le u_0\in\mathrm{L}^1(B_{R_0})$\,, which implies that $\underline{u_0}\in \mathrm{L}^1(\mathbb{R}^d)$ since $\mathrm{supp}(\underline{u_0})\subseteq B_{R_0}$ and also that $\|\underline{u_0}\|_{\mathrm{L}^1(\mathbb{R}^d)}=\|u_0\|_{\mathrm{L}^1(B_{R_0})}$\,. It is not restrictive to assume that the ball $B_{R_0}$ is centered at the origin. We call $M_0=\|u_0\|_{\mathrm{L}^1(B_{R_0})}$.
\noindent$\bullet~$\textsc{Step 2. }\textit{Aleksandrov principle. } We recall Theorem 11.2 of \cite{Vaz2012}. In view of the fact that the initial function is supported in the ball $B_{R_0}(0)$, we have that \[
u(t,0)\ge u(t,x)\,,\qquad\mbox{for all}\; t>0\;\mbox{and}\; |x|\ge 2R_0\,. \] Therefore, one has \begin{equation}\label{aleks.1.PME} \sup_{x\in \mathbb{R}^d\setminus B_{2R_0}}u(t,x)\le u(t,0)\,. \end{equation}
\noindent$\bullet~$\textsc{Step 3. }\textit{$\mathrm{L}^1$ Weighted estimates. }Choose a test function $\varphi\ge 0$ such that $(-\Delta)^s\varphi=\rho$ with $\rho=0$ on $B_{2R_0}$ and on $B_{R_1}^c$, and $0< \rho \le 1$ on the annulus $A:= B_{R_1}\setminus B_{2R_0}$, with $0<2R_0\le R_1$, and $R_0$ as in Step 1, such that $\mathrm{supp}(u_0)\subseteq B_{R_0}$. Using the explicit representation of $\varphi$ in terms of $\rho$ and the integral kernel $K(x,y)=k_{s,d}|x-y|^{-(d-2s)}$ we get the estimates \[
\varphi(x)=k_{s,d}\int_{\mathbb{R}^d}\frac{\rho(y)}{|x-y|^{d-2s}}\,{\rm d}y\ge \frac{k_{s,d} \|\rho\|_1}{(R_1+R_0)^{d-2s} }\ge k_0>0\,,\qquad\mbox{for all $x\in B_{R_0}(0)$\,,} \]
since $|x-y|\le R_0+R_1$. We can always choose $\rho\ge 1/2$ on the smaller annulus $A_0= B_{2R_0+3(R_1-2R_0)/4}\setminus B_{2R_0+(R_1-2R_0)/4}\subseteq A$\,, so that \[ \begin{split}
\|\rho\|_1
&= \int_{A}\rho(x)\,{\rm d}x
\ge\int_{A_0}\rho(x)\,{\rm d}x
\ge \frac{|A_0|}{2}
=\frac{1}{2}|B_{2R_0+3(R_1-2R_0)/4}\setminus B_{2R_0+(R_1-2R_0)/4}|\\
&=\frac{\omega_d}{2}\left[\left(2R_0+\frac{1}{4}(R_1-2R_0) + (R_1-2R_0)\right)^d-\left(2R_0+\frac{1}{4}(R_1-2R_0)\right)^d\right]
\ge \frac{\omega_d}{2}(R_1-2R_0)^d \end{split} \] since $(a+b)^d- a^d\ge b^d$ for any $a,b\ge 0$. Then $k_0>0$ takes the form \begin{equation}\label{k0.VFFDE} k_0:=\frac{k_{s,d}\,\omega_d}{2}\frac{(R_1-2R_0)^d}{(R_1+R_0)^{d-2s}} \end{equation}\normalcolor Now we observe that letting $T=T(u_0)>0$ be the finite extinction time for the reduced problem \eqref{FDE.eq.Red.VFDE}, we obtain \begin{equation}\label{step.3.1}\begin{split} \int_{\mathbb{R}^d}u_0(x)\varphi\,{\rm d}x &=\int_{\mathbb{R}^d}u(0,x)\varphi\,{\rm d}x - \int_{\mathbb{R}^d}u(T,x)\varphi\,{\rm d}x
=-\int_0^T\int_{\mathbb{R}^d}\partial_\tau u(\tau,x)\varphi(x)\,{\rm d}x {\rm d}\tau\\
&= \int_0^T\int_{\mathbb{R}^d}(-\Delta)^s \big(u^m(\tau,x)\big)\,\varphi(x)\,{\rm d}x {\rm d}\tau
= \int_0^T\int_{\mathbb{R}^d}u^m(\tau,x)(-\Delta)^s\varphi(x)\,{\rm d}x {\rm d}\tau\\
&= \int_0^T\int_{A}u^m(\tau,x)\rho(x) \,{\rm d}x {\rm d}\tau\\
&= \int_0^{t_*}\int_{A}u^m(\tau,x)\rho(x)\,{\rm d}x {\rm d}\tau + \int_{t_*}^T\int_{A}u^m(\tau,x)\rho(x)\,{\rm d}x {\rm d}\tau :=(I)+(II)\\ \end{split} \end{equation} where $0\le t_*\le T$ will be chosen later.
Next we estimate $(I)$. We first observe that \begin{equation*} \begin{split} \int_{A}u^m(\tau,x)\rho(x)\,{\rm d}x &\le \int_{B_{R_1}}u^m(\tau,x)\,{\rm d}x
\le |B_{R_1}|^{1-m}\left(\int_{B_{R_1}}u(\tau,x)\,{\rm d}x\right)^m\\
&\le |B_{R_1}|^{1-m}\left(\int_{\mathbb{R}^d}u(\tau,x)\,{\rm d}x\right)^m
\le |B_{R_1}|^{1-m}\left(\int_{\mathbb{R}^d}\underline{u_0}(x)\,{\rm d}x\right)^m\\
&\le |B_{R_1}|^{1-m}M_0^m \end{split} \end{equation*}
since $0<m<1$\,, $0<\rho\le 1$, and in the last step we have used the fact that $\|u(t)\|_{\mathrm{L}^1(\mathbb{R}^d)}\le \|\underline{u_0}\|_{\mathrm{L}^1(\mathbb{R}^d)}$, which has been proven in \cite{DPQRV2}\,, together with the fact that $M_0=\|u_0\|_{\mathrm{L}^1(B_{R_0})}=\|\underline{u_0}\|_{\mathrm{L}^1(\mathbb{R}^d)}$. Therefore, \begin{equation}\label{step.3.2} \begin{split} (I):= \int_0^{t_*}\int_{A}u^m(\tau,x)\rho(x)\,{\rm d}x{\rm d}\tau
&\le |B_{R_1}|^{1-m}\,t_*\,M_0^m\,. \end{split} \end{equation} We now estimate (II) by using the Aleksandrov principle: \begin{equation}\label{step.3.3} \begin{split} (II):= \int_{t_*}^T \int_{A}u^m(\tau,x)\rho(x)\,{\rm d}x{\rm d}\tau & \le \int_{t_*}^T \int_{A}u^m(\tau,x)\,{\rm d}x{\rm d}\tau =_{(a)} (T-t_*) \int_{A}u^m(\tau_1,x)\,{\rm d}x\\
&\le (T-t_*) |A| \sup_{x\in A} u^m(\tau_1,x)
\le_{(b)} (T-t_*) |A| u^m(\tau_1,0) \end{split} \end{equation} where in $(a)$ we have used the mean value theorem for the function $U(\tau)=\int_{A}u^m(\tau,x)\,{\rm d}x$ so that there exists a $\tau_1\in[t_*,T]$ such that $\int_{t_*}^T U(\tau){\rm d}\tau=(T-t_*)U(\tau_1)$. In $(b)$ we have used the Aleksandrov principle, which gives $\sup\limits_{x\in A} u^m(\tau_1,x)\le u^m(\tau_1,0)$. Summing up, we have obtained, joining \eqref{step.3.1}, \eqref{step.3.2} and \eqref{step.3.3} \begin{equation}\label{step.3.4}\begin{split} \int_{\mathbb{R}^d}u_0(x)\varphi\,{\rm d}x
&\le |B_{R_1}|^{1-m}\,t_*\,M_0^m+ (T-t_*) |A| u^m(\tau_1,0) \end{split} \end{equation} for some $\tau_1\in[t_*,T]$. In addition, we have $\int_{\mathbb{R}^d}u_0(x)\varphi\,{\rm d}x \ge M_0 k_0$. We finally remark that from inequality \eqref{step.3.4} we get a lower bound for the extinction time, just by letting $t_*=T$ in formula \eqref{step.3.4}: \begin{equation}\label{low.bdd.T} k_0 M_0\le \int_{\mathbb{R}^d}u_0(x)\varphi\,{\rm d}x
\le |B_{R_1}|^{1-m}\,T\,M_0^m\,,\qquad\quad\mbox{that is }\qquad\quad T\ge k_0 \frac{M_0^{1-m}}{|B_{R_1}|^{1-m}} \end{equation}
\noindent$\bullet~$\textsc{Step 4. }\textit{Choosing the critical time $t_*$. }We now choose $t_*$ to be small enough, more precisely \begin{equation}\label{step.4.t*}
t_*:= \frac{k_0}{2}\frac{M_0^{1-m}}{|B_{R_1}|^{1-m}}\le T\,, \end{equation} we note that $t_*\le T$ follows by \eqref{low.bdd.T}. With this choice of $t_*$, inequality \eqref{step.3.4} becomes \begin{equation} \begin{split}
\frac{k_0}{2} M_0 = k_0 M_0-|B_{R_1}|^{1-m}\,t_*\,M_0^m
&\le (T-t_*) |A| u^m(\tau_1,0)\le T\, |A|\, u^m(\tau_1,0) \end{split} \end{equation} which is the desired positivity estimate at a time $\tau_1\in [t_*,T]$\,, namely \begin{equation}\label{step.4.1} \begin{split}
\frac{k_0\, M_0 }{2T\, |B_{R_1}\setminus B_{2R_0}|} \le u^m(\tau_1,0) \end{split} \end{equation}
\noindent$\bullet~$\textsc{Step 5. }\textit{Positivity backward in time. }Using Benilan-Crandall estimates which depend only on the homogeneity of the equations, cf. \cite{BCr} \begin{equation}\label{BC.est.1} u_t\le \frac{u}{(1-m)t} \end{equation} we can prove positivity in the time interval $[0,\tau_1]$. These estimates in the fractional case have been proven in \cite{DPQRV2}, and imply that the function $u(t,x)t^{-1/(1-m)}$ is non-increasing in time, thus for any $t\in [0,\tau_1]$ we have that \begin{equation}\label{111}\begin{split} u(t,0) &\ge \frac{t^{\frac{1}{1-m}}}{\tau_1^{\frac{1}{1-m}}}u(\tau_1,0)\ge \frac{t^{\frac{1}{1-m}}}{T^{\frac{1}{1-m}}}u(\tau_1,0)
\ge \left[\frac{k_0\, M_0 }{2 T\, |B_{R_1}\setminus B_{2R_0}|}\right]^{\frac{1}{m}}\frac{t^{\frac{1}{1-m}}}{T^{\frac{1}{1-m}}}\\ &=\left[\frac{k_{s,d}}{4(R_1+R_0)^{d-2s}}\frac{(R_1-2R_0)^d}{R_1^d-(2R_0)^d}\right]^{\frac{1}{m}} \frac{t^{\frac{1}{1-m}}}{T^{\frac{1}{m(1-m)}}}M_0^{\frac{1}{m}}\\ \end{split} \end{equation} since $t_*\le \tau_1\le T$\,. Moreover we have that \begin{equation}\label{222}\begin{split} u(t,0) &\ge\left[\frac{k_{s,d}}{4(R_1+R_0)^{d-2s}}\frac{(R_1-2R_0)^{d-1}}{d(2R_0)^{d-1}}\right]^{\frac{1}{m}} \frac{t^{\frac{1}{1-m}}}{T^{\frac{1}{m(1-m)}}}M_0^{\frac{1}{m}}\\ &=\left(\frac{k_{s,d}}{4d}\right)^{1/m}\left(\frac{R_1}{2R_0}-1\right)^{\frac{d-1}{m}} \frac{t^{\frac{1}{1-m}}}{T^{\frac{1}{m(1-m)}}}\frac{M_0^{\frac{1}{m}}}{(R_1+R_0)^{\frac{d-2s}{m}}}\\ \end{split} \end{equation} where we have used the numerical inequality $a^d-b^d\le da^{d-1}(a-b)$\,, valid for any $a=R_1>2R_0=b$ to pass from \eqref{111} to \eqref{222}. By a standard argument it is easy to pass from the center to the infimum on $B_{R_0/2}(0)$ in the above estimates. The proof is concluded once we let $R_1=3R_0$.\qed
\noindent {\bf Remarks. }(i) This result can be written alternatively as saying that there exists a universal constant $K_1=\max\{K^{-m}, C_*^{1/(1-m)}\}$ such that for all solutions in the above class we have: for any $0\le t\le T$ and $R>0$ \begin{equation}\label{AC.s.VFDE}
\frac{\|u_0\|_{\mathrm{L}^1(B_{R})}}{R^d }\le K_1\left[\frac{t^{\frac{1}{1-m}}}{R^{\frac{2s}{1-m}}} +\frac{T^{\frac{1}{1-m}}} {t^{\frac{m}{1-m}}R^{2s}} \inf_{x\in B_{R/2}}u^m(t,x) \right]. \end{equation} This is easy to prove: by the previous Theorem, we have that either $t_*\le t$, that is \[
\frac{\|u_0\|_{\mathrm{L}^1(B_{R})}}{R^d}\le \left[\frac{t}{C_*R^{2s}}\right]^{\frac{1}{1-m}} \] or that $0\le t\le t_*$ and \eqref{thm.pos.T.VFDE} holds, namely \[
\frac{\|u_0\|_{\mathrm{L}^1(B_{R})}}{R^d } \le \frac{T^{\frac{1}{1-m}}} {K^{m} t^{\frac{m}{1-m}}R^{2s}} \inf_{x\in B_{R/2}}u^m(t,x) \] therefore, letting $K_1=\max\{K^{-m}, C_*^{1/(1-m)}\}$ we get \eqref{AC.s.VFDE}\,.
This equivalent version is in complete formal agreement with similar estimates proved by the authors in \cite{BV-ADV}, in the case $s=1$. However, our proof below differs from the one in \cite{BV-ADV}, and provides an alternative proof when $s=1$. On the other hand, here we are considering solutions to the Cauchy problem, while in \cite{BV-ADV} we consider local weak solutions (i.e. without specifying boundary conditions). These estimates have been called Aronson-Caffarelli estimates in \cite{BV-ADV}, when $s=1$, since they are quite similar to the one that can be obtained for $m>1$, see Section \ref{sect.PME}. Finally we shall remark that in Section \ref{sect.ext.T} we will obtain quantitative upper estimates on the extinction time, and this will help to eliminate $T$ from the above lower estimates.
\noindent (ii) By comparison it is easy to prove that these estimates hold for a larger class of solutions, more precisely for the class of very weak solutions to the Cauchy Problem \eqref{FDE.eq} constructed in Theorem \ref{exist.large}, Section \ref{sect.exist.large}. This implies that the positivity result holds for solutions $u(t,\cdot)\in\mathrm{L}^1(\mathbb{R}^d, \varphi\,{\rm d}x)$ corresponding to initial data $0\le u_0\in\mathrm{L}^1(\mathbb{R}^d, \varphi\,{\rm d}x)$, where $\varphi$ is as in Theorem $\ref{prop.HP.s}$ with decay at infinity $|x|^{-\alpha}$, $d-[2s/(1-m)]<\alpha<d+(2s/m)$.
Once comparison is used, we can use as $T$ the extinction time of the reduced problem \ref{FDE.eq.Red.VFDE} in Step 1 of the above proof. In this way the quantitative result applies to solutions $u$ that may not extinguish in finite time. Therefore we can interpret $T$ as the \textit{minimal life time for the solution $u(t,\cdot)$}, a concept that was already introduced by the authors in \cite{BV-ADV}\,, for which formula \eqref{t*.subcrit} provides a quantitative lower bound, namely \begin{equation}\label{low.not.extinct}
t_*:= C_*\,R_0^{2s-d(1-m)}\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{1-m} \le T(u_0)\,. \end{equation} \begin{cor}[Solutions that do not extinguish in finite time]\label{cor.not.ext.} Let $0<m<m_c$ and consider an initial datum $0\le u_0\in\mathrm{L}^1(\mathbb{R}^d, \varphi\,{\rm d}x)$, where $\varphi$ is as in Theorem $\ref{prop.HP.s}$, in particular, when $u_0\in\mathrm{L}^1(\mathbb{R}^d)$. Assume moreover that \begin{equation}\label{555}
\liminf_{R\to +\infty}\;R^{\frac{2s}{1-m}-d}\|u_0\|_{\mathrm{L}^1(B_R)}=+\infty\,. \end{equation} Then the corresponding solution $u(t,x)$ exists and is positive globally in space and time, hence does not extinguish in finite time. Moreover the quantitative lower bounds \eqref{thm.pos.T.VFDE} of Theorem $\ref{thm.lower.subcrit.}$ hold for any $0\le t\le t_*$ with $t_*$ given in \eqref{t*.subcrit} and $T=T(u_0\chi_{B_{R_0}})<+\infty$ is the extinction time of a reduced problem. \end{cor} \noindent {\sl Proof.~}If we consider an initial datum with that behaviour at infinity, then by Theorem \ref{exist.large} there exists a very weak solution. By letting $R\to +\infty$ in the above lower bound \eqref{low.not.extinct} for $T$, we conclude that the minimal life time $T(u_0\chi_{B_{R}})\to \infty$, recalling that in this very fast diffusion range we have $2s<d(1-m)$, since $0<m<m_c$. \qed
\noindent\textbf{Remark. }A practical assumption on the initial datum $u_0$ that implies \eqref{555} is \begin{equation}\label{555.1}
\liminf_{|x|\to +\infty}\;|x|^{\frac{2s}{1-m}}u_0(x)=+\infty\,. \end{equation} In view of Proposition $\ref{prop.ext}$ below, the exponent is sharp.
\subsection{Estimating the extinction time. }\label{sect.ext.T}
We next estimate the extinction time in terms of the initial data, extending a classical result of Benilan and Crandall \cite{BCr-cont}. This is needed to eliminate the dependence on $T$ in the above lower estimates when we consider initial data in $\mathrm{L}^1(\mathbb{R}^d)\cap\mathrm{L}^{p_c}(\mathbb{R}^d)$. For a detailed study of extinction time in the standard fast diffusion equation, see \cite{VazLN}.
\begin{prop}[Upper bounds for the extinction time]\label{prop.ext} Let $u$ be a weak solution to the equation \eqref{FDE.eq}, corresponding to $u_0\in \mathrm{L}^1(\mathbb{R}^d)\cap\mathrm{L}^{p_c}(\mathbb{R}^d)$ with $0<m<m_c=d/(d-2s)$, $0<s<1$ and let $p_c=d(1-m)/(2s)$. Then for all $0\le \tau\le t$ the following estimate holds true \begin{equation}\label{ineq.pc}
\left[\int_{\mathbb{R}^d}|u(t,x)|^{p_c}\,{\rm d}x\right]^{\frac{2s}{d}}\le \left[\int_{\mathbb{R}^d}|u(\tau,x)|^{p_c}\,{\rm d}x\right]^{\frac{2s}{d}} - \frac{4m[d(1-m)-2s]}{d(d-2s)\mathcal{S}_s^2}(t-\tau) \end{equation} Moreover, there exists a finite extinction time $T\ge 0$ which can be bounded above as follows \begin{equation}\label{ineq.T.ext}
T\le \frac{d(d-2s)\mathcal{S}_s^2}{4m[d(1-m)-2s]} \|u_0\|_{\mathrm{L}^{p_c}(\mathbb{R}^d)}^{1-m}\,. \end{equation} \end{prop}
\noindent {\sl Proof.~}The proof presented below is analogous to the one of Theorem 9.5 of \cite{DPQRV2}\,, but here we pay attention to the quantitative estimates\,. We multiply the equation by $|u|^{p-2}u$ with $p>1$, and integrate in $\mathbb{R}^d$. Using the Stroock-Varopoulos inequality \eqref{StrVar.ineq} in the form \eqref{StrVar.ineq.um}, we get \begin{equation}\label{T.1.ext}\begin{split}
\frac{{\rm d}}{\,{\rm d}t}\int_{\mathbb{R}^d}|u(t,x)|^p\,{\rm d}x
&=-p\int_{\mathbb{R}^d}|u|^{p-2}u\,(-\Delta)^s (|u|^{m-1}u)\,\,{\rm d}x\\
&\le -\frac{4mp(p-1)}{(p+m-1)^2}\int_{\mathbb{R}^d}\left|(-\Delta)^{\frac{s}{2}}|u|^{\frac{p+m-1}{2}}\right|^2\,{\rm d}x\\
&\le -\frac{4mp(p-1)}{(p+m-1)^2\mathcal{S}_s^2}\left(\int_{\mathbb{R}^d}|u|^{\frac{d(p+m-1)}{d-2s}}\,{\rm d}x\right)^{\frac{d-2s}{d}} \end{split} \end{equation}
where in the last step we have used the Sobolev inequality \eqref{Sob.Fract} applied to $f=|u|^{\frac{p+m-1}{2}}$\,. Now we make the choice $p=p_c=d(1-m)/2s$, so that $p_c=\frac{d(p_c+m-1)}{d-2s}$, and we know that $p_c>1$ if and only if $m<m_c=d/(d-2s)$\,, and inequality \eqref{T.1.ext} becomes \begin{equation}\label{T.2.ext}\begin{split}
\frac{{\rm d}}{\,{\rm d}t}\int_{\mathbb{R}^d}|u(t,x)|^{p_c}\,{\rm d}x
&\le -\frac{4m[d(1-m)-2s]}{2s(d-2s)\mathcal{S}_s^2}\left(\int_{\mathbb{R}^d}|u(t,x)|^{p_c}\,{\rm d}x\right)^{1-\frac{2s}{d}} \end{split} \end{equation} Integrating the above differential inequality on $(\tau,t)$ gives both \eqref{ineq.pc} and inequality \eqref{ineq.T.ext}\,.\qed
Thanks to the above estimates we can get rid of the extinction time $T$ in the lower estimates of Theorem \ref{thm.lower.subcrit.}. \begin{thm}[Local lower bounds II]\label{thm.lower.subcrit.Tpc} Let $u$ be a weak solution to the equation \eqref{FDE.eq}, corresponding to $u_0\in \mathrm{L}^1(\mathbb{R}^d)\cap\mathrm{L}^{p_c}(\mathbb{R}^d)$ with $0<m<m_c=d/(d-2s)$, $0<s<1$ and let $p_c=d(1-m)/(2s)$. Then for every ball $B_{2R_0}\subset \Omega$, there exists a time \begin{equation}\label{t*.subcrit.2}
t_*:= C_*\,R_0^{2s-d(1-m)}\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{1-m}\le T(u_0)\le \overline{C}\|u_0\|_{\mathrm{L}^{p_c}(\mathbb{R}^d)}^{1-m}\,, \end{equation} where we recall that $T(u_0)$ is the finite extinction time, such that \begin{equation}\label{thm.pos.T.pc.VFDE}
\inf_{x\in B_{R_0/2}}u(t,x)\ge K_2 \frac{\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{\frac{1}{m}} }{R_0^{\frac{d-2s}{m}} } \frac{t^{\frac{1}{1-m}}}{\|u_0\|_{\mathrm{L}^{p_c}(B_{R_0})}^{\frac{d}{2m}}} \quad \mbox{ if } \ 0\le t\le t_*\,, \end{equation} where $C_*$ and $K_2$ are explicit positive universal constants that depend only on $m,s,d$. \end{thm} The expression of the constants \begin{equation}\label{const.subcrit.T.pc}\begin{split} K_2 :=K\left[\frac{4m[d(1-m)-2s]}{d(d-2s)\mathcal{S}_s^2}\right]^{\frac{1}{m(1-m)}}\,,\qquad \overline{C}:=\frac{d(d-2s)\mathcal{S}_s^2}{4m[d(1-m)-2s]} \\
\end{split}\end{equation}\normalcolor where $C_*$ and $K$ are as in \eqref{thm.pos.T.VFDE} and $k_{s,d}$ is the constant of the representation formula $ \varphi(x)=k_{s,d}\int_{\mathbb{R}^d}\frac{\rho(y)}{|x-y|^{d-2s}}\,{\rm d}y$.
\noindent {\bf Remark.} This result can be written alternatively as saying that there exists a universal constant $K_3=\max\{K_2^{-m}, C_*^{1/(1-m)}\}$ such that for all solutions in the above class we have: for any $0\le t\le T$ and $R>0$ \begin{equation}\label{AC.s.VFDE.pc}
\frac{\|u_0\|_{\mathrm{L}^1(B_{R})}}{R^d \left[1\vee\|u_0\|_{\mathrm{L}^{p_c}(B_{R})}\right]} \le K_3\left[\frac{t^{\frac{1}{1-m}}}{R^{\frac{2s}{1-m}}} +\frac{1}{t^{\frac{m}{1-m}}R^{2s}} \inf_{x\in B_{R/2}}u^m(t,x) \right] \end{equation} This equivalent version is in complete formal agreement with similar estimates proved by the authors in \cite{BV-ADV}, in the case $s=1$.
\noindent {\sl Proof of Theorem \ref{thm.lower.subcrit.Tpc}.~}The proof is the same as for Theorem \ref{thm.lower.subcrit.}, as far as the first $3$ steps are concerned. At the end of Step 3, we need to bound from above the extinction time for the reduced problem \eqref{FDE.eq.Red.VFDE} with the estimates \eqref{ineq.T.ext} which give \begin{equation}\label{ineq.T.ext.2}
T\le \frac{d(d-2s)\mathcal{S}_s^2}{4m[d(1-m)-2s]} \left[\int_{\mathbb{R}^d}|u_0(x)|^{p_c}\,{\rm d}x\right]^{\frac{2s}{d}}=\frac{d(d-2s)\mathcal{S}_s^2}{4m[d(1-m)-2s]} \left[\int_{B_{R_0}}|u_0(x)|^{p_c}\,{\rm d}x\right]^{\frac{2s}{d}}\,, \end{equation} since $\mathrm{supp}(u_0)\subseteq B_{R_0}$\,. Then the proof follows simply by replacing $T$ with the above upper bound.\qed
\section{The Porous medium case}\label{sect.PME}
Lower estimates for nonnegative solutions of the standard porous medium equation were obtained in Aronson-Caffarelli in a famous paper \cite{ArCaff}. We want to show in this section how such a priori estimates extend to the fractional version considered in this paper.
\begin{thm}[Local lower bound]\label{thm.lower.pme}
Let $u$ be a weak solution to Equation \eqref{FDE.eq}, corresponding to $u_0\in \mathrm{L}^1(\mathbb{R}^d)$, and let $m>1$. We put $\vartheta:=1/[2s+d(m-1)]>0$. Then there exists a time \begin{equation}\label{t*.PME}
t_*:=C \,R^{2s+d(m-1)}\,\|u_0\|_{\mathrm{L}^1(B_{R})}^{-(m-1)} \end{equation} such that for every $t\ge t_*$ we have the lower bound \begin{equation}
\inf_{x\in B_{R/2}}u(t,x)\ge K\,\dfrac{\|u_0\|_{\mathrm{L}^1(B_{R})}^{2s\vartheta}}{t^{d\vartheta}} \end{equation}
valid for all $R>0$. The positive constants $C$ and $K$ depend only on $m,s$ and $d$, and
not on $R$. \end{thm}
\noindent {\bf Remark.} This result can be written alternatively as saying that there exists a universal constant $C_1=C_1(d,s,m)$ such that for all solutions in the above class we have \begin{equation}\label{AC.s.} \int_{B_R(0)} u_0(x)\,dx \le C_1\left (R^{1/\vartheta(m-1)}t^{-1/(m-1)}+ u(t,0)^{1/2s\vartheta}t^{d/2s}\right). \end{equation} This equivalent version is in complete formal agreement with Aronson-Caffarelli's estimate for $s=1$. However, our proof below differs very strongly from the ideas used in Aronson-Caffarelli's case since we cannot use the property of finite propagation of solutions with compact support, which is false for $s<1$.
\noindent {\sl Proof.~} It is divided into several steps as follows.
\noindent$\bullet~$\textsc{Step 1. }\textit{Reduction. }By the comparison principle it is sufficient to prove lower bounds for solutions $u$ to the following reduced problem: \begin{equation}\label{FFDE.Prob.Red.PME} \left\{ \begin{array}{lll} \partial_t u +(-\Delta)^s (u^m)=0\,,\; &\mbox{in }(0,\infty)\times\mathbb{R}^d\,,\\ u(0,\cdot)=u_0\chi_{B_{R_0}}=\underline{u_0}\,,\; &\mbox{in }\mathbb{R}^d\,, \end{array} \right. \end{equation}
where $m>1$\,, $0<s<1$\,, and $R_0>0$\,. We only assume that $0\le u_0\in\mathrm{L}^1(B_{R_0})$\,, which implies that $\underline{u_0}\in \mathrm{L}^1(\mathbb{R}^d)$ since $\mathrm{supp}(\underline{u_0})\subseteq B_{R_0}$ and also that $\|\underline{u_0}\|_{\mathrm{L}^1(\mathbb{R}^d)}=\|u_0\|_{\mathrm{L}^1(B_{R_0})}$\,. It is not restrictive to assume that the ball $B_{R_0}$ is centered at the origin.
\noindent$\bullet~$\textsc{Step 2. }\textit{Smoothing effects. } In \cite{DPQRV2} there are the global $\mathrm{L}^1$-$\mathrm{L}^\infty$ smoothing effects, which can be applied to solutions to our reduced Problem \ref{FFDE.Prob.Red.PME} as follows:\normalcolor \begin{equation}\label{Smoothing.FPME}
\|u(t)\|_{\mathrm{L}^\infty(\mathbb{R}^d)}\le \frac{I_\infty}{t^{d\vartheta}}\|\underline{u_0}\|_{\mathrm{L}^1(\mathbb{R}^d)}^{2s\vartheta}
=\frac{I_\infty}{t^{d\vartheta}}\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{2s\vartheta} \end{equation} where $\vartheta=1/[2s+d(m-1)]$ and the constant $I_\infty$ only depends on $d,s,m$\,.
\noindent$\bullet~$\textsc{Step 3. }\textit{Aleksandrov principle. } We recall Theorem 11.2 of \cite{Vaz2012}. In view of the fact that the initial function is supported in the ball $B_{R_0}(0)$, we have that \[
u(t,0)\ge u(t,x)\,,\qquad\mbox{for all}\; t>0\;\mbox{and}\; |x|\ge 2R_0\,. \] Therefore, one has \begin{equation}\label{aleks.1.PME.1} \sup_{x\in \mathbb{R}^d\setminus B_{2R_0}}u(t,x)\le u(t,0)\,. \end{equation}
\noindent$\bullet~$\textsc{Step 4. }\textit{Weighted estimates. }If $\psi$ is a smooth, nonnegative, and sufficiently decaying function, we have \[ \begin{split}
\left|\frac{{\rm d}}{\,{\rm d}t}\int_{\mathbb{R}^d}u(t,x)\psi(x)\,{\rm d}x\right|
&=\left|\int_{\mathbb{R}^d}\left((-\Delta)^s u^m \right)\psi\,{\rm d}x\right|
=_{(a)}\left|\int_{\mathbb{R}^d} u^m (-\Delta)^s\psi\,{\rm d}x\right|\\
&\le \|u(t)\|_{\mathrm{L}^\infty(\mathbb{R}^d)}^{m-1}\left\|(-\Delta)^s\psi\right\|_{\mathrm{L}^\infty(\mathbb{R}^d)} \int_{\mathbb{R}^d} u(t,x) \,{\rm d}x\\
&\le_{(b)} \frac{I_\infty^{m-1}}{t^{d\vartheta(m-1)}}\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{2s\vartheta(m-1)}
\left\|(-\Delta)^s\psi\right\|_{\mathrm{L}^\infty(\mathbb{R}^d)} \int_{\mathbb{R}^d} \underline{u_0}(x) \,{\rm d}x\\ &:= \frac{I_\infty^{m-1}}{t^{d\vartheta(m-1)}}
\left\|(-\Delta)^s\psi\right\|_{\mathrm{L}^\infty(\mathbb{R}^d)}\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{2s\vartheta(m-1)+1} := \frac{K[u_0,\psi]}{t^{d\vartheta(m-1)}}\,. \end{split} \] Notice that in $(a)$ we have used the fact that $(-\Delta)^s$ is a symmetric operator, while in $(b)$ we have used the smoothing effect \eqref{Smoothing.FPME} of Step 2 and the conservation of mass: $\int_{\mathbb{R}^d} u(t,x) \,{\rm d}x=\int_{\mathbb{R}^d} \underline{u_0}(x) \,{\rm d}x$\,, for all $t>0$, together with the fact that $\mathrm{supp}(\underline{u_0})\subseteq B_{R_0}$. We refer to \cite{DPQRV2} for a proof of the smoothing effect and of the conservation of mass. Summing up, \[
\left|\frac{{\rm d}}{\,{\rm d}t}\int_{\mathbb{R}^d}u(t,x)\psi(x)\,{\rm d}x\right|\le \frac{K[u_0,\psi]}{t^{1-2s\vartheta}}\,, \] since $d\vartheta(m-1)=1-2s\vartheta$. Integrating the above differential inequality on $(0,t)$ with $t\ge 0$ we obtain: \[ -\frac{K[u_0,\psi]}{2s\vartheta}\,t^{2s\vartheta}\le \int_{\mathbb{R}^d}u(t,x)\psi(x)\,{\rm d}x-\int_{\mathbb{R}^d}u(0,x)\psi(x)\,{\rm d}x \le \frac{K[u_0,\psi]}{2s\vartheta}\,t^{2s\vartheta}\,. \] We will use this in the form \begin{equation}\label{step4.1} \int_{\mathbb{R}^d}u(0,x)\psi(x)\,{\rm d}x - \frac{K[u_0,\psi]}{2s\vartheta}\,t^{2s\vartheta}\le \int_{\mathbb{R}^d}u(t,x)\psi(x)\,{\rm d}x\,. \end{equation} Moreover, if $\psi\in\mathrm{L}^1(\mathbb{R}^d)$ and $R_1\ge 2R_0$, we have \begin{equation}\label{step4.3}\begin{split} \int_{\mathbb{R}^d}u(t,x)\psi(x)\,{\rm d}x
&= \int_{B_{R_1}}u(t,x)\psi(x)\,{\rm d}x+ \int_{B_{R_1}^c}u(t,x)\psi(x)\,{\rm d}x\\
&\le_{(a)} |B_{R_1}|\sup_{|x|\le {R_1}}u(t,x) + \sup_{x\in \mathbb{R}^d\setminus B_{2R_0}}u(t,x)\int_{B_{R_1}^c}\psi(x)\,{\rm d}x\\
&\le_{(b)} |B_{R_1}|\frac{I_\infty}{t^{d\vartheta}}\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{2s\vartheta} + u(t,0)\int_{B_{R_1}^c}\psi(x)\,{\rm d}x \end{split} \end{equation} where in $(a)$ we have used the fact that $\psi\le 1$, $R_1\ge 2R_0$\,, and that $\psi\in\mathrm{L}^1(\mathbb{R}^d)$. In $(b)$ we have used the smoothing effect \eqref{Smoothing.FPME} of step 2 and the Aleksandrov principle of Step 3\,. Putting together inequalities \eqref{step4.1} and \eqref{step4.3}\,, we obtain \begin{equation}\label{step4.4}
\int_{\mathbb{R}^d}u(0,x)\psi(x)\,{\rm d}x - \frac{K[u_0,\psi]}{2s\vartheta}\,t^{2s\vartheta}-|B_{R_1}|\frac{I_\infty}{t^{d\vartheta}}\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{2s\vartheta} \le u(t,0)\int_{B_{R_1}^c}\psi(x)\,{\rm d}x\,. \end{equation}
Next, in order to estimate $K[u_0,\psi]$ in a convenient way we take $\psi(x)=\phi(|x|/R)$ with $\phi$ as in Lemma \ref{Lem.phi}\,, we have $|(-\Delta)^s\psi|\le c_3 R^{-2s}$\,, for some constant $c_3= c_3(d,s)$\,. Then, \begin{equation}\label{step4.2}\begin{split}
K[u_0,\psi]=I_\infty^{m-1}\left\|(-\Delta)^s\psi\right\|_{\mathrm{L}^\infty(\mathbb{R}^d)}\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{2s\vartheta(m-1)+1}
\le \frac{c_3 I_\infty^{m-1}}{R^{2s}}\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{2s\vartheta(m-1)+1} \end{split}\end{equation}
When $R\ge R_0$ and $R_1\ge 2R_0$, we arrive at \begin{equation}\label{step4.5}\begin{split}
\|u_0\|_{\mathrm{L}^1(B_{R_0})} - \frac{c_3 I_\infty^{m-1}}{2s\vartheta}\frac{\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{2s\vartheta(m-1)+1}}{R^{2s}} \,t^{2s\vartheta}
&-\omega_d R_1^d \frac{I_\infty}{t^{d\vartheta}}\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{2s\vartheta} \le u(t,0)\int_{B_{R_1}^c}\psi(x)\,{\rm d}x\\ &\le u(t,0)R^d \int_{\mathbb{R}^d}\varphi(x)\,{\rm d}x=c_4 R^d u(t,0)\,. \end{split} \end{equation}
\noindent$\bullet~$\textsc{Step 5. }\textit{Choosing the parameters. }We want to choose $t>0$, $R\ge R_0$ and $R_1\ge 2R_0$ so that the left-hand side of \eqref{step4.5} is larger than $\|u_0\|_{\mathrm{L}^1(B_{R_0})}/2$, which will then give the desired bound from below for $u(t,0)$. We first make the choice \begin{equation}
R_1^d=\frac{\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{d(m-1)\vartheta}}{4 \omega_d I_\infty}{t^{d\vartheta}}\,, \end{equation} which will satisfy the condition $R_1\ge 2R_0$ if and only if $t\ge t_*$ where \begin{equation}
t_*^{\vartheta}=c_5 R_0\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{-(m-1)\vartheta}, \qquad c_5=2^{1+(2/d)}(\omega_d I_\infty)^{1/d}. \end{equation} Now we can make the second choice: $R$ has to be large enough, for instance: \begin{equation}
R=c_6\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{\vartheta(m-1)}\,t^{\vartheta}\,,\qquad c_6=\left(\frac{4\,c_3 I_\infty^{m-1}}{2s\vartheta}\right)^{1/2s} \end{equation} Both choices will give for $t\ge t_*$ the lower bound \[
\frac{\|u_0\|_{\mathrm{L}^1(B_{R_0})}}{2c_4\, R^d}\le u(t,0)\,, \] which can be rewritten as \begin{equation}\label{step4.6}
c_7\frac{\|u_0\|_{\mathrm{L}^1(B_{R_0})}^{2s\vartheta}}{t^{d\vartheta}} \le u(t,0)\,,\quad \mbox{for any}\quad t\ge t_*\,,\quad \mbox{with}\quad c_7=\frac{1}{2c_4\, c_6^d}\mbox{\,.} \end{equation} By a standard argument it is easy to pass from the center to the infimum on $B_{R_0/2}(0)$ in the above estimates.\qed
\noindent {\bf Remark}. In the limit $m\to 1$ of the estimate of Theorem \ref{thm.lower.pme} we obtain the result of Proposition \ref{thm.lower.m=1} for $m=1$.
\noindent {\bf Open Problem.} Quantitative positivity estimates for the solutions at small times are not known yet.
\section{Existence and uniqueness of initial traces}\label{sect.traces}
The existence of solutions of the Cauchy Problem (\ref{FDE.eq})-(\ref{FDE.id}) can be extended to the case where the initial datum is a finite and nonnegative Radon measure. We denote by
${\cal M}^+(\RR^d)$ the space of such measures on $\RR^d$. Here is the result proved in Theorem 4.1 of \cite{Vaz2012}.
\noindent {\bf Theorem.} {\sl For every $\mu\in {\cal M}^+(\RR^d)$ there exists a nonnegative and continuous weak solution of Equation \eqref{FDE.eq} in $Q=(0,\infty)\times \mathbb{R}^d$ taking initial data $\mu$ in the sense that for every $\varphi\in C_c^{2}(\mathbb{R}^d)$ we have \begin{equation} \lim_{t\to 0^+} \int u(t,x)\varphi(x)\,dx=\int \varphi(x)d\mu(x)\,. \end{equation} } In this section we address the reverse problem, i.\,e., given a solution to find the initial trace. In the case $s=1$ such question was solved thanks to the works of Aronson-Caffarelli \cite{ArCaff}, Dahlberg-Kenig \cite{DK2}, Pierre \cite{Pierre} and others, see a presentation in \cite{VazBook}, Chapter 13.
\begin{lem}[Conditions for existence and uniqueness of initial traces]\label{lem.init.trace} Let $m>0$ and let $u$ be a solution to equation \eqref{FDE.eq} in $(0,T]\times\mathbb{R}^d$. Assume that there exist a time $0<T_1\le T$, some positive constants $K_1,K_2,\alpha>0$ and a continuous function $\omega:[0,+\infty)\to [0,+\infty)$, with $\omega(0)=0$ such that \begin{equation}\label{hyp.1.lem.init} (i)\qquad \sup_{t\in (0,T_1]}\int_{B_R(x_0)}u(t,x)\,{\rm d}x\le K_1\,,\qquad\forall\; R>0\,,\;x_0\in\mathbb{R}^d\,, \end{equation} as well as \begin{equation}\label{hyp.2.lem.init} (ii)\qquad \left[\int_{\mathbb{R}^d}u(t,x)\varphi(x)\,{\rm d}x\right]^\alpha\le \left[\int_{\mathbb{R}^d}u(t',x)\varphi(x)\,{\rm d}x\right]^\alpha
+K_2\,\omega(|t-t'|) \end{equation} for all $0<t,t'\le T_1$ and for all $\varphi\in C_c^\infty(\mathbb{R}^d)$\,. Then there exists a unique nonnegative Radon measure $\mu$ as initial trace, that is \[ \int_{\mathbb{R}^d}\varphi\,{\rm d}\mu=\lim_{t\to 0^+}\int_{\mathbb{R}^d}u(t,x)\varphi(x)\,{\rm d}x\,,\qquad\mbox{for all }\varphi\in C_c^\infty(\mathbb{R}^d)\,. \] Moreover the initial trace $\mu$ satisfies the bound \eqref{hyp.1.lem.init} with the same constant, namely $\mu(B_{R}(x_0))\le K_1$\,. \end{lem}
Notice that the constants $K_1$ and $K_2$ may depend on $u$ and $\varphi$, usually through some norm.
\noindent {\sl Proof.~}The proof is divided in two steps in which we prove existence and uniqueness of the initial trace respectively.
\noindent$\bullet~$\textsc{Step 1. }\textit{Existence of the initial trace. }Hypothesis $(i)$ easily implies that \[ \limsup_{t\to 0^+}\int_{B_R(x_0)}u(t,x)\,{\rm d}x\le K_1\,,\qquad\forall\; R>0\,,\;x_0\in\mathbb{R}^d\,. \] Moreover, it implies weak compactness for measures (to be more precise, weak$^*$ compactness, see Theorem \ref{thm.cpt.radon} in the Appendix \ref{appendix.meas}), so that there exists a sequence $t_k\to 0^+$ as $k\to \infty$ with $0<t_k<T_1$\,, and a nonnegative Radon measure $\mu$ so that \[ \lim_{k\to\infty}\int_{\mathbb{R}^d}u(t_k,x)\varphi(x)\,\,{\rm d}x=\int_{\mathbb{R}^d}\varphi\,{\rm d}\mu\qquad\mbox{for all }\varphi\in C^0_c(\mathbb{R}^d)\,. \] The bound \eqref{intit.trace.bdd.lem} on the initial trace: $\mu(B_R(x_0))\le K_1$ follows from the above bound on the $\limsup$\,.
\noindent$\bullet~$\textsc{Step 2. }\textit{Uniqueness of the initial trace. } The initial trace whose existence we have just proved may, of course, depend on the sequence $t_k$. We will now show that this is not the case, thanks to hypothesis $(ii)$. Assume that there exist two sequences $t_k\to 0^+$ and $t'_k\to 0^+$ as $k\to \infty$\,, so that $u(t_k)\to \mu$ and $u(t'_k)\to \nu$, with $\mu, \nu\in {\cal M}^+(\RR^d)$. We will prove that \begin{equation}\label{step.2.uniq.1} \int_{\mathbb{R}^d}\varphi\,{\rm d}\mu=\int_{\mathbb{R}^d}\varphi\,{\rm d}\nu\qquad\mbox{for all }\varphi\in C^{\infty}_c(\mathbb{R}^d)\,. \end{equation}
so that $\mu=\nu$ as positive linear functionals on $C^{\infty}_c(\mathbb{R}^d)$. Then by the Riesz representation theorem (cf. Theorem \ref{thm.riesz.radon}) we know that $\mu=\nu$ also as Radon measures on $\mathbb{R}^d$. Therefore, it only remains to prove \eqref{step.2.uniq.1}\,: hypothesis $(ii)$ implies that for any $t,t' >0$\,, with $0<t+t'\le T_1\le T$, and any $\varphi\in C^{\infty}_c(\mathbb{R}^d)$\, we have $\omega(|(t+t')-t|)=\omega(t')$ and \begin{equation}\label{lem61.step2.1} \left[\int_{\mathbb{R}^d}u(t,x)\varphi(x)\,{\rm d}x\right]^\alpha\le \left[\int_{\mathbb{R}^d}u(t+t',x)\varphi(x)\,{\rm d}x\right]^\alpha
+K_2\omega(t')\,. \end{equation} First we let $t=t_k$ and $t'>0$ to be chosen later, then we let $t_k\to 0^+$ so that $u(t_k)\rightharpoonup \mu$, and we get \begin{equation}\label{lem61.step2.2} \left[\int_{\mathbb{R}^d}\varphi\,{\rm d}\mu\right]^\alpha\le \left[\int_{\mathbb{R}^d}u(t',x)\varphi(x)\,{\rm d}x\right]^\alpha
+K_2\omega(t')\,. \end{equation} Then we put $t'=t'_k$ and let $t'_k\to 0^+$ so that $u(t'_k)\rightharpoonup \nu$, $\omega(t'_k)\to \omega(0)=0$ and we obtain the first inequality \begin{equation}\label{lem61.step2.3} \left[\int_{\mathbb{R}^d}\varphi\,{\rm d}\mu\right]^\alpha\le\left[\int_{\mathbb{R}^d}\varphi\,{\rm d}\nu\right]^\alpha\,. \end{equation} Then, we proceed exactly in the same way but we exchange the roles of $t_k$ and $t'_k$ to obtain the opposite inequality $\left[\int_{\mathbb{R}^d}\varphi\,{\rm d}\nu\right]^\alpha\le\left[\int_{\mathbb{R}^d}\varphi\,{\rm d}\mu\right]^\alpha\,.$ Therefore we have that $\mu=\nu$ as positive linear functionals on $C^{\infty}_c(\mathbb{R}^d)$\, as desired.\qed
\begin{thm}[Existence and uniqueness of initial trace, FD case]\label{thm.init.trace.m<1}
Let $0<m<1$ and let $u$ be a nonnegative weak solution of equation \eqref{FDE.eq} in $(0,T]\times\mathbb{R}^d$. Assume that $\|u(T)\|_{\mathrm{L}^1(\mathbb{R}^d)}<\infty$. Then there exists a unique nonnegative Radon measure $\mu$ as initial trace, that is \begin{equation}\label{eq.trace1} \int_{\mathbb{R}^d}\psi\,{\rm d}\mu=\lim_{t\to 0^+}\int_{\mathbb{R}^d}u(t,x)\psi(x)\,\,{\rm d}x\,,\qquad\mbox{for all }\psi\in C_0(\mathbb{R}^d)\,. \end{equation} Moreover, the initial trace $\mu$ satisfies the bound \begin{equation}\label{intit.trace.bdd.lem}
\mu(B_{R}(x_0))\le \|u(T)\|_{\mathrm{L}^1(\mathbb{R}^d)} + C_1 R^{d(1-m)-2s}\,T\,. \end{equation} where $C_1=C_1(m,d,s)>0$ as in \eqref{HP.s}. \end{thm} \noindent {\sl Proof.~}The proof is divided into three steps.
\noindent$\bullet~$\textsc{Step 1. }\textit{Weighted estimates I. Existence. }First we recall the weighted estimates of Theorem \ref{prop.HP.s}\,, which imply for all $0\le t\le T_1\le T$ \begin{equation}\label{HP.s.2.mu}\begin{split} \left(\int_{\mathbb{R}^d}u(t,x)\phi_R(x)\,{\rm d}x\right)^{1-m} &\le\left(\int_{\mathbb{R}^d}u(T,x)\phi_R(x)\,{\rm d}x\right)^{1-m}
+ C_1 R^{d(1-m)-2s}\,|T-T_1|\\
&\le \|u(T)\|_{\mathrm{L}^1(\mathbb{R}^d)} + C_1 R^{d(1-m)-2s}\,T := K_1 \end{split} \end{equation}
since $\phi_R\le 1$ and where $C_1>0$ depends only on $\alpha,m,d$ as in Theorem \ref{prop.HP.s}\,. Since $\phi_R\ge 1$ on $B_R$ it is clear that this implies hypothesis $(i)$ of Lemma \ref{lem.init.trace}, therefore it guarantees the existence of an initial trace that satisfies the bound $\mu(B_R(x_0))\le K_1=\|u(T)\|_{\mathrm{L}^1(\mathbb{R}^d)} + C_1 R^{d(1-m)-2s}\,T$\,.
\noindent$\bullet~$\textsc{Step 2. }\textit{Pseudo-local estimates. Uniqueness. }In order to prove uniqueness of the initial trace it is sufficient to prove hypothesis $(ii)$ of Lemma \ref{lem.init.trace}, namely we need to prove that \begin{equation}\label{hyp.2.lem.init.2} \left[\int_{\mathbb{R}^d}u(t,x)\psi(x)\,{\rm d}x\right]^\alpha\le \left[\int_{\mathbb{R}^d}u(t',x)\psi(x)\,{\rm d}x\right]^\alpha
+K_2\,\omega(|t-t'|) \end{equation}
for all $0<t,t'\le T_1\le T$ and for all $\psi\in C_c^\infty(\mathbb{R}^d)$\,. We will see that this is true for $\alpha=1$ and $\omega(|t-t'|)=|t-t'|$. Let $\psi\in C_c^\infty(\mathbb{R}^d)$, then we have \[ \begin{split}
\left|\frac{{\rm d}}{\,{\rm d}t}\int_{\mathbb{R}^d}u(t,x)\psi(x)\,{\rm d}x\right|
&=\left|\int_{\mathbb{R}^d}(-\Delta)^s u^m\psi\,{\rm d}x\right|
=_{(a)}\left|\int_{\mathbb{R}^d}u^m(-\Delta)^s\psi\,{\rm d}x\right|
\le \int_{\mathbb{R}^d}u^m\,\phi_R(x)\,\frac{\left|(-\Delta)^s\psi(x)\right|}{\phi_R(x)}\,{\rm d}x\\
&\le_{(b)} \left\|\frac{\left|(-\Delta)^s\psi(x)\right|}{\phi_R(x)}\right\|_{\mathrm{L}^\infty(\mathbb{R}^d)}
\left(\int_{\mathbb{R}^d}\phi_R\,{\rm d}x\right)^{1-m}\,\left(\int_{\mathbb{R}^d}u\phi_R\,{\rm d}x\right)^m \\
&\le k_7\,\|\phi_R\|_{\mathrm{L}^1(\mathbb{R}^d)}\,K_1:= K_2\,. \end{split} \] Notice that in $(a)$ we have used the fact that $(-\Delta)^s$ is a symmetric operator. In $(b)$ we have chosen $\phi_R(x):=\phi(x/R)$\,, with $\phi$ as in \eqref{phi} of Lemma \ref{Lem.phi}, with the decay at infinity $\alpha=d+2s$. It then follows that \[
\left\|\frac{\left|(-\Delta)^s\psi(x)\right|}{\phi_R(x)}\right\|_{\mathrm{L}^\infty(\mathbb{R}^d)}\le k_7\,, \]
since we know by Lemma \ref{Lem.phi} that $\left|(-\Delta)^s\psi(x)\right|\le k_5\,|x|^{-(d+2s)}$\,, \normalcolor and we have chosen $\phi_R\ge k_6/|x|^{d+2s}$\,. We have also used the fact that the $\mathrm{L}^m$-norm ($m<1$) is less than the $\mathrm{L}^1$ norm since the measure $\phi_R\,{\rm d}x$ is finite. In the last line of the display we have used the bound of Step 1, namely that $\left(\int_{\mathbb{R}^d}u(t,x)\varphi_R(x)\,{\rm d}x\right)^{1-m}\le M_T + C_1 R^{d(1-m)-2s}\,T := K_1$ for all $0\le t\le T_1$\,. Summing up, we have obtained: \[
\left|\frac{{\rm d}}{\,{\rm d}t}\int_{\mathbb{R}^d}u(t,x)\psi(x)\,{\rm d}x\right|\le K_2\,. \] Integrating the above differential inequality we obtain: \begin{equation}\label{est.loc.nonloc.0} \int_{\mathbb{R}^d}u(t,x)\psi(x)\,{\rm d}x\le \int_{\mathbb{R}^d}u(\tau,x)\psi(x)\,{\rm d}x
+ K_2\,|t-\tau|\qquad\mbox{ for any $\tau,t\ge 0$ and all $\psi\in C_c^\infty(\mathbb{R}^d)$\,.} \end{equation} \noindent$\bullet~$\textsc{Step 3. } We still have to pass from test functions $\psi\in C_c^\infty(\mathbb{R}^d)$ to $\psi\in C_c^0(\mathbb{R}^d)$ in formula \eqref{eq.trace1}, but this is easy by approximation (mollification).\qed
\noindent\textbf{Remarks. } (i) The proof applies with minor modification to the class of solutions with data $u_0\in \mathrm{L}^1(\mathbb{R}^d,\varphi\,{\rm d}x)$ constructed in Section \ref{sect.exist.large}\,.
\noindent(ii) Notice that estimates \eqref{est.loc.nonloc.0} are only pseudo-local estimates: the global information about $u(T)$, namely the bound $\|u(T)\|_{\mathrm{L}^1(\mathbb{R}^d)}$ is contained in the constant $K_1$ and therefore in $K_2$.
\noindent (iii) The existence of solutions and traces for the standard FDE with (not necessarily locally finite) Borel measures as data is studied in Chasseigne-Vazquez \cite{ChVaz02}. We do not address the corresponding question here.
\color{darkblue}
\begin{thm}[\color{darkblue}\bf Existence and uniqueness of initial trace, HE case $m=1$]\label{thm.init.trace.m=1}
Let $m=1$ and let $u$ be a nonnegative weak solution of equation \eqref{FDE.eq} in $(0,T]\times\mathbb{R}^d$. Assume that $\|u(T)\|_{\mathrm{L}^1(\mathbb{R}^d,\varphi)}<\infty$ where $\varphi$ is as in Theorem $\ref{prop.HP.s}$ with decay at infinity $|x|^{-\alpha}$, $\alpha=d+2s$. Then there exists a unique nonnegative Radon measure $\mu$ as initial trace, that is \begin{equation*} \int_{\mathbb{R}^d}\psi\,{\rm d}\mu=\lim_{t\to 0^+}\int_{\mathbb{R}^d}u(t,x)\psi(x)\,\,{\rm d}x\,,\qquad\mbox{for all }\psi\in C_0(\mathbb{R}^d)\,. \end{equation*} Moreover, the initial trace $\mu$ satisfies the bound \begin{equation*}
\mu(B_{R}(x_0))\le \mathrm{e}^{K_0\,T}\,\|u(T)\|_{\mathrm{L}^1(\mathbb{R}^d,\varphi)}\,, \end{equation*} where $K_0=K_0(m,d,s)>0$ as in \eqref{HE.2}. \end{thm}
\noindent{\bf Proof.~}The proof is divided into three steps.
\noindent$\bullet~$\textsc{Step 1. }\textit{Weighted estimates I. Existence. }First we prove the weighted estimates when $m=1$: for all $0\le t\le T_1\le T$ \begin{equation}\label{HE.1}\begin{split} \int_{\mathbb{R}^d}u(t,x)\phi_R(x)\,{\rm d}x &\le\mathrm{e}^{K_0(T-t)}\,\int_{\mathbb{R}^d}u(T,x)\phi_R(x)\,{\rm d}x
\le \mathrm{e}^{K_0\,T}\,\|u(T)\|_{\mathrm{L}^1(\mathbb{R}^d,\phi_R)}:= K_1 \end{split} \end{equation}
The proof of the above inequality is as follows. Consider a function $\varphi=\phi_R$ as in Theorem $\ref{prop.HP.s}$ with decay at infinity $|x|^{-\alpha}$, $\alpha=d+2s$ and such that $\phi_R\ge 1$ on $B_R$, so that by Lemma \ref{Lem.phi} we have \begin{equation}\label{HE.2}
\left\|\frac{(-\Delta)^s\phi_R}{\phi_R}\right\|_\infty \le K_0<+\infty \end{equation} so that \[
\left|\frac{{\rm d}}{\,{\rm d}t}\int_{\mathbb{R}^d}u\phi_R \,{\rm d}x \right|
= \left|\int_{\mathbb{R}^d}u\, (-\Delta)^s\phi_R\,{\rm d}x \right|
\le \left\|\frac{(-\Delta)^s\phi_R}{\phi_R}\right\|_\infty\,\int_{\mathbb{R}^d} u\, \phi_R\,{\rm d}x\le K_0 \int_{\mathbb{R}^d} u\, \phi_R\,{\rm d}x\,. \] from which \eqref{HE.1} follows. Since $\phi_R\ge 1$ on $B_R$ it is clear that \eqref{HE.1} implies hypothesis $(i)$ of Lemma \ref{lem.init.trace}, therefore it guarantees the existence of an initial trace that satisfies the bound
$\mu(B_R(x_0))\le K_1=\mathrm{e}^{K_0\,T}\,\|u(T)\|_{\mathrm{L}^1(\mathbb{R}^d,\varphi)}\,.$
\noindent$\bullet~$\textsc{Step 2. }\textit{Pseudo-local estimates. Uniqueness. }In order to prove uniqueness of the initial trace it is sufficient to prove hypothesis $(ii)$ of Lemma \ref{lem.init.trace}, namely we need to prove that \begin{equation}\label{HE.3} \left[\int_{\mathbb{R}^d}u(t,x)\psi(x)\,{\rm d}x\right]^\alpha\le \left[\int_{\mathbb{R}^d}u(t',x)\psi(x)\,{\rm d}x\right]^\alpha
+K_2\,\omega(|t-t'|) \end{equation}
for all $0<t,t'\le T_1\le T$ and for all $\psi\in C_c^\infty(\mathbb{R}^d)$\,. We will see that this is true for $\alpha=1$ and $\omega(|t-t'|)=|t-t'|$. Let $\psi\in C_c^\infty(\mathbb{R}^d)$, then we have \[ \begin{split}
\left|\frac{{\rm d}}{\,{\rm d}t}\int_{\mathbb{R}^d}u(t,x)\psi(x)\,{\rm d}x\right|
&=\left|\int_{\mathbb{R}^d}(-\Delta)^s u\psi\,{\rm d}x\right|
=_{(a)}\left|\int_{\mathbb{R}^d}u\,(-\Delta)^s\psi\,{\rm d}x\right|
\le \int_{\mathbb{R}^d}u\,\phi_R(x)\,\frac{\left|(-\Delta)^s\psi(x)\right|}{\phi_R(x)}\,{\rm d}x \\
&\le_{(b)} \left\|\frac{\left|(-\Delta)^s\psi(x)\right|}{\phi_R(x)}\right\|_{\mathrm{L}^\infty(\mathbb{R}^d)}
\int_{\mathbb{R}^d}u\phi_R\,{\rm d}x \le K'_0\,K_1:= K_2\,. \end{split} \] Notice that in $(a)$ we have used the fact that $(-\Delta)^s$ is a symmetric operator. In $(b)$ we have chosen $\phi_R(x)$\,, as in Step 1, so that, by Lemma \ref{Lem.phi}, we have \[
\left\|\frac{\left|(-\Delta)^s\psi(x)\right|}{\phi_R(x)}\right\|_{\mathrm{L}^\infty(\mathbb{R}^d)}\le K'_0\,. \] Finally, we have used the bound \eqref{HE.1} of Step 1: for all $0\le t\le T_1$ we have $\int_{\mathbb{R}^d}u(t,x)\phi_R(x)\,{\rm d}x\le K_1$ \,. Summing up, we have obtained that for all $t\ge 0$ and all $\psi\in C_c^\infty(\mathbb{R}^d)$ \[
\left|\frac{{\rm d}}{\,{\rm d}t}\int_{\mathbb{R}^d}u(t,x)\psi(x)\,{\rm d}x\right|\le K_2\,. \]
Integrating the above differential inequality, we obtain \eqref{HE.3} with $\alpha=1$ and $\omega(|t-t'|)=|t-t'|$\,.
\noindent$\bullet~$\textsc{Step 3. } We still have to pass from test functions $\psi\in C_c^\infty(\mathbb{R}^d)$ to $\psi\in C_c^0(\mathbb{R}^d)$ in formula \eqref{eq.trace1}, but this is easy by approximation (mollification).\qed
\normalcolor
\begin{thm}[Existence and uniqueness of initial trace, PME case]\label{thm.init.trace.m>1}
Let $m>1$ and let $u$ be a solution to the Cauchy problem \eqref{FDE.eq} on $(0,T]\times\mathbb{R}^d$. Assume that $\|u(T)\|_{\mathrm{L}^1(\mathbb{R}^d)}+\|u(T)\|_{\mathrm{L}^\infty(\mathbb{R}^d)}<+\infty$. Then there exists a unique nonnegative Borel measure $\mu$ as initial trace, that is \begin{equation}\label{eq.trace2} \int_{\mathbb{R}^d}\psi\,{\rm d}\mu=\lim_{t\to 0^+}\int_{\mathbb{R}^d}u(t,x)\psi(x)\,{\rm d}x\,,\qquad\mbox{for all }\psi\in C_0(\mathbb{R}^d)\,. \end{equation} Moreover the initial trace $\mu$ satisfies the bound \begin{equation}\label{intit.trace.bdd.lem.1} \mu(B_{R}(x_0))\le C_1\left[\left(\frac{R^{2s+d(m-1)}}{T}\right)^{\frac{1}{m-1}}+ T^{\frac{d}{2s}}\, u(x_0,T)^{\frac{1}{2s\vartheta}}\right]\,, \end{equation} where $C_1=C_1(m,d,s)>0$ as in Theorem \ref{thm.lower.pme}. \end{thm} \noindent {\sl Proof.~}The proof is divided into four steps.
\noindent$\bullet~$\textsc{Step 1. }\textit{Weighted estimates I. Existence. }First we recall the lower bounds of Theorem \ref{thm.lower.pme} rewritten in the form \eqref{AC.s.} \begin{equation}\label{AC.s.2}\begin{split} \int_{B_R(x_0)} u(\tau,x)\,dx &\le C_1\left[\left(\frac{R^{2s+d(m-1)}}{T}\right)^{\frac{1}{m-1}}+ T^{\frac{d}{2s}}\, u(x_0,T)^{\frac{1}{2s\vartheta}}\right] := K_1. \end{split} \end{equation} on the time interval $(\tau,T]\subseteq (0,T]$\,. It is clear that this implies hypothesis $(i)$ of Lemma \ref{lem.init.trace}, therefore it guarantees the existence of an initial trace that satisfies the bound $\mu(B_R(x_0))\le K_1\,.$
\noindent$\bullet~$\textsc{Step 2. }\textit{Smoothing effects and mass conservation. }In \cite{DPQRV2} there are the global $\mathrm{L}^1-\mathrm{L}^\infty$ smoothing effects which provide global upper bounds for solutions to the Cauchy problem \ref{FDE.eq}\,. We apply such smoothing effects to solutions to our reduced Problem \ref{FFDE.Prob.Red} to get \begin{equation}\label{Smoothing.FPME.1}
\|u(t)\|_{\mathrm{L}^\infty(\mathbb{R}^d)}\le \frac{2^{d\vartheta}I_\infty}{t^{d\vartheta}}\|u(t/2)\|_{\mathrm{L}^1(\mathbb{R}^d)}^{2s\vartheta} \end{equation} where $\vartheta=1/[2s+d(m-1)]$ and the constant $I_\infty$ only depends on $d,s,m$\,. Moreover, we know that there holds also the conservation of mass on the time interval $[t/2,T]\subset (0,T]$, so that inequality \eqref{Smoothing.FPME.1} becomes \begin{equation}\label{Smoothing.FPME.2}
\|u(t)\|_{\mathrm{L}^\infty(\mathbb{R}^d)}\le \frac{2^{d\vartheta}I_\infty}{t^{d\vartheta}}\|u(T)\|_{\mathrm{L}^1(\mathbb{R}^d)}^{2s\vartheta}\,. \end{equation}
\noindent$\bullet~$\textsc{Step 3. }\textit{Weighted estimates II. Pseudo-local estimates. Uniqueness. }In order to prove uniqueness of the initial trace it is sufficient to prove hypothesis $(ii)$ of Lemma \ref{lem.init.trace}, namely we need to prove \begin{equation}\label{hyp.2.lem.init.2.2} \left[\int_{\mathbb{R}^d}u(t,x)\psi(x)\,{\rm d}x\right]^\alpha\le \left[\int_{\mathbb{R}^d}u(t',x)\psi(x)\,{\rm d}x\right]^\alpha
+K_2\,\omega(|t-t'|) \end{equation}
for all $0<t,t'\le T_1\le T$ and for all $\psi\in C_c^\infty(\mathbb{R}^d)$\,. We will see that this is true for $\alpha=1$ and $\omega(|t-t'|)=|t^\sigma-t'^\sigma|$ with $\sigma=2s/[2s+d(m-1)]$\,. Let $\psi\in C_c^\infty(\mathbb{R}^d)$, then we have \[ \begin{split}
\left|\frac{{\rm d}}{\,{\rm d}t}\int_{\mathbb{R}^d}u(t,x)\psi(x)\,{\rm d}x\right|
&=\left|\int_{\mathbb{R}^d}(-\Delta)^s u^m\psi\,{\rm d}x\right|
=_{(a)}\left|\int_{\mathbb{R}^d}u^m(-\Delta)^s\psi\,{\rm d}x\right|\\
&\le \|u(t)\|_{\mathrm{L}^\infty(\mathbb{R}^d)}^{m-1} \left\|(-\Delta)^s\psi\right\|_{\mathrm{L}^\infty(\mathbb{R}^d)} \int_{\mathbb{R}^d}u(t) \,{\rm d}x\\
&\le_{(b)} \frac{2^{d\vartheta(m-1)}I_\infty^{m-1}}{t^{d\vartheta(m-1)}}\|u(T)\|_{\mathrm{L}^1(\mathbb{R}^d)}^{2s\vartheta(m-1)} \left\|(-\Delta)^s\psi\right\|_{\mathrm{L}^\infty(\mathbb{R}^d)}\|u(t)\|_{\mathrm{L}^1(\mathbb{R}^d)}\\
&=\frac{2^{d\vartheta(m-1)}I_\infty^{m-1}}{t^{d\vartheta(m-1)}}\|u(T)\|_{\mathrm{L}^1(\mathbb{R}^d)}^{2s\vartheta(m-1)+1} \left\|(-\Delta)^s\psi\right\|_{\mathrm{L}^\infty(\mathbb{R}^d)} := \frac{K_2}{t^{d\vartheta(m-1)}}\,. \end{split} \] Notice that in $(a)$ we have used the fact that $(-\Delta)^s$ is a symmetric operator. In $(b)$ we have used the smoothing effect \eqref{Smoothing.FPME.2} of Step 2\,. Summing up we have obtained: \[
\left|\frac{{\rm d}}{\,{\rm d}t}\int_{\mathbb{R}^d}u(t,x)\psi(x)\,{\rm d}x\right|\le \frac{K_2}{t^{d\vartheta(m-1)}}\,. \] Integrating the above differential inequality we obtain for any $\tau,t\ge 0$: \begin{equation}\label{est.loc.nonloc} \int_{\mathbb{R}^d}u(t,x)\psi(x)\,{\rm d}x\le \int_{\mathbb{R}^d}u(\tau,x)\psi(x)\,{\rm d}x
+ \frac{K_2}{2s\vartheta}\,\left|t^{2s\vartheta}-\tau^{2s\vartheta}\right|\quad\mbox{ for all $\psi\in C_c^\infty(\mathbb{R}^d)$\,.} \end{equation} \noindent$\bullet~$\textsc{Step 4. } We still have to pass from test functions $\psi\in C_c^\infty(\mathbb{R}^d)$ to $\psi\in C_c^0(\mathbb{R}^d)$ in formula \eqref{eq.trace2}, but this is easy by approximation (mollification).\qed
We notice that the estimates \eqref{est.loc.nonloc} are only pseudo-local estimates: the global information about $u(T)$, namely the bound $\|u(T)\|_{\mathrm{L}^1(\mathbb{R}^d)}$ is contained in the constant $K_2$.
\section{Appendix I. Definitions, complements and computations}\label{sec.app}
\subsection{Definition of the fractional Laplacian.}\label{ssec.app1}
According to Stein, \cite{Stein70}, chapter V, the definition of the nonlocal operator $(-\Delta)^{\sigma/2}$, known as the Laplacian of order $\sigma$, is given by means of the Fourier transform \begin{equation}
((-\Delta)^{\sigma/2}f)^{\widehat{}}(x)=(2\pi|x|)^{\sigma} \hat f (x)\,, \end{equation} and can be used for positive and negative values of $\sigma$. If $0<\sigma<2$, we can also use the representation by means of a hypersingular kernel, \begin{equation}\label{formula.slapl} (-\Delta)^{\sigma/2} g(x)= c_{d,\sigma }\mbox{
P.V.}\int_{\mathbb{R}^d} \frac{g(x)-g(z)}{|x-z|^{d+{\sigma} }}\,dz, \end{equation} where $c_{d,\sigma }=\frac{2^{\sigma-1}\sigma\Gamma((d+\sigma)/2)}{\pi^{d/2}\Gamma(1-\sigma/2)}$ is a normalization constant. Another classical way of defining the fractional powers of a linear self-adjoint nonnegative operator is in terms of the associated semigroup, which in our case reads \begin{equation} \displaystyle(-\Delta)^{\sigma/2} g(x)=\frac1{\Gamma(-\frac{\sigma}2)}\int_0^\infty \left(e^{t\Delta}g(x)-g(x)\right)\frac{dt}{t^{1+\frac{\sigma}2}}. \label{laplace}\end{equation} In this paper we consistently put $\sigma=2s$, $0<s<1$ (sometimes, also $s=1$).
\subsection{Definition of weak and very weak solutions}
We recall here the definitions of weak and strong solutions taken from \cite{DPQRV2}. We finally introduce the definition of very weak solutions.
\begin{defn}\label{def:weak.solution.nonlocal} A function $u$ is a {\sl weak} solution to Equation \eqref{FDE.eq} if: \begin{itemize}
\item $u\in C((0,\infty): L^1(\mathbb{R}^d))$, $|u|^{m-1}u \in L^2_{\rm loc}((0,\infty):\dot{H}^{s}(\mathbb{R}^d))$; \item The identity \begin{equation} \displaystyle \int_0^\infty\int_{\mathbb{R}^d}u\dfrac{\partial
\varphi}{\partial t}\,\,{\rm d}x\,{\rm d}t-\int_0^\infty\int_{\mathbb{R}^d}(-\Delta)^{s/2}(|u|^{m-1}u)(-\Delta)^{s/2}\varphi\,\,{\rm d}x\,{\rm d}t=0. \end{equation} holds for every $\varphi\in C_0^1(\mathbb{R}^d\times(0,\infty))$; \item A {\sl weak} solution to Problem \eqref{FDE.eq}--\eqref{FDE.id} is a weak solution to Equation \eqref{FDE.eq} such that moreover $u\in C([0,\infty): L^1(\mathbb{R}^d))$ and $u(0,\cdot)=u_0\in \mathrm{L}^1(\mathbb{R}^d)$. \end{itemize} \end{defn} Note that in \cite{DPQRV2} these weak solutions are given the more precise name {\sl weak $L^1$-energy solutions}. We recall that the fractional Sobolev space $\dot{H}^{s}(\mathbb{R}^d)$ is defined as the completion of $C_0^\infty(\mathbb{R}^d)$ with the norm $$
\|\psi\|_{\dot{H}^{s}}=\left(\int_{\mathbb{R}^d}
|\xi|^\sigma|\hat{\psi}|^2\,d\xi\right)^{1/2}
=\|(-\Delta)^{s/2}\psi\|_{2}. $$
\begin{defn} We say that a weak solution $u$ to Problem \eqref{FDE.eq}--\eqref{FDE.id} is a strong solution if moreover $\partial_tu\in L^\infty((\tau,\infty):L^1(\mathbb{R}^d))$, for every $\tau>0$. \label{def:strong.solution}\end{defn}
\begin{defn}\label{def:weak.solution.nonlocal.2} A function $u$ is a {\it very weak} solution to Equation \eqref{FDE.eq} if: \begin{itemize}
\item $u\in C((0,\infty): L^1_{\rm loc}(\mathbb{R}^d))$, $|u|^{m-1}u \in L^1_{\rm loc}\left((0,\infty):L^1\left(\mathbb{R}^d, (1+|x|)^{-(d+2s)}\,{\rm d}x\right)\right)$; \item The identity \begin{equation} \displaystyle \int_0^\infty\int_{\mathbb{R}^d}u\dfrac{\partial
\varphi}{\partial t}\,\,{\rm d}x\,{\rm d}t-\int_0^\infty\int_{\mathbb{R}^d} |u|^{m-1}u\,(-\Delta)^{s}\varphi\,\,{\rm d}x \,{\rm d}t=0. \end{equation} holds for every $\varphi\in C_c^\infty([0,T]\times\mathbb{R}^d)$\,; \item A {\sl very weak} solution to Problem \eqref{FDE.eq}--\eqref{FDE.id} is a very weak solution to Equation \eqref{FDE.eq} such that moreover $u\in C([0,\infty): L^1_{\rm loc}(\mathbb{R}^d))$ and $u(0,\cdot)=u_0\in \mathrm{L}^1_{\rm loc}(\mathbb{R}^d)$. \end{itemize} \end{defn}
\normalcolor
\subsection{Some functional inequalities related to the fractional Laplacian} We recall here some useful functional inequalities which have been used throughout the paper. \begin{lem}[Stroock-Varopoulos' inequality] Let $0<s<1$\,, $q>1$. Then \begin{equation}\label{StrVar.ineq}
\int_{\mathbb{R}^d}|v|^{q-2}v\,(-\Delta)^s v\,\,{\rm d}x\ge \frac{4(q-1)}{q^2}\int_{\mathbb{R}^d}\left|(-\Delta)^{\frac{s}{2}}|v|^{\frac{q}{2}}\right|^2\,{\rm d}x\,, \end{equation} for all $v\in \mathrm{L}^q(\mathbb{R}^d)$ such that $(-\Delta)^s v\in \mathrm{L}^q(\mathbb{R}^d)$. \end{lem} \noindent\textbf{Remark. }We have used the above Stroock-Varopoulos inequality, applied to $0\le v=u^m$ and $q=(p+m-1)/m>1$\,, whenever $p>1$, which is \begin{equation}\label{StrVar.ineq.um}
\int_{\mathbb{R}^d}|u|^{p-2}u\,(-\Delta)^s (|u|^{m-1}u)\,\,{\rm d}x\ge \frac{4m(p-1)}{(p+m-1)^2}\int_{\mathbb{R}^d}\left|(-\Delta)^{\frac{s}{2}}|u|^{\frac{p+m-1}{2}}\right|^2\,{\rm d}x\,. \end{equation}
\begin{thm}[Sobolev Inequality] Let $0<s\le 1$ and $2s<d$. Then \begin{equation}\label{Sob.Fract}
\|f\|_{\frac{2d}{d-2s}}\le \mathcal{S}_s \left\|(-\Delta)^{s/2}f\right\|_2 \end{equation} where the best constant is given by \begin{equation}\label{best.const.Sob.Fract} \mathcal{S}_s^2 := 2^{-2s}\,\pi^{-s}\frac{\Gamma\left(\frac{d-2s}{2}\right)}{\Gamma\left(\frac{d+2s}{2}\right)}
\left[\frac{\Gamma(d)}{\Gamma(d/2)}\right]^{\frac{2s}{d}}
=\frac{\Gamma\left(\frac{d-2s}{2}\right)}{\Gamma\left(\frac{d+2s}{2}\right)}|\mathbb{S}_d|^{-\frac{2s}{d}} \end{equation} and is attained on the family of functions \[ F(x):=a\left[b^2+(x-x_0)^2\right]^{-\frac{d-2s}{2}}\,,\qquad\mbox{with $x,x_0\in\mathbb{R}^d$ and $a\in \mathbb{R}$\,, $b>0$\,.} \] \end{thm}
\subsection{Proof of Lemma \ref{Lem.phi}}\label{sec.A1}
\noindent {\sl Proof.~}The proof is divided into several steps.
\noindent$\bullet~$\textsc{Step 1. }\textit{The integral is convergent. }First we have to prove that \begin{equation*}
c_{d,s}^{-1}\left|(-\Delta)^s\varphi (x)\right|=\left|\int_{\mathbb{R}^d}\frac{\varphi (x)-\varphi (y)}{|x-y|^{d+2s}}\,{\rm d}y\right|<\infty\qquad\mbox{for any }x\in \mathbb{R}^d \end{equation*} to this end we fix $x\in \mathbb{R}^d$ and we split the integral in two parts: \[
\int_{\mathbb{R}^d}\frac{\varphi (x)-\varphi (y)}{|x-y|^{d+2s}}\,{\rm d}y
=\int_{|x-y|>\delta}\frac{\varphi (x)-\varphi (y)}{|x-y|^{d+2s}}\,{\rm d}y
+\int_{|x-y|\le \delta}\frac{\varphi (x)-\varphi (y)}{|x-y|^{d+2s}}\,{\rm d}y= II + I \] where $\delta>0$ is taken so small that the following Taylor expansion around $x\in\mathbb{R}^d$ holds true \[ \varphi (y)= \varphi (x)+\nabla\varphi (x)\cdot(y-x)+(y-x)^t \,{\rm D^2}\varphi (\overline{x})\,(y-x) \] for some $\overline{x}\in B_\delta(x)$\,. Therefore we have \[ \begin{split}
I &= \left|\int_{|x-y|\le \delta}\frac{\varphi (x)-\varphi (y)}{|x-y|^{d+2s}}\,{\rm d}y\right|\\
&= \left|\int_{|x-y|\le \delta}\frac{\nabla\varphi (x)\cdot(y-x)}{|x-y|^{d+2s}}\,{\rm d}y +
\int_{|x-y|\le \delta}\frac{(y-x)^t \,{\rm D^2}\varphi (\overline{x})\,(y-x)}{|x-y|^{d+2s}}\,{\rm d}y\right|\\
&\le_{(a)} \sup_{1\le i,j\le d}\|\partial_{ij}\varphi \|_{\mathrm{L}^\infty(\mathbb{R}^d)} \left|\int_{|x-y|\le \delta}\frac{1}{|x-y|^{d-(2-2s)}}\,{\rm d}y\right|\\
&\le \sup_{1\le i,j\le d}\|\partial_{ij}\varphi \|_{\mathrm{L}^\infty(\mathbb{R}^d)} \int_0^{\delta}\frac{{\rm d} r}{r^{1-2(1-s)}} =_{(b)}K\frac{\delta^{2(1-s)}}{(2(1-s))} \end{split} \] where in $(a)$ we have used that \[
P.V.\int_{|x-y|\le \delta}\frac{\nabla\varphi (x)\cdot(y-x)}{|x-y|^{d+2s}}\,{\rm d}y=0 \]
for symmetry reasons. In $(b)$ we used the fact that $|\partial_{ij}\varphi (z)|\le K$ for some positive constant $K$ that depends only on $\alpha$\,. On the other hand, the outer integral is easily seen to be finite, indeed \[
II= \left|\int_{|x-y|>\delta}\frac{\varphi (x)-\varphi (y)}{|x-y|^{d+2s}}\,{\rm d}y\right|
\le 2\|\varphi \|_{\mathrm{L}^\infty(\mathbb{R}^d)}\left|\int_{|x-y|>\delta}\frac{1}{|x-y|^{d+2s}}\,{\rm d}y\right| \le 2\omega_d \int_\delta^{\infty}\frac{{\rm d} r}{r^{1+2s}}=\frac{\omega_d}{s\delta^{2s}}\,. \]
The above estimates for $I$ and $II$ do not depend on $x\in\mathbb{R}^d$, hence $\left|(-\Delta)^s\varphi (x)\right|$ is finite for all $x\in \mathbb{R}^d$\,. \begin{figure}
\caption{\noindent\textit{The $4$ regions in which we split the integral}}
\label{fig.3}
\end{figure}
\noindent$\bullet~$\textsc{Step 2. }\textit{Better estimates for $|x|$ large. } We are going to use the hypothesis that $\varphi$ is radially symmetric and decreasing for $|x|\ge 1$ and that $\varphi(x)\le |x|^{-\alpha}$\,, $|D^2\varphi(x)| \le c_0 |x|^{-\alpha-2}$\,, for some positive constant $\alpha$ and for $|x|$ large enough. We are interested in the behaviour of $|(-\Delta)^s\varphi (x)|$ for large values of $x$, therefore we fix $x\in \mathbb{R}^d$ with $|x|$ sufficiently large. We have to estimate \begin{equation*}
c_{d,s}^{-1}\left|(-\Delta)^s\varphi (x)\right|=\left|\int_{\mathbb{R}^d}\frac{\varphi (x)-\varphi (y)}{|x-y|^{d+2s}}\,{\rm d}y\right|\,, \end{equation*} to this end we split the integral into four parts, see Figure \ref{fig.3}, \begin{equation}\label{splitting.4}\begin{split}
\int_{\mathbb{R}^d}\frac{\varphi (x)-\varphi (y)}{|x-y|^{d+2s}}\,{\rm d}y
=\int_{|y|> 3|x|/2}\frac{\varphi (x)-\varphi (y)}{|x-y|^{d+2s}}\,{\rm d}y
+\int_{\left\{|x|\le 2|y|\le 3|x|\right\}\setminus B_{|x|/2}(x)}\frac{\varphi (x)-\varphi (y)}{|x-y|^{d+2s}}\,{\rm d}y\\
+\int_{ B_{|x|/2}(x)}\frac{\varphi (x)-\varphi (y)}{|x-y|^{d+2s}}\,{\rm d}y
+\int_{|y|<|x|/2}\frac{\varphi (x)-\varphi (y)}{|x-y|^{d+2s}}\,{\rm d}y =I+II+III+IV \end{split} \end{equation} We estimate the four integrals separately, keeping in mind that we are assuming $\varphi\ge 0$ in this latter case. The first integral can be estimated as follows \[
I=\int_{|y|> 3|x|/2}\frac{\varphi (x)-\varphi (y)}{|x-y|^{d+2s}}\,{\rm d}y
\le \omega_d \varphi (x) \int_{\frac{3|x|}{2}}^{\infty}\frac{{\rm d} r}{r^{1+2s}}=\frac{k_1}{|x|^{\alpha+2s}} \]
since $\varphi (y)\le \varphi (x)$ when $|y|> 3|x|/2$, therefore $|\varphi (x)-\varphi (y)|\le \varphi (x)$\,, and we remark that the constant $k_1$ depends only on $\alpha, s, d$, since $\varphi(x)\le |x|^{-\alpha}$ and $|x|$ is large enough. The second integral gives \[
II\le \left|\int_{\left\{|x|\le 2|y|\le 3|x|\right\}\setminus B_{|x|/2}(x)}\frac{\varphi (x)-\varphi (y)}{|x-y|^{d+2s}}\,{\rm d}y\right|
\le \frac{\varphi (x/2) }{\big(|x|/2\big)^{d+2s}}\int_{\frac{|x|}{2}}^{\frac{3|x|}{2}}r^{d-1}{\rm d} r\le \frac{k_2}{|x|^{\alpha+2s}} \]
since $\varphi (y)\le \varphi (x/2)$ when $|y|> |x|/2$, therefore $|\varphi (x)-\varphi (y)|\le \varphi (x/2)$\,, and we remark that the constant $k_2$ depends only on $\alpha, s, d$, since $\varphi(x/2)\le |x|^{-\alpha}$ and $|x|$ is large enough.\\ We can estimate the third integral as follows: \[ \begin{split}
III &= \left|\int_{|x-y|\le \frac{|x|}{2}}\frac{\varphi (x)-\varphi (y)}{|x-y|^{d+2s}}\,{\rm d}y\right|\\
&= \left|\int_{|x-y|\le \frac{|x|}{2}}\frac{\nabla\varphi (x)\cdot(y-x)}{|x-y|^{d+2s}}\,{\rm d}y +
\int_{|x-y|\le \frac{|x|}{2}}\frac{(y-x)^t \,{\rm Hess}\varphi (\overline{x})\,(y-x)}{|x-y|^{d+2s}}\,{\rm d}y\right|\\
&\le_{(a)} \sup_{1\le i,j\le d}\|\partial_{ij}\varphi \|_{\mathrm{L}^\infty(B_{\frac{|x|}{2}}(x))} \left|\int_{|x-y|\le \frac{|x|}{2}}\frac{1}{|x-y|^{d-(2-2s)}}\,{\rm d}y\right|\\
&\le_{(b)} \frac{k'_3}{|x|^{\alpha+2}} \int_0^{\frac{|x|}{2}}\frac{{\rm d} r}{r^{1-2(1-s)}}
=\frac{k'_3}{|x|^{\alpha+2}}\left(\frac{|x|}{2}\right)^{2-2s}=\frac{k_3}{|x|^{\alpha+2s}} \end{split} \] where in $(a)$ we have used that \[
P.V.\int_{|x-y|\le \delta}\frac{\nabla\varphi (x)\cdot(y-x)}{|x-y|^{d+2s}}\,{\rm d}y=0 \]
for symmetry reasons as in Step 1. In $(b)$ we used the fact that $|z-x|<|x|/2$ implies $|x|/2<|z|<3|x|/2$, therefore $|\partial^2_{ij}\varphi (z)|\le c_0/|z|^{\alpha+2}\le 2^{\alpha+2}c_0/|x|^{\alpha+2}$ for all $z\in B_{|x|/2}(x)$, recalling that $|x|$ is always taken large enough. The constants $k'_3$ and $k_3$ depend only on $\alpha, s,d$.\\ It only remains to estimate the fourth integral: \[
IV \le \int_{|y|<|x|/2}\frac{\big|\varphi (x)-\varphi (y)\big|}{|x-y|^{d+2s}}\,{\rm d}y
\le \frac{2^{d+2s}}{|x|^{d+2s}}\int_{|y|<|x|/2}\varphi (y)\,{\rm d}y. \]
since we observe that $|y|<|x|/2$ implies $\varphi (x)\le \varphi (2y)\le \varphi (y)$ which gives $|\varphi (x)-\varphi (y)|\le \varphi (y)$, moreover we also have that $|y|<|x|/2$ implies that $|y-x|> |x|/2$. The term represents the long-range influence of the inner core of the function at large distances and will make for different conclusions of the lemma depending on the case. Indeed, we have the following estimates for $|x|$ large enough:\begin{itemize}
\item If $\alpha>d$ the last integral is finite and we get $IV\le k_4/|x|^{d+2s}$.
\item If $\alpha< d$ the last integral grows like $|x|^{d-\alpha}$ and we get $IV\le k_5/|x|^{\alpha+2s}$.
\item Finally when $\alpha=d$ we get $IV\le k_6\log|x|/|x|^{d+2s}$\,. \end{itemize} We finally remark that the constants $k_4,k_5,k_6$ depend only on $\alpha,s,d$\,.
\noindent$\bullet~$\textsc{Step 3. }\textit{Positivity estimates for $|x|$ large. }In the case when $\alpha>d$ we need to prove that if $\varphi\ge0$ then we have that $|(-\Delta)^s\varphi(x)|\ge c_4 |x|^{-(d+2s)}$ for all $|x|\ge |x_0|>>1$\,. We split the integral into four parts, as in Step 2, equation \eqref{splitting.4}, see Figure \ref{fig.3}, \[
c_{d,s}^{-1}(-\Delta)^s\varphi (x)=\int_{\mathbb{R}^d}\frac{\varphi (x)-\varphi (y)}{|x-y|^{d+2s}}\,{\rm d}y =I+II+III+IV \]
We have proven in Step 2 that $|I|+|II|+|III|\le (k_1+k_2+k_3)\,/|x|^{\alpha+2s}$ and we recall that the constants $k_i$ depend only on $\alpha, s, d$. We just have to obtain better estimates for the last term, to this end we further split the integral into two parts: \[\begin{split}
IV = \int_{|y|<|x|/2}\frac{\varphi (x)-\varphi (y)}{|x-y|^{d+2s}}\,{\rm d}y
=\int_{|y|<|x|/2}\frac{\varphi (x)}{|x-y|^{d+2s}}\,{\rm d}y -\int_{|y|<|x|/2}\frac{\varphi (y)}{|x-y|^{d+2s}}\,{\rm d}y = IV_a- IV_b \end{split} \] Let us calculate \[
0\le IV_a=\int_{|y|<|x|/2}\frac{\varphi (x)}{|x-y|^{d+2s}}\,{\rm d}y
\le \frac{\varphi(x)}{(|x|/2)^{d+2s}} \int_{|y|<|x|/2}\,{\rm d}y
\le \frac{k_4}{|x|^{\alpha+2s}} \]
since $|x-y|\ge |x|/2$ when $|y|\le |x|/2$ and $\varphi(x)\le |x|^{-\alpha}$. We remark that the constant $k_4$ depends only on $\alpha, s, d$. On the other hand, $IV_b\ge 0$ and \[
0\le IV_b=\int_{|y|<|x|/2}\frac{\varphi (y)}{|x-y|^{d+2s}}\,{\rm d}y
\le \frac{1}{(|x|/2)^{d+2s}} \int_{|y|<|x|/2}\varphi(y)\,{\rm d}y
\le \frac{\|\varphi\|_{\mathrm{L}^1(\mathbb{R}^d)}}{|x|^{d+2s}} \] Summing up, we have obtained that \[ \begin{split}
-(-\Delta)^s\varphi (x)&=-c_{d,s}\int_{\mathbb{R}^d}\frac{\varphi (x)-\varphi (y)}{|x-y|^{d+2s}}\,{\rm d}y
\ge c_{d,s}\left[IV_b -\big(\,|I|+|II|+|III|+|IV_a|\,\big)\right]\\
&\ge c_{d,s}\left[\frac{\|\varphi\|_{\mathrm{L}^1(\mathbb{R}^d)}}{|x|^{d+2s}}-\frac{k_5}{|x|^{\alpha+2s}}\right]
=\left[\|\varphi\|_{\mathrm{L}^1(\mathbb{R}^d)}-\frac{k_5}{|x|^{\alpha-d}}\right]\frac{c_{d,s}}{|x|^{d+2s}}\ge \frac{c_4}{|x|^{d+2s}} \end{split} \]
since $|I|+|II|+|III|+|IV_a|\le (k_1+k_2+k_3+k_4)\,/|x|^{\alpha+2s}=k_5\,/|x|^{\alpha+2s}$ and $c_4>0$ since $\alpha>d$, if we choose $|x|$ sufficiently large, namely $|x|^{\alpha-d}\ge k_5/\|\varphi\|_{\mathrm{L}^1(\mathbb{R}^d)}$\,.\qed
\normalcolor
\subsection{Optimization Lemma}\label{app.opt}
We state and prove here a simple technical lemma that has been used in the proof of Theorem \ref{thm.lower}\,.
\begin{lem}\label{Lem.Opt} Let $0<m,s<1$\,, $2s>d(1-m)$\,, $\vartheta=1/[2s-d(1-m)]>0$ and $B,C,t>0$. Define \[ F(t,R):=\frac{A(t)}{R^{d(1-m)}}-\frac{B\,t}{R^{2s}}\,,\qquad\mbox{with}\qquad A(t):= M-\frac{C}{t^{d(1-m)\vartheta}}\,. \] Then there exists \begin{equation}\label{t.min} t_*:=2s\vartheta\left(\frac{C}{M}\right)^{\frac{1}{d(1-m)\vartheta}}>0 \end{equation} and \begin{equation}\label{R.max} \overline{R}(t)=\left(\frac{2sBt}{d(1-m)A(t)}\right)^{\vartheta} \ge\overline{R}(t_*)=\left[\frac{2s}{d(1-m)}\frac{(2s\vartheta)^{2s\vartheta}}{(2s\vartheta)^{d(1-m)}-1}\right]^{\vartheta}\frac{B^\vartheta C^{\frac{1}{d(1-m)}}}{M^{\frac{2s\vartheta}{d(1-m)}}} >0 \end{equation} so that for all $t\ge t_*$ we have \[ F(\overline{R}(t),t)=\left[\left(\frac{2s}{d(1-m)}\right)^{\frac{1}{\vartheta}}-1\right]\left[\frac{d(1-m)}{2s}\right]^{2s\vartheta} \frac{A(t)^{2s\vartheta}}{(Bt)^{d(1-m)\vartheta}}>0\,. \] \end{lem} \noindent {\sl Proof.~}First we observe that $A(t)$ is monotone increasing in $t>0$, and that $A(t_*/2s\vartheta)=0$, where $t_*$ has the expression given by \eqref{t.min}, so that $A(t)>A(t_*)>A(t/2s\vartheta)=0$ since $2s\vartheta>1$, and \[ A(t_*)=\frac{(2s\vartheta)^{d(1-m)\vartheta}-1}{(2s\vartheta)^{d(1-m)\vartheta}}M>0 \] Moreover, it easy to check that $t_*$ is also the value for which $A(t_*)-t_*A'(t_*)=0$. Next we fix a time $t\ge t_*$ and we find the maximum with respect to $R$ of the function $F(t,R)$: \[ \partial_R F(R,t)=-\frac{d(1-m)A(t)}{R^{d(1-m)\vartheta+1}}+\frac{2sB\,t}{R^{2s+1}}\,, \] and $\partial_R F(\overline{R}(t),t)=0$, so that the maximum is attained at $\overline{R}(t)$ whose expression it is easily checked to be \eqref{R.max}. 
It only remains to prove that $\overline{R}(t)\ge\overline{R}(t_*)>0$, to this end we observe that \[ \partial_t \overline{R}(t)=\vartheta\left[\frac{2sB}{d(1-m)}\right]^\vartheta\left[\frac{t}{A(t)}\right]^{\vartheta-1}\frac{A(t)-tA'(t)}{A(t)^2} \] and it is clear now that the minimum is attained at $t_*$\,, since $\partial_t \overline{R}(t_*)=0$\,, because we already know that $A(t_*)-t_*A'(t_*)=0$.\qed
\subsection{Reminder about measure theory}\label{appendix.meas}
We recall here some basic facts on measure theory for convenience of the reader. We refer the interested reader to the books \cite{EvansMeas, RudinRealCplx}.
\begin{defn} A measure $\mu$ is \textsl{regular} if \[ \forall A\subseteq \mathbb{R}^d \; \exists B\;\mu\mbox{-measurable such that }A\subseteq B\; \mbox{and}\;\mu(A)=\mu(B)\,. \] A measure $\mu$ is \textsl{Borel} if every Borel set $\mathcal{B}(\mathbb{R}^d)$ is $\mu$-measurable. A measure $\mu$ is \textsl{Borel regular }if \[ \forall A\subseteq \mathbb{R}^d \; \exists B\in\mathcal{B}(\mathbb{R}^d)\mbox{ such that }A\subseteq B\; \mbox{and}\;\mu(A)=\mu(B)\,. \] A measure $\mu$ is \textsl{Radon} if is Borel regular and $\mu(K)<+\infty$ for any compact set $K\subset\mathbb{R}^d$.\\ A sequence of measures $\mu_n$ \textsl{converges weakly (star)} to the measure $\mu$, $\mu_n\rightharpoonup \mu$ as $n\to\infty$ if \[ \lim_{n\to\infty}\int_{\mathbb{R}^d}\varphi\,{\rm d}\mu_n=\int_{\mathbb{R}^d}\varphi\,{\rm d}\mu\qquad\mbox{for all }\varphi\in C^0_c(\mathbb{R}^d)\,. \] \end{defn} \begin{thm}[Weak compactness for measures]\label{thm.cpt.radon} Let $\{\mu_n\}$ be a sequence of Radon measures on $\mathbb{R}^d$ satisfying \[ \sup_{n}\mu_n(K)<\infty\qquad\mbox{for any compact set $K\subset\mathbb{R}^d$.} \] Then there exists a subsequence $\mu_{n_k}$ and a Radon measure $\mu$ such that $\mu_{n_k}\rightharpoonup \mu$ as $k\to\infty$\,. \end{thm}
\begin{thm}[Riesz Representation Theorem]\label{thm.riesz.radon} Assume $L:C_c^{\infty}(\mathbb{R}^d)\to\mathbb{R}$ is linear and nonnegative, so that \[ L\varphi\ge 0\qquad\mbox{for all }0\le \varphi\in C_c^{\infty} \] Then there is a unique Radon measure $\mu$ on $\mathbb{R}^d$ such that \[ L\varphi=\int_{\mathbb{R}^d}\varphi\,{\rm d}\mu \qquad\mbox{for all } \varphi\in C_c^{\infty} \] \end{thm}
\color{darkblue} \section{Appendix II. Applied literature and motivation}\label{App.Motiv}
We gather here some updated information on the occurrence of the nonlinear fractional diffusion equation we propose and related models in the physical or probabilistic literature.
$\bullet$ A great variety of diffusive problems in nature, namely those referred to as normal diffusion, are satisfactorily described by the classical Heat Equation or Fokker-Planck linear equation. However, anomalous diffusion is nowadays intensively studied, both theoretically and experimentally since it conveniently explains a number of phenomena in several areas of physics, biology, ecology, geophysics, and many others, which can be briefly summarized as having non-Brownian scaling. A large variety of phenomena in physics and finance are modeled by linear anomalous diffusion equations, see e.g. \cite{A2009, C2004, W2001}. Fractional kinetic equations of the diffusion, diffusion-advection, and Fokker-Planck type represent a useful approach for the description of transport dynamics in complex systems which are governed by anomalous diffusion. These fractional equations are usually derived asymptotically from basic random walk models, cf. \cite{MK2000}.
$\bullet$ Anomalous diffusion often takes a nonlinear form. To be more specific, there exist many phenomena in nature where, as time goes on, a crossover is observed between different diffusion regimes. Tsallis et al. \cite{BGT2000, LMT2003} discuss the following cases: (i) a mixture of the porous medium equation, which is connected with non-extensive statistical mechanics, with the normal diffusion equation; (ii) a mixture of the fractional time derivative and normal diffusion equations; (iii) a mixture of the fractional space derivative, which is related with L\'evy flights, and normal diffusion equations. In all three cases a crossover is obtained between anomalous and normal diffusions. This leads to models of nonlinear diffusion of porous medium or fast diffusion types with standard or fractional Laplace operators, cf. eqn. (4) of \cite{BGT2000}.
$\bullet$ There have been many studies of hydrodynamic limits of interacting particle systems with long-range dynamics, which lead to fractional diffusion equations of our type, mainly linear like in \cite{JKOlla}, \cite{Jara0}, but also nonlinear in the recent literature, cf. the works \cite{Jara1}, \cite{Jara2}. Thus, in the last reference, Jara and co-authors study the non-equilibrium functional central limit theorem for the position of a tagged particle in a mean-zero one-dimensional zero-range process. The asymptotic behavior of the particle is described by a stochastic differential equation governed by the solution of the following nonlinear hydrodynamic (PDE) equation, $\partial_t \rho = \sigma^2 \partial^2_x \Phi(\rho)$. When $\Phi$ is a power we recover equation \eqref{FDE.eq}.
$\bullet$ Equations like the last one (in several space dimensions) occur in boundary heat control, as already mentioned by Athanasopoulos and Caffarelli \cite{AC2010}\,, where they refer to the model formulated in the book by Duvaut and Lions \cite{DL1972}, and use the so-called Caffarelli-Silvestre extension \cite{CS2007}.
$\bullet$ The combination of diffusion with convection is an important research topic with abundant literature. The use of nonlinear fractional diffusion in that setting has been studied by Cifani and Jakobsen \cite{CJ}, where references to other models is given.
$\bullet$ A different version of the nonlinear fractional diffusion equation of porous medium type takes the form $u_t=\nabla (u\, \nabla {\cal K} u)$, where $K$ is the Riesz operator that expresses the inverse to the fractional Laplacian, ${\cal K} u=(-\Delta)^{-s}u$. This has been studied by Caffarelli and Vazquez in \cite{CV2010, CV2011} and Biler, Karch and Monneau in \cite{BKM2010}, where the equation is derived in the framework of the theory of dislocations. This model has some strikingly different properties, like lack of strict positivity and occurrence of free boundaries. See the survey \cite{VazAbel}. \normalcolor
\noindent \textbf{\large \sc Acknowledgment.} Both authors have been partially funded by Project MTM2011-24696 (Spain). We are grateful to A. de Pablo, F. Quir\'os and A. Rodriguez for discussions of the topic, and to the referee for enlightening suggestions.
\vskip .3cm
\end{document} |
\begin{document}
\noindent
\title[Kronecker limit formulas]{The Kronecker limit formulas via the distribution relation} \author{Kenichi Bannai} \address{Department of Mathematics, Keio University, 3-14-1 Hiyoshi, Kouhoku-ku, Yokohama 223-8522, Japan} \email{bannai@math.keio.ac.jp} \author{Shinichi Kobayashi} \address{Graduate School of Mathematics, Nagoya University,
Furo-cho Chikusa-ku, Nagoya 464-8602, Japan} \email{shinichi@math.nagoya-u.ac.jp} \footnote{The 2000 Mathematics Subject Classification: 11M35, 11M36, 11S80} \begin{abstract}
In this paper, we give a proof of the classical Kronecker limit formulas using the distribution relation of the Eisenstein-Kronecker series.
Using a similar idea, we then prove $p$-adic analogues of the Kronecker limit formulas for the
$p$-adic Eisenstein-Kronecker functions defined in our previous paper. \end{abstract}
\maketitle
\section{Introduction}
In these notes, we will give a proof of the classical first and second Kronecker limit formulas concerning the limit of values of Eisenstein-Kronecker-Lerch series. Our proof is based on the distribution relation of the Eisenstein-Kronecker-Lerch series. Using a similar idea, we then prove $p$-adic analogues of these formulas for the $p$-adic Eisenstein-Kronecker functions defined in our previous paper \cite{BFK}.
Let $\Gamma \subset \mathbb{C}$ be a lattice. We define a pairing for $z$, $w \in \mathbb{C}$ by $\pair{z,w}_\Gamma := \Exp {(z \overline{w} - w \overline{z})/A(\Gamma)}$, where $A(\Gamma)$ is the area of the fundamental domain of $\Gamma$ divided by $\boldsymbol\pi = 3.1415\cdots$. Then for an integer $a$ and a fixed $z_0$, $w_0 \in \mathbb{C}$, the Eisenstein-Kronecker-Lerch series is defined as \begin{equation*} \label{equation: definition of Eisenstein-Kronecker*}
K^*_a(z_0,w_0,s; \Gamma) = {\sum}^*_{\gamma \in \Gamma}
\frac{(\overline{z}_0 + \overline{\gamma})^a}{|z_0 + \gamma|^{2s}} \pair{\gamma, w_0}_\Gamma, \end{equation*} where $\sum^*$ denotes the sum taken over all $\gamma\in\Gamma$ except for $\gamma=-z_0$ if $z_0 \in \Gamma$. The above series converges for $\operatorname{Re}(s) > a/2+1$, but one may give it meaning for general $s$ by analytic continuation. In what follows, we omit the $\Gamma$ from the notations if there is no fear of confusion.
We let $\theta(z)$ be the reduced theta function on $\mathbb{C}/\Gamma$ associated to the divisor $[0] \subset \mathbb{C}/\Gamma$, normalized so that $\theta'(0) =1$. Then the Kronecker limit formulas are given as follows.
\begin{theorem}[Kronecker limit formulas]\label{thm: KLF}
Let $c$ be the Euler constant
$
c:=\lim_{n\rightarrow \infty} \left(1+\frac{1}{2}+ \cdots +\frac{1}{n}-\log n\right),
$
and let $\Delta$ be the discriminant of $\Gamma$ defined as $\Delta:=g_2^3-27g_3^2$, where
$
g_k:=\sum_{\gamma \in \Gamma \setminus \{0\}} \gamma^{-2k}.
$
Then we have the following.
\begin{enumerate}
\item The first limit formula
$$
\lim_{s \rightarrow 1}\left(A K^*_0(0, 0, s)-\frac{1}{s-1}\right)
=-\frac{1}{12}\log|\Delta|^2-2\log A+2c.
$$
\item For $z \notin \Gamma$, the second limit formula
$$
A K^*_0(0,z,1)=-\log |\theta(z)|^2+\frac{|z|^2}{A}-\frac{1}{12}\log|\Delta|^2.
$$
\end{enumerate} \end{theorem}
Numerous proofs exist for this classical formula, and many of the proofs rely on arguments concerning the moduli space. We give a proof of the above theorem, valid for a fixed lattice $\Gamma \subset \mathbb{C}$, using the Kronecker theta function. As in the original proof by Kronecker, we first prove the second limit formula, and then deduce the first limit formula from the second. The key in our proof is the distribution relation for the Eisenstein-Kronecker function.
Our view of understanding the Kronecker limit formulas in terms of the Kronecker theta function and the distribution relation allows us to prove the following $p$-adic analogues of Theorem \ref{thm: KLF}. Suppose now that $\Gamma$ corresponds to a period lattice corresponding to the invariant differential $\omega = dx/y$ of an elliptic curve $E: y^2 = 4x^3 - g_2 x - g_3$ with complex multiplication by the ring of integers $\mathcal{O}_{\boldsymbol{K}}$ of an imaginary quadratic field ${\boldsymbol{K}}$. We assume in addition that $E$ is defined over ${\boldsymbol{K}}$, and that the model above has good reduction at the primes above $p \geq 5$. We fix a branch of the $p$-adic logarithm, which is a homomorphism $\log_p : \mathbb{C}_p^\times \rightarrow \mathbb{C}_p$. In the paper \cite{BFK}, we introduced the $p$-adic Eisenstein-Kronecker series $E^\mathrm{col}_{a,b}(z)$ as a Coleman function for integers $a$, $b$ such that $b \geq 0$. This function is a $p$-adic analogue of the Kronecker double series $$
K^*_{a+b}(0,z,b) = {\sum}^* _{\gamma\in\Gamma}\frac{\overline\gamma^{a+b}}{|\gamma|^{2b}} \pair{\gamma,z}. $$ In this paper, we let $$
K^\mathrm{col}_{a+b}(0,z,b) := E^\mathrm{col}_{a,b}(z) $$ to highlight the analogy. Then in analogy with Theorem \ref{thm: KLF} (ii), we have the following.
\begin{theorem}[$p$-adic Kronecker second limit formula]\label{thm: pKLF}
For any prime $p \geq 5$ of good reduction, we have the second limit formula
$$
K^\mathrm{col}_0(0,z,1)= - \log_p \theta(z) - \frac{1}{12}\log_p \Delta,
$$
where $\log_p \theta(z)$ is a certain $p$-adic analogue of the function $\log|\theta(z)| - |z|^2/A$
defined in Definition \ref{def: log-p-theta} using the reduced theta function $\theta(z)$ and the branch of our $p$-adic logarithm. \end{theorem}
The $p$-adic analogues of Kronecker second limit formula were previously investigated by Katz \cite{Ka} and de Shalit \cite{dS} in the context of $p$-adic $L$-functions when $p$ is a prime of good ordinary reduction. Our formulation via $p$-adic Eisenstein-Kronecker series gives a direct $p$-adic analogue, and is valid even for supersingular $p$.
When $p \geq 5$ is a prime of good ordinary reduction, we defined in \cite{BK1} \S 3.1 a two-variable $p$-adic measure $\mu:=\mu_{0,0}$ on $\mathbb{Z}_p \times \mathbb{Z}_p$ interpolating Eisenstein-Kronecker numbers, or more precisely, the values $K^*_{a+b}(0,0,b)/A^a$ for $a$, $b \geq 0$. We define the $p$-adic Eisenstein-Kronecker-Lerch series by $$
K^{(p)}_{a}(0, 0, s) := \int_{\mathbb{Z}^\times_p \times \mathbb{Z}^\times_p} \pair{x}^{s-1} \omega(y)^{a-1}\pair{y}^{a-s} d \mu(x,y) $$ for any $s \in \mathbb{Z}_p$, where $\pair{-} : \mathbb{Z}_p^\times \rightarrow \mathbb{C}_p^\times$ is given as the composition $\mathbb{Z}_p^\times \rightarrow 1 + p\mathbb{Z}_p \hookrightarrow \mathbb{C}_p^\times$ and $\omega: \mathbb{Z}_p^\times \rightarrow \mu_{p-1}$ is the Teichm\"uller character. Then an argument similar to the proof of Theorem \ref{thm: KLF} (i) gives the following.
\begin{proposition}\label{pro: pKLF}
Suppose $p \geq 5$ is a prime of good ordinary reduction. Then
$$
\lim_{s\rightarrow 1} K^{(p)}_{0}(0,0,s) = \Omega_p^{-1}\left( 1 - \frac{1}{p} \right) \log_p \overline\pi
$$
where $\Omega_p$ is a $p$-adic period of the formal group of $E$. \end{proposition}
The proof of the above proposition is similar to that of the proof of Theorem \ref{thm: KLF}. However, due to the existence of a trivial zero for the function $K^{(p)}_0(0,0,s)$ at $s=1$, the analogy with the classical case is not perfect. See Remark \ref{rem: not pKLF} for details.
\section{Kronecker limit formulas}
\subsection{Kronecker's Theorem}
In this section, we recall the definition of Eisenstein-Kronecker-Lerch series and the Kronecker theta function. Then we will state Kronecker's theorem giving the relation between the two. All of the results are contained in \cite{We1}. See also \cite{BK1} or \cite{BKT}.
We fix a lattice $\Gamma$ in $\mathbb{C}$ and let $A$ be the area of the fundamental domain of $\Gamma$ divided by $\pi$. Let $a$ be an integer $\geq 0$. For a fixed $z_0$, $w_0 \in \mathbb{C}$, we let $\theta^*_a(t,z_0,w_0)$ be the function $$
\theta^*_a(t,z_0,w_0) = {\sum_{\gamma \in \Gamma}}^* \exp(- t|z_0 + \gamma|^2/A) \pair{\gamma, w_0} (\overline z_0 +\overline \gamma)^a, $$ where $\sum^*$ means the sum taken over all $\gamma \in \Gamma$ other than $-z_0$ if $z_0$ is in $\Gamma$. Furthermore, we let $$
I_a(z_0, w_0, s) := \int_{1}^\infty\theta^*_a(t,z_0,w_0) t^{s-1} dt. $$ Then we have \begin{multline}\label{eq: integral expression}
A^s \Gamma(s) K^*_a(z_0, w_0, s) = I_a(z_0, w_0, s)
- \frac{\delta_{z_0, a}}{s} \pair{w_0,z_0}\\
+ I_{a} (w_0, z_0, a+1-s) \pair{w_0,z_0} + \frac{\delta_{w_0, a}}{s-1}, \end{multline} where $\delta_{x,a} = 1$ if $a =0$ and $x \in \Gamma$, and $\delta_{x,a} = 0$ otherwise. The above integral expression gives the meromorphic continuation of $K^*_a(z_0, w_0, s)$ to the whole complex plane, and also the functional equation.
We next review the definition of the Kronecker theta function. We let $\theta(z)$ be the reduced theta function on $\mathbb{C}/\Gamma$ associated to the divisor $[0] \in \mathbb{C}/\Gamma$, normalized so that $\theta'(0) =1$. This function may be given explicitly in terms of the Weierstrass $\sigma$-function \begin{equation*}\label{product expansion of sigma}
\sigma(z) : = z \prod_{\gamma \in \Gamma \setminus \{ 0 \}}
\left( 1 - \frac{z}{\gamma} \right) \Exp{ \frac{z}{\gamma} + \frac{z^2}{2 \gamma^2}} \end{equation*} as follows. Let $e_{0,2}^* := \lim_{s \rightarrow 2^+} \sum_{\gamma \in \Gamma \setminus \{ 0 \}}
\overline{\gamma}^2 |\gamma|^{-2s}$. Then $\theta(z)$ is given as $$
\theta(z) = \Exp{\frac{- e_{0,2}^* z^2 }{2}} \sigma(z). $$ This function is known to satisfy the transformation formula $$
\theta(z+ \gamma) = \varepsilon(\gamma) \Exp{ \frac{z \overline \gamma}{A} + \frac{\gamma \overline \gamma}{2A}} \theta(z) $$ for any $\gamma \in \Gamma$, where $\varepsilon : \Gamma \rightarrow \{ \pm 1 \}$ is such that $\varepsilon(\gamma) = -1$ if $\gamma \in 2 \Gamma$ and $\varepsilon(\gamma)=1$ otherwise.
We define the Kronecker theta function $\Theta(z,w)$ by $$
\Theta(z,w) := \frac{\theta(z+w)}{ \theta(z)\theta(w)}. $$ The above function is known to be a reduced theta function associated to the Poincar\'e bundle on $\mathbb{C}/\Gamma\times\mathbb{C}/\Gamma$.
For any $z$, $w \in \mathbb{C}$ such that $z$, $w \not\in \Gamma$, we let $K_a(z,w,s) := K^*_a(z,w,s)$, which we view as a $\mathscr C^\infty$ function of $z$ and $w$. The relation between this function and the Kronecker theta function is given by the following theorem due to Kronecker. \begin{theorem}[Kronecker]\label{theorem; kronecker}
$$
\Theta(z,w) = \exp\left[ \frac{z \overline w}{A}\right] K_1(z,w,1).
$$ \end{theorem} The above theorem was originally proved in terms of Jacobi theta functions by Kronecker using moduli arguments (See for example \cite{We1}.) In \cite{BK1} or \cite{BK2}, we give another proof valid for a fixed lattice $\Gamma\subset\mathbb{C}$ using the fact that both sides of the equality are reduced meromorphic theta functions associated to the Poincar\'e bundle on $\mathbb{C}/\Gamma \times \mathbb{C}/\Gamma$, with the same poles and the same residue at each pole.
\subsection{Proof of the second limit formula.}
In this subsection, we deduce Theorem \ref{thm: KLF} (ii) from Theorem \ref{theorem; kronecker}.
\begin{proposition}\label{pro: up to C}
There exists a constant $C$ such that
$$
\log |\theta(z)|^2-\frac{|z|^2}{A}=-A K^*_0(0,z,1)+C
$$
for any $z \notin \Gamma$. \end{proposition}
\begin{proof}
By Theorem \ref{theorem; kronecker} and the fact that
$$
\lim_{z \rightarrow 0} \left[ K_1(z,w,1) -\frac{1}{ z} \right] = K^*_1(0,w,1),
$$
we have
\begin{equation*}
\lim_{z \rightarrow 0} \left( \Theta(z,w) - \frac{1}{z} \right)
= K^*_1(0,w,1) + \frac{\overline w}{A}.
\end{equation*}
Direct computation also shows that
\begin{equation*}
\lim_{z \rightarrow 0} \left(\Theta(z,w) - \frac{1}{z} \right)= \frac{\theta'(w)}{\theta(w)}.
\end{equation*}
Hence we have
$$
K^*_1(0,w,1) + \frac{\overline w}{A} = \frac{\theta'(w)}{\theta(w)}.
$$
In particular,
\begin{equation*}\label{equation: leff}
\frac{\partial}{\partial z} \left( \log \theta(z) - \frac{z \overline z}{A} \right)= K^*_1(0,z,1).
\end{equation*}
Therefore, if we let $\Xi(z)$ be the function
$$
\Xi(z) := \log |\theta(z)|^2 - \frac{|z|^2}{A},
$$
then we have
\begin{align*}
\frac{\partial}{\partial z} \Xi(z) = K^*_1(0,z,1), \qquad \frac{\partial}{\partial \overline z} \Xi(z)
= \overline{K^*_{1}(0,z,1)}.
\end{align*}
On the other hand, one can directly show that
\begin{align*}
A\frac{\partial}{\partial z} K^*_0(0,z,1) =- {K^*_1(0,z,1)}, \qquad
A\frac{\partial}{\partial \overline z} K^*_0(0,z,1)=- {\overline{K^*_1(0,z,1)}}.
\end{align*}
(See, for example, Lemma 2.5 and the first formula on p.~22 of \cite{BKT}.)
Hence $\Xi(z)+AK^*_0(0,z,1)$ must be constant. \end{proof}
Our goal is to determine the constant $C$. We use the following result, which is a type of distribution relation.
\begin{lemma}\label{lem: C}
We have
$$
\sum_{z_n \not=0} \;K^*_0(0, z_n, 1)=-\frac{2\log n}{A},
$$
where the sum is over all $n$-torsion points $z_n$ of $\mathbb{C}/\Gamma$ except zero. \end{lemma}
\begin{proof}
We have
$$
\sum_{z_n \in \frac{1}{n}\Gamma/\Gamma} \pair{\gamma, z_n}=
\begin{cases}
n^2 \qquad &(\gamma \in n\Gamma) \\
0 \qquad &(\gamma \notin n\Gamma).
\end{cases}
$$
Hence
$$
\frac{1}{n^2}\sum_{z_n} \;K^*_0(0, z_n, s)=\sum_{\gamma \in n\Gamma}
\frac{1}{|\gamma|^{2s}}=\frac{1}{n^{2s}}K^*_0(0, 0, s)
$$
when the real part of $s$ is sufficiently large, and hence for any $s$ by analytic continuation.
In particular, we have
$$
\frac{1}{n^2}\sum_{z_n\not=0} \;K^*_0(0, z_n, s)=\left(\frac{1}{n^{2s}}-\frac{1}{n^2}\right)K^*_0(0, 0, s).
$$
Since the residue of $K^*_0(0, 0, s)$ at $s=1$ is $1/A$, we have
$$
\frac{1}{n^2}\sum_{z_n\not=0} \;K^*_0(0, z_n, 1)=-\frac{2 \log n}{n^{2}A}
$$
as desired. \end{proof} The above lemma shows that the constant $C$ is $$
C=\frac{1}{n^2-1}\left[\sum_{z_n\not=0} \left( \log |\theta(z_n)|^2-\frac{|z_n|^2}{A}\right)-2\log n\right]. $$ We will now calculate this value explicitly in terms of $\Delta$.
\begin{proposition}\label{pro: C}
We have
$$
\frac{1}{4} \log|\Delta'|^2=-
\sum_{z_2\not=0}\left( \log |\theta(z_2)|^2-\frac{|z_2|^2}{A}\right)
$$
where $z_2$ runs through non-trivial $2$-torsion points of $\mathbb{C}/\Gamma$ and
$$
\Delta'=(e_1-e_2)^2(e_2-e_3)^2(e_3-e_1)^2
$$
for $y^2=4x^3-g_2x-g_3=4(x-e_1)(x-e_2)(x-e_3)$. \end{proposition}
\begin{proof}
Note that
$$
(x-e_1)(x-e_2)(x-e_3)=\prod_{z_2\not=0}(x-\wp(z_2)).
$$
Then if $\Gamma=\mathbb{Z} \omega_1+\mathbb{Z} \omega_2$,
we may suppose that $e_1=\wp(\omega_1/2)$, $e_2=\wp(\omega_2/2)$ and
$e_3=\wp((\omega_1+\omega_2)/2)$.
Since
$$
{\theta(z+w) \theta(z-w)}{\theta(z)^{-2}\theta(w)^{-2}}=\wp(w)-\wp(z),
$$
we have
$$
{\theta\left(\frac{\omega_1+\omega_2}{2}\right)\theta\left(\frac{\omega_1-\omega_2}{2}\right)}
{\theta\left(\frac{\omega_1}{2}\right)^{-2}\theta\left(\frac{\omega_2}{2}\right)^{-2}}
=e_2-e_1,
$$
$$
{\theta\left(\omega_1+\frac{\omega_2}{2}\right)\theta\left(\frac{\omega_2}{2}\right)}
{\theta\left(\frac{\omega_1+\omega_2}{2}\right)^{-2}\theta\left(\frac{\omega_1}{2}\right)^{-2}}
=e_1-e_3,
$$
$$
{\theta\left(\omega_2+\frac{\omega_1}{2}\right)\theta\left(\frac{\omega_1}{2}\right)}
{\theta\left(\frac{\omega_1+\omega_2}{2}\right)^{-2}\theta\left(\frac{\omega_2}{2}\right)^{-2}}
=e_2-e_3.
$$
Hence using the transformation formula of $\theta(z)$, the value $\Delta'$ is
$$
\exp\left[\frac{\omega_1\overline{\omega_1}+\omega_2\overline{\omega_2}+\overline{\omega_1}\omega_2}{A}\right]
{\theta\left(\frac{\omega_1}{2}\right)^{-4}
\theta\left(\frac{\omega_2}{2}\right)^{-4}\theta\left(\frac{\omega_1+\omega_2}{2}\right)^{-4}}.
$$
Multiplying it and its complex conjugation and taking the logarithm,
we obtain the formula. Note that since we take the
logarithm of {\it positive real} numbers, the values do not depend on the choice of the branch of the logarithm. \end{proof}
\begin{proof}[Proof of Theorem \ref{thm: KLF} (ii)]
Since the Ramanujan $\Delta$ is given by $\Delta=2^4 \Delta'$, we have by Lemma \ref{lem: C} and Proposition \ref{pro: C}
$$
C=\frac{1}{3}\left(-\frac{1}{4} \log|\Delta'|^2-2\log 2\right)=-\frac{1}{12}\log|\Delta|^2.
$$
Our assertion now follows from Proposition \ref{pro: up to C}. \end{proof}
\subsection{Proof of the first limit formula.}
We now prove Theorem \ref{thm: KLF} (i) using the second limit formula.
\begin{proof}[Proof of Theorem \ref{thm: KLF} (i)]
From (\ref{eq: integral expression}), we have
\begin{align*}
A^{s-1} \Gamma(s) & \left(A K^*_0(0, 0, s)-\frac{1}{s-1}\right)\\
&= I_0(0, 0, s)
- \frac{1}{s} + I_{0} (0, 0, 1-s) - \frac{A^{s-1} \Gamma(s)-1}{s-1}.
\end{align*}
Therefore, we have
\begin{equation}\label{eq: integral expression 1}
\lim_{s \rightarrow 1}\left(A K^*_0(0, 0, s)-\frac{1}{s-1}\right) = I_0(0, 0, 1)
-1 + I_{0} (0, 0, 0)
-\log A+c,
\end{equation}
where $c$ is the Euler constant as before and we used the fact $\Gamma'(1)=-c$.
On the other hand, we have
\begin{equation*}
AK^*_0(0, z, 1) = I_0(0, z, 1) - 1 + I_{0} (z, 0, 0).
\end{equation*}
We let $$I_{0}^* (z, 0, s)= I_{0} (z, 0, s)-\int_{1}^\infty \exp(-t|z|^2/A) t^{s-1} dt.$$
Then
$\displaystyle\lim_{z \rightarrow 0} I_0(0, z, 1)= I_0(0, 0, 1)$ and
$\displaystyle\lim_{z \rightarrow 0} I_0^*(z, 0, 0)= I_0(0, 0, 0)$.
We have
\begin{align*}
\Gamma(s)-\frac{1}{s}&=
\int_{|z|^2/A}^\infty e^{-t} t^{s-1} dt+\int_{0}^{|z|^2/A} e^{-t} t^{s-1} dt-\frac{1}{s}\\
&=
\int_{|z|^2/A}^\infty e^{-t} t^{s-1} dt+\int_{0}^{|z|^2/A} (e^{-t}-1) t^{s-1} dt+
\frac{1}{s}\left[ \left(\frac{|z|^2}{A}\right)^s-1\right].
\end{align*}
Taking $s \rightarrow 0$, we have
$$
-c=\int_{|z|^2/A}^\infty e^{-t} t^{-1} dt+\int_{0}^{|z|^2/A} (e^{-t}-1) t^{-1} dt
+\log \left(\frac{|z|^2}{A}\right).
$$
Hence
\begin{align*}
&AK^*_0(0, z, 1) = I_0(0, z, 1) - 1 + I^*_{0} (z, 0, 0)+\int_{1}^\infty \exp(-t|z|^2/A) t^{-1} dt \\
&= I_0(0, z, 1) - 1 + I^*_{0} (z, 0, 0)
-c-\int_{0}^{|z|^2/A} (e^{-t}-1) t^{-1} dt-\log \left(\frac{|z|^2}{A}\right).
\end{align*}
Therefore
\begin{align*}
\lim_{z \rightarrow 0}
\left(AK^*_0(0, z, 1)+\log |z|^2 \right)= I_0(0, 0, 1) - 1 + I_{0} (0, 0, 0)
-c+\log A.
\end{align*}
Finally, combining this with (\ref{eq: integral expression 1}) and the second limit formula, we have
\begin{align*}
\lim_{s \rightarrow 1}\left(A K^*_0(0, 0, s)-\frac{1}{s-1}\right)&=
\lim_{z \rightarrow 0}
\left(AK^*_0(0, z, 1)+\log |z|^2 \right) -2\log A+2c \\
=&-\frac{1}{12}\log|\Delta|^2-2\log A+2c.
\end{align*}
This proves our assertion. \end{proof}
\section{$p$-adic Kronecker limit formulas}
\subsection{The $p$-adic Eisenstein-Kronecker functions}
Assume now the conditions of the second half of the introduction. In \cite{BFK}, we defined a $p$-adic analogue of the Kronecker double series as a Coleman function on a CM elliptic curve. In order to prove the $p$-adic limit formulas, we
define in this subsection a $p$-adic analogue of the function $\log |\theta(z)|^2-|z|^2/A$, which turns out to be a Coleman function. We then prove the distribution relation, which will be used to characterize this function.
Let $p$ be a prime $\geq 5$. In what follows, fix an embedding of $\overline{\mathbb{Q}}$ into $\mathbb{C}_p$. We fix a branch of the logarithm, which is a homomorphism $\log_p: \mathbb{C}_p^\times \rightarrow \mathbb{C}_p$. We extend this homomorphism to $\mathbb{C}_p[[t]]$ by using the decomposition $\mathbb{C}_p[[t]]=\mathbb{C}_p \times (1+t\mathbb{C}_p[[t]])$ and defining
$\log_p (1-tf(t))=-\sum {t^nf^n(t)}/n$ for any $f(t) \in \mathbb{C}_p[[t]]$. Let $E$ be a CM elliptic curve as in the introduction and let $\Gamma$ be the period lattice of $E\otimes\mathbb{C}$. For $z_0 \in \Gamma \otimes \mathbb{Q}$, we let
$$
\theta_{z_0}(z):=\theta(z+z_0) \exp\left(-\frac{z\overline{z_0}}{A}-\frac{z_0\overline{z_0}}{2A}\right).
$$ Then by \cite{BK1}, the Taylor series of $\theta_{z_0}(z)$ at $z=0$ has algebraic coefficients. If we consider the formal composition $$
\widehat{\theta}_{z_0}(t):=\theta_{z_0}(z)|_{z=\lambda(t)} $$ of this series with $\lambda(t)$, where $\lambda(t)$ is the formal logarithm of the formal group of $E$, then we may regard this power series as an element in $\mathbb{C}_p[[t]]$. Considering its derivatives, we may prove that $\log_p \widehat{\theta}_{z_0}(t)$ is a rigid analytic function on
the open unit disc over $\mathbb{C}_p$, namely, it is convergent if $|t|<1$.
We use the same notations as in \cite{BFK}. In particular, we fix a prime $\mathfrak{p}$ in $\mathcal{O}_{\boldsymbol{K}}$ over $p \geq 5$, and we let $\pi:= \psi_{E/{\boldsymbol{K}}}(\mathfrak{p})$, where $\psi_{E/{\boldsymbol{K}}}$ is the Gr\"ossen character of ${\boldsymbol{K}}$ associated to the elliptic curve $E$. Then $\pi$ is a generator of the ideal $\mathfrak{p}$.
\begin{definition}\label{def: log-p-theta}
We let $\log _p \theta$ be the function in $A_{\log}(U)$ defined by
$$
\log _p \theta |_{]z_0[}:=\log_p \widehat{\theta}_{z_0}(t) \in A_{\log}(]z_0[)
$$
on each residue disc $]z_0[$. \end{definition}
Now we investigate basic properties of $\log_p {\theta}$.
\begin{proposition}
For $z_0 \in \Gamma \otimes \mathbb{Q}$ and $z_\alpha$ such that
$\alpha z_\alpha \in \Gamma$ for $\pi$-power morphism $\alpha \in \mathrm{End}_{\overline{\mathbb{Q}}}(E)$, we have
$$
\log_p \widehat{\theta}_{z_0}(t \oplus t_\alpha)=\log_p\widehat{\theta}_{z_0+z_\alpha}(t)
$$
where $z_\alpha \in \mathbb{C}$ is a lift of a torsion point in $\mathbb{C}/\Gamma$
corresponding to $t_\alpha \in E(\mathbb{C}_p)_{\mathrm{tor}}$, and the right hand side is independent
of the choice of the lift. \end{proposition}
\begin{proof}
Let $\alpha$ and $\beta$ be elements of $\mathcal{O}_K$ such that
$2\alpha | \beta$ and $\beta z_0 \in \Gamma$.
Then $f_{\beta}(z):=\theta(z)^{N\beta}/\theta( \beta z)$ is a rational function on $E$ over $\overline{\mathbb{Q}}$.
We have
\begin{equation}\label{equation: logtheta}
\theta_{z_0+z_\alpha}(z)^{N\beta}= \pm \theta( \beta z)
\tau_{z_0+z_\alpha}^*f_{\beta}(z).
\end{equation}
Similarly, we have
$$
\widehat{\theta}_{z_0}(z)^{N\beta}
= \pm \theta( \beta z) \tau_{z_0}^*f_{\beta}(z).
$$
Since $f_\beta$ is a rational function, we also have
$$
\tau_{z_0+z_\alpha}^*f_{\beta}(t)= \tau_{z_0}^*f_{\beta}(t\oplus t_\alpha).
$$
Hence we have
\begin{equation}\label{equation: logtheta2}
\widehat{\theta}_{z_0}(t \oplus t_\alpha)^{N\beta}
= \pm \theta( [\beta] t) \tau_{z_0+z_\alpha}^*f_{\beta}(t).
\end{equation}
Our assertion now follows from \eqref{equation: logtheta} and \eqref{equation: logtheta2}. \end{proof}
\begin{corollary}\label{corollary: interpolation}
Let $t_\alpha$ be a $\pi$-power torsion point, and we assume that $z_0 \not=0$ or $t_\alpha\not=0$.
Then we have
$$
\log_p \widehat{\theta}_{z_0}(t_\alpha)=
\log_p \left(\widehat{\theta}(z_0+z_\alpha) \exp\left[-\frac{(z_0+z_\alpha)\overline{(z_0+z_\alpha)}}{2A}\right] \right).
$$ \end{corollary}
Roughly speaking, $\log_p \theta(z)$ is a $p$-adic function which interpolates the special values $\log \theta(z)-z \overline{z}/2A$ at torsion points.
We thus regard $\log_p \theta(z)$ as a $p$-adic analogue of the function $\log |\theta(z)|^2-|z|^2/A$.
\subsection{The $p$-adic second limit formula}
In this subsection, we prove Theorem \ref{thm: pKLF}, which is a $p$-adic analogue of the Kronecker second limit formula.
\begin{proposition}\label{proposition: theta distribution}
Let $z_0$ be a $\mathfrak{f}$-torsion point of $\mathbb{C}/\Gamma$. Then for $\alpha \in \mathcal{O}_K$
we have
$$
\theta_{\alpha z_0}( \alpha z)^{24 N(\alpha\mathfrak{f})}= \Delta^{2N(\alpha\mathfrak{f})(N\alpha-1)}
\prod_{z_\alpha \in E[\alpha]} \theta_{z_0+z_\alpha}(z)^{24 N(\alpha\mathfrak{f})}
$$ where $z_\alpha$ is a lift of a $\alpha$-torsion point of $E$ and the right hand side is independent of the choice of the lifts $z_0$ and $z_\alpha$ on $\mathbb{C}$. \end{proposition} \begin{proof}
Since for $\gamma \in \Gamma$ we have
$\theta_{z_0+\gamma}(z)=\pm \pair{z_0/2, \gamma} \theta_{z_0}(z)$,
the function $ \theta_{z_0}(z)^{2N\mathfrak{f}}$ is independent of the lift $z_0$ if
$(N\mathfrak{f})z_0 \in \Gamma$. The independence of the lifts of $z_0$ and $z_\alpha$ follows from this fact.
The logarithmic derivatives of both sides coincide (see for example Proposition 2.15 of \cite{BFK}).
Hence for each $\alpha$, there exists a constant $c_\alpha$ such that
$$
\theta_{\alpha z_0}( \alpha z)^{2N(\alpha\mathfrak{f})}= c_\alpha
\prod_{z_\alpha \in E[\alpha]} \theta_{z_0+z_\alpha}(z)^{2N(\alpha\mathfrak{f})}.
$$
By the definition of $\theta_{z_0}(z)$, we see that $c_{\alpha}$ is independent of $z_0$.
Hence we may assume that $z_0=0$ and $N\mathfrak{f}=1$.
Then we have
\begin{align*}
\prod_{z_{\alpha \beta} \in E[\alpha \beta]} \theta_{z_{\alpha \beta}}(z)^{2N(\alpha\beta)}
= \prod_{z_{\alpha \beta} \in E[\alpha \beta]/E[\alpha]} \prod_{z_{\alpha } \in E[\alpha ]}
\theta_{z_{\alpha \beta}+z_{\alpha}}(z)^{2N(\alpha\beta)}\\
= \prod_{z_{\alpha \beta} \in E[\alpha \beta]/E[\alpha]}
c_\alpha^{N\beta} \theta_{ \alpha z_{\alpha \beta}}(\alpha z)^{2N(\alpha\beta)}\\
= c_\alpha^{N\beta^2} c_\beta^{2N\alpha} \theta(\beta \alpha z)^{2N(\alpha\beta)}.
\end{align*}
Hence we have
$
c_\alpha^{N\beta^2} c_\beta^{2N\alpha}=c_{\alpha \beta}=c_\beta^{N\alpha^2} c_\alpha^{2N\beta}
$
or equivalently,
$$
c_\alpha^{N\beta(N\beta-1)}=c_\beta^{N\alpha(N\alpha-1)}.
$$
In particular, $c_\alpha^{12}=c_2^{N\alpha(N\alpha-1)}$.
On the other hand, we consider the constant term of
$$
\frac{\theta(2z)^8}{\theta(z)^8}=c_2 \prod_{z_2 \in E[2]-\{0\}} \theta_{z_2}(z)^{8}.
$$
As in the proof of Proposition \ref{pro: C},
we have
$$
\prod_{z_2 \in E[2]-\{0\}} \theta_{z_2}(0)^{8}=\Delta'^{-2}.
$$
Hence $c_2=2^8 \Delta'^2=\Delta^2$. Our assertion now follows from these facts. \end{proof}
\begin{corollary}
The function $\Xi(z):= - \log_p \theta (z) -\frac{1}{12}\log_p \Delta$ satisfies the distribution relation
$$
\Xi(\alpha z)=\sum_{z_\alpha \in E[\alpha]} \Xi(z+z_\alpha).
$$ \end{corollary}
\begin{proof}
By Proposition \ref{proposition: theta distribution}, we have
$$
\log_p \widehat{\theta}_{\alpha z_0}([\alpha]t)=\frac{N\alpha-1}{12} \log_p \Delta
+\sum_{t_\alpha \in E[\alpha]} \log_p \widehat{\theta}_{z_0}(t \oplus t_\alpha).
$$
Our assertion follows from this formula. \end{proof}
We now prove the $p$-adic second limit formula.
\begin{proof}[Proof of Theorem \ref{thm: pKLF}]
Since the derivatives of $\Xi(z)$ and
$$
K_0^\mathrm{col}(0,z,1):=E^{\mathrm{col}}_{1,1}(z)
$$ are equal, their difference
$c(z)=\Xi(z)-E^{\mathrm{col}}_{1,1}(z)$ is a constant on the residue disc $]z_0[$.
By (i) and the definition of $E^{\mathrm{col}}_{1,1}(z)$, the locally constant function $c(z)$ satisfies the distribution relation.
For any torsion point $z_0$ of order $\mathfrak{f}$, we take $N$ such that $\pi^N \equiv 1 \mod \mathfrak{f}$.
Then $[\pi^N]^*(]z_0[)=]z_0[$ and
$$
[\pi^N]^*c(z) |_{]z_0[}=\sum_{w \in E[\pi^N]} c(z+w) |_{]z_0[}.
$$
Since $c(z) |_{]z_0[}$ is constant, the above relation shows $c(z) |_{]z_0[}=0$. \end{proof}
The above result shows in particular that $\Xi(z)=- \log_p \theta (z) -\frac{1}{12}\log_p \Delta$ is in fact a Coleman function.
\subsection{$p$-adic Eisenstein-Kronecker-Lerch series}
In this subsection, we introduce the $p$-adic Eisenstein-Kronecker-Lerch series. Then we consider a $p$-adic counterpart to the arguments concerning the Kronecker first limit formula in the classical case and prove Proposition \ref{pro: pKLF}.
Let $p \geq 5$ be a prime of good \textit{ordinary} reduction for $E$, and we fix a prime $\mathfrak{p}$ of $\mathcal{O}_{\boldsymbol{K}}$ over $p$. We defined in \cite{BK1} \S 3.1 a $p$-adic measure $\mu:=\mu_{0, 0}$ on $\mathbb{Z}_p \times \mathbb{Z}_p$ interpolating the Eisenstein-Kronecker numbers, or more precisely, the special values of Eisenstein-Kronecker-Lerch series $K^*_{a+b}(0,0,b; \Gamma)/A(\Gamma)^a$ for $a$, $b \geq 0$, where $\Gamma$ is the period lattice of $E$. We define the $p$-adic Eisenstein-Kronecker-Lerch function as in the introduction as follows.
\begin{definition} For any integer $a \in \mathbb{Z}$, we define the \textit{$p$-adic Eisenstein-Kronecker-Lerch function} by
$$
K^{(p)}_a(0,0,s) := \int_{\mathbb{Z}_p^\times \times \mathbb{Z}_p^\times} \pair{x}^{s-1}\pair{y}^{a-s} \omega(y)^{a-1} d \mu(x,y).
$$ \end{definition}
The $p$-adic Eisenstein-Kronecker-Lerch function is analytic in $s \in \mathbb{Z}_p$. The reason we view this function as a $p$-adic analogue of Eisenstein-Kronecker-Lerch series is the following interpolation property.
\begin{proposition}
For any integer $a$, $b$ such that $a \geq b > 0$ and $b \equiv 1 \pmod{p-1}$, we have
\begin{equation}\label{eq: interpolation}
\frac{K^{(p)}_a(0,0,b)}{\Omega_p^{a-1}}
=(-1)^{a-1}(b-1)! \left(1-\frac{\pi^{a}}{p^{a-b+1}}\right)
\left(1-\frac{\pi^{a}}{p^b} \right)\frac{K^*_{a}(0,0,b)}{A(\Gamma)^{a-b}},
\end{equation}
where $\Omega_p$ is a $p$-adic period of the formal group of $E$. \end{proposition}
\begin{proof}
This follows from the interpolation property of the measure $\mu:=\mu_{0,0}$ given in \cite{BK1} Proposition 3.5. \end{proof}
We now give the proof of Proposition \ref{pro: pKLF}.
\begin{proof}[Proof of Proposition \ref{pro: pKLF}]
We consider the function
$$
f(t):=\Omega_p \int_{\mathbb{Z}_p^\times \times \mathbb{Z}_p^\times}y^{-1} \exp(y\Omega_p^{-1} \lambda(t)) d\mu(x,y)
$$
on the $p$-adic residue disc $]0[$ around $0$.
Then
\begin{multline*}
\lambda'(t)^{-1}\frac{d}{dt}f(t)=\int_{\mathbb{Z}_p^\times \times \mathbb{Z}_p^\times} \exp(y\Omega_p^{-1} \lambda(t)) d\mu(x,y)\\
= F_1(t; \Gamma)-{\overline{\pi}}^{-1}F_1([\pi]t; \Gamma)-F_1(t; \overline\mathfrak{p}\Gamma)
+{\overline{\pi}}^{-1} F_1([\pi]t; \overline\mathfrak{p}\Gamma).
\end{multline*}
Hence for $E_{1,1}^{(p)}(z; \Gamma) := E_{1,1}^\mathrm{col}(z; \Gamma) - p^{-1} E_{1,1}^\mathrm{col}(\pi z; \Gamma)$, the function
$$
f(t)-E_{1,1}^{(p)}(z; \Gamma)+E_{1,1}^{(p)}(z; \overline\mathfrak{p}\Gamma)
$$
is a constant on the residue disc $]0[$.
Furthermore, both functions satisfy the distribution relation $\sum_{t_{\pi} \in E[\pi]} f(t\oplus t_{\pi})=0$ and
$\sum_{t_{\pi} \in E[\pi]} E_{1,1}^{(p)}(t\oplus t_{\pi})=0$. Hence we must have
$f(t)=E_{1,1}^{(p)}(z; \Gamma)-E_{1,1}^{(p)}(z; \overline\mathfrak{p}\Gamma)$
on ${]0[}$. On the other hand, the $p$-adic second limit formula shows that
$$
E_{1,1}^{(p)}(z; \Gamma)=\log_p \theta(z; \Gamma)
-\frac{1}{p}\log_p \theta(\pi z; \Gamma)+\frac{1}{12}\left(1-\frac{1}{p}\right) \log_p \Delta(\Gamma),
$$
hence we have
$$
f(0) = \bigl.E_{1,1}^{(p)}(z; \Gamma)-E_{1,1}^{(p)}( z; \overline{\pi}\Gamma)\;\bigr|_{z=0}=
\left(1-\frac{1}{p}\right) \log_p \overline{\pi}.
$$
Our assertion now follows from the fact that
$
f(0)=\Omega_p K^{(p)}_0(0,0,1).
$ \end{proof}
\begin{remark}\label{rem: not pKLF}
In the interpolation formula of \eqref{eq: interpolation}, if we let $a=0$ and $b=1$, then
the interpolation factor of the right hand side vanishes. Hence the value
$$
\Omega_p K^{(p)}_0(0,0,1) = \int_{\mathbb{Z}_p^\times \times \mathbb{Z}_p^\times} y^{-1} d\mu(x,y)
$$
is in some sense not the constant term but
the residue at $s=1$ of the $p$-adic analogue of ${\sum}^*_{\gamma \in \Gamma} 1/|\gamma|^{2s}$.
Because of this fact, the formula of Proposition \ref{pro: pKLF} is not a perfect $p$-adic analogue of the classical Kronecker first limit formula. \end{remark}
\end{document} |
\begin{document}
\title{Minimum Coverage Instrumentation}
\begin{abstract}
Modern compilers leverage block coverage profile data to carry out downstream profile-guided
optimizations to improve the runtime performance and the size of a binary.
Given a control-flow graph $G=(V, E)$ of a function in the binary, where nodes in $V$ correspond to basic blocks (sequences of instructions that are always executed sequentially) and edges in $E$ represent jumps in the control flow, the goal is to know for each block $u \in V$ whether $u$ was executed during a session. To this end, extra instrumentation code that records when a block is executed needs to be added to the binary. This extra code creates a time and space overhead, which one would like to minimize as much as possible.
Motivated by this application, we study the \prob{minimum coverage instrumentation} problem, where the goal is to find a minimum size subset of blocks to instrument such that the coverage of the remaining blocks in the graph can be inferred from the coverage status of the instrumented subset. Our main result is an algorithm to find an optimal instrumentation strategy and to carry out the inference in $O(|E|)$ time.
We also study variants of this basic problem in which we are interested in learning the coverage of edges instead of the nodes, or when we are only allowed to instrument edges instead of the nodes. \end{abstract}
\section{Introduction}
Code profiling is an important tool in modern compilers that unlocks downstream analysis and optimizations of binaries based on their run-time behavior. Arguably, the most commonly supported profiling primitive is frequency counts: Given a control-flow graph associated with a function, the compiler injects additional code to record how many times each node (representing a basic block of instructions) or each edge (representing jumps in the control flow) is executed~\cite{BL1994,HMPWY22}. This profile data can then be used to carry out profile-guided optimization of the binary to improve its run-time performance. For example, one can optimize the layout of basic blocks in memory to decrease the number of instruction cache misses incurred while fetching the code for execution and therefore improve the overall performance of the binary~\cite{PH90,NewellP20,MestrePS20}. Another prominent use-case of frequency counts is reducing the size of mobile applications via improved function outlining~\cite{CLB21,LeeHT22}.
In this paper we focus on the computational problem of profiling block coverage. Unlike the frequency profiling in which the goal is to count the frequency of every block, the coverage profiling asks whether a block has been executed during a session. Coverage instrumentation is important for identifying gaps in program test design~\cite{Agrawal94} and is used to guide optimizations in modern mobile compilers~\cite{LeeHT22}. Given a (directed) control-flow graph $G=(V, E)$ of a function, we add extra instrumentation code that records when a block is executed. While in principle one could use counts to infer coverage, this is not a practical approach as the overhead of instrumenting block frequencies is much higher than the overhead of block coverage\footnote{The overhead is higher both in terms of the binary size (a counter typically requires 4 bytes for an integer versus 1 byte for a boolean) as well as time (updating a counter typically requires two extra machine-level instructions to load and increment the count before its value is stored).}. Another simple strategy for coverage instrumentation is to add a (boolean) counter at every block. This might however, incur an unnecessary overhead, as not every block needs to be instrumented to determine the coverage status of every block in the function. For example, to learn the coverage of a chain of blocks (all having in- and out-degrees of $1$), it is sufficient to instrument the coverage of only one block in the chain. Thus our goal is to minimize the overhead as much as possible. In other words, we want to find a minimum size subset of nodes to instrument such that it is always possible to reconstruct the coverage of all nodes in the graph from the coverage of instrumented ones.
Our main result is an optimal algorithm for the problem
(formally defined in \cref{sect:prob}) that finds the smallest set of blocks to instrument and carries out the inference in $O(|E|)$ time. We also study a variant of this basic problem where we are interested in learning the coverage of edges instead of nodes, and another variant where we can instrument edges instead of nodes. For edge-coverage edge-instrumentation we are able to get an optimal algorithm and for vertex-coverage edge-instrumentation we develop an approximation algorithm.
\subsection{Related Work}
Profile-guided optimization is an essential step in modern compilers; we refer to \cite{BL1994,PH90,LeeHT22,HMPWY22} and references thereof for an overview of the field. A classical problem in the area is that of profiling binaries to compute frequency counts. The study of how many basic blocks need to be instrumented to compute frequency counts goes back to the 70's. Nahapetian~\cite{Nah1973} determined the necessary number of blocks to instrument via certain reduction rules of the control-flow graph. Knuth~and~Stevenson~\cite{KS1973,Knuth73} provide an alternative interpretation of the algorithm and a proof of optimality. The latter is based on computing the minimum spanning tree in the graph and is a part of most modern instrumentation-based profiling tools.
Ball and Larus~\cite{BL1994} define a hierarchy of frequency profiling problems. These problems have two dimensions: what is profiled in the control-flow graph (that is, basic blocks or jumps) and where the instrumentation code is placed (blocks or jumps). They denote the problems as follows. The vertex profiling problem is to determine block frequencies and is denoted by $Vprof(\cdot)$, while the edge profiling problem is to determine jump frequencies and is denoted $Eprof(\cdot)$. Given that one can place counters on blocks or jumps, there are two placement strategies, $Vcnt$ and $Ecnt$.
As such there are four problems with known algorithmic results: \begin{itemize}
\item $Eprof(Ecnt)$: Solved optimally by Knuth~\cite{Knuth73} using spanning trees.
\item $Vprof(Vcnt)$: Solved optimally by Nahapetian and Knuth~\textit{et al\@.\xspace}~\cite{Nah1973,KS1973} via a reduction to $Eprof(Ecnt)$.
\item $Eprof(Vcnt)$: There exist instances where the edge counts cannot be uniquely determined from vertex counts~\cite{Pro1982}, so this problem does not admit an algorithm.
\item $Vprof(Ecnt)$: Ball and Larus~\cite[Sect. 3.3]{BL1994} provide a characterization of when the set of edges is sufficient for determining all vertex frequencies. However, the complexity of the minimization problem remains open. \end{itemize}
Coverage profiling, on the other hand, has received much less attention in the literature and, to the best of our knowledge, has not been thoroughly studied from a theoretical point of view. And while it may be tempting to think that one could use an optimal solution for frequency count instrumentation as a basis for an optimal coverage instrumentation, the examples in \cref{app:examples} show that this is not a viable approach since a feasible solution for one problem need not be feasible for the other problem; indeed, the size of the optimal solution for these two problems can differ widely.
Agrawal~\cite{Agrawal94} considers several problems related to test coverage of control-flow graphs. The main focus of the work is to find a small subset of nodes $S$ such that any set of executions that covers $S$ also covers all other nodes, which is useful when designing tests. The paper also proposes an algorithm for finding a coverage instrumentation that runs in $O(|V||E|)$ time but does not provide a proof that the scheme has minimum size.
Tikir and Hollingsworth~\cite{TikirH05} propose using dynamic functions to reduce the profiling overhead. Their system periodically removes the instrumentation code that updates the coverage status of covered blocks since further executions do not provide additional information. As part of their system, they propose a linear-time heuristic for finding a coverage instrumentation scheme. However, their algorithm is not optimal and their approach is not technically feasible in some architectures.
Finally, on the practical side, various heuristics for coverage instrumentation have been implemented in compilers~\cite{asan} but the algorithms have no performance guarantees and may produce sub-optimal results.
\section{Problem Statement} \label{sect:prob}
Let $G=(V, E)$ be a directed graph representing a control-flow graph (\df{CFG}). We assume $G$ has two distinct nodes $s$ and $t$, called the \df{entry node} and the \df{terminal node}, such that $\deg^{in}(s) = 0$ and $\deg^{out}(t) = 0$. Furthermore, every node in $G$ is reachable from $s$ and every node in $G$ can reach $t$ via a (directed) path.
An execution trace of $G$ is a collection of (not necessarily simple) $s$-$t$ paths. The (full) \df{coverage profile} associated with an execution trace is a truth assignment $C: V \rightarrow \{\top, \bot\}$ where $C(u) = \top$ if and only if $u$ is spanned by one of the paths in the execution trace. We let $\mathcal{C}$ be the collection of all coverage profiles induced by some execution trace of $G$. A \df{partial coverage profile} $C_S$ is the restriction of $C$ to a subset $S \subset V$.
A \df{coverage instrumentation scheme} consists of a set of nodes $S \subset V$ and an efficiently computable inference function $\Psi$ that, given a partial coverage profile defined on $S$, outputs a full coverage profile. We say that the coverage instrumentation scheme $(S, \Psi)$ is \df{valid} if for any valid coverage profile $C \in \mathcal{C}$, we have $\Psi(C_S) = C$. Finally, we define the \df{size} of the scheme to be $|S|$.
The \prob{minimum block coverage instrumentation} problem is to select a minimum size coverage instrumentation scheme $(S, \Psi)$. While our main metric for evaluating a scheme is its size, $|S|$, we also care, as a secondary metric, about the time complexity of both finding the scheme and of evaluating $\Psi$.
\begin{example}
\label{ex:diamond}
Consider the following toy instance with
\(V = \{ v_1, v_2, v_3, v_4 \}\) and\linebreak \(E = \{ (v_1, v_2), (v_2, v_4), (v_1, v_3), (v_3, v_4) \}\).
\begin{center}
\begin{tikzpicture}[
block/.style={circle,draw=black,fill=white, inner sep = 2pt},
jump/.style={->},
scale=1.2
]
\draw (0, 2) node[block] (v1) {$v_1$};
\draw (-1, 1) node[block] (v2) {$v_2$};
\draw (1, 1) node[block] (v3) {$v_3$};
\draw (0, 0) node[block] (v4) {$v_4$};
\draw (v1) edge[jump] (v2);
\draw (v1) edge[jump] (v3);
\draw (v2) edge[jump] (v4);
\draw (v3) edge[jump] (v4);
\end{tikzpicture}
\end{center}
Notice that in this case
\[\mathcal{C} = \{ \emptyset, \{v_1, v_2, v_4\}, \{v_1, v_3, v_4\}, \{v_1, v_2, v_3, v_4 \} \}.\]
It is easy to see that the optimal solution is $S=\{v_2, v_3\}$ and for any partial coverage $C_S$ induced by $C \in \mathcal{C}$ we have
\[ \Psi(C_S) =
\begin{cases}
C_S \cup \{v_1 \rightarrow \bot, v_4 \rightarrow \bot\} & \text{if } C_S = \{v_2 \rightarrow \bot, v_3 \rightarrow \bot\} \\
C_S \cup \{v_1 \rightarrow \top, v_4 \rightarrow \top\} & \text{o.w.}
\end{cases} \] \end{example}
\subsection{Our results}
While the main focus of this paper is on \prob{minimum block coverage instrumentation}, we also consider related variants of the main problem where we want to compute edge coverage and/or where we are allowed to instrument edges. Analogously to the hierarchy of Ball and Larus~\cite{BL1994} for frequency profiling, this gives rise to four problems, which we denote by \prob{$X$-cov $Y$-instr} for $X, Y \in \{V, E\}$, where we want to compute coverage data of $X$ while instrumenting a subset of $Y$.
Our results for these variants are as follows: \begin{enumerate}
\item \prob{$V$-cov $V$-instr}: In \cref{sec:main} we give an optimal linear-time algorithm.
\item \prob{$E$-cov $E$-instr}: In \cref{sec:reduction} we show a reduction to \prob{$V$-cov $V$-instr}.
\item \prob{$E$-cov $V$-instr}: In \cref{sec:impossible} we show that it is not possible in general to infer edge coverage from vertex coverage data.
\item \prob{$V$-cov $E$-instr}: In \cref{sec:approximation} we give a 2-approximation algorithm. \end{enumerate}
\section{Algorithmic Framework}
\label{sec:framework}
Before we describe and prove the correctness of our algorithm, we need to develop some basic graph theoretic concepts.
\begin{definition}
For any vertex $u \in V$ we let
\begin{itemize}
\item $A(u)$ be the set of nodes that can be reached from $s$ while avoiding $u$, and
\item $B(u)$ be the set of nodes that can reach $t$ while avoiding $u$.
\end{itemize} \end{definition}
Observe that $A(s) = \emptyset$, $u \notin A(u) \cup B(u)$ for any $u \in V$, and $B(t) = \emptyset$. Note that we can compute $A(u)$ and $B(u)$ in $O(|E|)$ time for a fixed $u \in V$ using a modified BFS or DFS search. We state a simple observation about these sets that will be useful later on.
\begin{lemma}
\label{lem:AorA-BorB}
For any two vertices $u, v \in V$ we have:
\begin{itemize}
\item $v \in A(u)$ or $u \in A(v)$, and
\item $v \in B(u)$ or $u \in B(v)$.
\end{itemize} \end{lemma}
\begin{proof}
Let $P$ be a simple path $s$-$u$. If $v \notin P$ then $u \in A(v)$. Otherwise, trim the path to get an $s$-$v$ path that avoids $u$, which shows that $v \in A(u)$. This proves the first statement. The second statement is proved analogously starting with a $u$-$t$ path. \end{proof}
\subsection{Ambiguous nodes}
\begin{definition}
We say a node $u$ is \df{ambiguous} if
\begin{itemize}
\item $ \exists \, x \in N^{in}(u) \cap (A(u) \cap B(u))$, and
\item $ \exists \, y \in N^{out}(u) \cap (A(u) \cap B(u))$.
\end{itemize} \end{definition}
\begin{lemma}
\label{lem:need-ambiguous}
Let $(S, \Psi)$ be a valid scheme then every ambiguous node $u \in V$ must belong to $S$. \end{lemma}
\begin{proof}
Let $x \in N^{in}(u) \cap (A(u) \cap B(u))$; that is, there exist $s$-$x$ and $x$-$t$ paths avoiding $u$. We can concatenate both paths to form an $s$-$t$ path $P_x$ going through $x$ that avoids $u$. The same line of reasoning applied to $y \in N^{out}(u) \cap (A(u) \cap B(u))$ yields another $s$-$t$ path $P_y$ going through $y$ avoiding $u$. Finally, consider concatenating the $s$-$x$ path, followed by $u$, followed by the $y$-$t$ path, and call $P_u$ the resulting $s$-$t$ path. \cref{fig:ambiguous} shows an example instance of what these paths may look like.
\begin{figure}
\caption{
An example of paths $P_x$ (in red), $P_y$ (in orange), and $P_u$ (in dotted black).
}
\label{fig:ambiguous}
\end{figure}
Let $D$ and $D'$ be the coverage profiles associated with $\{ P_x, P_y \}$ and $\{P_x, P_y, P_u \}$ respectively. Notice that $D(v) = D'(v)$ for $v \neq u$ and $D(u) \neq D'(u)$. Therefore, even if we knew the coverage of every node in $V \setminus \{u\}$, it is not possible to differentiate between $D$ and $D'$ unless $u \in S$. \end{proof}
It is worth noting that while every ambiguous node must be part of a valid scheme, these nodes by themselves may not form a valid scheme, in which case additional nodes are needed.
\begin{example}
Consider the following example with
\(V = \{ v_1, v_2, v_3 \}\) and\linebreak \(E = \{ (v_1, v_2), (v_1, v_3), (v_2, v_3) \}\).
\begin{center}
\begin{tikzpicture}[
block/.style={circle,draw=black,fill=white, inner sep = 2pt},
jump/.style={->},
scale=1.2
]
\draw (0, 2) node[block] (v1) {$v_1$};
\draw (-1, 1) node[block] (v2) {$v_2$};
\draw (0, 0) node[block] (v3) {$v_3$};
\draw (v1) edge[jump] (v2);
\draw (v1) edge[jump] (v3);
\draw (v2) edge[jump] (v3);
\end{tikzpicture}
\end{center}
Notice that in this case
\[\mathcal{C} = \{ \emptyset, \{v_1, v_3\}, \{v_1, v_2, v_3\}\}.\]
and $v_2$ is the only ambiguous node. However, $\{v_2\}$ is not enough to distinguish between profiles $\{ v_1 \rightarrow \bot, v_2 \rightarrow \bot, v_3 \rightarrow \bot\}$ and $\{ v_1 \rightarrow \top, v_2 \rightarrow \bot, v_3 \rightarrow \top\}$. Thus another node needs to be instrumented (either $v_1$ or $v_3$). \end{example}
\subsection{Forward and backward inference}
At the heart of our method is the concept of the forward and backward inference graphs, which we define next.
\begin{definition}
We say that a node $u \in V$ is \df{forward inferable} if $N^{out}(u) \setminus A(u) \neq \emptyset$ and $N^{out}(u) \cap (A(u) \cap B(u)) = \emptyset$. And we define the forward inference graph $(V, F)$ where $(u, v) \in F$ if $u$ is forward inferable and $v \in N^{out}(u) \setminus A(u)$. \end{definition}
\begin{definition}
We say that a node $u \in V$ is \df{backward inferable} if $N^{in}(u) \setminus B(u) \neq \emptyset$ and $N^{in}(u) \cap (A(u) \cap B(u)) = \emptyset$. And we define the backward inference graph $(V, D)$ where $(u, v) \in D$ if $u$ is backward inferable and $v \in N^{in}(u) \setminus B(u)$. \end{definition}
Notice how the edges in the backward inference graph reverse the direction of the input graph edges that are used to make the inference. This is because we want the inference graphs to capture the precedence constraints needed to make the inferences.
\begin{lemma}
\label{lem:one-step}
Suppose that $u$ is forward (backward) inferable, and let $X$ be the set of successors of $u$ in the forward (backward) inference graph. Let $C$ be a valid coverage profile, then
\[ \bigvee_{v \in X} C(v) \equiv C(u). \] \end{lemma}
\begin{proof}
We prove the forward inference case only as the backward inference case is analogous. Let $P$ be an $s$-$t$ path going through a vertex $v \in X$. Since $X$ is non-empty, we know that such a path exists. Since $X \cap A(u) = \emptyset$, it follows that $u \in P$. Therefore, if $C(v) = \top$ then $C(u) = \top$.
Now let $Q$ be an $s$-$t$ path going through $u$ and let $v$ be the vertex right after the last occurrence of $u$ in $Q$. This means that $v \in N^{out}(u) \cap B(u)$. Since $u$ is forward inferable, it follows that $v \notin A(u)$. Therefore, if $C(u) = \top$ then $\exists\, v \in X: C(v) = \top$. \end{proof}
An \emph{inference scheme} is a partition $(\alpha, \phi, \beta)$ of $V$ into three parts where $\alpha$ is the set of instrumented nodes, $\phi$ is the set of forward inferable nodes, and $\beta$ is the set of backward inferable nodes.
The inference graph associated with an inference scheme $(\alpha, \phi, \beta)$ is the directed graph $H$ where for each $u \in \phi$, $\delta^{out}_H(u)$ is the set of forward inference edges out of $u$, and for each $u \in \beta$, $\delta^{out}_H(u)$ is the set of backward inference edges out of $u$. We say the scheme is valid if its associated inference graph $H$ is acyclic.
Finally, we associate with an inference scheme $(\alpha, \phi, \beta)$ a coverage instrumentation scheme $(S, \Psi)$ where $S = \alpha$ and $\Psi(C_S)$ is the result of starting from the partial coverage profile $C_S$ and iteratively applying \cref{lem:one-step} to those nodes in $\phi \cup \beta$ in inverse topological order $v_1, v_2, \ldots, v_n$ in $H$ (edges go from right to left).
\begin{lemma}
\label{lem:valid-inference}
Given a valid inference scheme $(\alpha, \phi, \beta)$, its associated coverage instrumentation scheme $(S, \Psi)$ is also valid. Furthermore, for any coverage profile $C$, the function $\Psi(C_S)$ can be evaluated in $O(|E|)$ time given the inference graph $H$. \end{lemma}
\begin{proof}
Let $C^{(i)}$ be the partial coverage profile resulting in taking $C^{(i-1)}$ and adding the result of processing vertex $v_i$. Namely, if $v_i \in \alpha$ then $C^{(i)}(v_i) = C_S(v_i)$, and if $v_i \in \phi \cup \beta$ then $C^{(i)}(v_i)$ is set using \cref{lem:one-step}. The correctness rests on the fact that we can always apply \cref{lem:one-step} at each step because all the outgoing neighbors of $v_i$ in the inference graph upon which $v_i$ depends for its inference have been already processed earlier because the order of processing is inverse topological order.
To see the claim about the time complexity of evaluating $\Psi$, we note that computing the needed topological order can be done in $O(|E|)$ time and that once we have that the iterative process also runs in linear time. \end{proof}
Our approach is to show that there always exists an inference scheme $(\alpha, \phi, \beta)$ that induces an optimal coverage instrumentation scheme. And that such scheme can be computed efficiently in $O(|E|)$ time.
\section{Optimal $V$-coverage $V$-instrumentation} \label{sec:main}
In this section we design an algorithm to compute an optimal coverage instrumentation scheme. For now, we are only concerned about its correctness and leave time complexity considerations for later. To this end, we study the cycle structure of inference graphs, which we will leverage to design our algorithm.
\begin{lemma}
\label{lem:acyclic}
The forward (backward) inference graph is acyclic. \end{lemma}
\begin{proof}
We give the proof for the forward inference graph as the proof for backward inference is analogous.
Suppose, for the sake of contradiction, that the graph has a cycle $v_1, v_2, \ldots, v_k$. For some $i$, there must exist an $s$-$v_i$ path that avoids the rest of the cycle. This means that $v_i \in A(v_j)$ for all $j\neq i$. But since $(v_{i-1}, v_i)$ is a forward inference edge, we have $v_i \in N^{out}(v_{i-1}) \setminus A(v_{i-1})$, which implies $v_i \notin A(v_{i-1})$, contradicting the fact that $v_i \in A(v_{i-1})$. \end{proof}
\begin{lemma}
\label{lem:short-cycles}
Let $H$ be the union of the forward and backward inference graphs. Then every simple cycle in $H$ has at most two nodes. \end{lemma}
\begin{proof}
First, we note that because of \cref{lem:acyclic}, it must be the case that the cycle uses edges from both the forward and the backward inference graphs. That is, for some even $\ell$, there are nodes $u_0, u_1, \ldots, u_{\ell - 1} \in C$ such that the segment of the cycle from $u_i$ to $u_{i+1}$ is made up of forward edges if $i$ is even and backward edges if $i$ is odd; in this context, we use the notation $u_\ell = u_0$.
First, let us consider the case where we alternate from forward to backward more than once; that is, $\ell > 2$. Without loss of generality suppose that there exists a $s$-$u_0$ path avoiding all other $u_2, u_4,\ldots$ (we can always relabel the nodes so that is the case). In particular, this means that there exists a path $P$ from $s$ to $u_1$ via $u_0$ that avoids $u_2$. This in turn means that there exists a backward inference edge $(x, y)$ along the segment of the cycle from $u_1$ to $u_2$ such that $x \in A(y)$ which contradicts the definition of backward inference edge.
Now, let us consider the case where we alternate only once; that is, $\ell = 2$. Suppose that one of the two segments, say $u_0$-$u_1$, has two or more edges. Then we take a path from $s$ to $u_0$ (which must avoid at least the node ahead of $u_0$ in the cycle) and then follow the edges in the input graph inducing the reverse edges in the segment $u_1$-$u_0$. It follows that there must exist a forward inference edge $(x, y)$ along the segment $u_0$-$u_1$ such that $y \in A(x)$, which contradicts the definition of forward edge. The case when the long segment is made up of backward inference edges is handled in a similar fashion.
The only case that remains to consider is when we alternate only once and the segments $u_0$-$u_1$ and $u_1$-$u_0$ consist of a single edge. In this case the cycle consists of only two nodes, as prescribed by the lemma statement. \end{proof}
A simple consequence of \cref{lem:short-cycles} is that a connected component in the union of the forward and backward inference graphs has a tree-like structure where every edge in the tree induces a pair of anti-parallel edges in the connected component. Our ultimate goal is to select an inference scheme that breaks these cycles by judiciously choosing to do either forward or backward inferences or by instrumenting additional nodes. Before we can do that, we need to better understand the structure of these connected components.
\begin{lemma}
\label{lem:path}
Let $H$ be the union of the forward and backward inference graphs and $C$ be a connected component in $H$. Then $C$ induces a directed path in the forward inference graph, and the reverse path in the backward inference graph. There are no further edges in $H[C]$. \end{lemma}
\begin{proof}
As already explained, connected components are made up of a collection of pairs of anti-parallel edges connected forming a tree-like structure. If the structure is not a tree then there must exist a node that has two incoming forward edges or two outgoing forward edges. Let us consider the former case; namely, there exist two forward edges $(x, u)$ and $(y, u)$. From \cref{lem:AorA-BorB} we know that $x \in A(y)$ or $y \in A(x)$; suppose without loss of generality the former is true. That is, there exists an $s$-$x$ path that avoids $y$, which we can extend by appending the edge $(x, u)$ thus showing that $u \in A(y)$. But this contradicts the fact that $(y, u)$ is a forward inference edge.
Now let us consider the case where there exist two forward inference edges $(u, x)$ and $(u, y)$, or equivalently, that $(x, u)$ and $(y, u)$ are backward inference edges. From \cref{lem:AorA-BorB} we know that $x \in B(y)$ or $y \in B(x)$; suppose without loss of generality the former is true. That is, there exists a $x$-$t$ path that avoids $y$, which we can extend by pre-pending the edge $(u, x)$ and so $u \in B(y)$. But this contradicts the fact that $(y, u)$ is a backward inference edge.
Therefore, the tree-like structure of $C$ must be a path and must consist of one forward inference path and the reversed backward inference path. Otherwise, we are back to the cases we just ruled out. \end{proof}
Everything is in place to state our algorithm \alg{optimal-instrumentation}. Remember our goal is to construct an inference scheme $(\alpha, \phi, \beta)$. As a first step we identify all ambiguous nodes in $G$ and add them to $\alpha$. Then build the union of the forward and backward inference graphs and compute its connected components. Each connected component $C$ consists of a forward inference path $v_1, v_2, \ldots, v_k$ and a backward inference path $v_k, v_{k-1}, \ldots, v_1$. For a trivial component where $k=1$, if $v_1$ is ambiguous, there is nothing to do; otherwise, we add it to $\phi$ or $\beta$ depending on whether it is forward or backward inferable (if both options are possible, pick one arbitrarily.) For non-trivial components where $k > 1$, if $v_1$ happens to be backward inferable (from nodes outside the component) then add $C$ to $\beta$. Otherwise, if $v_k$ happens to be forward inferable (from nodes outside the component) then we add $C$ to $\phi$. Finally, if neither $v_1$ is backward inferable nor $v_k$ is forward inferable, we add $v_1$ to $\alpha$ and $\{v_2, \ldots, v_{k}\}$ to $\beta$. The output of the algorithm is the coverage instrumentation scheme $(S, \Psi)$ associated with $(\alpha, \phi, \beta)$.
\begin{theorem}
\label{thm:optimal}
\alg{optimal-instrumentation} returns a valid $V$-coverage $V$-instrumentation scheme that has minimum size. \end{theorem}
\begin{proof}
First we show that $(\alpha, \phi, \beta)$ is a valid inference scheme. This boils down to arguing that the inference graph associated with the inference scheme is acyclic. Because of the special structure of the connected components in the union of the forward and backward inference graphs, and the way each component is dealt with, we are guaranteed that no cycles are present in the inference graph. Therefore, the inference scheme is valid. \cref{lem:valid-inference} guarantees that the associated coverage instrumentation scheme is also valid.
Finally, we argue that the scheme has minimum size. In \cref{lem:need-ambiguous} we already argued that any valid instrumentation must use all ambiguous nodes. Let $C$ be a connected component where our solution instruments the head of the component. Let us argue that a coverage instrumentation scheme that does not instrument a single node in $C$ must be invalid. The argument is similar but applied to the first node in $C$ (that is not backward inferable) and the last node in $C$ (that is not forward inferable).
Let $x \in N^{in}(v_1) \cap (A(v_1) \cap B(v_1))$; that is, there exist $s$-$x$ and $x$-$t$ paths avoiding $v_1$. We can concatenate both paths to form an $s$-$t$ path $P_x$ going through $x$ that avoids $v_1$. This line of reasoning applied to $y \in N^{out}(v_k) \cap (A(v_k) \cap B(v_k))$ yields another $s$-$t$ path $P_y$ going through $y$ avoiding $v_k$. It's not hard to see that both paths avoid $C$ altogether. Finally, consider concatenating the $s$-$x$ path, followed by $C$, followed by the $y$-$t$ path, and call $P_C$ the resulting $s$-$t$ path. \cref{fig:connected-component} shows an example instance of what these paths may look like.
\begin{figure}
\caption{
An example showing the paths that certify the need to instrument at least one block inside a connected component $C=\{v_1, \ldots, v_k\}$: Paths $P_{x}$ (in red), $P_{y}$ (in blue), and $P_C$ (in dotted black).
}
\label{fig:connected-component}
\end{figure}
Let $D$ and $D'$ be the coverage profiles associated with $\{ P_{x}, P_{y} \}$ and $\{P_{x}, P_{y}, P_{C} \}$ respectively. Notice that $D(v) = D'(v)$ for $v \notin C$ and $D(v) \neq D'(v)$ for all $v \in C$. Therefore, even if we knew the coverage of every node in $V \setminus C$, it is not possible to differentiate between $D$ and $D'$ unless we instrument at least one node in $C$. \end{proof}
\subsection{Time complexity}
Finally, we turn our attention to the time complexity of our algorithm.
\begin{theorem}
\alg{optimal-instrumentation} can be implemented to run in $O(|E|)$ time. \end{theorem}
\begin{proof}
Note that if we could perform $O(1)$-time membership queries over the $A$ and $B$ sets, then the rest of the algorithm can be implemented to run in linear time: computing the forward and backward inference graphs, identifying the strongly connected components in the union of those graphs, and deciding on the instrumentation of those components can also be done in $O(|E|)$ time.
We focus our attention on implementing membership queries of the sets $\{A(u) : u \in V \}$, as queries on the set $\{B(u) : u \in V\}$ can be implementing in a similar way by reversing the direction of the edges and using the terminal $t$ as the entry node.
We use dominator trees~\cite{Prosser59}, which offer a compact representation of the dominance relation: a node $x$ dominates a node $y$ if and only if all paths from the entry node to $y$ go through $x$. The dominator tree is an out-branching rooted at the entry node such that $x$ dominates $y$ if and only if $y$ is a descendant of $x$. While the naive algorithm for computing a dominator tree takes $O(|V|^2)$ time, more efficient $O(|E|)$-time algorithms exist~\cite{AlstrupHLT99,BuchsbaumGKRTW08,BuchsbaumKRW05,GeorgiadisT04}.
Notice that $v \in A(u)$ if and only if $u$ does not dominate $v$. Thus, testing if $v \in A(u)$ can be translated to the query of whether $u$ is not an ancestor of $v$ in the dominator tree. This last task can be done in $O(1)$ time if we allow $O(|V|)$ time to pre-process the dominator tree using standard techniques. \end{proof}
\section{Optimal $E$-coverage $E$-instrumentation} \label{sec:reduction}
In this section we show a reduction from an instance of \prob{$E$-cov $E$-instr} to an instance of \prob{$V$-cov $V$-instr} such that an optimal solution for the latter can be transformed into an optimal solution for the former.
Given an input graph $G=(V, E)$, we construct an auxiliary graph $H$ by subdividing every edge in $E$. In other words, $H=(V \cup V_E, A)$ where $V_E = \{v_e \mid e \in E\}$ and \begin{align*}
A = \bigcup_{e = (u, v) \in E} \left\{(u, v_e), (v_e, v)\right\}. \end{align*}
The first thing to note is that solving the $V$-coverage $V$-instrumentation problem in $H$ yields a solution to the problem of learning the coverage of both $V$ and $E$ by instrumenting a subset of $V$ and a subset of $E$. This is not exactly the problem we want to solve, but as we shall shortly argue, there is no additional cost in learning the coverage of $V$, and that even though we have the freedom of instrumenting vertices in $V$, we can always find an optimal solution that only instruments a subset of $E$.
\begin{lemma}
Suppose we knew the coverage status in $H$ of every vertex in $V_E$, then we can infer the coverage status of the remaining vertices in $V$. \end{lemma}
\begin{proof}
For every vertex $u \in V$, the coverage status of $u$ equals the disjunction of the coverage status of edges incident on $u$. \end{proof}
Therefore, the cost of a vertex-coverage vertex-instrumentation in $H$ is the same as the cost of doing a vertex-instrumentation to learn the coverage of only $V_E$. With that out of the way, let us now reason about the vertices in $H$ that the optimal solution instruments.
\begin{lemma}
\label{lem:never-ambiguous}
For any $u \in V$, the corresponding node in $H$ is never ambiguous. \end{lemma}
\begin{proof}
This is because the vertices in $N^{in}_H(u) = \{ v_{(x,u)} : x \in N^{in}_G(u) \}$ only have $u$ as a successor, so $N^{in}_H(u) \cap B_H(u) = \emptyset$. Similarly, $N^{out}_H(u) = \{ v_{(u, x)} : x \in N^{out}_G(u) \}$ only have $u$ as a predecessor, so $N^{out}_H(u) \cap A_H(u) = \emptyset$. \end{proof}
This means that the only way that when running algorithm \alg{optimal-instrumentation} on $H$ we instrument a vertex $u \in V$ is that the node is part of a connected component $C$ that needs to be instrumented. We use the following observation to replace each of those vertices with an equivalent edge.
\begin{lemma}
\label{lem:alternating-component}
Let $C$ be a strongly connected component in the inference graph of $H$ with $|C| \geq 2$. Then $C$ consists of a path alternating between vertices in $V$ and $V_E$. \end{lemma}
\begin{proof}
By \cref{lem:path} $C$ is a path in $H$. Notice that $H$ is a bipartite graph with shores $V$ and $V_E$. Therefore, since every path in $H$ must alternate between $V$ and $V_E$, so must $C$. \end{proof}
Let $S$ be an optimal vertex-coverage vertex-instrumentation for $H$. If $S \cap V = \emptyset$ then we are done as $S \subseteq V_E$ since this corresponds to a pure $E$-instrumentation in $G$. Otherwise, if $u \in S \cap V$ then by \cref{lem:never-ambiguous}, it must be that $u$ belongs to some strongly connected component $C$ with $|C| \geq 2$ in the inference graph of $H$. Finally, we swap out $u$ from $S$ with another vertex in $C \cap V_E$, which by \cref{lem:alternating-component} we are guaranteed to exist.
Even easier, we can implement \alg{optimal-instrumentation} to avoid inadvertently picking a vertex from $V$ when trying to instrument each connected component. Let \alg{reduction-coverage} be the algorithm that applies this modified \alg{optimal-instrumentation} to $H$. Putting everything together, we obtain the following result.
\begin{theorem}
\label{thm:E-cov-E-instr}
\alg{reduction-coverage} returns an optimal instrumentation scheme for edge-coverage edge-instrumentation in $O(|E|)$ time. \end{theorem}
\section{Impossibility of $E$-coverage $V$-instrumentation}
\label{sec:impossible}
Let $G$ be a layered graph with the following layers $\{v_1\}$, $\{v_2, v_3\}$, $\{v_4, v_5\}$, and $\{v_6\}$, and where every layer is fully connected to the next as shown in \cref{fig:impossible}.
\begin{figure}
\caption{
An example instance where it is impossible to infer edge coverage from vertex coverage data.
}
\label{fig:impossible}
\end{figure}
Consider two execution traces in $G$: \[ \{ (v_1, v_2, v_4, v_6), (v_1, v_3, v_5, v_6) \} \text{ and } \{ (v_1, v_2, v_5, v_6), (v_1, v_3, v_4, v_6) \}.\] These two execution traces have different edge coverage profiles. However, they share the same vertex coverage profile, so it is impossible to differentiate between the two only using this data.
\section{Approximate $V$-coverage $E$-instrumentation}
\label{sec:approximation}
In this section we develop a 2-approximation algorithm for the problem of learning the coverage status of vertices using edge coverage instrumentation. Our algorithm is based on the following observation about ambiguous vertices.
\begin{lemma}
Let $u\in V$ be an ambiguous vertex. Let $X = N^{in}(u) \cap (A(u) \cap B(u))$ and $Y = N^{out}(u) \cap (A(u) \cap B(u))$. Then every valid edge instrumentation scheme must instrument either $(X, u)$ or $(u, Y)$. \end{lemma}
\begin{proof}
First we note that both $X$ and $Y$ are non-empty by virtue of $u$ being ambiguous. Now suppose that there exist $x \in X$ and $y \in Y$ such that we do not instrument edges $(x, u)$ or $(u, y)$. Using the same logic (and the same example) as in \cref{fig:ambiguous}, we can conclude that there exist two execution traces that only differ in the coverage status of these two edges and $u$. Thus, if we assume that the instrumentation scheme is valid, it must be the case that we either instrument every edge in $(X, u)$ or $(u, Y)$. \end{proof}
Our strategy is to instrument the set ($X$ or $Y$) with minimum cardinality. Notice that our choice is locally optimal in the sense that the optimal solution needs to instrument at least that many edges incident on $u$.
We use the same concept of inference graph that we developed in \cref{sec:framework}. By \cref{lem:path} we know that the only cycles present in the inference graph are induced by the edges of a directed path in the input graph. Following the same argument we used in \cref{thm:optimal}, we get that if we do not instrument a single edge incident on the vertices in the path it is not possible to infer their coverage status. On the other hand, instrumenting a single edge along the path is enough to infer the status of the whole chain. Again, our choice is locally optimal.
We call this algorithm \alg{local-instrumentation}. The next theorem bounds the approximation ratio it can attain.
\begin{theorem}
\alg{local-instrumentation} returns a valid $V$-coverage $E$-instrumentation scheme that is 2-approximate. \end{theorem}
\begin{proof}
The validity of the scheme follows from the observations already made. We use the local ratio technique to argue that it is a 2-approximation. For each ambiguous node $u$ we construct an edge weight function $w$ where
\[ w(e) = \begin{cases}
1 & u \in e \\
0 & o.w.
\end{cases}
\]
For strongly connected components $C$ in the inference graph, we similarly define an edge weight function $w$ where
\[ w(e) = \begin{cases}
1 & \exists u \in C : u \in e \\
0 & o.w.
\end{cases} \]
Let $w_1, w_2, \ldots$ be the edge functions defined in this way. Furthermore, let $F$ be the edges our algorithm decides to instrument and $O$ be the edges instrumented by an optimal solution. It follows that
\[ |F| \leq \sum_i w_i(F) \leq \sum_i w_i(O) \leq 2 |O|,\]
where the last inequality follows from the fact that every edge $(u, v) \in O$ can contribute to the weight of the edge function defined for $u$ and for $v$. \end{proof}
\section{Conclusion}
This paper provides a thorough theoretical study of the \prob{minimum coverage instrumentation} problem. Although we are able to provide definite answers to some of the variants considered, there are several problems worth studying that remain open:
\begin{itemize}
\item What is the computational complexity of $V$-coverage $E$-instrumentation?
\item In certain applications, one might be interested in learning the coverage status of a subset of
the nodes, $S \subseteq V$. Given such a subset, we can define the $S$-coverage problem in the natural way:
find the minimum subset of nodes (or edges) to instrument in order to be able to infer coverage of $S$.
What is the computational complexity of the problem?
\item In this paper, we focused on control-flow graphs that have a source node and a terminal node
such that all executions start at the source and end at the terminal. However, compilers also operate with other
types of graphs, such as \df{call graphs}, which represent calls between different functions in
a binary. Such a graph does not necessarily have a terminal node (for programs running continuously) and functions return control to the caller when they are done (which is not captured in our model). It would be interesting to adjust the model and the
algorithms for such an application. \end{itemize}
We conclude by mentioning that algorithm \alg{optimal-coverage} has been implemented in the open-source LLVM compiler project~\cite{mbc}. An extensive evaluation on real-world benchmarks indicates that only $\approx 60\%$ of basic blocks need to be instrumented.
\appendix
\section{Examples} \label{app:examples}
In this section we consider example control-flow graphs where the sizes of the optimal solutions for coverage instrumentation and frequency count instrumentation differ.
\begin{example}
Let $G$ consist of a path $v_1, v_2, \ldots, v_k$ with self loops at every node. We claim that the size of the optimal block coverage instrumentation of $G$ is 1, whereas the size of its optimal block frequency count instrumentation is $k$.
\begin{center}
\begin{tikzpicture}[
block/.style={circle,draw=black,fill=white, inner sep = 2pt},
jump/.style={->},
scale=1.2
]
\path (0, 0) node[block] (v1) {$v_1$}
-- ++(1, 0) node[block] (v2) {$v_2$}
-- ++(1, 0) node[block] (v3) {$v_3$}
-- ++(1, 0) node (dots) {$\cdots$}
-- ++(1, 0) node[block] (vk) {$v_k$};
\draw (v1) edge[jump] (v2);
\draw (v1) edge[jump, loop above] (v1);
\draw (v2) edge[jump] (v3);
\draw (v2) edge[jump, loop above] (v2);
\draw (v3) edge[jump] (dots);
\draw (v3) edge[jump, loop above] (v3);
\draw (dots) edge[jump] (vk);
\draw (vk) edge[jump, loop above] (vk);
\end{tikzpicture}
\end{center}
Indeed, it is easy to see that the optimal solution for block coverage instrumentation requires a single block as the coverage status of all blocks must be always the same. On the other hand frequency counter instrumentation requires every single block since the self-loops effectively mean that the number of times each block is executed is independent of the other blocks. \end{example}
\begin{example}
Let $G$ be a series parallel graph resulting from doing a serial composition of the diamond graph in \cref{ex:diamond} with itself $k$ times; namely, the graph is a sequence of diamonds as the one shown below. We claim that the optimal block coverage solution has size $2k$ whereas the optimal frequency count instrumentation has size $k+1$.
\begin{center}
\begin{tikzpicture}[
block/.style={circle,draw=black,fill=white, inner sep = 2pt},
jump/.style={->},
scale=1.2
]
\draw (0, 0) node[block] (v1) {$v_1$};
\draw (1, 1) node[block] (v2) {$v_2$};
\draw (1, -1) node[block] (v3) {$v_3$};
\draw (2, 0) node[block] (v4) {$v_4$};
\draw (3, 1) node[block] (v5) {$v_5$};
\draw (3, -1) node[block] (v6) {$v_6$};
\draw (4, 0) node[block] (v7) {$v_7$};
\draw (5, 0) node (dots) {$\cdots$};
\draw (6, 0) node[block,label=right:$v_{3k-2}$, inner sep=6pt] (v8) {};
\draw (7, 1) node[block, label=right:$v_{3k-1}$, inner sep=6pt] (v9) {};
\draw (7, -1) node[block,label=right:$v_{3k}$, inner sep=6pt] (v10) {};
\draw (8, 0) node[block,label=right:$v_{3k+1}$, inner sep=6pt] (v11) {};
\draw (v1) edge[jump] (v2);
\draw (v1) edge[jump] (v3);
\draw (v2) edge[jump] (v4);
\draw (v3) edge[jump] (v4);
\draw (v4) edge[jump] (v5);
\draw (v4) edge[jump] (v6);
\draw (v5) edge[jump] (v7);
\draw (v6) edge[jump] (v7);
\draw (v8) edge[jump] (v9);
\draw (v8) edge[jump] (v10);
\draw (v9) edge[jump] (v11);
\draw (v10) edge[jump] (v11);
\end{tikzpicture}
\end{center}
Indeed, all the vertices with in-degree 1 are ambiguous and there are $2k$ such vertices (2 vertices per diamond block). On the other hand, instrumenting the entry node plus a single node with in-degree 1 per diamond is enough to recover the counts of all nodes. \end{example}
\end{document} |
\begin{document}
\title{Controlling Costs: Feature Selection on a Budget}
\begin{abstract}
The traditional framework for feature selection treats all features as costing the same amount. However, in reality, a scientist often has considerable discretion regarding which variables to measure, and the decision involves a tradeoff between model accuracy and cost (where cost can refer to money, time, difficulty, or intrusiveness).
In particular, unnecessarily including an expensive feature in a model is worse than unnecessarily including a cheap feature. We propose a procedure, which we call cheap knockoffs,
for performing feature selection in a cost-conscious manner.
The key idea behind our method is to force higher cost features to compete with more knockoffs than cheaper features. We derive an upper bound on the weighted false discovery proportion associated with this procedure, which corresponds to the fraction of the feature cost that is wasted on unimportant features. We prove that this bound holds simultaneously with high probability over a path of selected variable sets of increasing size. A user may thus select a set of features based, for example, on the overall budget, while knowing that no more than a particular fraction of feature cost is wasted.
We investigate, through simulation and a biomedical application, the practical importance of incorporating cost considerations into the feature selection process. \end{abstract}
\section{Introduction} \label{sec:introduction} The traditional framework for feature selection ignores the fact that, in practice, different features may have different costs. In reality, practitioners must balance the opposing demands of model accuracy and budget considerations. For example, as we will see in Section \ref{sec:data}, in medical diagnosis, doctors often have a wide range of options for what features to measure: a laboratory result may provide highly relevant information yet is expensive in terms of money, time, and the burden on patients; a simple questionnaire or even demographic information may be less informative but incurs lower costs. When a questionnaire would suffice for forming an accurate diagnosis, performing a laboratory examination would be practically misguided. Likewise, how should we decide whether to sequence a patient's entire genome or simply to conduct some cheap lab tests? This same challenge appears in other domains. For example, to determine the veracity of an online news article, do we require high-quality features based on an expert's reading, or do features derived from natural language processing suffice?
Consider the response of interest $Y$ and a set of features $X_1, \ldots, X_p$, where for each feature $X_j$, there is an associated cost $\omega_j > 0$. In this paper, we consider a very general model where
$Y | X_1, \ldots, X_p$
follows an arbitrary distribution, and we assume that the joint distribution of $X_1, \ldots, X_p$ is known. Let $\mathcal{H}_0$ be the set of irrelevant features, i.e., $j \in \mathcal{H}_0$ if and only if $X_j$ is independent of $Y$ conditional on the other variables $\{X_k: k\neq j\}$ \citep[Definition 1 in][]{candes2018panning}. Given a set of selected features $\mathcal{R} \subseteq \{1, \ldots, p\}$, the false discovery proportion ($\textsc{FDP}$) is defined as $|\mathcal{R} \cap \mathcal{H}_0| / |\mathcal{R}|$, i.e., it is the fraction of selected features that are unnecessarily included.
\citet{barber2015} proposed the knockoff filter, a feature selection procedure that provably controls the false discovery rate, defined as $\mathrm{E}(\textsc{FDP})$. For each feature, they construct a knockoff feature, i.e., a carefully constructed fake copy of that feature. A feature is then only selected if it shows considerably more association with the response than its knockoff counterpart. \citet{katsevich2018towards} showed that one can directly upper-bound the false discovery proportion, with high probability, simultaneously for an entire path of selected models, $\mathcal{R}_1, \ldots, \mathcal{R}_p$, where $\mathcal{R}_k \subseteq \mathcal{R}_{k + 1}$ for all $k$.
However, the false discovery proportion and the false discovery rate put all features on an equal footing, and do not consider their costs $\omega_1, \ldots, \omega_p$. To overcome this shortcoming, the weighted false discovery proportion ($\textsc{wFDP}$; \citealt{benjamini1997multiple}) is defined as $ \textsc{wFDP} (\mathcal{R}) = C(\mathcal{R} \cap \mathcal{H}_0) / C(\mathcal{R})$, i.e., the fraction of the total cost that is wasted, where $C(\mathcal{A}) = \sum_{j \in \mathcal{A}} \omega_j$ is the cost of measuring the features in $\mathcal{A}$.
The weighted false discovery proportion and weighted false discovery rate are not new \citep{benjamini1997multiple,benjamini2007false}, and the Benjamini-Hochberg procedure \citep{benjamini1995controlling} has been generalized to the weighted false discovery rate setting. A related criterion is the penalty-weighted false discovery rate \citep{ramdas2019unified}, which can be controlled with the p-filter. However, the aforementioned procedures only provably control the corresponding criteria under restrictive dependence assumptions on the $p$-values \citep{benjamini2001}. Under arbitrary dependence, the reshaping process \citep{benjamini2001, blanchard2008two, ramdas2019unified} needs to be applied, which can greatly reduce power. \citet{basu2018weighted} proposed a procedure that has asymptotic control of a related quantity, namely $\mathrm{E}[C(\mathcal{R} \cap \mathcal{H}_0)] / \mathrm{E}[C(\mathcal{R})]$, in a mixture model under certain regularity conditions.
In this work, we adapt the ideas of knockoffs \citep{barber2015} and simultaneous inference \citep{goeman2011multiple,katsevich2018towards} to the setting where features have costs. The key to our method, which we call {\em cheap knockoffs}, is to construct multiple knockoffs for each feature, with more expensive features having more knockoffs. A feature is selected only if it beats all of its knockoff counterparts; thus, costlier features have more competition. This procedure yields a path of selected feature sets $\mathcal{R}_1, \ldots, \mathcal{R}_p$ for which $\textsc{wFDP} (\mathcal{R}_k)$ is bounded by a certain computable quantity with high probability, regardless of how $k$ is chosen. Unlike existing work on weighted false discovery rate control \citep{benjamini1997multiple,benjamini2007false, ramdas2019unified}, our method provably bounds the weighted false discovery proportion under arbitrary dependence among features. \citet{yu2021high} recently proposed a predictive modeling method in high-dimensional cost-constrained linear regression problems. Different from their focus which is on good prediction performance under budget constraints, our method aims at recovering the true set of features (as defined in $\mathcal{H}_0^C$) with $\textsc{wFDP}$ control.
\section{Cheap knockoffs} \subsection{A review of model-X knockoffs and simultaneous inference} \label{sec:standard} Our method is based on the model-X knockoff procedure \citep{candes2018panning} and its multiple knockoff extension \citep{2018arXiv181011378R}, which provably control the false discovery rate for arbitrary sample size $n$ and number of features $p$. For simplicity, we focus on the following linear model setting \begin{align}
\mathrm{E} \left[Y | X_1, \ldots, X_p \right] = \sum_{j = 1}^p \beta_j X_j,
\quad \left(X_1, \ldots, X_p \right)^T \sim N(\mathbf{0}, \Sigma).
\label{eq:model} \end{align}
We start by briefly reviewing the model-X knockoff approach in the simultaneous inference setting, applied specifically in the linear model \eqref{eq:model}. Throughout this paper, we denote $\mathbf{X} \in \mathbb R^{n \times p}$ as a data matrix, and $\mathbf{y} \in \mathbb R^n$ as a response vector, where $(\mathbf{X}_{i1}, \ldots, \mathbf{X}_{ip}, \mathbf{y}_i) \in \mathbb R^{p} \times \mathbb R$ are independently and identically distributed as $(X_1, \ldots, X_p, Y)$ for $i = 1, \ldots, n$. \begin{enumerate}
\item For each variable $X_j$, construct a knockoff variable $\tilde{X}_j$ that satisfies:
\begin{enumerate}
\item $\mathrm{E}(\tilde{X}_j) = \mathrm{E}(X_j)$;
\item $\mathrm{Cov}(\tilde{X}_j, \tilde{X}_k) = \mathrm{Cov}(X_j, X_k)$ for all $k$;
\item $\mathrm{Cov}(\tilde{X}_j, X_k) = \mathrm{Cov}(X_j, X_k) - s_j \mathbbm{1} \{j = k\}$ for some $s_j \geq 0$.
\end{enumerate}
The knockoff variables $\tilde{X} = (\tilde{X}_1, \ldots, \tilde{X}_p)$ are constructed to resemble $X$ without any knowledge of the response $Y$. We denote $\tilde{\mathbf{X}} \in \mathbb R^{n \times p}$ as the constructed knockoff matrix of $\mathbf{X}$ in a way that $(\tilde{\mathbf{X}}_{i1}, \ldots, \tilde{\mathbf{X}}_{ip})$ is a knockoff of $(\mathbf{X}_{i1}, \ldots, \mathbf{X}_{ip})$ for $i = 1, \ldots, n$.
\item For each $j \in \{1, \ldots, p\}$, compute statistics $T_j$ and $\tilde{T}_j$ for the variables $X_j$ and $\tilde{X}_j$, respectively. For example, these could be the absolute values of the coefficients of a lasso regression \citep{tibshirani1996regression} on the augmented design matrix $\mathbf{Z} = [\mathbf{X}, \tilde{\mathbf{X}}] \in \mathbb R^{n \times 2p}$:
\begin{align}
\hat{\theta}(\lambda) = \argmin_{\theta \in \mathbb R^{2p}} \left( \frac{1}{2} \norm{\mathbf{y} - \mathbf{Z} \theta}_2^2 + \lambda \norm{\theta}_1 \right),
\label{eq:lasso}
\end{align} with
$T_j = |\hat{\theta}(\lambda)_j|$ and $\tilde{T}_j = |\hat{\theta}(\lambda)_{j + p}|$.
The value of $\lambda$ can be fixed in advance, or selected using cross-validation.
The knockoff statistics are then defined as
$W_j = T_j - \tilde{T}_{j}$.
\citet{barber2015} and \citet{candes2018panning} discuss other choices of $T_j$'s and $W_j$'s.
Intuitively, a large value of $W_j$ indicates that $X_j$ is a genuine signal variable, i.e., the distribution of $Y$ depends on $X_j$, whereas a small or negative value of $W_j$ indicates that $X_j$ may be irrelevant.
\item For any ordering of variables $\sigma(1), \ldots, \sigma(p)$, e.g., $|W_{\sigma(1)}| \geq |W_{\sigma(2)}| \geq \ldots \geq |W_{\sigma(p)}|$, report the sets of selected variables $\mathcal{R}_{k} = \left\{ \sigma(j): \sigma(j) \leq \sigma(k), W_{\sigma(j)} > 0 \right\}$, for $k \in \{ 1, \ldots, p\}$. \end{enumerate}
\citet{katsevich2018towards} work within the simultaneous inference framework \citep{goeman2011multiple}, in which a practitioner wishes to obtain a final set of selected variables with false discovery proportion control when choosing among $\{\mathcal{R}_k, k = 1, \ldots, p\}$. To allow for such behavior, \citet{katsevich2018towards} form a computable upper bound $\mathcal{U}_k$ such that $\textsc{FDP}(\mathcal{R}_k)\le \mathcal{U}_k$ holds simultaneously over all $k$ with some known probability.
\subsection{Multiple knockoffs based on cost} \label{sec:mknockoffs} The knockoff procedure described in the previous section constructs a single knockoff variable for each feature, and then selects features based solely on the values of $W_1, \ldots, W_p$. \citet{barber2015} and \citet{candes2018panning} discuss the possibility of constructing $K$ knockoffs per feature for some value $K>1$ with the goal of achieving higher statistical power and stability. This has been pursued in \citet{2018arXiv181011378R} and \citet{emery2019multiple}.
We make a simple yet crucial modification to the multiple knockoffs idea, allowing different features to have different numbers of knockoffs, so that an expensive irrelevant feature will have a lower chance of entering the model than a cheap irrelevant feature. Assume that the feature costs $\omega_1, \ldots, \omega_p$ are integers with $\omega_j \geq 2$. We construct $\omega_j - 1$ knockoff variables for each original variable $X_j$. If $X_j$ is irrelevant, i.e., $j \in \mathcal{H}_0$, then we expect it to be selected with probability $1/\omega_j$. We also incorporate costs into the construction of the sequence of selected feature sets $\mathcal{R}_k$. The cheap knockoff procedure generalizes the multiple knockoff procedure of \citet{2018arXiv181011378R} to the cost-conscious setting: \begin{enumerate}
\item For each variable $X_j$ with cost $\omega_j$, denote $\tilde{X}_j^{(1)} = X_j$ and construct the knockoff variables $\tilde{X}_j^{(2)}, \tilde{X}_j^{(3)}, \dots, \tilde{X}_j^{(\omega_j)}$ such that:
\begin{enumerate}
\item $\mathrm{E}(\tilde{X}_j^{(\ell)}) = \mathrm{E}(X_j)$ for $\ell \in \{2, \ldots, \omega_j\}$.
\item $\mathrm{Cov}( \tilde{X}_j^{(\ell)}, \tilde{X}_k^{(m)} ) = \mathrm{Cov}( X_j, X_k ) - s_j \mathbbm{1}\{j = k\} \mathbbm{1}\{ \ell \neq m \}$ for all $\ell \in \{1, \ldots, \omega_j\}$, $m \in \{ 1, \ldots, \omega_k \}$, $j, k \in \{1, \ldots, p \}$, and some constant $s_j \geq 0$.
\end{enumerate}
We denote $\tilde{\mathbf{X}}_j^{(\ell)} \in \mathbb R^{n}$ as the constructed knockoff variables of $\mathbf{X}_j$, such that $(\tilde{\mathbf{X}}_{ij}^{(\ell)})^{\ell = 1, \ldots, \omega_j}_{j = 1, \ldots, p}$ satisfies the condition above for $(\mathbf{X}_{ij})_{j = 1, \ldots, p}$ for $i = 1, \ldots, n$.
\item For each $j \in \{1, \ldots, p\}$, compute the statistics $T_j^{(1)}$ (corresponding to the original variable) and $T_j^{(2)}, \dots, T_j^{(\omega_j)}$ (corresponding to the $\omega_j - 1$ knockoff variables).
For example, these could be the absolute values of the coefficients of the following lasso regression:
\begin{align}
\{\hat{\theta}_{j}^{(\ell)}(\lambda)\}_{j \leq p, \ell \leq \omega_j} = \argmin_{\theta_j^{(\ell)}: j \leq p, \ell \leq \omega_j} \left( \frac{1}{2} \norm{\mathbf{y} - \sum_{j = 1}^p \sum_{\ell = 1}^{\omega_j} \tilde{\mathbf{X}}_j^{(\ell)} \theta_{j}^{(\ell)}}_2^2 + \lambda \sum_{j = 1}^p \sum_{\ell = 1}^{\omega_j} |\theta_j^{(\ell)}| \right),
\label{eq:lassom}
\end{align}
with $T_{j}^{(\ell)} = |\hat{\theta}_j^{(\ell)}(\lambda)|$. The value of $\lambda$ in \eqref{eq:lassom} can be selected using cross-validation.
We define
\begin{align}
\kappa_j = \argmax_{1 \leq \ell \leq \omega_j} T_j^{(\ell)}.
\label{eq:Wj}
\end{align}
\item For any ordering of variables $\sigma(1), \ldots, \sigma(p)$, report the sets of selected variables $\mathcal{R}_{k} = \left\{ \sigma(j): \sigma(j) \leq \sigma(k), \kappa_{\sigma(j)} = 1 \right\}$, for $k \in \{ 1, \ldots, p\}$. \end{enumerate}
In Step 1, various methods are available for constructing multiple knockoffs given that the distribution of $X$ is known \citep[see, e.g.,][]{candes2018panning, 2018arXiv181011378R}. The computation of $\kappa_j$ in Step 2 involves the $\omega_j$ statistics $T_j^{(1)}, \ldots, T_j^{(\omega_j)}$; $\kappa_j = 1$ indicates that the original variable beats all of its $\omega_j - 1$ knockoff copies. We show in the supplementary material that the probability of this occurring for an irrelevant feature is inversely proportional to the feature's cost. This is the key property used to show the simultaneous control of the weighted false discovery proportion in the next section.
In principle, any ordering of variables can be used to obtain $\mathcal{R}_k$. In simulations, we consider a specific ordering such that $\tau_{\sigma(1)} \geq \tau_{\sigma(2)} \geq \ldots \geq \tau_{\sigma(p)}$, where $\tau_j = 2\omega_j^{-1}\{ T^{(\kappa_j)}_j - \max_{\ell \neq \kappa_j} T_j^{(\ell)} \}$. One reason for this specific choice of $\tau_j$ is that when $\omega_1 = \ldots = \omega_p = 2$, the above procedure is exactly the same as the standard knockoff procedure reviewed in Section \ref{sec:standard}. In particular, $W_j > 0$ if and only if $\kappa_j = 1$, and $|W_j| = \tau_j$. Moreover, all else being equal, we want to make use of cheap features over expensive features. For this reason, we set $\tau_j$ to be inversely proportional to the feature cost.
\subsection{Simultaneous control of the weighted false discovery proportion} \label{sec:theory} Having constructed a cost-conscious path of selected variable sets $\mathcal{R}_1, \ldots, \mathcal{R}_p$, we next provide a simultaneous high-probability bound on the weighted false discovery proportion along this path. The next theorem and the remark that follows establish that the computable quantities $\bar{\mathcal{U}}(\mathcal{R}_1, c), \ldots, \bar{\mathcal{U}}(\mathcal{R}_p, c)$, defined below in \eqref{eq:ubar}, simultaneously upper bound $\textsc{wFDP} (\mathcal{R}_1), \ldots, \textsc{wFDP}(\mathcal{R}_p)$ with a known probability. This means that for any choice of $k$, with high probability our selected feature set is not too wasteful (in terms of the fraction of cost spent on irrelevant features). \begin{theorem} \label{thm:spotting}
For any $\alpha \in (0, 1)$, we have
\begin{align}
\mathbb{P} \left\{ \textsc{wFDP}\left(\mathcal{R}_k\right) \leq \mathcal{U}\left(\mathcal{R}_k, c\right) \text{ \normalfont for all } k\right\} \geq 1 - \alpha,
\label{eq:spotting}
\end{align}
where for any constant $c > 0$,
\begin{align}
\mathcal{U}(\mathcal{R}_k, c) = -\log \alpha \left[\frac{1 + c\sum_{j = 1}^k \mathbbm{1}\left \{ j \notin \mathcal{R}_k \right \}}{ \left(\sum_{j = 1}^k \omega_j \mathbbm{1} \left\{ j \in \mathcal{R}_k \right\} \right) \vee 1} \right] \left[\max_{m \in \mathcal{H}_0} \frac{\omega_m}{\log \left\{ \omega_m - \left( \omega_m - 1 \right) \alpha^{c} \right\}}\right].
\label{eq:wfdpbar}
\end{align} \end{theorem} For the standard knockoff procedure described in Section \ref{sec:standard}, we have $\omega_1 = \ldots = \omega_p = 2$. In that case, with $c = 1$, \eqref{eq:wfdpbar} reduces exactly to the bound from applying Theorem 2 of \citet{katsevich2018towards} to the Selective and Adaptive SeqStep procedure \citep{barber2015} with $p_\ast = \lambda = 1/2$.
As mentioned in Section \ref{sec:standard}, our procedure can be generalized to any known distribution of $X$ and any unknown conditional distribution of $Y$ given $X$. For example, in the binary classification data example in Section \ref{sec:data}, we consider the statistics $\{T_j^{(\ell)}\}$ derived from $\ell_1$-penalized logistic regression. Following the arguments of \citet{candes2018panning}, we can show that Theorem \ref{thm:spotting} also holds for this choice of $\{T_j^{(\ell)}\}$.
\begin{remark}
The weighted false discovery proportion upper bound $\mathcal{U}(\mathcal{R}_k, c)$ depends on the unknown set $\mathcal{H}_0$. In practice, we can use an upper bound
\begin{align}
\bar{\mathcal{U}}(\mathcal{R}_k, c) = -\log \alpha \left[\frac{1 + c\sum_{j = 1}^k \mathbbm{1}\left \{ j \notin \mathcal{R}_k \right \}}{\left(\sum_{j = 1}^k \omega_j \mathbbm{1} \left\{ j \in \mathcal{R}_k \right\} \right) \vee 1} \right] \left[\max_{m} \frac{\omega_m}{\log \left\{ \omega_m - \left( \omega_m - 1 \right) \alpha^{c} \right\}}\right].
\label{eq:ubar}
\end{align}
Moreover, if an estimated set $\hat{\mathcal{H}}_0$ satisfying $\mathcal{H}_0 \subseteq \hat{\mathcal{H}}_0$ is available, then \eqref{eq:wfdpbar} with the maximum taken over $\hat{\mathcal{H}}_0$ gives a tighter bound in \eqref{eq:spotting}. \end{remark}
Our procedure yields a sequence of sets $\mathcal{R}_k$ of selected variables, and the bound in \eqref{eq:spotting} gives a specific description of the tradeoff between capturing enough of the signal variables and incurring too much cost. The simultaneous nature of the bound means that $\textsc{wFDP}(\mathcal{R}_k)$ is controlled regardless of the approach used to select $k$: the choice of $k$ can depend on the size of $\mathcal{R}_k$, the cost of $\mathcal{R}_k$, or in fact any function of the data.
\section{Simulation studies} \label{sec:simulation} We now investigate the feature selection performance of cheap knockoffs in simulation. We set $n = 200$ and $p = 30$. Each element of the design matrix $\mathbf{X} \in \mathbb R^{n \times p}$ is independent and identically distributed as $N(0, 1)$. The response is generated from the linear model \eqref{eq:model} with Gaussian errors $\varepsilon \sim N(0, \sigma^2)$ and $\sigma^2=(4n)^{-1}\snorm{\mathbf{X} \beta}_2^2$. We let $\beta_{1} = \ldots = \beta_{10} = 2$, and $\beta_j = 0$ for $j > 10$. We set the first half of the relevant features to be expensive and the second half to be cheap, i.e., $\omega_{1} = \ldots = \omega_5 = 6$, and $\omega_{6} = \ldots = \omega_{10} = 2$. For the irrelevant features, i.e., for any $j > 10$, we set $\mathbb{P}(\omega_j = 6) = \gamma$ and $\mathbb{P}(\omega_j = 2) = 1 - \gamma$, where $\gamma \in \{0, 0.25, 0.5, 0.75, 1\}$.
We construct multiple knockoff variables using entropy maximization \citep{2018arXiv181011378R}, and we compute the statistics $T_j^{(\ell)}$ as the absolute value of the lasso coefficient estimates in \eqref{eq:lassom}, with the tuning parameter selected using cross-validation. In Appendix \ref{app:time} we report the wall-clock running time of cheap knockoffs in the numerical studies. We find that the majority of computation is spent on generating multiple knockoffs, which is challenging when $p$ is large and (or) the feature costs are large (after dividing by their greatest common factor). In such cases, alternative construction methods could be used. For example, \citet[Appendix A.1.][]{2018arXiv181011378R} show that an equicorrelation construction has a closed form expression, which is particularly favorable in computation since it does not depend on the number of multiple knockoffs (and equivalently, the feature costs).
We first verify the bound in Theorem \ref{thm:spotting} and compare the performance of cheap knockoffs to \citet{katsevich2018towards}, which ignores feature costs. In particular, by carrying out Steps 1-3 in Section \ref{sec:standard} with $\omega_1 = \ldots = \omega_p = 2$ in \eqref{eq:ubar}, the bound in \eqref{eq:ubar} coincides with the result in \citet{katsevich2018towards}. We denote this approach as \citet{katsevich2018towards}. For both methods, we take $\alpha = 0.2$ in \eqref{eq:ubar}. In Fig.~\ref{fig:ratio} we report both the ratio $\bar{\mathcal{U}}(\mathcal{R}_k, 1)^{-1}\textsc{wFDP}(\mathcal{R}_k)$ and the actual weighted false discovery proportion $\textsc{wFDP}(\mathcal{R}_k)$ for each $\mathcal{R}_k$ for both methods in the settings where $\gamma = 0, 0.5,$ and $1$. \begin{figure}
\caption{Each line represents one of 100 simulated datasets. Jitter is applied to ease visualization. The black dashed lines represent cheap knockoffs (our proposal) which incorporates feature costs, and the red solid lines represent \citet{katsevich2018towards} which does not make use of feature costs. Top panel: the cheap knockoffs approach controls the weighted false discovery proportion with the desired probability ($\alpha=0.2$) whereas the \citet{katsevich2018towards} procedure does not. Bottom panel: The cheap knockoffs attains lower weighted false discovery proportion than the \citet{katsevich2018towards} procedure for most values of $k$ when $\gamma$ is large.}
\label{fig:ratio}
\end{figure} As seen in Fig.~\ref{fig:ratio}, the ratio $\bar{\mathcal{U}}(\mathcal{R}_k, 1)^{-1}\textsc{wFDP}(\mathcal{R}_k)$ for our cheap knockoff procedure is mostly below $1$, indicating that the bound in Theorem \ref{thm:spotting} holds. Moreover, when $\gamma$ is large, the weighted false discovery proportion for the cheap knockoff procedure is lower than \citet{katsevich2018towards} for most values of $k$. Table~\ref{tab:ratio} gives the estimated probability that the bound is violated, i.e., $\widehat{\mathbb{P}}(\sup_{k} \bar{\mathcal{U}}_k^{-1}(\mathcal{R}_k, 1) \textsc{wFDP}(\mathcal{R}_k) > 1)$, for each method for $\gamma \in \{0, 0.25, 0.5, 0.75, 1\}$. \begin{table}[H]
\begin{center}
\begin{tabular}{ c | c | c | c | c | c}
\hline
$\gamma$ & 0 & 0.25 & 0.5 & 0.75 & 1\\ \hline
Cheap knockoffs (our proposal) & 0.08 & 0.05 & 0.08 & 0.07 & 0.04\\ \hline
\citet{katsevich2018towards} & 0.01 & 0.05 & 0.12 & 0.25 & 0.31\\
\hline
\end{tabular}
\end{center}
\caption{Proportion of 100 simulated datasets for which the bound is violated, i.e., for which $\sup_{k} \bar{\mathcal{U}}_k^{-1}(\mathcal{R}_k, 1) \textsc{wFDP}(\mathcal{R}_k) > 1$. Our proposed cost-conscious procedure successfully controls the probability below the $\alpha = 0.2$ level for all values of $\gamma$, while \citet{katsevich2018towards} does not control this probability when $\gamma = 0.75$ and $\gamma = 1$.}
\label{tab:ratio} \end{table}
We see that the \citet{katsevich2018towards} procedure which is not cost-conscious performs worse as $\gamma$ increases, that is, when irrelevant variables are more likely to be expensive. Since the method ignores cost, it may erroneously select expensive irrelevant features, leading to poor weighted false discovery proportion.
While our proposal focuses on recovering the correct set of features with simultaneous $\textsc{wFDP}$ control, we show empirically that the set of features selected by cheap knockoffs usually incurs low cost without compromising prediction accuracy. Specifically, for each set of selected variables $\mathcal{R}_1, \ldots, \mathcal{R}_p$, we compute both the root mean squared prediction error of the least squares model fit to the variables in $\mathcal{R}_k$, and the total cost $\sum_{j \in \mathcal{R}_k} \omega_j$. We see from Fig.~\ref{fig:pred} that for a given budget, the cheap knockoff procedure attains smaller prediction error than the procedure in \citet{katsevich2018towards}, which is not cost-conscious. In particular, the cheap knockoff procedure tends to select all five of the cheap relevant features before any expensive feature is let in the model, whereas \citet{katsevich2018towards} does not take feature cost into consideration. For $k \geq 10$, $\mathcal{R}_k$ for both methods includes essentially all the relevant features, thus giving similar performance. \begin{figure}
\caption{Tradeoff between prediction accuracy and total cost (averaged over 100 simulations). The line with dots in black represents the cheap knockoff procedure, and the line with crosses in red represents \citet{katsevich2018towards}. The cost of the model selected by our cost-conscious procedure can be much lower than that of the procedure in \citet{katsevich2018towards} without sacrificing predictive performance.}
\label{fig:pred}
\end{figure}
\section{Data application} \label{sec:data} To gauge the performance of cheap knockoffs in a real dataset, we consider data from the National Health and Nutrition Examination Survey (NHANES) (\citealt{nhanes}, processed in \citealt{kachuee2019opportunistic, kachuee2019cost}). The dataset contains 92062 samples of survey participants. We consider 30 features, which can be broadly categorized into four types: demographics, questionnaire-based, examination-based, and laboratory-based. For each feature, medical experts suggest a corresponding integer-valued cost (ranging from 2 to 9) for that feature based on ``the overall financial burden, patient privacy, and patient inconvenience'' \citep{kachuee2019cost}. A brief summary of the 30 features can be found in Table~\ref{tab:features}. Finally, each observation is associated with a label of pre-diabetes/diabetes (as one category) or normal. The task is to select features that are closely associated with diabetes while taking feature cost into consideration.
\begin{table}[H]
\begin{tabular}{lll}
& \textbf{Examples} & \textbf{Cost} \\
\textbf{Demographics} & Age; Income; Education level & 2 to 4 \\
\textbf{Questionnaire} & Average sleep length (in hours) & 4 \\
\textbf{Examination} & Diastolic Blood pressure; Systolic Blood Pressure & 5 \\
\textbf{Laboratory} & Cholesterol; Triglyceride; Fibrinogen & 9
\end{tabular}
\caption{Examples of the features in the NHANES dataset}
\label{tab:features} \end{table}
We consider the cheap knockoff procedure as in Section \ref{sec:mknockoffs}, modified so that the statistics $\{T_j^{(\ell)}\}$ computed in \eqref{eq:lassom} are derived from $\ell_1$-penalized logistic regression (instead of $\ell_1$-penalized least squares). Following the arguments in \citet{candes2018panning}, we can show that Theorem \ref{thm:spotting} also holds for this choice of $\{T_j^{(\ell)}\}$.
To numerically verify Theorem \ref{thm:spotting}, we would need to know the true set of relevant variables. We test the cheap knockoff procedure using partially-simulated data. To form a reasonable ground truth, we start by performing logistic regression on a random set of 72062 samples. In total, we retain 11 variables whose $p$-values are smaller than 0.01 / 30 (by Bonferroni correction). We take these as the true set of relevant variables (see Appendix \ref{app:truth} for the list of relevant variables). We next generate responses for the remaining 20000 samples from a logistic regression model using only these selected features. The coefficient values used correspond to those from the fitted logistic regression estimates. We then randomly divide these 20000 samples (with simulated responses) into 50 non-overlapping sets, each containing 400 samples. On each set, we run our method to obtain a path of selected variables. Finally, we compute the estimated probability that the bound in \eqref{eq:wfdpbar} is violated, i.e., $\widehat{\mathbb{P}}(\sup_{k} \bar{\mathcal{U}}_k^{-1}(\mathcal{R}_k, 1) \textsc{wFDP}(\mathcal{R}_k) > 1)$ for $\alpha \in \{0.05, 0.1, ..., 0.5\}$. We see from Table \ref{tab:ratio_data} that the estimated probability is lower than the corresponding value of $\alpha$, indicating that Theorem \ref{thm:spotting} holds for our proposed cost-conscious procedure. \begin{table}[H]
\begin{center}
\begin{tabular}{ c | c | c | c | c | c | c | c | c | c | c}
\hline
$\alpha$ & 0.05 & 0.10 & 0.15 & 0.20 & 0.25 & 0.30 & 0.35 & 0.40 &0.45 &0.50\\ \hline
Cheap knockoffs & 0.04 & 0.04 & 0.04 & 0.04 & 0.04 & 0.04 & 0.04 & 0.04 & 0.04 & 0.06 \\
\hline
\end{tabular}
\end{center}
\caption{Proportion of 50 data subsets for which the bound is violated, i.e., for which $\sup_{k} \bar{\mathcal{U}}_k^{-1}(\mathcal{R}_k, 1) \textsc{wFDP}(\mathcal{R}_k) > 1$.}
\label{tab:ratio_data} \end{table}
On each of the 50 non-overlapping data subsets, we further compute $\textsc{wFDP}$ and cost for the path of selected variables $\mathcal{R}_k$ returned by cheap knockoffs and the proposal in \citet{katsevich2018towards}, which ignores feature costs. Figure~\ref{fig:NHANES_wfdp} reports the $20$, $50$, and $80$ percentiles (over the 50 non-overlapping sets) of $\textsc{wFDP}$ and cost, and shows that our proposal effectively attains a lower $\textsc{wFDP}$ and a lower cost than the proposal in \citet{katsevich2018towards} which is not cost-conscious.
\begin{figure}
\caption{The 20, 50, and 80 percentiles of $\textsc{wFDP}$ (left panel) and cost (right panel) over 50 non-overlapping data subsets of cheap knockoffs and the procedure in \citet{katsevich2018towards}.}
\label{fig:NHANES_wfdp}
\end{figure}
Although prediction performance of the selected model is not the main theoretical focus of our proposal, we next study the prediction performance and the total cost of the selected variables. For comparison, we consider the following methods: \begin{enumerate}
\item \textbf{Katsevich \& Ramdas (2018)}: the proposal of \citet{katsevich2018towards} applied to the `Selective and adaptive SeqStep' method. It is equivalent to our method if we ignore the cost information, i.e., we set $\omega_1 = \omega_2 = \ldots = \omega_{30} = 2$.
\item \textbf{Logistic regression}: logistic regression applied to all 30 features. This procedure is not cost-conscious, and does not perform feature selection. We use this as a benchmark for classification performance. \end{enumerate} We run these methods on all 92062 observations. Given the large sample size, we expect training error to be a good approximation of the generalization error. Furthermore, to highlight the effects of feature costs, we consider exaggerating the feature costs by using the squares of their actual costs.
\begin{figure}
\caption{\emph{Left}: The classification performance (in terms of the area under the ROC curve) for different sizes of the selected model $\mathcal{R}_k$ ($k = 1, \ldots, 30$). \emph{Center}: The total cost for different sizes of the selected model.
\emph{Right}: The classification performance versus the cost of the selected model. In all three panels of this figure, we consider the squared costs to highlight the effects of feature costs.}
\label{fig:NHANES}
\end{figure} From Figure~\ref{fig:NHANES}, we see that cheap knockoffs can achieve favorable classification performance at a low feature cost. In particular, the first two panels show that for a fixed model size, cheap knockoffs tends to achieve slightly worse classification performance than the procedure of \citet{katsevich2018towards}, which is not cost-conscious. However, our method achieves this classification performance at a lower cost. The right panel shows that for a given model cost, our method can obtain favorable classification performance compared with the proposal of \citet{katsevich2018towards}. Moreover, our method's classification performance is close to the benchmark of logistic regression, while using a much cheaper set of features.
In Figures~\ref{fig:path} and \ref{fig:path_exp}, we show the path of variables selected by cheap knockoffs and that of \citet{katsevich2018towards}. Each point represents a variable added to a model (with the feature name in the legend). For example, we see that both methods include \texttt{Gender}, \texttt{Height}, \texttt{Weight}, and \texttt{Triglyceride} when the model size is 4. However, the cheap knockoff procedure tends to select cheaper features first, adding the expensive laboratory feature \texttt{Triglyceride} last among these four features. By comparison, the proposal of \citet{katsevich2018towards} does not show any preference for inexpensive features. For the model with two variables, cheap knockoffs selects \texttt{Gender} and \texttt{Height}, which has lower cost and better classification performance than the model of \texttt{Height} and \texttt{Weight} selected by \citet{katsevich2018towards}.
In addition, in Figure~\ref{fig:path_exp}, we present the path of variables selected by cheap knockoffs applied with squared feature costs, where squaring has been performed to exaggerate the effect of the feature costs. Comparing with Figure~\ref{fig:path}, we see that cheap knockoffs tends to select less expensive features, while still attaining comparable classification performance. In particular, when the costs are squared, cheap knockoffs no longer selects \texttt{Diastolic BP(2nd)}, \texttt{Systolic BP(4th)}, \texttt{Systolic BP(1st)}, \texttt{Diastolic BP(3rd)}, \texttt{Vigorous activity}, and \texttt{Upper leg length}. Among these omitted variables, only \texttt{Upper leg length} is considered relevant by the logistic regression (see Appendix \ref{app:truth}).
\begin{figure}
\caption{The path of variables selected by cheap knockoffs (top) and the proposal of \citet{katsevich2018towards} (bottom). Each point represents a newly selected feature in the model. Variable indices are ordered from cheapest to most expensive.}
\label{fig:path}
\end{figure} \begin{figure}
\caption{The path of variables selected by cheap knockoffs, with squared costs. Each point represents a newly selected feature in the model. Variable indices are ordered from cheapest to most expensive.}
\label{fig:path_exp}
\end{figure}
\section{Discussion} In this paper, we proposed cheap knockoffs, a procedure for performing feature selection when features have costs. Cheap knockoffs is based on the idea of constructing multiple knockoffs for each feature. In particular, cheap knockoffs forces more expensive features to compete with more knockoffs, making it harder for expensive features to be selected. Our method yields a path of selected feature sets, and we show that the weighted false discovery proportion is simultaneously bounded with high probability along this path.
An interesting yet challenging future research direction is to develop a method based on the multiple knockoffs idea that provably controls the weighted false discovery rate. The martingale-type arguments used in the original knockoff paper rely on certain symmetries that are broken when the numbers of knockoffs constructed for different features are not all equal.
Finally, an \texttt{R} package named \texttt{cheapknockoff}, implementing our proposed method, is available on \url{https://github.com/hugogogo/cheapknockoff}. The simulation studies in Section \ref{sec:simulation} use the \texttt{simulator} package \citep{2016arXiv160700021B}, and the code to reproduce the simulation results (in Section \ref{sec:simulation}) and the NHANES data analysis (in Section \ref{sec:data}) is available at \url{https://github.com/hugogogo/reproducible/tree/master/cheapknockoff}. The NHANES dataset \citep{nhanes} is processed in \citet{kachuee2019opportunistic, kachuee2019cost} and is available at \url{https://github.com/mkachuee/Opportunistic}.
\section{Acknowledgments} \label{sec:acknowledgments} The authors thank Will Fithian for suggesting the simultaneous inference framework. All authors were supported by NIH Grant R01GM123993. Jacob Bien was also supported by NSF CAREER Award DMS-1653017 and Daniela Witten was also partially supported by NIH Grant DP5OD009145, NSF CAREER Award DMS-1252624, and a Simons Investigator Award in Mathematical Modeling of Living Systems.
\appendix \appendixpage \section{NHANES dataset: significant features in logistic regression} \label{app:truth} In the order of increasing $p$-values (smaller than 0.01 / 30): \begin{table}[H]
\centering
\begin{tabular}{ll}
\hline
Name & $p$-value \\
\hline
Gender & $1.73 \times 10^{-262}$ \\
Triglyceride & $5.92 \times 10^{-214}$ \\
Height & $1.17 \times 10^{-184}$ \\
Weight & $1.98 \times 10^{-102}$ \\
Waist circumference & $4.09 \times 10^{-37}$ \\
Body mass index & $4.02 \times 10^{-31}$ \\
High blood pressure history & $1.51 \times 10^{-27}$ \\
Cholesterol & $4.92 \times 10^{-24}$ \\
Education & $8.16 \times 10^{-10}$ \\
Upper leg length & $3.17 \times 10^{-5}$ \\
Systolic BP(3rd) & $1.01 \times 10^{-4}$\\
\hline
\end{tabular} \end{table}
\section{Running time comparison in numerical studies} \label{app:time} \begin{table}[H]
\centering
\begin{tabular}{ c | c | c | c | c | c}
\hline
$\gamma$ & 0 & 0.25 & 0.5 & 0.75 & 1\\ \hline
Cheap knockoffs (our proposal) & 2.796 & 2.772 & 2.784 & 2.798 & 2.812\\ \hline
\citet{katsevich2018towards} & 0.273 & 0.250 & 0.258 & 0.251 & 0.253\\
\hline
\end{tabular}
\caption{Wall-clock time comparison (in seconds, averaged over 100 simulated datasets) between our proposal and \citet{katsevich2018towards} in generating Table~\ref{tab:ratio}. }
\label{tab:time_simu} \end{table} \begin{table}[H]
\centering
\captionsetup{type=table}
\begin{tabular}{ c | c }
\hline
Cheap knockoffs (our proposal) & 7.284 \\ \hline
\citet{katsevich2018towards} & 2.678 \\
\hline
\end{tabular}
\caption{Wall-clock time comparison (in seconds, averaged over 50 non-overlapping data subsets) between our proposal and \citet{katsevich2018towards} in generating Figure~\ref{fig:NHANES_wfdp}. }
\label{tab:time_data} \end{table}
\section{Properties of multiple knockoffs} \label{app:mkv} We study the properties of the multiple knockoffs constructed in Step 1 of Section \ref{sec:mknockoffs}. Define \begin{align}
\tilde{Z} = \left(\tilde{X}_1^{(2)}, \ldots, \tilde{X}_1^{(\omega_1)}, \tilde{X}_2^{(2)}, \ldots, \tilde{X}_2^{(\omega_2)}, \ldots, \tilde{X}_p^{(2)}, \ldots, \tilde{X}_p^{(\omega_p)}\right)^T \in \mathbb R^{\sum_j(\omega_j - 1)}
\nonumber \end{align} as the random vector of all knockoff features, and \begin{align}
Z = \left(\tilde{X}_1^{(1)}, \tilde{X}_1^{(2)}, \ldots, \tilde{X}_1^{(\omega_1)}, \tilde{X}_2^{(1)}, \tilde{X}_2^{(2)}, \ldots, \tilde{X}_2^{(\omega_2)}, \ldots, \tilde{X}_p^{(1)}, \tilde{X}_p^{(2)}, \ldots, \tilde{X}_p^{(\omega_p)}\right)^T \in \mathbb R^{\sum_j\omega_j},
\label{eq:Z} \end{align} where $\tilde{X}_j^{(1)} = X_j$ is the original feature for $j = 1, \ldots, p$. For any $p$-tuple of permutations $\varsigma = (\varsigma_1, \ldots, \varsigma_p)$ where $\varsigma_j$ is a permutation on the set $\{1, \ldots, \omega_j\}$, and for any vector $v = (v_1^{(1)}, \ldots, v_1^{(\omega_1)}, \ldots, v_p^{(1)}, \ldots, v_p^{(\omega_p)}) \in \mathbb R^{\sum_j \omega_j}$, we define \begin{align}
v_{\mathrm{swap}(\varsigma)} = \left( v_1^{(\varsigma_1(1))}, \ldots, v_1^{(\varsigma_1(\omega_1))}, v_2^{(\varsigma_2(1))}, \ldots, v_2^{(\varsigma_2(\omega_2))}, \ldots, v_p^{(\varsigma_p(1))}, \ldots, v_p^{(\varsigma_p(\omega_p))} \right)^T \in \mathbb R^{\sum_j \omega_j}.
\nonumber \end{align} Therefore, $Z_{\mathrm{swap}(\varsigma)}$ denotes the random vector where each $\varsigma_j$ permutes the $\omega_j$ knockoff features (including the original one) corresponding to $X_j$.
We generalize the definition of multiple model-X knockoffs \citep[Definition 3.2 in][]{2018arXiv181011378R} to our setting in which each feature can have a different number of knockoffs: \begin{definition}
Consider any cost vector $\omega = (\omega_1, \ldots, \omega_p)$, where $\omega_j > 1$ are integers. The random vector $\tilde{Z}$ is a valid $\omega$-knockoff of $X = (X_1, \ldots, X_p)$ if
\begin{enumerate}
\item $Z_{\mathrm{swap}(\varsigma)}$ and $Z$ are identically distributed for any tuple of permutations $\varsigma = (\varsigma_1, \ldots, \varsigma_p)$;
\item $\tilde{Z}$ and $Y$ are conditionally independent given $X$.
\end{enumerate} \end{definition} Under the assumption that $X$ follows a multivariate Gaussian distribution, it can be verified \citep[see, e.g., Proposition 3.4 in][]{2018arXiv181011378R} that following Step 1 in Section \ref{sec:mknockoffs}, the vector $\tilde{Z}$ is a valid $\omega$-knockoff of $X$. In particular, the second property is guaranteed provided that the construction of $\tilde{Z}$ does not use $Y$, as in \citet{2018arXiv181011378R}.
The next lemma states the exchangeability property of the irrelevant features and their knockoffs, i.e., we can permute an irrelevant feature and its knockoffs without changing the joint distribution of $Z$ and $Y$. \begin{lemma}[Exchangeability of irrelevant features and their knockoffs] \label{lem:exchange} Consider any tuple of permutations $\varsigma = (\varsigma_1, \ldots, \varsigma_p)$, where $\varsigma_j$ is the identity permutation for $j \notin \mathcal{H}_0$, and $\varsigma_j$ is an arbitrary permutation over the set $\{1, \ldots, \omega_j\}$ for $j \in \mathcal{H}_0$. If $\tilde{Z}$ is a valid $\omega$-knockoff of $X$, then $(Z, Y)$ and $(Z_{\mathrm{swap}(\varsigma)}, Y)$ are identically distributed. \end{lemma} \begin{proof}
By the property of a valid $\omega$-knockoff, $Z_{\mathrm{swap}(\varsigma)}$ and $Z$ are identically distributed. So it is left to show that $Y | Z$ and $Y | Z_{\mathrm{swap}(\varsigma)}$ are identically distributed. This can be shown using the same arguments as in the proof of Lemma 1 in \citet{candes2018panning}. \end{proof}
We denote \begin{align}
T = \left( T_1^{(1)}, \ldots, T_1^{(\omega_1)}, T_2^{(1)}, \ldots, T_2^{(\omega_2)}, \ldots, T_p^{(1)}, \ldots, T_p^{(\omega_p)} \right) \in \mathbb R^{\sum_j \omega_j},
\nonumber \end{align} for $T_j^{(\ell)}$ defined in Step 2 of Section \ref{sec:mknockoffs}. Furthermore, we define component-wise order statistics on $T$, \begin{align}
T_{\mathrm{ordered}} =\left( T_{1, (1)}, \ldots, T_{1, (\omega_1)}, T_{2, (1)}, \ldots, T_{2, (\omega_2)}, \ldots, T_{p, (1)}, \ldots, T_{p, (\omega_p)} \right) \in \mathbb R^{\sum_j \omega_j}
\nonumber \end{align} such that $T_{j, (1)} \geq T_{j, (2)} \geq \ldots \geq T_{j, (\omega_j)}$ for all $j$.
The following lemma characterizes the multiple knockoff statistics $\{\kappa_j\}_{j = 1}^p$ computed in Step 2 of Section \ref{sec:mknockoffs}. It essentially states that for $j \in \mathcal{H}_0$, the statistic $\kappa_j$ corresponding to the irrelevant feature $X_j$ is uniformly distributed on the set $\{1, \ldots, \omega_j\}$, and is independent of the statistics corresponding to all other features and the component-wise order statistics $T_{\mathrm{ordered}}$. This property generalizes the ``coin-flip'' property of the standard model-X knockoff \citep[see, e.g., Lemma 2 in][]{candes2018panning}, and is the key to the proof of Theorem \ref{thm:spotting}. \begin{lemma}[Multiple knockoff statistics] \label{lem:uniform}
Suppose $\tilde{Z}$ is a valid $\omega$-knockoff of $X$. For any $j \in \mathcal{H}_0$, the statistic $\kappa_j$ is uniformly distributed on the set $\{1, \ldots, \omega_j\}$, and is independent of $\{\kappa_k\}_{k \neq j}$ and the order statistics $T_{\mathrm{ordered}}$. \end{lemma} \begin{proof}
We adapt the proof idea in B.2 of \citet{2018arXiv181011378R}. Consider any tuple of permutations $\varsigma = (\varsigma_1, \ldots, \varsigma_p)$, where $\varsigma_j$ is the identity permutation for $j \notin \mathcal{H}_0$, and $\varsigma_j$ is an arbitrary permutation over the set $\{1, \ldots, \omega_j\}$ for $j \in \mathcal{H}_0$. We first show that $(\varsigma_1(\kappa_1), \ldots, \varsigma_p(\kappa_p), T_{\mathrm{ordered}})$ has the same distribution as $(\kappa_1, \ldots, \kappa_p, T_{\mathrm{ordered}})$.
We denote $\varsigma^{-1} = (\varsigma_1^{-1}, \ldots, \varsigma_p^{-1})$ where $\varsigma_j^{-1}$ is the inverse permutation of $\varsigma_j$.
Recall from Step 2 of Section \ref{sec:mknockoffs}, combined with the definition of $Z$ in \eqref{eq:Z}, that $T = f(Z, Y)$ for some map $f$, and observe that $T_{\mathrm{swap}(\varsigma^{-1})} = f (Z_{\mathrm{swap}(\varsigma^{-1})}, Y)$. So by Lemma \ref{lem:exchange}, we have that $T_{\mathrm{swap}(\varsigma^{-1})}$ and $T$ are identically distributed. For any $k_j \in \{1, \ldots, \omega_j\}$ and $t_{j\ell} \in \mathbb R$ for $j = 1, \ldots, p$ and $\ell = 1, \ldots, \omega_j$, we have
\begin{align}
&\mathbb{P}\left[\bigcap_{j = 1}^p \{\kappa_j = k_j\}, \bigcap_{j = 1}^p \bigcap_{\ell = 1}^{\omega_j} \{T_{j, (\ell)} = t_{j\ell}\}\right] \nonumber\\
= &\mathbb{P} \left[\bigcap_{j = 1}^p \{T_j^{(k_j)} = T_{j, (1)} = t_{j1}\}, \bigcap_{j = 1}^p \bigcap_{\ell = 1}^{\omega_j} \{T_{j, (\ell)} = t_{j\ell}\}\right] \nonumber\\
=& \mathbb{P} \left[\bigcap_{j = 1}^p \{T_j^{(\varsigma_j^{-1}(k_j))} = T_{j, (1)} = t_{j1} \}, \bigcap_{j = 1}^p \bigcap_{\ell = 1}^{\omega_j} \{T_{j, (\ell)} = t_{j\ell}\} \right] \nonumber\\
=& \mathbb{P}\left[\bigcap_{j = 1}^p \{\kappa_j = \varsigma_j^{-1}(k_j)\}, \bigcap_{j = 1}^p \bigcap_{\ell = 1}^{\omega_j} \{T_{j, (\ell)} = t_{j\ell}\}\right] \nonumber\\
=& \mathbb{P}\left[\bigcap_{j = 1}^p \{\varsigma_j(\kappa_j) = k_j\}, \bigcap_{j = 1}^p \bigcap_{\ell = 1}^{\omega_j} \{T_{j, (\ell)} = t_{j\ell}\}\right] \nonumber,
\end{align}
where the first and the third equalities hold by the definition of the $\kappa_j$'s, the second equality holds because $T_{\mathrm{swap}(\varsigma^{-1})}$ and $T$ are identically distributed, together with the fact that $(T_{\mathrm{swap}(\varsigma^{-1})})_{\mathrm{ordered}} = T_{\mathrm{ordered}}$, and the last equality holds because each $\varsigma_j$ is a bijection on $\{1, \ldots, \omega_j\}$.
Therefore, we have shown that
\begin{align}
(\varsigma_1(\kappa_1), \ldots, \varsigma_p(\kappa_p), T_{\mathrm{ordered}}) \text{ and } (\kappa_1, \ldots, \kappa_p, T_{\mathrm{ordered}}) \text{ are identically distributed}.
\label{eq:joint}
\end{align}
For any $j \in \mathcal{H}_0$, now we further assume that $\varsigma_k$ is an identity permutation for all $k \neq j$, and $\varsigma_j$ is an arbitrary permutation on the set $\{1, \ldots, \omega_j\}$. The equality in joint distributions \eqref{eq:joint} implies that $\varsigma_j(\kappa_j)$ has the same distribution as $\kappa_j$. Since $\varsigma_j$ is an arbitrary permutation on the set $\{1, \ldots, \omega_j\}$, we have that $\kappa_j$ is uniformly distributed on the set $\{1, \ldots, \omega_j\}$, i.e.,
\begin{align}
\mathbb{P}(\kappa_j = i) = \omega_j^{-1} \qquad \forall i \in \{1, \ldots, \omega_j\}.
\label{eq:marginal}
\end{align}
Furthermore, for any $i_k \in \{1, \ldots, \omega_k\}$ for $k \neq j$, and $t \in \mathbb R^{\sum_\ell \omega_{\ell}}$,
\begin{align}
\mathbb{P}\left[\varsigma_j(\kappa_j) = i \Big | \bigcap_{k \neq j} \left\{\kappa_k = i_k \right\}, T_{\mathrm{ordered}} = t \right] =&
\frac{\mathbb{P} \left[\varsigma_j(\kappa_j) = i, \bigcap_{k \neq j} \left\{\varsigma_k(\kappa_k) = i_k\right\}, T_{\mathrm{ordered}} = t\right]}{\mathbb{P} \left[ \bigcap_{k \neq j} \left\{\kappa_k = i_k\right\}, T_{\mathrm{ordered}} = t \right]} \nonumber\\
=& \frac{\mathbb{P} \left[\kappa_j = i, \bigcap_{k \neq j} \left\{\kappa_k = i_k \right\}, T_{\mathrm{ordered}} = t\right]}{\mathbb{P} \left[ \bigcap_{k \neq j} \left\{\kappa_k = i_k\right\}, T_{\mathrm{ordered}} = t \right]} \nonumber\\
= &\mathbb{P}\left[\kappa_j = i \Big | \bigcap_{k \neq j} \left\{\kappa_k = i_k \right\}, T_{\mathrm{ordered}} = t \right],
\nonumber
\end{align}
where the first equality holds from Bayes formula and the fact that $\varsigma_k$ is the identity permutation for all $k \neq j$, and the second equality holds from \eqref{eq:joint}. Therefore, for any $i_k \in \{1, \ldots, \omega_k\}$ for $k \neq j$, and $t \in \mathbb R^{\sum_\ell \omega_{\ell}}$, we have that
\begin{align}
\mathbb{P} \left[\kappa_j = i \Big | \bigcap_{k \neq j} \left\{\kappa_k = i_k \right\}, T_{\mathrm{ordered}} = t\right] = \omega_j^{-1} \qquad \forall i \in \{1, \ldots, \omega_j\}.
\label{eq:conditional}
\end{align}
Combining \eqref{eq:marginal} and \eqref{eq:conditional}, we have that $\kappa_j$ is independent of $\{\kappa_k\}_{k \neq j}$ and $T_{\mathrm{ordered}}$. \end{proof}
\section{Proof of Theorem \ref{thm:spotting}} \label{eq:proof_spotting} Without loss of generality, we assume that the ordering in Step 3 of Section \ref{sec:mknockoffs} is such that $\sigma(j) = j$ for $j \in \{1, \ldots, p\}$. Consider \begin{align}
\mathcal{V} (\mathcal{R}_k, c) = \frac{c^{-1} + \sum_{j} \mathbbm{1}\left \{ j \notin \mathcal{R}_k \right \}}{\left(\sum_{j} \omega_j \mathbbm{1} \left\{ j \in \mathcal{R}_k \right\}\right) \vee 1} = \frac{c^{-1} + \sum_{j = 1}^k \mathbbm{1}\left \{ \kappa_j > 1 \right \}}{\left(\sum_{j = 1}^k \omega_j \mathbbm{1} \left\{ \kappa_j = 1 \right\} \right) \vee 1}
\label{eq:wfdphat} \end{align} for some constant $c$. Recall that \begin{align}
\textsc{wFDP} (\mathcal{R}_k) = \frac{\sum_{j} \omega_j \mathbbm{1} \left\{ j \in \mathcal{H}_0 \cap \mathcal{R}_k \right\} }{\left(\sum_{j} \omega_j \mathbbm{1} \left\{ j \in \mathcal{R}_k \right\}\right) \vee 1} = \frac{\sum_{j = 1}^k \omega_j \mathbbm{1} \left\{ j \in \mathcal{H}_0 \right\} \mathbbm{1}\left\{ \kappa_j = 1 \right\} }{\left(\sum_{j = 1}^k \omega_j \mathbbm{1} \left\{ \kappa_j = 1 \right\}\right) \vee 1}.
\nonumber \end{align} We have the following key lemma: \begin{lemma} \label{thm:key}
Let $\mathcal{V} (\mathcal{R}_k, c)$ be defined as in \eqref{eq:wfdphat}. Then for any $\alpha \in (0, 1)$, there exists $x > 0$ such that
\begin{align}
\mathbb{P} \left[ \sup_k \frac{\textsc{wFDP}(\mathcal{R}_k)}{\mathcal{V} (\mathcal{R}_k, c)} \geq x \right] \leq \alpha.
\label{eq:thm}
\end{align} \end{lemma}
\begin{proof}[Proof of Lemma \ref{thm:key}]
For any $x > 0$, from \eqref{eq:wfdphat},
\begin{align}
&\mathbb{P} \left\{ \sup_k \frac{\textsc{wFDP}(\mathcal{R}_k)}{\mathcal{V} (\mathcal{R}_k, c)} \geq x \right\} \nonumber\\
=& \mathbb{P} \left\{ \sup_k \left(\sum_{j = 1}^k \omega_j \mathbbm{1} \left\{ \kappa_j = 1 \right\} \mathbbm{1} \left\{ j \in \mathcal{H}_0 \right\} - x \sum_{j = 1}^k \mathbbm{1} \left \{ \kappa_j > 1 \right \}\right) \geq c^{-1}x \right\} \nonumber\\
\leq & \mathbb{P} \left\{ \sup_k \left(\sum_{j = 1}^k \omega_j \mathbbm{1} \left\{ \kappa_j = 1 \right\} \mathbbm{1} \left\{ j \in \mathcal{H}_0 \right\} - x \sum_{j = 1}^k \mathbbm{1} \left \{ \kappa_j > 1 \right \} \mathbbm{1} \left\{ j \in \mathcal{H}_0 \right\}\right) \geq c^{-1}x \right\} \nonumber\\
=& \mathbb{P} \left[ \sup_k \exp \left[ \theta \left\{ \sum_{j = 1}^k \omega_j \left( \mathbbm{1} \left\{ \kappa_j = 1 \right\} - \frac{x}{\omega_j} \mathbbm{1} \left \{ \kappa_j > 1 \right \} \right) \mathbbm{1} \left\{ j \in \mathcal{H}_0 \right\} \right\} \right] \geq \exp \left( c^{-1}x \theta \right)\right]
\nonumber
\end{align}
for any $\theta > 0$. Define
\begin{align}
Z_k = \exp \left[ \theta \left\{ \sum_{j = 1}^k \omega_j \left( \mathbbm{1} \left\{ \kappa_j = 1 \right\} - \frac{x}{\omega_j} \mathbbm{1} \left \{ \kappa_j > 1 \right \} \right) \mathbbm{1} \left\{ j \in \mathcal{H}_0 \right\} \right\} \right]
\end{align}
for $k \geq 1$, and $Z_0 = 1$.
Next we find a value of $\theta > 0$ such that $\{Z_k\}$ is a super-martingale with respect to a certain filtration $\mathcal{F}_k$. If such a value of $\theta$ exists, then from Ville's maximal inequality for super-martingales \citep{ville1939etude}, we have that
\begin{align}
\mathbb{P} \left[ \sup_k \frac{\textsc{wFDP}(\mathcal{R}_k)}{\mathcal{V} (\mathcal{R}_k, c)} \geq x \right] \leq \mathbb{P} \left\{ \sup_k Z_k \geq \exp(c^{-1}\theta x) \right\} \leq \frac{\mathrm{E}(Z_0)}{\exp(c^{-1}\theta x)} = \exp(-c^{-1}\theta x).
\label{eq:ville}
\end{align}
So it is left to show that $Z_k$ is a super-martingale with respect to a filtration $\mathcal{F}_k$, where
$\mathcal{F}_k$ is the $\sigma$-field generated from $\{\kappa_j\}_{j \leq k, j \in \mathcal{H}_0}$.
First we observe that $Z_k$ is adapted to $\mathcal{F}_k$ for all $k$. By definition of a super-martingale, it is left to show that
\begin{align}
\mathrm{E} \left( \frac{Z_k}{Z_{k - 1}} \mid \mathcal{F}_{k - 1} \right) = \mathrm{E} \left[ \exp \left\{ \omega_k \theta \left( \mathbbm{1} \left\{ \kappa_k = 1 \right\} - \frac{x}{\omega_k} \mathbbm{1} \left\{\kappa_k > 1 \right\} \right) \mathbbm{1} \left\{ k \in \mathcal{H}_0 \right\} \right\} \mid \mathcal{F}_{k - 1} \right] \leq 1.
\nonumber
\end{align}
First, we observe that this holds trivially for $k \notin \mathcal{H}_0$. For $k \in \mathcal{H}_0$, we have
\begin{align}
\mathrm{E} \left( \frac{Z_k}{Z_{k - 1}} \mid \mathcal{F}_{k - 1} \right) =& \mathrm{E} \left[ \exp \left\{ \omega_k \theta \left( \mathbbm{1} \left\{ \kappa_k = 1 \right\} - \frac{x}{\omega_k} \mathbbm{1} \left\{ \kappa_k > 1 \right\} \right) \right\} \mid \mathcal{F}_{k - 1} \right] \nonumber\\
=& \mathrm{E} \left[ \mathbbm{1} \left\{ \kappa_k = 1 \right\} \exp \left( \omega_k \theta \right) \mid \mathcal{F}_{k - 1}\right] + \mathrm{E} \left[ \mathbbm{1} \left\{ \kappa_k > 1 \right\} \exp \left( -\theta x \right) \mid \mathcal{F}_{k - 1}\right] \nonumber\\
=& \exp \left( \omega_k \theta \right) \mathbb{P} \left( \kappa_k = 1 \mid \mathcal{F}_{k - 1} \right) + \exp \left( - \theta x \right) \mathbb{P} \left( \kappa_k > 1 \mid \mathcal{F}_{k - 1} \right) \nonumber\\
=& \frac{\exp \left( \omega_k \theta \right)}{\omega_k} + \frac{(\omega_k - 1)\exp \left( - \theta x \right)}{\omega_k} \nonumber,
\end{align}
where the last equality follows from Lemma \ref{lem:uniform}.
For any fixed $\alpha \in (0, 1)$, take $x = {\theta}^{-1}(- c\log \alpha)$, which is equivalent to $\exp(-c^{-1}\theta x) = \alpha$. Then it remains to select $\theta$ such that for all $k \in \mathcal{H}_0$,
\begin{align}
\mathrm{E} \left( \frac{Z_k}{Z_{k - 1}} \mid \mathcal{F}_{k - 1} \right) = \frac{\exp \left( \omega_k \theta \right)}{\omega_k} + \frac{\omega_k - 1}{\omega_k} \exp \left( c \log \alpha \right) \leq 1,
\label{eq:toshow}
\end{align}
which is satisfied for
\begin{align}
\theta \leq \frac{1}{\omega_k} \log \left\{ \omega_k - \left( \omega_k - 1 \right) \alpha^{c} \right\}.
\nonumber
\end{align}
So we take
\begin{align}
\theta^\ast = \min_{k \in \mathcal{H}_0} \frac{1}{\omega_k} \log \left\{ \omega_k - \left( \omega_k - 1 \right) \alpha^{c} \right\}.
\nonumber
\end{align}
Then \eqref{eq:toshow} holds and thus from \eqref{eq:ville}, the theorem holds with
\begin{align}
x = \frac{-c\log \alpha}{\theta^\ast} = -c\log \alpha \left[\max_{k \in \mathcal{H}_0} \frac{\omega_k}{\log \left\{ \omega_k - \left( \omega_k - 1 \right) \alpha^{c} \right\}}\right].
\label{eq:x}
\end{align} \end{proof}
Now we have \begin{align}
\mathcal{U}(\mathcal{R}_k, c) = x \mathcal{V} (\mathcal{R}_k, c) = -\log \alpha \left[\frac{1 + \sum_{j = 1}^k c \mathbbm{1}\left \{ \kappa_j > 1 \right \}}{\left(\sum_{j = 1}^k \omega_j \mathbbm{1} \left\{ \kappa_j = 1 \right\}\right) \vee 1} \right] \left[\max_{k \in \mathcal{H}_0} \frac{\omega_k}{\log \left\{ \omega_k - \left( \omega_k - 1 \right) \alpha^{c} \right\}} \right],
\nonumber \end{align} and the results in Theorem \ref{thm:spotting} follow.
\end{document} |
\begin{document}
\title{Stochastic grid bundling method for backward stochastic differential equations}
\begin{abstract} In this work, we apply the Stochastic Grid Bundling Method (SGBM) to numerically solve backward stochastic differential equations (BSDEs). The SGBM algorithm is based on conditional expectations approximation by means of bundling of Monte Carlo sample paths and a local regress-later regression within each bundle. The basic algorithm for solving the backward stochastic differential equations will be introduced and an upper error bound is established for the local regression. A full error analysis is also conducted for the explicit version of our algorithm and numerical experiments are performed to demonstrate various properties of our algorithm. \end{abstract}
\section{Introduction}\label{section_introduction}
The Stochastic Grid Bundling Method (SGBM) is a Monte Carlo based algorithm designed to solve backward dynamic programming problems, with applications in pricing Bermudan options in \cite{jain_oosterlee_2015} and \cite{cong_oosterlee_2015}. This algorithm has been further extended computationally by the incorporation of GPU acceleration in \cite{leitao_oosterlee_2015} and generalized to the computation of Credit Valuation Adjustment and Potential Future Exposure in \cite{degraaf_etc_2014}. In this work, we will extend its applicability to the approximation of Backward Stochastic Differential Equations (BSDEs). We shall also study the errors in the SGBM algorithm.
The SGBM algorithm is based on the so-called {\em regress-later technique} and on an adaptive local basis approach. In usual Monte Carlo regression methods for backward-in-time problems, the values of the target function at the end of a time interval are regressed on certain dependent variables that are measured at the beginning of the time interval (which is called the regress-now approach). This creates a statistical error. Instead, the dependent variable is projected onto a set of basis functions at the end of the interval in a regress-later method, and a conditional expectation across the interval is then computed for each basis function. This difference removes the statistical error in the regression step. Regress-later schemes have been further discussed in \cite{Glasserman2004}.
With an adaptive local basis approach, the whole simulation is partitioned into non-overlapping subsets and we perform least-squares regressions separately within these subsets, possibly with a different basis for each subset. The exact partition depends on the simulated samples themselves and its purpose is to gather samples that share similar ``characteristics'' such that the local regression is more accurate than the global one. For further application of localization in numerical schemes, the reader may refer to \cite{Bouchard2012}. Since each partition is non-overlapping, SGBM is easy to scale up in dimensionality and can facilitate parallel computing. We would like to test the SGBM algorithm in a new problem setting such that we can take advantage of its nice properties and also get a better understanding of the underlying principles.
The problem that we are interested in is the numerical approximation of BSDEs. These equations form a popular subject of research in quantitative finance ever since their introduction in \cite{pardoux_peng_1992} and related works. The connection between BSDEs and partial differential equations (PDEs) also provides the opportunity of solving PDEs (in high dimensions) with stochastic methods. However, the computational difficulties of solving BSDEs prevent them from being widely used in practice. Therefore, efficient algorithms for the approximation of high-dimensional BSDEs are of great interest. In fact, there are numerous works just focusing on the application of Monte Carlo methods to BSDEs, including \cite{BOUCHARD2004175, CRISAN20101133, doi:10.1137/16M106371X, lemor2006, Bender2012} and some of these could be integrated with our proposed scheme for further development. For example, in \cite{ding2017aregression}, the authors proposed a regression basis based on a Fourier-cosine expansion in a least-squares scheme for BSDEs, which can possibly be used as a basis function in our SGBM algorithm. As far as we know, there is no study of a combined approach based on the regress-later scheme and a localization Monte Carlo technique for these equations, which is the goal of this work.
In this article, we consider the application of SGBM to decoupled Forward Backward Stochastic Differential Equations (FBSDEs) of the form \begin{equation} \label{equation_fbsde} \left\{ \begin{array}{l} dX_t = \mu(t,X_t)dt + \sigma(t, X_t)dW_t, \; X_0 = x_0,\\ dY_t = -f(t, X_t, Y_t, Z_t)dt + Z_tdW_t, \; Y_T = \Phi(X_T), \end{array} \right. \end{equation}
defined on $0 \leq t \leq T$. The function $f:[0,T]\times \mathbb{R}^q \times \mathbb{R} \times \mathbb{R}^d \rightarrow \mathbb{R}$ is called the driver function of the backward process and the process $W_t = (W_{1,t}, \ldots, W_{d,t})^\top$ is a $d$-dimensional standard Brownian motion. Note that the usual setting of complete probability space $(\Omega, \mathcal{F},\mathbb{F},\mathbb{P})$ with $\mathbb{F}:=(\mathcal{F}_t)_{0\leq t\leq T}$ being a filtration satisfying the usual conditions of being right-complete and $\mathbb{P}$-complete applies throughout the article. Given that a solution exists for the forward equation, a pair of adapted processes $(Y_t,Z_t)$ is said to be the solution of the FBSDE \eqref{equation_fbsde}, if $Y_t$ is a continuous real-valued adapted process, $Z_t$ is a real-valued predictable process such that $\int^T_0|Z_t|^2dt < \infty$ almost surely in $\mathbb{P}$ and the pair satisfies Equation \eqref{equation_fbsde}.
One key difficulty in solving a BSDE is that the pair $(Y_t, Z_t)$ must be adapted to the underlying filtration. The terminal condition $Y_T$ is given by $\Phi(X_T)$, where $\Phi:\mathbb{R}^q\rightarrow\mathbb{R}$ is a deterministic function. Therefore, $Y_T$ is adapted to the filtration $\mathcal{F}_T$ and a naive Euler discretization on the backward equation fails to produce an adapted solution, for further discussion on this, the reader may refer to the introduction in \cite{BOUCHARD2004175}. In this work, we aim to construct an approximate solution by the theta-scheme from \cite{zhao_wang_peng_2009} and applying the SGBM algorithm.
To ensure the existence and uniqueness of the solution to the forward equation, further regularity conditions are assumed here. The functions $\mu: [0,T] \times \mathbb{R}^q\rightarrow \mathbb{R}^q$ and $\sigma: [0,T] \times \mathbb{R}^q \rightarrow \mathbb{R}^{q \times d}$ refer to the drift and the diffusion coefficients of the forward stochastic process, and $x_0$ is the initial condition for $X$. It is assumed that both $\mu(t,x)$ and $\sigma(t,x)$ are measurable functions that are uniformly Lipschitz in $x$ and such condition holds uniformly in $t$.
The forward process also satisfies the Markov property, namely $\mathbb{E}[X_\tau|\mathcal{F}_t] = \mathbb{E}[X_\tau|X_t]$ for $\tau \geq t$, where $\mathbb{E}[\cdot]$ denotes expectation with respect to probability measure $\mathbb{P}$.
The rest of the article is organized as follows. We start in Section \ref{section_algorithm} with the introduction of the SGBM algorithm, along with the necessary time discretization scheme and assumptions. Section \ref{section_refined_regression} will present an error analysis of a simplified case of SGBM. The proof in this section forms the foundation for the error bound in any algorithm applying SGBM. Later, in Section \ref{section_explicit}, we derive the full error bound for a specific choice of discretization scheme as an example. The article finishes with numerical experiments and a conclusion.
To close off this section, here is some further notation that is used in this article. \begin{itemize}
\item For any vector $x$, $|x|$ denotes its Euclidean norm and $x_r$ denotes its $r$-th component.
\item Similarly, $X_{r,t}$ denotes the $r$-th component for any random process $X_t$.
\item The gradient $\nabla g$ is defined as $\left(\frac{\partial g}{\partial x_1}, \ldots , \frac{\partial g}{\partial x_q}\right)$ for any differentiable function $g:\mathbb{R}^q\rightarrow \mathbb{R}$.
\item The notations $\mathbb{E}_{t}[\cdot]$ and $\mathbb{E}^x_t[\cdot]$ are shorthand for $\mathbb{E}[\cdot|\mathcal{F}_t]$ and $\mathbb{E}[\cdot|X_t = x]$, respectively.
\item For any set $\mathcal{S}$, the function ${\bf 1}_{\mathcal{S}}$ is the indicator function which takes value $1$ when the input is within set $\mathcal{S}$ and $0$ otherwise.
\item For any function space $H$ containing functions $\phi: \mathbb{R}^q \rightarrow \mathbb{R}$, $H^+$ is defined as the set $\{\{(x, y)\in \mathbb{R}^q \times \mathbb{R} : \phi(x) \geq y\} : \phi \in H\}$.
\item For any function $\phi$ and compact set $\mathcal{A}$, the control constant $C_{\phi, \mathcal{A}}$ is defined as an extended real number $\sup_{x \in \mathcal{A}}|\phi(x)|$. \end{itemize}
\section{Assumptions and Algorithm} \label{section_algorithm}
In this section, we shall introduce the SGBM algorithm and its application to the approximation of BSDEs. To begin, we need to discretize the BSDEs.
\subsection{Discretization Scheme}\label{section_discretization scheme}
We denote a time grid $\pi=\{0 = t_0 < \ldots < t_N = T\}$ on the interval $[0,T]$ and let $\Delta_k :=t_{k+1}-t_k$, $\Delta W_{l,k} := W_{l,t_{k+1}}-W_{l,t_k}$, and $\Delta W_k := (\Delta W_{1,k},\ldots, \Delta W_{d,k})^\top$ be the time-step, the Brownian motion increment along the $l$-th dimension and the Brownian motion increment, respectively, for $k \in \{0, \ldots, N-1\}$.
For the forward process $X_t$, we shall apply a Markovian approximation $X^\pi_{t_k}, t_k\in \pi$. The most common choice is the Euler-Maruyama scheme, which will be explained in Section \ref{section_numerical}. However, our algorithm can work with any simulation method where the conditional expectations over one time step are known for some specific functions.
The backward in time discretizations $(Y^{\pi}, Z^{\pi})$ form a special case of the theta-scheme from \cite{zhao_li_zhang_2012} and \cite{ruijter_oosterlee_2015} by selecting $(\theta_1, \theta_2) = (0,1)$. We use the following explicit discretization: \begin{align*} & y_N(x) = \Phi(x),\\ & z_k(x) = \frac{1}{\Delta_k}\expectation{y_{k+1}(X^\pi_{t_{k+1}})\Delta W_k }{k}{x} , \; k = N-1, \ldots, 0,\\ & y_k(x) = \expectation{y_{k+1}(X^\pi_{t_{k+1}})}{k}{x} + \Delta_k \expectation{f_{k+1}(y_{k+1}(X^\pi_{t_{k+1}}),z_{k+1}(X^\pi_{t_{k+1}}))}{k}{x},\; k= N-1,\ldots,0, \end{align*} where $f_k(y, z) := f(t_k, X^\pi_{t_k}, y, z)$.
\subsection{Standing Assumptions}\label{session_assumptions}
To ensure the existence and uniqueness of the solution of the continuous BSDEs, some basic assumptions are required. Moreover, these assumptions will affect the algorithm designed regarding the admissible choice of $\pi$ and the error bound of the scheme. In this work, we assume the global Lipschitz condition as stated in Assumption \ref{assumption:globally_lipschitz}. Note that this assumption will affect the derivation and the result of the error bound for the complete algorithm. Assumption \ref{assumption:globally_lipschitz} is in force here as it is the most common assumption in the BSDE literature. Alternative assumptions can be found, for instance, in \cite{gobet_turkedjiev_2016}.
\begin{assumption}[Globally Lipschitz driver] \label{assumption:globally_lipschitz} \quad\\
\begin{enumerate}
\item[\itemassumption{\xi}]
\begin{enumerate}
\item[i.)] $\Phi$ is a measurable function.
\item[ii.)] The control constant $C_{\Phi, \mathcal{A}} < \infty$ for any given compact set $\mathcal{A}$.
\end{enumerate}
\item[\itemassumption{F}]
\begin{enumerate}
\item[i.)]
$(t, x, y, z) \mapsto f(t, x, y, z)$ is $\mathcal{B}(\mathcal{\mathbb{R}}) \otimes \mathcal{B}(\mathbb{R}^q) \otimes \mathcal{B}(\mathbb{R}) \otimes \mathcal{B}(\mathbb{R}^d)$-measurable.
\item[ii.)] For every $k \leq N$, $f_k(y,z)$ as defined in the Subsection \ref{section_discretization scheme} is $\mathcal{F}_{t_k}\otimes \mathcal{B}(\mathbb{R}) \otimes \mathcal{B}(\mathbb{R}^d)$-measurable and there exists an $L_f \in [0, +\infty )$ such that
\begin{equation*}
|f_k(y,z)-f_k(y', z')| \leq L_f(|y-y'|+|z-z'|),\qquad\forall k \in \{0, \ldots, N\},
\end{equation*}
for any $(y, y', z, z') \in \mathbb{R} \times \mathbb{R} \times \mathbb{R}^d \times \mathbb{R}^d$.
\item[iii.)] There exists a $C_f \in [0, \infty)$ such that
\begin{equation*}
|f_k(0,0)| \leq C_f,\qquad \forall k \in \{0, \ldots, N\}.
\end{equation*}
\item[iv.)] The time discretization is such that
\begin{equation*}
\limsup_{N\rightarrow\infty} R_\pi < +\infty, \quad \mbox{where } R_\pi = \sup_{0\leq k\leq N-2}\frac{\Delta_k}{\Delta_{k+1}}.
\end{equation*}
\end{enumerate}
\end{enumerate} \end{assumption}
Again, the assumption here is for the consistency of our derivation and does not imply that our algorithm can only be applied when these assumptions are satisfied.
\subsection{Stochastic Grid Bundling Method}
We now introduce SGBM. Due to the Markovian setting of $(X^\pi_{t_k},\mathcal{F}_{t_k})_{t_k\in\pi}$, there exist functions $y_k(x)$ and $z_k(x)$ such that \begin{equation*} Y^\pi_{t_k}=y_k(X^\pi_{t_k}),\; Z^\pi_{t_k} = z_k(X^\pi_{t_k}). \end{equation*} Our method is based on estimating these functions $(y_k(x),z_k(x))$ recursively backward in time by a local least-squares regression technique onto a finite function space with basis functions $(p_l)_{0\leq l \leq Q}$.
As a Monte Carlo based algorithm, our program starts with the simulation of $M$ independent samples of $(X^\pi_{t_k})_{0 \leq k \leq N}$, denoted by $(X^{\pi,m}_{t_k})_{1 \leq m \leq M, 0 \leq k \leq N}$. Note that in this basic algorithm, the simulation is only performed once. This scheme is therefore a non-nested Monte Carlo scheme.
The next step is the backward recursion. Denote by $y^{R}_k$ the SGBM approximation of the function $y_k$. The function $\approximant{z}{k}{R}$ similarly means the approximation of $\approximant{z}{k}{}$.
At initialization, we assign the terminal values to each path for our approximations, i.e., \begin{equation*} \approximant{y}{N}{R}(X^{\pi, m}_{t_N})=\Phi(X^{\pi, m}_{t_N}),\quad m = 1,\ldots, M. \end{equation*} The following steps are performed recursively, backward in time, at $t_k$, $k=N-1,\ldots, 0$. First, we bundle all paths into $\mathcal{B}_{t_k}(1),\ldots,\mathcal{B}_{t_k}(B)$ non-overlapping partitions based on the result of $(X^{\pi, m}_{t_k})$. Note that our design allows the application of various clustering techniques within the SGBM algorithm. A previous study in \cite{leitao_oosterlee_2015} compares the k-means clustering with an equal partitioning, and shows that they are similar in accuracy. However, it remains an interesting problem which clustering technique would provide the optimal result. We use the equal partition technique, which will be specified in Section \ref{section_refined_regression}, for the error analysis and the numerical experiment.
Next, we perform the regress-later approximation separately within each bundle. The regress-later technique we are using combines the least-squares regression with the (analytical) expectations of the basis functions to calculate the necessary expectations.
Generally speaking, for $M$ Monte Carlo paths, a standard regress-now algorithm for a dynamic programming problem finds a function $\iota$ within the space spanned by the regression basis such that it minimizes the value $\frac{1}{M}\sum^M_{i=1}(g(X^i_{t+\delta}) - \iota(X^i_{t}))^2$ and approximates the expectation $\mathbb{E}_t[g(X_{t+\delta})]$ by $\mathbb{E}_t[\iota(X_t)] = \iota(X_t)$. As a projection from a function of $X_{t+\delta}$ to a function of $X_t$ is performed then, it would introduce a statistical bias to the approximation.
Instead, the regress-later technique we employ picks out a function $\kappa$ such that it minimizes $\frac{1}{M}\sum^M_{i=1}(g(X^i_{t+\delta}) - \kappa(X^i_{t+\delta}))^2$ and approximates the expectation $\mathbb{E}_t[g(X_{t+\delta})]$ by $\mathbb{E}_t[\kappa(X_{t+\delta})]$. By using functions on the same variable in the regression basis, we can remove the statistical bias in the regression. However, the expectation of all basis functions must preferably be known in order to apply the regress-later technique efficiently.
In the context of our algorithm, we define the bundle-wise regression parameters $\regressionparameter{\alpha}{k+1}$, $\regressionparameter{\beta}{k+1}$, $\regressionparameter{\gamma}{k+1}$ as \begin{align*} & \regressionparameter{\alpha}{k+1} = \leastsquare{\alpha}{k}{\approximant{y}{k+1}{R}(\path{k+1}{m})},\\ & \regressionparameter{\beta}{i, k+1} = \leastsquare{\beta}{k}{\approximant{z}{i, k+1}{R}(\path{k+1}{m})},\\ & \regressionparameter{\gamma}{k+1} = \leastsquare{\gamma}{k}{f_{k+1}(\approximant{y}{k+1}{R }(\path{k+1}{m}), \approximant{z}{k+1}{R}(\path{k+1}{m}))}. \end{align*} The approximate functions within the bundle at time $k$ are defined by the above parameters and the expectations $\mathbb{E}^x_{t_k}[p(X^\pi_{t_{k+1}})]$ and $\mathbb{E}^x_{t_k}\left[p(X^\pi_{t_{k+1}})\frac{\Delta W_{r, k}}{\Delta_k}\right]$: \begin{align*} & \approximant{z}{r, k}{R}(b, x) = \expectation{
\frac{\Delta W_{r,k}}{\Delta_k}p(X^\pi_{t_{k+1}}) }{k}{x} \regressionparameter{\alpha}{k+1},\quad r= 1,\ldots,d;\\ & \approximant{y}{k}{R}(b, x) = \expectation{p(X^\pi_{t_{k+1}})}{k}{x} (\regressionparameter{\alpha}{k+1}+ \Delta_k(1 - \theta_1)\regressionparameter{\gamma}{k+1}), \quad i = 1, \ldots, I. \end{align*}
As the expectations related to the basis functions are the foundation of any regress-later scheme, we assume that the following assumptions are satisfied. \begin{assumption}
The regression basis $\{p_1, \ldots, p_Q\}$ is assumed to satisfy the following assumptions.
\begin{enumerate}
\item[\itemassumption{p}]
\begin{enumerate}
\item[i.)]
$\mathbb{E}^x_{t_k}[p_l(X^\pi_{t_{k+1}})]$ and $\mathbb{E}^x_{t_k}\left[p_l(X^\pi_{t_{k+1}})\frac{\Delta W_{r, k}}{\Delta_k}\right]$ are known, either analytically or empirically, for all $k = 0, \ldots N-1$, $l = 1, \ldots, Q$ and $r = 1, \ldots, d$.
\item[ii.)] For any given compact set $\mathcal{A}$ in $\mathbb{R}^q$, the constant $C_{p, \mathcal{A}} := \max_{l=1, \ldots, Q}C_{p_l, \mathcal{A}}$. Moreover, there exists a constant $C_{M,\mathcal{A}}$ such that
\begin{equation*}
\sum^Q_{l=1} \left|\mathbb{E}^x_{t_k}[p_l(X^\pi_{t_{k+1}})]\right| \leq C_{M, \mathcal{A}},\qquad \forall x \in \mathcal{A}, \mbox{ and } k = 0, \ldots, N-1;
\end{equation*}
and
\begin{equation*}
\sum^Q_{l=1} \left|\mathbb{E}^x_{t_k}[p_l(X^\pi_{t_{k+1}})\frac{\Delta W_{r, k}}{\Delta_k}]\right| \leq C_{M,\mathcal{A}},\qquad \forall x \in \mathcal{A}, \mbox{ and } k = 0, \ldots, N-1.
\end{equation*}
\end{enumerate}
\end{enumerate} \end{assumption}
Next, to ensure the stability of our algorithm, $|\alpha_k(b)|$, $|\beta_{r, k}(b)|$ and $|\gamma_k(b)|$ must be bounded above for all $k, b, r$. In practice, this means that an error notification should be raised by the program when the Euclidean norm of any regression coefficient vector is greater than a predetermined constant $L$. Further details on this requirement will be described in Section \ref{section_refined_regression}.
Finally, to simplify notation, we define the notations below for the regression result across the bundles. \begin{align*} \tilde{y}_{k+1}^{R, I}(x_1, x_2) & := \sum_{b = 1}^B{\bf 1}_{\mathcal{B}_{t_k}(b)}(x_1)p(x_2)\alpha_{k+1}(b),\\ \tilde{z}_{r, k+1}^{R} (x_1, x_2) & := \sum_{b = 1}^B{\bf 1}_{\mathcal{B}_{t_k}(b)}(x_1)p(x_2)\beta_{r, k+1}(b),\\ \tilde{f}_{k+1}^{R}(x_1, x_2) & := \sum_{b = 1}^B{\bf 1}_{\mathcal{B}_{t_k}(b)}(x_1) p(x_2)\gamma_{k+1}(b). \end{align*}
\section{Refined Regression}\label{section_refined_regression} In this section, we derive a proof of an error bound for our regress-later strategy. In order to ensure the stability of our algorithm, we have introduced a sample selection step into the algorithm and modified the classical proof for nonparametric regression from \cite{gyorfi_kohler_krzyzak_walk_2002}, which was used in \cite{gobet_turkedjiev_2016}, for the derivation of the error bound to SGBM.
In order to simplify expressions, different notations are used in this section. We consider a random vector $(X,Y)$, where $X$ and $Y$ both take values in $\mathbb{R}^q$, following the probability measure $\nu$. A cloud of simulation paths can be generated by independently simulating $M$ copies, $\{(X^m, Y^m) : m = 1, \ldots, M\}$, defined on a probability space $(\hat{\Omega}, \hat{\mathcal{F}}, \hat{\mathbb{P}})$. In our context, the pair $(X, Y)$ represents the independent and dependent variables under consideration and $(X^m, Y^m)$ are the simulated samples for $(X, Y)$.
Denote by $\mathbb{B}$ a specific partition with $\mathbb{B} :=\{\mathcal{B}(1), \ldots, \mathcal{B}(B)\}$ and $\bigcup_{b=1}^B\mathcal{B}(b) = \mathbb{R}^d$. The partition which is used in the regression estimates is based on the simulation data $X^m$ in our setting and to which bundle a sample belongs solely depends on $X^m$.
The main goal of SGBM is finding an effective and accurate way to approximate the expectation $\mathbb{E}\left[\left.v(Y)\right|X\right]$ in a recurrence setting for some deterministic function $v: \mathbb{R}^q \rightarrow \mathbb{R}$, and we begin by establishing an estimate $\tilde{v}: \mathbb{R}^q \times \mathbb{R}^q \rightarrow \mathbb{R}$ for $v$. Note that although $v$ solely depends on the dependent variables, the estimate $\tilde{v}$ depends on both the independent and dependent variables in preparation for further calculation.
For a given partition and samples, one way to define the estimate $\tilde{v}$ is \begin{align}
\tilde{v}(x,y)
:= &
\sum^B_{b=1}{\bf 1}_{\mathcal{B}(b)}(x)\tilde{v}_b(y)
=
\sum^B_{b=1}{\bf 1}_{\mathcal{B}(b)}(x)\sum^{Q}_{k=1}\alpha_k(b) p_k(y),
\label{equation_approximant} \end{align} where
$$ \tilde{v}_b := \arg\min_{\phi \in H} \left\{ \frac{\sum^M_{m=1}{\bf 1}_{\mathcal{B}(b)}(X^m)|v(Y^m)-\phi(Y^m)|^2}{\sum^M_{m=1}{\bf 1}_{\mathcal{B}(b)}(X^m)}\right\}.$$
\begin{remark}
It is possible that under some particular clustering scheme for SGBM, there would be empty bundles in the resulting partition.
In practice, one could simply ignore these empty bundles in the algorithm.
As there are no samples in these bundles, approximations within these bundles are not needed for the next time step.
One point to note is that since least-squares regression requires a sufficient number of samples to be accurate, adopting a bundling scheme that would produce a small number of samples in any bundle may not be a good idea.
When generalizing the theoretical proof below to bundling methods other than equal partition, one has to take this into account and define the measurable partition $\mathbb{B}$ in such a way that it is consistent with the practical bundling scheme while it also merges all empty bundles into non-empty ones.
Further discussion on bundles with few samples under the equal partition scheme is placed in Remark \ref{remark_empty_bundle_equal}. \end{remark}
Note that functions $\tilde{v}_b:\mathbb{R}^q\rightarrow\mathbb{R}$ are stochastic with respect to the simulation samples $(X^m, Y^m)$. The linear vector space $H$ is spanned by continuous functions $\{p_1,\ldots, p_Q\}$, with $p_l: \mathbb{R}^q \rightarrow \mathbb{R}, \;\forall l = 1, \ldots, Q$. Thus, the second equality in Equation \eqref{equation_approximant} just follows from the definition of $H$ and typical least-squares regression. In fact, if we denote the total number of samples in a given bundle by $\# \mathcal{B}(b)$ and let $\{(X^{b,1}, Y^{b,1}), \ldots, (X^{b,{\#\mathcal{B}(b)}}, Y^{b,{\#\mathcal{B}(b)}})\}$ be the samples in this bundle, the coefficients $\alpha(b)$ satisfy \begin{equation} \mathcal{I}^\top\mathcal{I}\alpha(b) = \mathcal{I}^\top v(Y^b), \label{equation_coefficients} \end{equation} with $$\mathcal{I} = (p_j(Y^{b,i}))_{1\leq i \leq \#\mathcal{B}(b), 1 \leq j \leq Q} \mbox{ and } v(Y^b) = (v(Y^{b,1}), \ldots, v(Y^{b, \#\mathcal{B}(b)}))^\top.$$ According to \cite{gyorfi_kohler_krzyzak_walk_2002}, system \eqref{equation_coefficients} is always solvable; in the next section, we also provide a heuristic argument for its invertibility. Again, the coefficients $\alpha$ in each bundle can be seen as random variables with respect to $(X^m, Y^m)_{m = 1, \ldots, M}$.
Reversely, we may select the simulation cloud based on the regression coefficients.
Let the set $S$ be the set containing all possible collections of $(x_m, y_m)_{1 \leq m \leq M} \in (\mathbb{R}^q\times \mathbb{R}^q)^M$ such that $|\alpha(b)|^2 \leq L$ for all $b$ given that $(X^m, Y^m) = (x_m, y_m)_{1 \leq m \leq M}$. We modify the probability of the simulation cloud by only accepting those results that are in $S$. We denote the modified expectation by $\hat{\mathbb{E}}_S$ and it is related to the original expectation by $\hat{\mathbb{E}}_S[{\bf 1}_A] = \frac{\hat{\mathbb{E}}[{\bf 1}_A{\bf 1}_S]}{\hat{\mathbb{E}}[{\bf 1}_S]}$. \footnote{The situation of $\frac{0}{0}$ should be understood as $0$ and $\frac{K}{0}$ as $\infty$ in the rest of this article.}
\begin{remark}
In a regress-now scheme, especially in a recursion scheme, the resulting approximation is truncated such that its value is within a bounded interval $[M_1, M_2]$.
The truncation guarantees the convergence and the stability of the scheme.
However, truncation is not feasible in our regress-later scheme as we have to keep the full function for further operation.
Therefore, we must instead control the output by limiting the admissible samples. \end{remark}
\begin{remark}
The introduction of bundling here essentially serves two purposes.
First of all, clustering data may act as a localization of function $v$, thus a more accurate approximation for $v$ can be achieved with lower order function basis.
This is especially beneficial for the high-dimensional case as basis functions in higher dimension are generally complicated and hard to calculate.
We need a method to increase accuracy without adding more basis functions.
Secondly, by partitioning data into non-overlapping bundles, we can facilitate the application of parallel computing, which is important when we are in a high-dimensional situation.
However, while the above benefit depends on the particular choice of basis, the analysis we do in this section is applicable for a more general setting.
So, we will not emphasize these points further in this section. \end{remark}
Let $\nu$ denote the probability measure induced by the random variable $(X, Y)$, and let $(X^m, Y^m)_{m = 1, \ldots, M}$ be independent and identically distributed copies following the same law on a different probability space. The following random norms (depending on the simulation cloud $(X^m, Y^m)$) are used to quantify the error of approximation. \begin{definition}
Let $\varphi: \hat{\Omega} \times \mathbb{R}^q \times \mathbb{R}^q \rightarrow \mathbb{R}$ be measurable.
For any set $\mathcal{B}\subset \mathbb{R}^q$, we define the following random norms
\begin{equation*}
||\varphi||^2_{\mathcal{B},\infty} := \frac{\int_\mathcal{B}\int|\varphi(x, y)|^2\nu(dx, dy)}{\int_\mathcal{B}\int\nu(dx, dy)}; \qquad ||\varphi||^2_{\mathcal{B}, \#} := \frac{\sum_{m=1}^M{\bf 1}_\mathcal{B}(X^m)|\varphi(X^m, Y^m)|^2}{\sum_{m=1}^M{\bf 1}_\mathcal{B}(X^m)}.
\end{equation*} \end{definition} We derive the following theorem for the estimation of the error. Since we only accept a simulation result that satisfies event $S$, we should only consider the average error among all these accepted events. \begin{theorem} \label{theorem_regression}
Assume that we perform an equal partition at the bundling step, namely, we order all samples according to some specific measurable sorting function on $X$, and separate them into almost-equal size bundles by the ordering.
Further, assume that a compact set $\mathcal{A} \subset \mathbb{R}^q$ is given such that $C_{v, \mathcal{A}} < \infty$ and $\int v^2(y)\nu(dx, dy) < \infty$, namely, the function $v$ lies in the $L^2$ space with respect to the given probability measure.
Then, for any real function $v$, we have
\begin{align*}
& \hat{\mathbb{E}}_S\left[\iint |v(y) - \tilde{v}(x,y)|^2 \nu(dx,dy)\right]\\
\leq &
\frac{\vartheta(L')}{\hat{\mathbb{E}}[{\bf 1}_S]}\hat{\mathbb{E}}
\left[\sum^B_{b = 1}\int_{\mathcal{B}(b)}\int\nu(dx, dy)\frac{(\log(\sum^M_{m=1}{\bf 1}_{\mathcal{B}(b)}(X^m) - 1)+1)(Q+1)}{\sum^M_{m=1}{\bf 1}_{\mathcal{B}(b)}(X^m) - 1}\right]\\
&
+ \hat{\mathbb{E}}_S\left[\sum^{B-1}_{b =1}\int_{\mathcal{B}(b)}\int\nu(dx, dy)\frac{24 L'}{(\sum_{m=1}^M{\bf 1}_{\mathcal{B}(b)}(X^m))}\right]\\
&
+ \frac{12}{\hat{\mathbb{E}}[{\bf 1}_S]}
\hat{\mathbb{E}}\left[
\sum_{\mathcal{B}\in \mathbb{B}}\int_\mathcal{B}\int\nu(dx,dy)
(\inf_{\phi \in H} \sup_{x\in \mathcal{B}}\mathbb{E}\left[ |v(Y) - \phi(Y)|^2 | X=x\right]
\wedge L')
\right] \\
&
+ \hat{\mathbb{E}}_S\left[\iint |v(y)-\tilde{v}(x,y)|^2(1 - {\bf 1}_{\mathcal{A}}(y))\nu(dx, dy)\right],
\end{align*}
for $L' := 2LQC^2_{p,\mathcal{A}}+2C^2_{v, \mathcal{A}}$, and $\vartheta(L')$ a function depending only on $L'$.
Note that the set $\mathcal{A}$ is introduced to avoid the restrictive assumption of $v$ being bounded.
It does not play a role in the actual algorithm. \end{theorem} \begin{proof}
To prepare for our analysis, a more formal construction of the equal partition technique needs to be introduced.
In practice, for samples $(X^m, Y^m)_{1\leq m \leq M}$ and a measurable sorting function $\mathfrak{S}: \mathbb{R}^q \rightarrow \mathbb{R}$, the $M$ different values can be ordered into, $$\mathfrak{S}(X^{ 1^*}) \leq \mathfrak{S}(X^{2^*}) \leq \cdots \leq \mathfrak{S}(X^{M^*}),$$
by simply putting $\{X^{1^*}, \cdots, X^{(M/B)^*}\}$ into the first bundle, $\{X^{(M/B+1)^*}, \cdots, X^{(2M/B)^*}\}$ into the second one, etc., assuming $M$ can be divided by $B$ for simplicity.
However, in order to conduct meaningful analysis, a measurable partition of $\mathbb{R}^q$ based on the simulation cloud $(X^m)_{1 \leq m \leq M}$ is required.
Thus for any simulation $(X^m)_{1 \leq m \leq M}$ with $\{X^{1^*} = x^*_1, \cdots, X^{M^*} = x^*_M\}$, we define $\mathcal{B}(1) := \mathfrak{S}^{-1}((-\infty, \mathfrak{S}(x^*_{M/B})]), \mathcal{B}(2) := \mathfrak{S}^{-1}((\mathfrak{S}(x^*_{M/B}), \mathfrak{S}(x^*_{2M/B})]), \cdots$, $\mathcal{B}(B) := \mathfrak{S}^{-1}((\mathfrak{S}(x^*_{M- M/B}), \infty))$, so that $\cup^B_{b=1}\mathcal{B}(b) = \mathbb{R}^q.$
Therefore, $$\mathbb{B} = \{\mathfrak{S}^{-1}((-\infty, \mathfrak{S}(x_1)]), \mathfrak{S}^{-1}((\mathfrak{S}(x_1), \mathfrak{S}(x_2)]), \cdots, \mathfrak{S}^{-1}((\mathfrak{S}(x_{B-1}), \infty))\}$$ if and only if
\begin{align*}
&
(X^{1^{@}}, X^{2^{@}}, \cdots X^{M^{@}}) \\
\in &
(\mathfrak{S}^{-1}((-\infty, \mathfrak{S}(x_1))))^{M/B - 1} \times \{x_1\} \times \cdots \times \{x_{B-1}\} \times (\mathfrak{S}^{-1}((\mathfrak{S}(x_{B-1}), \infty)))^{M/B - 1}.
\end{align*}
The notation $@$ denotes any permutation of the set $\{1, 2, \cdots, M\}$, noting that each sample is independent of the others and interchangeable.
This is measurable with respect to the sigma algebra generated by the simulation cloud $X^m$ as there are finite permutations for fixed $M$ and $\mathfrak{S}$ is measurable.
Note that this setting is not unique for defining a workable partition and there may be alternative definitions that may improve the analysis result.
However, this is an intuitive definition.
Assuming $\sigma(\mathbb{B})$ to be the smallest sigma algebra to determine the partition, we notice that it is smaller than the sigma algebra generated by the random samples $X^m$, $\sigma(\mathbb{B})\subset \sigma(X^m).$
This is because multiple realizations of the samples can lead to the same partition.
A simple thought experiment is to consider a fixed partition, and subsequently move one interior sample within a bundle.
If we conduct a new bundling with this new set of samples, the partition will remain the same.
Indeed, the samples within a bundle are independent of each other and have the same distribution.
As for the actual analysis, we start by decomposing the error into different terms for any given partition $\mathbb{B} = \{\mathcal{B}(1), \cdots, \mathcal{B}(B)\} = \{\mathfrak{S}^{-1}((-\infty, \mathfrak{S}(x_1)]), \mathfrak{S}^{-1}((\mathfrak{S}(x_1), \mathfrak{S}(x_2)]), \cdots, \mathfrak{S}^{-1}((\mathfrak{S}(x_{B-1}), \infty))\}$.
In line with the Monte Carlo literature, we assume $X^i \neq X^j$, if $i \neq j$, and
\begin{align}
&
\iint |v(y) - \tilde{v}(x,y)|^2 \nu(dx,dy)\nonumber\\
\leq &
\sum_{\mathcal{B} \in \mathbb{B}}\int_\mathcal{B}\int|v(y) - \tilde{v}(x, y)|^2 {\bf 1}_{\mathcal{A}}(y)\nu(dx, dy)
+ \iint |v(y)-\tilde{v}(x,y)|^2(1 - {\bf 1}_{\mathcal{A}}(y))\nu(dx, dy)\nonumber\\
= &
\sum^B_{b=1}
\int_{\mathcal{B}(b)}\int\nu(dx, dy) \bigg(
||(v - \tilde{v}) {\bf 1}_{\mathcal{A}}||_{\mathcal{B}(b), \infty}
-2 ||(v-\tilde{v}){\bf 1}_{\mathcal{A}}||_{\mathcal{B}(b)\backslash \{x_b\}, \#} \nonumber \\
& \hspace{110pt}
+2 ||(v-\tilde{v}){\bf 1}_{\mathcal{A}}||_{\mathcal{B}(b)\backslash \{x_b\}, \#}
-2 ||(v-\tilde{v}){\bf 1}_{\mathcal{A}}||_{\mathcal{B}(b), \#}
+2 ||(v-\tilde{v}){\bf 1}_{\mathcal{A}}||_{\mathcal{B}(b), \#}
\bigg)^2\nonumber\\
&
+ \iint |v(y)-\tilde{v}(x, y)|^2(1 - {\bf 1}_{\mathcal{A}}(y))\nu(dx, dy)\nonumber\\
\leq &
\sum^B_{b = 1}
\int_{\mathcal{B}(b)}\int\nu(dx, dy)
\bigg( \max \{||(v-\tilde{v}){\bf 1}_{\mathcal{A}}||_{\mathcal{B}(b), \infty} - 2 ||(v-\tilde{v}){\bf 1}_{\mathcal{A}}||_{\mathcal{B}(b)\backslash\{x_b\}, \#}, 0\} \nonumber\\
& \hspace{110pt}
+2 ||(v-\tilde{v}){\bf 1}_{\mathcal{A}}||_{\mathcal{B}(b)\backslash \{x_b\}, \#}
-2 ||(v-\tilde{v}){\bf 1}_{\mathcal{A}}||_{\mathcal{B}(b), \#}
+2 ||(v-\tilde{v}){\bf 1}_{\mathcal{A}}||_{\mathcal{B}(b), \#}
\bigg)^2\nonumber\\
&
+ \iint |v(y)-\tilde{v}(x, y)|^2(1 - {\bf 1}_{\mathcal{A}}(y))\nu(dx, dy)\nonumber\\
\leq &
\sum^B_{b = 1}
\int_{\mathcal{B}(b)}\int\nu(dx, dy)
3 \max \{||(v-\tilde{v}){\bf 1}_{\mathcal{A}}||_{\mathcal{B}(b), \infty} - 2 ||(v-\tilde{v}){\bf 1}_{\mathcal{A}}||_{\mathcal{B}(b)\backslash\{x_b\}, \#}, 0\}^2 \nonumber\\
& +
\sum^{B-1}_{b = 1}
\int_{\mathcal{B}(b)}\int\nu(dx, dy)
12 (||(v-\tilde{v}){\bf 1}_{\mathcal{A}}||_{\mathcal{B}(b)\backslash \{x_b\}, \#}
- ||(v-\tilde{v}){\bf 1}_{\mathcal{A}}||_{\mathcal{B}(b), \#})^2 \nonumber\\
& +
\sum_{\mathcal{B}\in \mathbb{B}}
12 \int_\mathcal{B}\int\nu(dx, dy) ||(v-\tilde{v}){\bf 1}_{\mathcal{A}}||^2_{\mathcal{B}, \#}
+ \iint |v(y)-\tilde{v}(x, y)|^2(1 - {\bf 1}_{\mathcal{A}}(y))\nu(dx, dy)\nonumber\\
=: &
\sum^B_{b = 1} \int_{\mathcal{B}(b)}\int\nu(dx, dy) T_{1, \mathcal{B}(b)}
+ \sum^{B-1}_{b = 1} \int_{\mathcal{B}(b)}\int\nu(dx, dy) T_{2, \mathcal{B}(b)}\nonumber\\
&
+ \sum_{\mathcal{B}\in \mathbb{B}} \int_{\mathcal{B}}\int\nu(dx, dy) T_{3, \mathcal{B}}
+ \iint |v(y)-\tilde{v}(x, y)|^2(1 - {\bf 1}_{\mathcal{A}}(y))\nu(dx, dy),
\label{equation_decomposition}
\end{align}
with a slight abuse of notation, $\{x_B\} := \emptyset$ above.
Note that the easiest way to conceptualize the term $||(v-\tilde{v}){\bf 1}_{\mathcal{A}}||_{\mathcal{B}(b)\backslash\{x_b\}, \#}$ is that we simply remove the sample that is used for defining the partition.
Thus, $$\sum_{m=1}^M{\bf 1}_{\mathcal{B}(b)\backslash\{x_b\}}(X^m) = \sum_{m=1}^M{\bf 1}_{\mathcal{B}(b)}(X^m) - 1.$$
This is done to ensure that all samples in the empirical norm $||\cdot||_{\mathcal{B}(b)\backslash\{x_b\}, \#}$ are independent of each other.
As the last term cannot be further simplified, we now focus on the first three terms.
The meaning of all error terms will be discussed in the next subsection.
The first term we study is $T_{3, \mathcal{B}}$, which represents the best possible approximation from the space $H$ to the target function under the empirical norm within the bundle.
To begin, within any bundle $\mathcal{B}(b)$ under any given partition $\mathbb{B}$, it is obvious that $$||{\bf 1}_{\mathcal{A}}(v - \tilde{v})||_{\mathcal{B}(b), \#} \leq ||v - \tilde{v}_b||_{\mathcal{B}(b), \#} = \min_{\phi \in H}||v - \phi||_{\mathcal{B}(b), \#},$$ for any $\mathcal{B}(b)$.
Only the $\tilde{v}_b$ term in the series of $\tilde{v}$ matters and $\tilde{v}_b$ is the function that minimizes the approximation difference under the empirical norm within the given bundle.
Alternatively, we may consider the following composite norm
\begin{equation}
\sup_{x\in \mathcal{B}(b)}\left(\mathbb{E}\left[ (\mathfrak{f}(Y))^2| X=x\right]\right)^\frac{1}{2}, \label{equation_ad_hoc_norm}
\end{equation}
for any given bundle $\mathcal{B}(b)$.
For the sake of simplicity, we assume there exists an element $\phi_{\mathcal{B}(b)}$ within the space $H$ such that $v(\cdot) - \phi_{\mathcal{B}(b)}(\cdot)$ minimizes the norm, namely,
$$\sup_{x\in \mathcal{B}(b)}\mathbb{E}\left[ |v(Y) - \phi_{\mathcal{B}(b)}(Y)|^2 | X=x\right] = \inf_{\phi \in H} \sup_{x\in \mathcal{B}(b)}\mathbb{E}\left[ |v(Y) - \phi(Y)|^2 | X=x\right].$$
As $\phi_{{\mathcal{B}}(b)} \in H$, it is clear that under the empirical norm, we have $$||{\bf 1}_{\mathcal{A}}(v - \tilde{v})||^2_{\mathcal{B}(b), \#} \leq ||v - \tilde{v}_b||^2_{\mathcal{B}(b), \#} \leq ||v - \phi_{\mathcal{B}(b)}||^2_{\mathcal{B}(b), \#} = \frac{\sum_{m=1}^{\#\mathcal{B}(b)}|v(Y^{b, m}) - \phi_{\mathcal{B}}(Y^{b, m})|^2}{\# \mathcal{B}(b)}.$$
Without loss of generality, assume $(X^{b, \#\mathcal{B}(b)}, Y^{b, \#\mathcal{B}(b)})$ is the bundle defining sample as stated in the construction of equal partition if $b \neq B$.
Recalling that samples within a bundle are i.i.d. given the partition, we can take the conditional expectation of the empirical norm given $\sigma(\mathbb{B})$ and the positions $(X^{b, m})_{1\leq m \leq \#\mathcal{B}(b) - 1}$.
\begin{align*}
&
\hat{\mathbb{E}}\left[\left.||{\bf 1}_{\mathcal{A}}(v - \tilde{v})||^2_{\mathcal{B}, \#}\right|\sigma(\mathbb{B}), X^{b, 1}, X^{b, 2}, \cdots, X^{b, \#\mathcal{B}(b) -1}\right] \\
\leq &
\frac{\sum_{m=1}^{\#\mathcal{B}(b)}\hat{\mathbb{E}}\left[\left.|v(Y^{b, m}) - \phi_{\mathcal{B}}(Y^{b, m})|^2\right|\sigma(\mathbb{B}), X^{b, 1}, \cdots, X^{b, \#\mathcal{B}(b)-1}\right]}{\# \mathcal{B}(b)}\\
= &
\frac{\sum_{m=1}^{\#\mathcal{B}(b) - 1}\mathbb{E}\left[\left.|v(Y) - \phi_{\mathcal{B}}(Y)|^2\right| X = X^{b, m} \in \mathcal{B}(b)\backslash\{x_b\}\right] + \mathbb{E}\left[\left.|v(Y) - \phi_{\mathcal{B}}(Y)|^2\right| X = x_b\right]}{\# \mathcal{B}(b)}\\
\leq &
\frac{\sum_{m=1}^{\#\mathcal{B}(b)}\sup_{x \in \mathcal{B}(b)}\mathbb{E}\left[\left.|v(Y) - \phi_{\mathcal{B}}(Y)|^2\right| X =x \right]}{\# \mathcal{B}(b)}\\
= &
\inf_{\phi \in H}\sup_{x \in \mathcal{B}(b)}\mathbb{E}\left[\left.|v(Y) - \phi(Y)|^2\right| X = x \right].
\end{align*}
There are some details in the above calculation that require explanation.
Note that the boundary point information is included in $\sigma(\mathbb{B})$, therefore we only calculate conditional expectations with the remaining samples.
For the last bundle $\mathcal{B}(B)$, the separation is not necessary as no sample is used to define the bundle, but this does not alter the result.
We use the fact that each sample is independent within the bundle in the first equality.
Next, if the minimal element $\phi_{\mathcal{B}(b)}$ does not exist, one has to adjust the derivation above with a limiting argument.
This means that by the definition of infimum, one can find a sequence of functions $(v - \phi_{\mathcal{B}(b) , n})_{n \in \mathbb{Z}^+}$, such that
$$\sup_{x\in \mathcal{B}(b)}\mathbb{E}\left[ |v(Y) - \phi_{\mathcal{B}(b),n}(Y)|^2 | X=x\right] \leq \inf_{\phi \in H} \sup_{x\in \mathcal{B}(b)}\mathbb{E}\left[ |v(Y) - \phi(Y)|^2 | X=x\right] + \frac{1}{n}.$$
By repeating the above argument for each function in the sequence, replacing the infimum in the proof by the corresponding upper bound and taking $n$ to infinity (with the eventual inequality), we arrive at the same conclusion.
Thereafter, taking the expectation of the empirical norm conditional on $\sigma(\mathbb{B})$, we have
\begin{align*}
\hat{\mathbb{E}}\left[\left.||{\bf 1}_{\mathcal{A}}(v - \tilde{v})||^2_{\mathcal{B}, \#}\right|\sigma(\mathbb{B})\right]
= &
\hat{\mathbb{E}}\left[\left.\hat{\mathbb{E}}\left[\left.||{\bf 1}_{\mathcal{A}}(v - \tilde{v})||^2_{\mathcal{B}, \#}\right|\sigma(\mathbb{B}), X^{b, 1}, X^{b, 2}, \cdots, X^{b, \#\mathcal{B}(b) -1}\right] \right|\sigma(\mathbb{B})\right]\\
\leq &
\inf_{\phi \in H}\sup_{x \in \mathcal{B}(b)}\mathbb{E}\left[\left.|v(Y) - \phi(Y)|^2\right| X = x \right].
\end{align*}
Note that this bound is defined on all given partitions and solely depends on the partition but not the choice of $\phi_{\mathcal{B}}$ for any bundle $\mathcal{B}$.
Our calculation here is purely within a bundle given the partition is known.
Therefore, even if the minimum function $\phi_{\mathcal{B}(b)}$ or the sequence $(\phi_{\mathcal{B}(b) , n})_{n \in \mathbb{Z}^+}$ is not unique, the actual choice of these functions does not matter as long as they are picked in a consistent and measurable way.
Here, we derive a bound for the expectation of the weight summation $T_{3,\mathcal{B}}$ in \eqref{equation_decomposition} with respect to the simulation cloud, i.e.,
\begin{align}
\hat{\mathbb{E}}_S\left[\sum_{\mathcal{B}\in \mathbb{B}} \int_{\mathcal{B}}\int\nu(dx, dy) T_{3,\mathcal{B}}\right]
& \leq
12 \hat{\mathbb{E}}_S\left[
\sum_{\mathcal{B}\in \mathbb{B}}\int_\mathcal{B}\int\nu(dx,dy)||v - \phi_\mathcal{B}||^2_{\mathcal{B}, \#}
\right]\nonumber\\
& \leq
\frac{12}{\hat{\mathbb{E}}[{\bf 1}_S]}\hat{\mathbb{E}}
\left[
\sum_{\mathcal{B}\in \mathbb{B}}\int_\mathcal{B}\int\nu(dx,dy) \hat{\mathbb{E}}\left[\left.||v - \phi_\mathcal{B}||^2_{\mathcal{B}, \#}\right|\sigma(\mathbb{B})\right]
\right]\label{equation_T2}\\
& \leq
\frac{12}{\hat{\mathbb{E}}[{\bf 1}_S]}
\hat{\mathbb{E}}\left[
\sum_{\mathcal{B}\in \mathbb{B}}\int_\mathcal{B}\int\nu(dx,dy)\inf_{\phi \in H} \sup_{x\in \mathcal{B}}\mathbb{E}\left[ |v(Y) - \phi(Y)|^2 | X=x\right]
\right].\nonumber
\end{align}
In this inequality, we expand the denominator of our adjusted probability by also including the rejected cases and applying the results above for each partition.
However, for an unbounded bundle $\mathcal{B}$, it is possible to find an example such that \\ $\sup_{x\in \mathcal{B}}\mathbb{E}\left[ |v(Y) - \phi(Y)|^2 | X=x\right] = \infty $.
An alternative bound is required to ensure that our error bound is not trivial.
Note that given the square norm $|\alpha(b)|^2 \leq L$, we have $$\forall y, b, \,|\tilde{v}_{\mathcal{B}(b)}(y){\bf 1}_{\mathcal{A}}(y)|^2 \leq \left(\sum^Q_{l=1}|\alpha_l(b)|^2\right)\left(\sum^Q_{l=1}|p_l(y){\bf 1}_{\mathcal{A}}(y)|^2\right)\leq L Q\max_{l =1 \ldots, Q}\max_{y\in \mathcal{A}}|p_l(y)|^2,$$
and
\begin{align*}
||(v-\tilde{v}){\bf 1}_{\mathcal{A}}||^2_{\mathcal{B}, \#}
&
=\frac{\sum_{m=1}^M{\bf 1}_{\mathcal{B}}(X^m)|(v(Y^m)-\tilde{v}(X^m, Y^m)){\bf 1}_{\mathcal{A}}(Y^m)|^2}{\sum_{m=1}^M{\bf 1}_{\mathcal{B}}(X^m)}
\nonumber\\
&
\leq \frac{\sum_{m=1}^M{\bf 1}_{\mathcal{B}}(X^m)(2|v(Y^m){\bf 1}_{\mathcal{A}}(Y^m)|^2 + 2|\tilde{v}(X^m, Y^m){\bf 1}_{\mathcal{A}}(Y^m)|^2)}{\sum_{m=1}^M{\bf 1}_{\mathcal{B}}(X^m)}\nonumber\\
&
\leq \frac{\sum_{m=1}^M{\bf 1}_{\mathcal{B}}(X^m)(2C_{v,\mathcal{A}}^2 + 2LQC_{p, \mathcal{A}}^2)}{\sum_{m=1}^M{\bf 1}_{\mathcal{B}}(X^m)} = L'.
\end{align*}
We shall use this alternative bound in Equation \eqref{equation_T2} for any bundle $\mathcal{B}$ such that \\ $\sup_{x\in \mathcal{B}}\mathbb{E}\left[ |v(Y) - \phi(Y)|^2 | X=x\right] > L'$.
Combining the two error bounds, we have
\begin{align}
\hat{\mathbb{E}}_S\left[\sum_{\mathcal{B} \in \mathbb{B}}\int_{\mathcal{B}}\int\nu(dx, dy) T_{3,\mathcal{B}}\right]
& \leq
\frac{12}{\hat{\mathbb{E}}[{\bf 1}_S]}
\hat{\mathbb{E}}\left[
\sum_{\mathcal{B}\in \mathbb{B}}\int_\mathcal{B}\int\nu(dx,dy)
(\inf_{\phi \in H} \sup_{x\in \mathcal{B}}\mathbb{E}\left[ |v(Y) - \phi(Y)|^2 | X=x\right]
\wedge L')
\right].\nonumber
\end{align}
Next, we consider the term $T_{1,\mathcal{B}}$ in \eqref{equation_decomposition}.
This term concerns the difference between the theoretical projection and the empirical regression function within each bundle.
Here we restate that $S$ denotes the modified probability, based on the regression coefficients, where $\mathcal{A}$ is a compact set defined with respect to $Y$ only.
By taking conditional expectations with respect to $\sigma(\mathbb{B})$, we have,
\begin{align*}
&
\hat{\mathbb{E}}_S\left[\sum^B_{b = 1}\int_{\mathcal{B}(b)}\int\nu(dx, dy)T_{1,\mathcal{B}(b)}\right] \\
= &
\frac{1}{\hat{\mathbb{E}}[{\bf 1}_S]}\hat{\mathbb{E}}\left[
\sum^B_{b = 1}\int_{\mathcal{B}(b)}\int\nu(dx, dy)
\hat{\mathbb{E}}[
T_{1,\mathcal{B}(b)}{\bf 1}_S
|\sigma(\mathbb{B})]
\right]\\
= &
\frac{1}{\hat{\mathbb{E}}[{\bf 1}_S]}\hat{\mathbb{E}}\left[
\sum^B_{b = 1}\int_{\mathcal{B}(b)}\int\nu(dx, dy)
\hat{\mathbb{E}}_{\mathcal{B}(b)}[
{\bf 1}_S 3 \max \{||{\bf 1}_{\mathcal{A}}(v-\tilde{v})||_{\mathcal{B}(b), \infty}
- 2 ||(v-\tilde{v}){\bf 1}_{\mathcal{A}}||_{\mathcal{B}(b)\backslash\{x_b\}, \#}, 0\}^2
]
\right].
\end{align*}
It is important that we condition on the smaller sigma algebra such that all samples have an identical conditional distribution.
If we conditioned on the whole sigma algebra generated by $(X^m)_{1\leq m \leq M}$, each $Y^m$ would have a different distribution depending on the position of $X^m$.
Within each (given) bundle $\mathcal{B}(b)$, we may consider the two norms $||\cdot||_{\mathcal{B}(b),\infty}$ and $||\cdot||_{\mathcal{B}(b)\backslash\{x_b\}, \#}$ as the theoretical and empirical $L^2$ norms of a random process satisfying the probability distribution $\mathbb{P}_{\mathcal{B}(b)}:= \frac{\int_{\mathcal{B}(b)}\nu(dx, \cdot)}{\int_{\mathcal{B}(b)}\int\nu(dx, dy)}$ and extend our notation for expectations to this measure.
In other words, $\mathbb{P}_{\mathcal{B}(b)}$ is the conditional probability of $Y^m$ given that $X^m$ is within the bundle.
As only the samples within bundle $\mathcal{B}(b)$ are considered in $T_{1, \mathcal{B}(b)}$, we only have to consider the identically distributed samples following $\mathbb{P}_{\mathcal{B}(b)}$.
Thus, we simplify the notation with this measure.
Assume that $\sum^M_{m=1}{\bf 1}_{\mathcal{B}(b)\backslash\{x_b\}}(X^m) = N - 1$ and let $u > 864 L'/(N - 1)$ be arbitrary, by Theorem 11.2 in \cite{gyorfi_kohler_krzyzak_walk_2002}, we find
\begin{align}
&
\hat{\mathbb{P}}_{\mathcal{B}(b)}\{3 \max
\{||{\bf 1}_{\mathcal{A}}(v-\tilde{v})||_{\mathcal{B}(b), \infty}
- 2 ||(v-\tilde{v}){\bf 1}_{\mathcal{A}}||_{\mathcal{B}(b)\backslash\{x_b\}, \#}, 0\}^2 > u
\mbox{ and the event } S \mbox{ is true}\}\nonumber\\
\leq &
\hat{\mathbb{P}}_{\mathcal{B}(b)}\{ \exists \phi \in H_L :
||{\bf 1}_{\mathcal{A}}(v-\phi)||_{\mathcal{B}(b), \infty}
- 2 ||(v-\phi){\bf 1}_{\mathcal{A}}||_{\mathcal{B}(b)\backslash\{x_b\}, \#} > \sqrt{u/3} \mbox{ and the event } S \mbox{ is true}\}\nonumber\\
\leq &
\hat{\mathbb{P}}_{\mathcal{B}(b)}\{ \exists \phi \in H_L :
||{\bf 1}_{\mathcal{A}}(v-\phi)||_{\mathcal{B}(b), \infty}
- 2 ||(v-\phi){\bf 1}_{\mathcal{A}}||_{\mathcal{B}(b)\backslash\{x_b\}, \#} > \sqrt{u/3} \}\nonumber\\
\leq &
3\hat{\mathbb{E}}_{\mathcal{B}(b)}[\mathcal{N}_2(\sqrt{2/3}\sqrt{u}/24, H_{L,\mathcal{A}}, Y_{\mathcal{B}(b)}^{2(N-1)})]\exp\left(-\frac{(N-1)u}{864L'}\right)\nonumber\\
\leq &
3\hat{\mathbb{E}}_{\mathcal{B}(b)}[\mathcal{N}_2(\sqrt{L'}/\sqrt{N-1}, H_{L,\mathcal{A}}, Y_{\mathcal{B}(b)}^{2(N-1)})]\exp\left(-\frac{(N-1)u}{864L'}\right), \label{equation_covering_number}
\end{align}
where $H_L$ is the set of all functions in $H$ whose coordinates with respect to the basis $(p_l)_{1\leq l \leq Q}$, have a Euclidean norm no greater than $L$ and $H_{L, \mathcal{A}}$ the set containing all functions of the form ${\bf 1}_{\mathcal{A}}(\phi - v)$, where $\phi$ belongs to $H_L$.
Again, since we condition only on the partition, all samples within a bundle are i.i.d. and the condition for Theorem 11.2 in \cite{gyorfi_kohler_krzyzak_walk_2002} is satisfied.
In fact, this proof should work for all partitions for which the samples remain i.i.d. within a bundle.
Note that the indicator for the event $S$ is kept in the first line to keep our regression function bounded, then we drop the indicator in the second inequality to take advantage of the independent samples.
Finally, $Y_{\mathcal{B}(b)}$ is a sample following the conditional probability $\mathbb{P}_\mathcal{B}$ here.
Constant $\mathcal{N}_2$ in \eqref{equation_covering_number} is called the covering number and it is bounded by Lemma 9.2 and Theorem 9.4 of \cite{gyorfi_kohler_krzyzak_walk_2002}: $$\mathcal{N}_2(\sqrt{L'}/\sqrt{N-1}, H_{L,\mathcal{A}}, Y^{2(N-1)}) \leq 3\left(\frac{2eL'}{L'/(N-1)}\log(\frac{3eL'}{L'/(N-1)})\right)^{V_{H^+_{L, \mathcal{A}}}}\leq 3\left[3e(N-1)\right]^{2V_{H^+_{L, \mathcal{A}}}},$$ where $V$ denotes the Vapnik-Chervonenkis dimension, which represents the number of elements in the largest set that can be shattered by a class of subsets in $\mathbb{R}^q$.
The reader is referred to section 9.4 of \cite{gyorfi_kohler_krzyzak_walk_2002} for further information on $\mathcal{N}_2$ and $V$.
Next, recalling the definition $H^+$ from Section \ref{section_introduction}, we notice that $V_{H^+_{L, \mathcal{A}}} \leq V_{H^+_L}$, which can be shown by the following argument.
Let $(y, z) \in \mathbb{R}^{q} \times \mathbb{R}$, if $y \not\in \mathcal{A}$ and $z\geq 0$, then $(y,z)$ is contained in none of the sets in $H_{L,\mathcal{A}}^+$ and if $y \not\in \mathcal{A}$ and $z \leq 0$, then $(y,z)$ is contained in each set of $H^+_{L,\mathcal{A}}$.
Hence, if $H^+_{L,\mathcal{A}}$ shatters a set of points, then the x-coordinates of these points must lie in $\mathcal{A}$ and $H^+_L$ also shatters this set of points.
In addition, we have the fact that $H_L \subset H$ and observe that
\begin{align*}
H^+
& \subseteq \{\{(x,t) : \phi(x) + a_0t \geq 0\} : \phi\in H, a_0 \in \mathbb{R}\},
\end{align*}
which is a linear vector space of dimension less than or equal to $Q+1$, thus Theorem 9.5 of \cite{gyorfi_kohler_krzyzak_walk_2002} implies $$V_{H^+_L}\leq Q+1.$$
It follows that, for any $u > 864L'/(N-1)$, the probability under consideration is bounded by \\ $9 [3e(N-1)]^{2(Q+1)}\exp\left(-\frac{(N-1) u}{864L'}\right),$
and for any $w>864L'/(N-1)$,
\begin{align*}
&
\hat{\mathbb{E}}[T_{1,\mathcal{B}(b)}{\bf 1}_S|\sigma(\mathbb{B}), \sum^M_{m=1}{\bf 1}_{\mathcal{B}(b)\backslash\{x_b\}}(X^m) = N-1] \\
\leq &
w + 9[3e(N-1)]^{2(Q+1)} \int^\infty_w\exp\left(-\frac{(N-1)t}{864L'}\right)dt \\
& =
w + 9[3e(N-1)]^{2(Q+1)}\frac{864 L'}{N-1}\exp\left(-\frac{(N-1)w}{864L'}\right).
\end{align*}
By setting, $$w = \frac{864L'}{N-1} \log\left(9[3e(N-1)]^{2(Q+1)}\right),$$ and taking expectations with respect to $\sigma(\mathbb{B})$, we find
\begin{align*}
&
\hat{\mathbb{E}}_S\left[\sum^B_{b =1}\int_{\mathcal{B}(b)}\int\nu(dx, dy)T_{1,\mathcal{B}(b)}\right]\\
\leq &
\frac{\vartheta(L')}{\hat{\mathbb{E}}[{\bf 1}_S]}\hat{\mathbb{E}}
\left[\sum^B_{b = 1}\int_{\mathcal{B}(b)}\int\nu(dx, dy)\frac{(\log(\sum^M_{m=1}{\bf 1}_{\mathcal{B}(b)}(X^m) - 1)+1)(Q+1)}{\sum^M_{m=1}{\bf 1}_{\mathcal{B}(b)}(X^m) - 1}\right],
\end{align*}
where one possible choice of $\vartheta$ is $\vartheta(L'):=1728(\log(27e) + 1) L'$.
This can be checked by simple algebra.
Note that $\vartheta$ is independent of the number of samples in a bundle and only depends on $L'$.
Finally, $T_{2, \mathcal{B}(b)}$ is the technical term introduced by the definition of the partition $\mathbb{B}$.
Consider any realization of the simulation cloud $(X^m, Y^m)$ and partition $\mathbb{B}$, in particular, there exist boundary defining samples $(x_b, y_b) \in (X^m, Y^m)$ for $b = 1, 2, \cdots, B-1$.
Using the inequality $(\sqrt{a} - \sqrt{b})^2 \leq |a-b|$ and the definition of $L'$, we have
\begin{align*}
&
12 (||(v-\tilde{v}){\bf 1}_{\mathcal{A}}||_{\mathcal{B}(b)\backslash \{x_b\}, \#}
- ||(v-\tilde{v}){\bf 1}_{\mathcal{A}}||_{\mathcal{B}(b), \#})^2 \\
\leq &
12 \left|\frac{\sum_{m=1}^M{\bf 1}_{\mathcal{B}(b)\backslash\{x_b\}}(X^m)|(v(Y^m)-\tilde{v}(X^m, Y^m)){\bf 1}_\mathcal{A}(Y^m)|^2}{\sum_{m=1}^M{\bf 1}_{\mathcal{B}(b)}(X^m) - 1}\right.\\
& \hspace{20pt} \left.
- \frac{\sum_{m=1}^M{\bf 1}_{\mathcal{B}(b)}(X^m)|(v(Y^m)-\tilde{v}(X^m, Y^m)){\bf 1}_\mathcal{A}(Y^m)|^2}{\sum_{m=1}^M{\bf 1}_{\mathcal{B}(b)}(X^m)}\right|\\
\leq &
12 \left|\frac{\sum_{m=1}^M{\bf 1}_{\mathcal{B}(b)\backslash\{x_b\}}(X^m)(|(v(Y^m)-\tilde{v}(X^m, Y^m)){\bf 1}_\mathcal{A}(Y^m)|^2 - |(v(y_b)-\tilde{v}(x_b, y_b)){\bf 1}_\mathcal{A}(y_b)|^2)}
{(\sum_{m=1}^M{\bf 1}_{\mathcal{B}(b)}(X^m) - 1)(\sum_{m=1}^M{\bf 1}_{\mathcal{B}(b)}(X^m))}\right|\\
\leq &
\frac{24 L'}{(\sum_{m=1}^M{\bf 1}_{\mathcal{B}(b)}(X^m))},
\end{align*}
for $b = 1, 2, \cdots, B-1.$
Substituting all the results above into Equation \eqref{equation_decomposition} we conclude the proof. \end{proof}
\begin{remark} \label{remark_empty_bundle_equal}
Implicitly, it is assumed in our proof that the sorting function used behaves nicely such that there is no empty bundle or bundle with a few samples in our partition.
If this is not the case, these bundles need to be merged with other bundles in a consistent way and the number of bundles needs to be adjusted accordingly.
We omit this extra complexity in favor of the presentation. \end{remark}
\subsection{Discussion on the Error Bound} We shall discuss the meaning of all error terms in Theorem \ref{theorem_regression}. Note that most discussions here are in heuristic sense instead of rigorous analysis.
The last term in the sum, $\hat{\mathbb{E}}_S\left[\iint |v(y)-\tilde{v}(x,y)|^2(1 - {\bf 1}_{\mathcal{A}}(y))\nu(dx, dy)\right]$, represents the expected integral of the approximation error with respect to the probability measure of $(X, Y)$ outside a given compact set $\mathcal{A}$ for $Y$. In theory, for an increasing sequence of compact sets $\mathcal{A}_1 \subset \mathcal{A}_2 \subset \ldots \subset \mathcal{A}_A \subset \mathbb{R}^q$, $$\lim_{A\rightarrow \infty}\iint |v(y)-\tilde{v}(x, y)|^2(1 - {\bf 1}_{\mathcal{A}_A}(y))\nu(dx, dy) = 0,$$ since the original function and the approximant are both in $L^2(\nu)$, by the dominated convergence theorem. Again, the set $\mathcal{A}$ plays no role in the algorithm. This term is introduced to reflect that only the region with high probability measure has a strong impact on a Monte Carlo approximation; thus, we only consider the behavior of the function $v$ in this region and do not impose strong conditions on $v$ over the whole domain. In practice, we can find a large enough set $\mathcal{A}$ such that the last term is smaller than any preset tolerance level. Otherwise, the probability distribution is too spread out and Monte Carlo may not be a suitable approximation method.
The term $\frac{12}{\hat{\mathbb{E}}[{\bf 1}_S]}
\hat{\mathbb{E}}\left[
\sum_{\mathcal{B}\in \mathbb{B}}\int_\mathcal{B}\int\nu(dx,dy)
(\inf_{\phi \in H} \sup_{x\in \mathcal{B}}\mathbb{E}\left[ |v(Y) - \phi(Y)|^2 | X=x\right]
\wedge L') \right]$ can be seen as the average of the best projection error among bundles and upper bounded by $L'$. It concerns how close the original function and its projection onto the space spanned by our basis are. This term should be controlled by increasing the number of bundles. As the number of bundles increases, all the bundles converge to a point and the error term becomes $\frac{12}{\hat{\mathbb{E}}[{\bf 1}_S]}
\hat{\mathbb{E}}\left[
(\inf_{\phi \in H} \mathbb{E}\left[ |v(Y) - \phi(Y)|^2 | X\right]
\wedge L')
\right]$, which is the average projection error over the whole range of $X$. It is clear that $\hat{\mathbb{E}}[{\bf 1}_S]$ might change when we increase the number of bundles, thus the above analysis is by no mean rigorous. We will provide comments on $S$ at the end of this section.
The first error term is the bound for the estimation error based on the empirical norm instead of the theoretical norm. This term can be controlled by simply increasing the number of samples within each bundle such that this term is below a certain threshold. Because low sample numbers imply high error bounds, if there are bundles in any partition which contain very few samples, they should be merged with other bundles or the sorting function must be adapted.
The second error term is just a technical term for constructing a measurable partition and can be controlled by the number of samples in each bundle.
Therefore, the best way to set the parameters for the SGBM algorithm is to first fix the number of samples in each bundle such that the first two terms in the error bound are under a given threshold, then increase the number of bundles to control the projection error. However, we cannot write a simple convergence rate for the combined error under the current conditions.
\subsection{Discussion on Event $S$} There is one final question remaining. The above argument heavily depends on $L$, which is a user defined quantity, and the probability of $S$, the event that $L^2$ norms of the regression coefficients are below threshold $L$. It is natural to ask if we can actually find a number $L$ such that $\mathbb{P}_S$ is bounded below, as our bound becomes trivial when $\frac{1}{\hat{\mathbb{E}}[{\bf 1}_S]}$ tends to infinity and it would be incredibly expensive to apply the algorithm if we reject most of the simulations. In the following, we shall provide a heuristic argument for the convergence of the regression coefficients within the bundle, therefore, there is a natural choice of $L$ depending on the target function itself and a hard cut off may be unnecessary. The numerical experiments in the next section also back up this argument.
Once again, we consider Equation \eqref{equation_coefficients}, where the regression coefficients within any bundle $\mathcal{B}(b)$ satisfy \begin{equation*} \mathcal{I}^\top\mathcal{I}\alpha(b) = \mathcal{I}^\top v(Y^b), \end{equation*} with \begin{equation*} \mathcal{I}^\top\mathcal{I} = \left( \begin{array}{ccc} \sum^{\# \mathcal{B}(b)}_{i = 1}(p_1(Y^{b, i}))^2 & & \sum^{\# \mathcal{B}(b)}_{i = 1}p_1(Y^{b, i})p_Q(Y^{b, i}) \\ & \ddots &\\ \sum^{\# \mathcal{B}(b)}_{i = 1}p_Q(Y^{b, i})p_1(Y^{b, i}) & & \sum^{\# \mathcal{B}(b)}_{i = 1}(p_Q(Y^{b, i}))^2 \\ \end{array} \right) \end{equation*} and \begin{equation*} \mathcal{I}^\top v(Y^b) = \left(\begin{array}{c} \sum^{\# \mathcal{B}(b)}_{i = 1}p_1(Y^{b, i})v(Y^{b, i})\\ \vdots\\ \sum^{\# \mathcal{B}(b)}_{i = 1}p_Q(Y^{b, i})v(Y^{b, i}) \end{array}\right). \end{equation*} When the number of samples within a bundle tends to infinity, it is easy to see that \begin{equation*} \frac{1}{\#\mathcal{B}(b)}\mathcal{I}^\top\mathcal{I} \rightarrow \left( \begin{array}{ccc}
\mathbb{E}[(p_1(Y))^2|X\in \mathcal{B}(b)] & & \mathbb{E}[p_1(Y)p_Q(Y)|X\in\mathcal{B}(b)] \\ & \ddots &\\
\mathbb{E}[p_Q(Y)p_1(Y)|X\in\mathcal{B}(b)] & & \mathbb{E}[(p_Q(Y))^2|X\in \mathcal{B}(b)]\\ \end{array} \right) \end{equation*} and \begin{equation*} \frac{1}{\#\mathcal{B}(b)}\mathcal{I}^\top v(Y^b) \rightarrow \left(\begin{array}{c}
\mathbb{E}[p_1(Y)v(Y)|X\in\mathcal{B}(b)]\\ \vdots\\
\mathbb{E}[p_Q(Y)v(Y)|X\in\mathcal{B}(b)] \end{array}\right). \end{equation*} So, the empirical system of equations ``converges'' to the system of equations of a projection. Therefore, as long as our basis is properly defined such that the basis functions remain linearly independent for all bundles, this system of equations should be solvable with enough simulation paths. Moreover, since the regression coefficients should ``converge'' to the theoretical projection coefficients, we could pick $L$ depending on the $L^2$ norm of $v$ itself, for example, two times its theoretical norm. Alternatively, when there are enough samples within each bundle, we suspect that the regression coefficients simply ``converge'' to the theoretical values and satisfy the boundedness condition of the regression in a natural way. Therefore, no actual rejection step in the algorithm is needed when there are enough samples within the bundles. This proposition appears to be supported by our numerical experiments.
However, there are multiple difficulties to incorporate the above argument into Theorem \ref{theorem_regression}. First, since equal partitioning is not a recursive partitioning scheme as defined in \cite{gordon1984almost}, we cannot use a martingale argument on equal partitioning, limiting the available tools. Secondly, as the partition changes when increasing the overall number of samples, the above "convergence" does not seem to be properly defined. Finally, we would have to introduce a measure of convergence with respect to a matrix inverse, which is beyond the scope of this work.
On the other hand, there is a possibility that when the number of samples within a bundle is too small, the algorithm as a whole will fail to converge. Thus, it is beneficial to remind a user of such possibility and put a safety check in place. Therefore, we keep the derivation of Theorem \ref{theorem_regression} as a complete justification for SGBM. In practice, one can either make sure that there are enough samples within each bundle and let $L$ be infinity. In this case, Theorem \ref{theorem_regression} no longer applies but we believe that the overall error will satisfy a bound of similar form. Alternatively, one starts from a small $L$ when running the algorithm and increases $L$'s value until most tests are accepted. By these techniques, the error bound from Theorem \ref{theorem_regression} remains valid.
\section{Error Analysis} \label{section_explicit}
A complete error description of the algorithm with respect to the application of SGBM towards BSDEs will be derived in this section.
We wish to apply the theorem from the last section to establish an error bound for the expectation of our approximation with respect to the selected simulation cloud. We need to check that after rejecting the simulations that generate regression coefficients that are "too large", our approximation functions are bounded in the recursion. We notice that for any $k\leq N$,
$$|y^{R}_k(x)| \leq \max \{C_{M, A} L \sqrt{2(1 + C_\pi^2)}, C_{\Phi, A} \}=: C_{Y,A}$$ and $$|z^{R}_k(x)| \leq C_{M, A} L \sqrt{2(1 + C_\pi^2)} =: C_{Z, A}$$ for all $x$ in a compact set $A$. The constant $C_\pi$ is defined as $\max_{k = 0, \ldots, N-1}\Delta_k$. These bounds can be proven by Assumption \ref{assumption:globally_lipschitz} and some simple inequalities. Furthermore, we have $\forall x \in A$, $$f^{R}_k(x) := f_{k}(y^{R}_{k}(x), z^{R}_{k}(x)) \leq C_f + L_f (C_{Y, A} + C_{Z, A}) =: C_{f,A},$$ which follows from the Lipschitz assumptions of $f$. Therefore, Theorem \ref{theorem_regression} applies.
We denote by $S$ the set of all simulation cloud values $(X^{\pi,m}_{t_k})_{\substack{1\leq m \leq M\\ 0 \leq k \leq N}}$ such that the Euclidean norm of the regression coefficients at each time step in each bundle is bounded by $L$, and the expectation is adjusted accordingly. With the application of Theorem \ref{theorem_regression}, we know that for any given compact set $\mathcal{A}$, \begin{align*}
&
\hat{\mathbb{E}}^x_{t_{k}, S}\left[\expectation{|y^{R}_{i+1}(X^\pi_{t_{i+1}}) - \tilde{y}^{R}_{i+1}(X^\pi_{t_i}, X^\pi_{t_{i+1}})|^2}{k}{x}\right]\\
\leq &
\frac{\vartheta(L'_y)}{\hat{\mathbb{E}}^x_{t_k}[{\bf 1}_S]}
\hat{\mathbb{E}}^x_{t_k}\left[\sum^B_{b = 1}\int_{\mathcal{B}(b)}\int\nu(dx, dy)\frac{(\log(\sum^M_{m=1}{\bf 1}_{\mathcal{B}(b)}(X^m) - 1)+1)(Q+1)}{\sum^M_{m=1}{\bf 1}_{\mathcal{B}(b)}(X^m) - 1}\right]\\
& +
\hat{\mathbb{E}}^x_{t_k, S}\left[\sum^{B-1}_{b =1}\int_{\mathcal{B}(b)}\int\nu(dx, dy)\frac{24 L'_y}{(\sum_{m=1}^M{\bf 1}_{\mathcal{B}(b)}(X^m))}\right]\\
& +
\frac{12}{\hat{\mathbb{E}}^x_{t_k}[{\bf 1}_S]}
\hat{\mathbb{E}}^x_{t_k}\left[
\sum_{\mathcal{B}\in \mathbb{B}}\int_\mathcal{B}\int\nu(dx,dy)
(\inf_{\phi \in H} \sup_{\theta\in \mathcal{B}}\mathbb{E}^\theta_{t_i}\left[|y^{R}_{t_{i+1}}(X^\pi_{t_{i+1}}) - \phi(X^\pi_{t_{i+1}})|^2\right]
\wedge L'_y)
\right] \\
& + \hat{\mathbb{E}}^x_{t_k, S}\left[\expectation{|y^{R}_{i+1}(X^\pi_{t_{i+1}}) - \tilde{y}^{R}_{i+1}(X^\pi_{t_i}, X^\pi_{t_{i+1}})|^2(1 - {\bf 1}_{\mathcal{A}}(X^\pi_{t_{i+1}}))}{k}{x}\right] =: \Xi^x_{t_k}(i,y), \end{align*} and \begin{align*}
&
\hat{\mathbb{E}}^x_{t_k, S}\left[\expectation{
|f^{R}_{i+1}(X^\pi_{t_{i+1}})
- \tilde{f}^{R}_{i+1}(X^\pi_{t_i}, X^\pi_{t_{i+1}})|^2
}{k}{x}\right]\\
\leq &
\frac{\vartheta({L'_f})}{\hat{\mathbb{E}}^x_{t_k}[{\bf 1}_S]}\hat{\mathbb{E}}^x_{t_k}
\left[\sum^B_{b = 1}\int_{\mathcal{B}(b)}\int\nu(dx, dy)\frac{(\log(\sum^M_{m=1}{\bf 1}_{\mathcal{B}(b)}(X^m) - 1)+1)(Q+1)}{\sum^M_{m=1}{\bf 1}_{\mathcal{B}(b)}(X^m) - 1}\right] \\
& +
\hat{\mathbb{E}}^x_{t_k, S}\left[\sum^{B-1}_{b =1}\int_{\mathcal{B}(b)}\int\nu(dx, dy)\frac{24 L'_f}{(\sum_{m=1}^M{\bf 1}_{\mathcal{B}(b)}(X^m))}\right]\\
& +
\frac{12}{\hat{\mathbb{E}}^x_{t_k}[{\bf 1}_S]}
\hat{\mathbb{E}}^x_{t_k}\left[
\sum_{\mathcal{B}\in \mathbb{B}}\int_\mathcal{B}\int\nu(dx,dy)
(\inf_{\phi \in H} \sup_{\theta\in \mathcal{B}}\mathbb{E}^\theta_{t_i}\left[|f^{R}_{t_{i+1}}(X^\pi_{t_{i+1}}) - \phi(X^\pi_{t_{i+1}})|^2\right]
\wedge L'_f)
\right] \\
&
+ \hat{\mathbb{E}}^x_{t_k, S}\left[\expectation{|f^{R}_{i+1}(X^\pi_{t_{i+1}}) - \tilde{f}^{R}_{i+1}(X^\pi_{t_i}, X^\pi_{t_{i+1}})|^2(1 - {\bf 1}_{\mathcal{A}}(X^\pi_{t_{i+1}}))}{k}{x}\right]
=: \Xi^x_{t_k}(i,f), \end{align*} where $L'_y = 2LQ C^2_{p, A} + 2C_{Y,A}^2$ and $L'_f = 2LQ C^2_{p, A} + 2C^2_{f,A}$. Note that although the size of $C_\pi:= \max_{k = 0, \ldots, N-1}\Delta_k$ may affect multiple constants here, such as $C_{M, A}$ due to the probability law, and $C_{Y, \mathcal{A}}$ and $C_{f, \mathcal{A}}$ by definition, letting $C_\pi\rightarrow 0$ would not make these constants converge to $0$. So it may be easier to replace the constant $C_\pi$ by $T$ and consider these constants independent of the discretization scheme. Therefore, we consider the refined regression error to be independent of the discretization scheme.
The following proposition summarizes the error bound for our scheme: \begin{align*} \Delta z^{}_k(x) := z^{}_k(x) - z^{R}_k(x);\quad \Delta y^{}_k(x) := y^{}_k(x) - y^{R}_k(x). \end{align*} \begin{proposition}\label{proposition_a_s_upperbound}
Given Assumption \ref{assumption:globally_lipschitz}, and the time-grid $\pi$ and an $N$-dimensional vector $\gamma \in (0, +\infty)^N$ satisfying $12q(L_f^2R_\pi\vee 1)(\Delta_k + \frac{1}{\gamma_k})\leq 1$, for all $k\leq N-1$, we have, for $0 \leq k \leq N$,
\begin{align}
& \hat{\mathbb{E}}^x_{t_k, S}[|\Delta y_k(x)|^2]\nonumber\\
\leq &
6q e^{T/4} \sum^{N-2}_{i=k} (\Delta_i + \gamma^{-1}_i) \Gamma_i L^2_f
\Xi^x_{t_k}(i+1, y) + 3 e^{T/4}\sum^{N-1}_{i=k}(\Delta_i + \gamma^{-1}_i) \Gamma_i \frac{1}{\Delta_i} \Xi^x_{t_k}(i, y)\nonumber\\
& + 3 e^{T/4} \sum^{N-1}_{i=k}(\Delta_i + \gamma^{-1}_i) \Gamma_i \Delta_i \Xi^x_{t_k}(i, f),\label{equation_y_error}
\end{align}
where $\Gamma_i := \prod^{i-1}_{j=0}(1+\gamma_j\Delta_j)$, and
\begin{align*}
& \hat{\mathbb{E}}^x_{t_k, S}\left[\sum^{N-1}_{i=k}\Delta_i\expectation{|\Delta z_i(X^\pi_{t_i})|^2}{k}{x}\Gamma_i \right]\\
\leq &
(12q + 3Te^{T/4})\sum^{N-1}_{i = k+1} \left(\Delta_i + \gamma^{-1}_i\right)
\frac{1}{\Delta_i}\Xi^x_{t_k}(i, y)\Gamma_i
+ 6q Te^{T/4} \sum^{N-2}_{i=k} (\Delta_i + \gamma^{-1}_i) \Gamma_i L^2_f \Xi^x_{t_k}(i+1, y)\\
& + (12q + 3Te^{T/4})\sum^{N-1}_{i = k+1} \left(\Delta_i + \gamma^{-1}_i\right) \Delta_i
\Xi^x_{t_k}(i, f)\Gamma_i
+ 4 \sum^{N-1}_{i=k} q \Xi^x_{t_k}(i, y)\Gamma_i.
\end{align*} \end{proposition} We will discuss the bound on $\Delta y_k$ here only as the two bounds are quite similar in structure. Note that the three terms within the sum at the right hand side of Equation \eqref{equation_y_error} are also of similar structure. They all sum up the refined regression error multiplied by some constant related to $\Delta_i$. The most problematic term is $3 e^{T/4}\sum^{N-1}_{i=k}(\Delta_i + \gamma^{-1}_i) \Gamma_i \frac{1}{\Delta_i} \Xi^x_{t_k}(i, y)$ as the coefficient is $\mathcal{O}(1)$. The value $\sum^{N-1}_{i=k}(\Delta_i + \gamma^{-1}_i) \Gamma_i \frac{1}{\Delta_i}$ tends to infinity as the number of time steps tends to infinity. Therefore, one must use the parameters $M$ and $B$ to ensure the refined regression term is bounded by $C\Delta_i^{1+\epsilon}$ for some constant $C$, such that the sum and the error are bounded by $C C^\epsilon_\pi$. This error plus the discretization error between the continuous system and the discretized system would be the complete error. So, in practice, one should ensure that $N, M, M/B$ all tend together to infinity.
\begin{proof}
The proof is fairly similar to the one used in \cite{gobet_turkedjiev_2016} with the necessary modifications for our present algorithm.
We shall derive an a-priori estimate of the error propagation in the recursion steps and we start with an estimate of $\Delta z^{}_k(x)$.
Note that we add an extra term in the formula which is equal to zero due to the expectation of the Brownian motion being equal to zero.
This term is added here to facilitate future steps of the proof.
We have
\begin{align*}
|\Delta_k \Delta z^{}_k(x)|^2 = & \left(\expectation{
\left(
\Delta y^{}_{k+1}(X^\pi_{t_{k+1}})-\expectation{\Delta y^{}_{k+1}(X^\pi_{t_{k+1}})}{k}{x}
\right)
\transpose{\Delta W_k}}{k}{x} \right.\\
& + \left.
\expectation{\left(y^{R}_{k+1}(X^\pi_{t_{k+1}})- \tilde{y}^{R}_{k+1}(X^\pi_{t_k}, X^\pi_{t_{k+1}})\right)\transpose{\Delta W_k}}{k}{x}\right)^2\\
\leq & 2 \left(\expectation{
\left(
\Delta y^{}_{k+1}(X^\pi_{t_{k+1}})-\expectation{\Delta y^{}_{k+1}(X^\pi_{t_{k+1}})}{k}{x}
\right)
\transpose{\Delta W_k}}{k}{x} \right)^2\\
& + 2 \left(\expectation{\left(y^{R}_{k+1}(X^\pi_{t_{k+1}})- \tilde{y}^{R}_{k+1}(X^\pi_{t_k}, X^\pi_{t_{k+1}})\right)\transpose{\Delta W_k}}{k}{x}\right)^2.
\end{align*}
The inequality follows from the inequality $(\sum^N_{n=1} a_n)^2 \leq \sum^N_{n=1} Na^2_n$, which will be frequently used in the proof and will not be specified again.
By applying the Cauchy-Schwarz inequality, we can derive bounds for the two terms separately, where
\begin{align*}
& \left|
\expectation{
\left(
\Delta y^{}_{k+1}(X^\pi_{t_{k+1}})-\expectation{\Delta y^{}_{k+1}(X^\pi_{t_{k+1}})}{k}{x}
\right)
\transpose{\Delta W_k}}{k}{x}
\right|^2 \\
\leq &
q \Delta_k
\left(
\expectation{(\Delta y^{}_{k+1}(X^\pi_{t_{k+1}}))^2}{k}{x} - \left(\expectation{\Delta y^{}_{k+1}(X^\pi_{t_{k+1}})}{k}{x}\right)^2
\right),
\end{align*}
and
\begin{align*}
& \left|
\expectation{
\left(y^{R}_{k+1}(X^\pi_{t_{k+1}})- \tilde{y}^{R}_{k+1}(X^\pi_{t_k}, X^\pi_{t_{k+1}})\right)
\transpose{\Delta W_k}}{k}{x}
\right|^2\\
\leq & q \Delta_k
\expectation{
\left|y^{R}_{k+1}(X^\pi_{t_{k+1}})- \tilde{y}^{R}_{k+1}(X^\pi_{t_k}, X^\pi_{t_{k+1}})\right|^2
}{k}{x}.
\end{align*}
Therefore,
\begin{align}
\Delta_k |\Delta z^{}_k(x)|^2 \leq &
2q \left(
\expectation{(\Delta y^{}_{k+1}(X^\pi_{t_{k+1}}))^2}{k}{x} - \left(\expectation{\Delta y^{}_{k+1}(X^\pi_{t_{k+1}})}{k}{x}\right)^2
\right)\nonumber\\
& + 2q \expectation{
\left|y^{R}_{k+1}(X^\pi_{t_{k+1}})- \tilde{y}^{R}_{k+1}(X^\pi_{t_k}, X^\pi_{t_{k+1}})\right|^2
}{k}{x}. \label{equation_estimates_for_z}
\end{align}
Combining the fact that $(a+b)^2 \leq (1 + \gamma_k\Delta_k)a^2 +(1 + \gamma_k^{-1}\Delta^{-1}_k) b^2$ for $(a,b)\in\mathbb{R}^2$, $\gamma_k >0$, and the Lipschitz property of $f$, one deduces with Equation (\ref{equation_estimates_for_z}) that, for $0\leq k\leq N-2$:
\begin{align}
|\Delta y^{}_k(x)|^2
\leq &
\left(
\expectation{\Delta y^{}_{k+1}(X^\pi_{t_{k+1}})}{k}{x}
+\expectation{y^{R}_{k+1}(X^\pi_{t_{k+1}}) - \tilde{y}^{R}_{k+1}(X^\pi_{t_k}, X^\pi_{t_{k+1}})}{k}{x}
\right.\nonumber\\
& + \expectation{f_{k+1}(y^{}_{k+1}(X^\pi_{t_{k+1}}), z^{}_{k+1}(X^\pi_{t_{k+1}})) - f^{R}_{k+1}(X^\pi_{t_{k+1}})}{k}{x}\Delta_k\nonumber\\
& \left.
+\expectation{
f_{k+1}^{R}(X^\pi_{t_{k+1}})
- \tilde{f}_{k+1}^{R}(X^\pi_{t_k}, X^\pi_{t_{k+1}})
}{k}{x}\Delta_k
\right)^2\nonumber\\
\leq &
(1+ \gamma_k \Delta_k)\left(\expectation{\Delta y^{}_{k+1}(X^\pi_{t_{k+1}})}{k}{x}\right)^2 \nonumber\\
& + 3\left(\Delta_k + \gamma^{-1}_k\right) \Delta_k
\left[
L^2_f\expectation{(\Delta y^{}_{k+1}(X^\pi_{t_{k+1}}))^2}{k}{x}
+ L^2_f\expectation{(\Delta z^{}_{k+1}(X^\pi_{t_{k+1}}))^2}{k}{x}
\right.\nonumber\\
& \hspace{95pt}\left.
+\frac{1}{\Delta_k^2}\expectation{|y^{}_{k+1}(X^\pi_{t_{k+1}}) - \tilde{y}^{}_{k+1}(X^\pi_{t_k}, X^\pi_{t_{k+1}})|^2}{k}{x}
\right.\nonumber\\
& \hspace{95pt}\left.
+\expectation{
|f_{k+1}^{R}(X^\pi_{t_{k+1}})
- \tilde{f}_{k+1}^{R}(X^\pi_{t_k}, X^\pi_{t_{k+1}})|^2
}{k}{x}
\right]\nonumber\\
\leq &
(1+ \gamma_k \Delta_k)\left(\expectation{\Delta y^{}_{k+1}(X^\pi_{t_{k+1}})}{k}{x}\right)^2 \nonumber\\
& + 3(\Delta_k + \gamma^{-1}_k) \Delta_k L^2_f\expectation{(\Delta y^{}_{k+1}(X^\pi_{t_{k+1}}))^2}{k}{x}\nonumber\\
& + 6q(\Delta_k + \gamma^{-1}_k) L^2_f R_\pi\left(
\expectation{(\Delta y^{}_{k+2}(X^\pi_{t_{k+2}}))^2}{k}{x} - \expectation{\left(\expectation{\Delta y^{}_{k+2}(X^\pi_{t_{k+2}})}{k+1}{}\right)^2}{k}{x}
\right) \nonumber\\
& + 6q(\Delta_k + \gamma^{-1}_k) L^2_f \expectation{
\left|y^{R}_{k+2}(X^\pi_{t_{k+2}})- \tilde{y}^{R}_{k+2}(X^\pi_{t_{k+1}}, X^\pi_{t_{k+2}})\right|^2
}{k}{x}\nonumber\\
& + 3(\Delta_k + \gamma^{-1}_k) \Delta_k \frac{1}{\Delta_k^2}\expectation{|y^{R}_{k+1}(X^\pi_{t_{k+1}}) - \tilde{y}^{R}_{k+1}(X^\pi_{t_k}, X^\pi_{t_{k+1}})|^2}{k}{x}\nonumber\\
& + 3(\Delta_k + \gamma^{-1}_k) \Delta_k \expectation{
|f_{k+1}^{R}(X^\pi_{t_{k+1}})
- \tilde{f}_{k+1}^{R}(X^\pi_{t_k}, X^\pi_{t_{k+1}})|^2
}{k}{x}, \label{equation_estimation_y_lipschitz}
\end{align}
while
\begin{align}
|\Delta y^{}_{N-1}(x)|^2
\leq &
3\left(\Delta_k + \gamma^{-1}_k\right) \Delta_k
\left[
\frac{1}{\Delta_k^2}\expectation{|y^{R}_{k+1}(X^\pi_{t_{k+1}}) - \tilde{y}^{R}_{k+1}(X^\pi_{t_k}, X^\pi_{t_{k+1}})|^2}{k}{x}
\right.\nonumber\\
& \hspace{95pt}\left.
+\expectation{
|f_{k+1}^{R}(X^\pi_{t_{k+1}})
- \tilde{f}_{k+1}^{R}(X^\pi_{t_k}, X^\pi_{t_{k+1}})|^2
}{k}{x}
\right]. \label{equation_estimation_for_y}
\end{align}
Next, we define the following sequence
\begin{equation*}
\lambda_k := \left[1 + \left(\gamma_{k-1} + \frac{1}{4}\right)\Delta_{k-1}\right]\lambda_{k-1}, \mbox{ where } \lambda_0:=1,
\end{equation*}
consider the sum of $|\Delta y_i(X^\pi_{t_i})|^2\lambda_i$, from $i = k$ to $N-1$, and take conditional expectations with respect to $\mathcal{F}_k$.
Applying Equation (\ref{equation_estimation_for_y}) for the case $k = N-1$ and Equation (\ref{equation_estimation_y_lipschitz}) otherwise, we have:
\begin{align*}
\sum^{N-1}_{i=k} \expectation{|\Delta y^{}_i(X^\pi_{t_{i}})|^2 \lambda_i}{k}{x}
\leq & \sum^{N-2}_{i=k}\lambda_{i+1}\expectation{\left(\Delta y^{}_{i+1}(X^\pi_{t_{i+1}})\right)^2}{k}{x}\\
& + \sum^{N-2}_{i=k} 6q(\Delta_i + \gamma^{-1}_i) L^2_f \lambda_i \expectation{
\left|y^{R}_{i+2}(X^\pi_{t_{i+2}})- \tilde{y}^{R}_{i+2}(X^\pi_{t_{i+1}}, X^\pi_{t_{i+2}})\right|^2
}{k}{x}\\
& + \sum^{N-1}_{i=k}3(\Delta_i + \gamma^{-1}_i) \Delta_i \frac{1}{\Delta_i^2}\lambda_i \expectation{|y^{R}_{i+1}(X^\pi_{t_{i+1}}) - \tilde{y}^{R}_{i+1}(X^\pi_{t_i}, X^\pi_{t_{i+1}})|^2}{k}{x}\\
& + \sum^{N-1}_{i=k}3(\Delta_i + \gamma^{-1}_i) \Delta_i \lambda_i\expectation{
|f_{i+1}^{R}(X^\pi_{t_{i+1}})
- \tilde{f}_{i+1}^{R}(X^\pi_{t_{i}}, X^\pi_{t_{i+1}})|^2
}{k}{x}.
\end{align*}
By rearranging the terms, we have:
\begin{align*}
|\Delta y^{}_k(x)|^2\lambda_k
\leq &
\sum^{N-2}_{i=k} 6q(\Delta_i + \gamma^{-1}_i) L^2_f \lambda_i \expectation{
\left|y^{R}_{i+2}(X^\pi_{t_{i+2}})- \tilde{y}^{R}_{i+2}(X^\pi_{t_{i+1}}, X^\pi_{t_{i+2}})\right|^2
}{k}{x}\\
& + \sum^{N-1}_{i=k}3(\Delta_i + \gamma^{-1}_i) \Delta_i \frac{1}{\Delta_i^2}\lambda_i \expectation{|y^{R}_{i+1}(X^\pi_{t_{i+1}}) - \tilde{y}^{R}_{i+1}(X^\pi_{t_i}, X^\pi_{t_{i+1}})|^2}{k}{x}\\
& + \sum^{N-1}_{i=k}3(\Delta_i + \gamma^{-1}_i) \Delta_i \lambda_i\expectation{
|f_{i+1}^{R}(X^\pi_{t_{i+1}})
- \tilde{f}_{i+1}^{R}(X^\pi_{t_{i}}, X^\pi_{t_{i+1}})|^2
}{k}{x}.
\end{align*}
It follows from the simple inequality $\Gamma_k \leq \lambda_k = \exp\left(\sum^{k-1}_{i=0}\log(1+(\gamma_i +0.25)\Delta_i)\right) \leq e^{T/4}\Gamma_k$ that, for all $k\in \{0, \ldots, N\}$,
\begin{align}
|\Delta y^{}_k(x)|^2
\leq &
|\Delta y^{}_k(x)|^2\Gamma_k \nonumber \\
\leq &
6q e^{T/4} \sum^{N-2}_{i=k} (\Delta_i + \gamma^{-1}_i) \Gamma_i L^2_f \expectation{
\left|y^{R}_{i+2}(X^\pi_{t_{i+2}})- \tilde{y}^{R}_{i+2}(X^\pi_{t_{i+1}}, X^\pi_{t_{i+2}})\right|^2
}{k}{x}\nonumber\\
&
+ 3 e^{T/4}\sum^{N-1}_{i=k}(\Delta_i + \gamma^{-1}_i) \Gamma_i \frac{1}{\Delta_i} \expectation{|y^{R}_{i+1}(X^\pi_{t_{i+1}}) - \tilde{y}^{R}_{i+1}(X^\pi_{t_i}, X^\pi_{t_{i+1}})|^2}{k}{x}\nonumber\\
&
+ 3 e^{T/4} \sum^{N-1}_{i=k}(\Delta_i + \gamma^{-1}_i) \Gamma_i \Delta_i \expectation{
|f_{i+1}^{R}(X^\pi_{t_{i+1}})
- \tilde{f}_{i+1}^{R}(X^\pi_{t_{i}}, X^\pi_{t_{i+1}})|^2
}{k}{x}. \label{equation_pointwise_bound_y}
\end{align}
We can take expectations with respect to the simulation cloud and apply Theorem \ref{theorem_regression}, which finishes the calculation for $\Delta y$.
Regarding the error term $\Delta z$, $\sum^{N-1}_{i=k}\Delta_i\expectation{|\Delta z^{}_i(X^\pi_{t_i})|^2}{k}{x}\Gamma_i $ is bounded from above by
\begin{align*}
& \sum^{N-1}_{i=k}\Delta_i\expectation{|\Delta z^{}_i(X^\pi_{t_i})|^2}{k}{x}\Gamma_i \\
\leq &
\sum^{N-1}_{i=k} 2q \left(
\expectation{(\Delta y^{}_{i+1}(X^\pi_{t_{i+1}}))^2}{k}{x} - \expectation{\left(\expectation{\Delta y^{}_{i+1}(X^\pi_{t_{i+1}})}{i}{}\right)^2}{k}{x}
\right)\Gamma_{i+1}\\
& + \sum^{N-1}_{i=k} 2q \expectation{
\left|y^{R}_{i+1}(X^\pi_{t_{i+1}})- \tilde{y}^{R}_{i+1}(X^\pi_{t_i}, X^\pi_{t_{i+1}})\right|^2
}{k}{x}\Gamma_i\\
\leq &
2q \Gamma_N \expectation{(\Delta y^{}_N(X^\pi_{t_N}))^2}{k}{x}\\
& + \sum^{N-1}_{i=k+1} 2q \Gamma_i \left(
\expectation{(\Delta y^{}_i(X^\pi_{t_i}))^2}{k}{x} - (1+\gamma_i \Delta_i)\expectation{\left(\expectation{\Delta y^{}_{i+1}(X^\pi_{t_{i+1}})}{i}{}\right)^2}{k}{x}
\right)\\
& + \sum^{N-1}_{i=k} 2q \expectation{
\left|y^{R}_{i+1}(X^\pi_{t_{i+1}})- \tilde{y}^{R}_{i+1}(X^\pi_{t_i}, X^\pi_{t_{i+1}})\right|^2
}{k}{x}\Gamma_i,
\end{align*}
because of Equation (\ref{equation_estimates_for_z}), and from (\ref{equation_estimation_y_lipschitz}), we have
\begin{align*}
\sum^{N-1}_{i=k}\Delta_i\expectation{|\Delta z^{}_i(X^\pi_{t_i})|^2}{k}{x}\Gamma_i
\leq &
6\sum^{N-1}_{i = k+1} q \left(\Delta_i + \gamma^{-1}_i\right) \Delta_i
L^2_f\expectation{(\Delta y^{}_{i+1}(X^\pi_{t_{i+1}}))^2}{k}{x} \Gamma_i \\
& + 6\sum^{N-1}_{i = k+1} q \left(\Delta_i + \gamma^{-1}_i\right) \Delta_i
L^2_f\expectation{(\Delta z^{}_{i+1}(X^\pi_{t_{i+1}}))^2}{k}{x} \Gamma_i\\
& + 6\sum^{N-1}_{i = k+1} q \left(\Delta_i + \gamma^{-1}_i\right)
\frac{1}{\Delta_i}\expectation{|y^{R}_{i+1}(X^\pi_{t_{i+1}}) - \tilde{y}^{R}_{i+1}(X^\pi_{t_i}, X^\pi_{t_{i+1}})|^2}{k}{x}\Gamma_i\\
& + 6\sum^{N-1}_{i = k+1} q \left(\Delta_i + \gamma^{-1}_i\right) \Delta_i
\expectation{
|f_{i+1}^{R}(X^\pi_{t_{i+1}})
- \tilde{f}_{i+1}^{R}(X^\pi_{t_{i}}, X^\pi_{t_{i+1}})|^2
}{k}{x}\Gamma_i\\
& + \sum^{N-1}_{i=k} 2q \expectation{
\left|y^{R}_{i+1}(X^\pi_{t_{i+1}})- \tilde{y}^{R}_{i+1}(X^\pi_{t_i}, X^\pi_{t_{i+1}})\right|^2
}{k}{x}\Gamma_i.
\end{align*}
Using the assumptions of the proposition statement, it follows that
\begin{align*}
& \sum^{N-1}_{i=k}\Delta_i\expectation{|\Delta z^{}_i(X^\pi_{t_i})|^2}{k}{x}\Gamma_i \\
\leq &
12\sum^{N-1}_{i = k+1} q \left(\Delta_i + \gamma^{-1}_i\right)
\frac{1}{\Delta_i}\expectation{|y^{R}_{i+1}(X^\pi_{t_{i+1}}) - \tilde{y}^{R}_{i+1}(X^\pi_{t_i}, X^\pi_{t_{i+1}})|^2}{k}{x}\Gamma_i\\
& + 12\sum^{N-1}_{i = k+1} q \left(\Delta_i + \gamma^{-1}_i\right) \Delta_i
\expectation{
|f_{i+1}^{R}(X^\pi_{t_{i+1}})
- \tilde{f}_{i+1}^{R}(X^\pi_{t_{i}}, X^\pi_{t_{i+1}})|^2
}{k}{x}\Gamma_i\\
& + 4 \sum^{N-1}_{i=k} q \expectation{
\left|y^{R}_{i+1}(X^\pi_{t_{i+1}})- \tilde{y}^{R}_{i+1}(X^\pi_{t_i}, X^\pi_{t_{i+1}})\right|^2
}{k}{x}\Gamma_i
+ \sum^{N-1}_{j = k+1} \Delta_j \expectation{(\Delta y^{}_{j+1}(X^\pi_{t_{j+1}}))^2}{k}{x} \Gamma_{j+1}.
\end{align*}
Note that we may bound each individual term in the last sum with the estimate from Equation (\ref{equation_pointwise_bound_y}) and by taking conditional expectations.
\begin{align*}
\mathbb{E}^x_{t_k}[(\Delta y_{j+1}(X^\pi_{t_{j+1}}))^2 \Gamma_{j+1}]
\leq &
6q e^{T/4} \sum^{N-2}_{i=j+1} (\Delta_i + \gamma^{-1}_i) \Gamma_i L^2_f \expectation{
\left|y^{R}_{i+2}(X^\pi_{t_{i+2}})- \tilde{y}^{R}_{i+2}(X^\pi_{t_{i+1}}, X^\pi_{t_{i+2}})\right|^2
}{k}{x}\nonumber\\
& + 3 e^{T/4}\sum^{N-1}_{i=j+1}(\Delta_i + \gamma^{-1}_i) \Gamma_i \frac{1}{\Delta_i} \expectation{|y^{R}_{i+1}(X^\pi_{t_{i+1}}) - \tilde{y}^{R}_{i+1}(X^\pi_{t_i}, X^\pi_{t_{i+1}})|^2}{k}{x}\nonumber\\
& + 3 e^{T/4} \sum^{N-1}_{i=j+1}(\Delta_i + \gamma^{-1}_i) \Gamma_i \Delta_i \expectation{
|f_{i+1}^{R}(X^\pi_{t_{i+1}})
- \tilde{f}_{i+1}^{R}(X^\pi_{t_{i}}, X^\pi_{t_{i+1}})|^2
}{k}{x}\\
\leq &
6q e^{T/4} \sum^{N-2}_{i=k+1} (\Delta_i + \gamma^{-1}_i) \Gamma_i L^2_f \expectation{
\left|y^{R}_{i+2}(X^\pi_{t_{i+2}})- \tilde{y}^{R}_{i+2}(X^\pi_{t_{i+1}}, X^\pi_{t_{i+2}})\right|^2
}{k}{x}\nonumber\\
& + 3 e^{T/4}\sum^{N-1}_{i=k+1}(\Delta_i + \gamma^{-1}_i) \Gamma_i \frac{1}{\Delta_i} \expectation{|y^{R}_{i+1}(X^\pi_{t_{i+1}}) - \tilde{y}^{R}_{i+1}(X^\pi_{t_i}, X^\pi_{t_{i+1}})|^2}{k}{x}\nonumber\\
& + 3 e^{T/4} \sum^{N-1}_{i=k+1}(\Delta_i + \gamma^{-1}_i) \Gamma_i \Delta_i \expectation{
|f_{i+1}^{R}(X^\pi_{t_{i+1}})
- \tilde{f}_{i+1}^{R}(X^\pi_{t_{i}}, X^\pi_{t_{i+1}})|^2
}{k}{x}
\end{align*}
This upper bound is independent of $j$.
Summing the remaining time increments yields at most the total time length $T$.
We have:
\begin{align*}
& \sum^{N-1}_{i=k}\Delta_i\expectation{|\Delta z^{}_i(X^\pi_{t_i})|^2}{k}{x}\Gamma_i \\
\leq &
(12q + 3Te^{T/4})\sum^{N-1}_{i = k+1} \left(\Delta_i + \gamma^{-1}_i\right)
\frac{1}{\Delta_i}\expectation{|y^{R}_{i+1}(X^\pi_{t_{i+1}}) - \tilde{y}^{R}_{i+1}(X^\pi_{t_i}, X^\pi_{t_{i+1}})|^2}{k}{x}\Gamma_i\\
& + 6q Te^{T/4} \sum^{N-2}_{i=k} (\Delta_i + \gamma^{-1}_i) \Gamma_i L^2_f \expectation{
\left|y^{R}_{i+2}(X^\pi_{t_{i+2}})- \tilde{y}^{R}_{i+2}(X^\pi_{t_{i+1}}, X^\pi_{t_{i+2}})\right|^2
}{k}{x}\\
& + (12q + 3Te^{T/4})
\sum^{N-1}_{i = k+1} \left(\Delta_i + \gamma^{-1}_i\right) \Delta_i
\expectation{
|f_{i+1}^{R}(X^\pi_{t_{i+1}})
- \tilde{f}_{i+1}^{R}(X^\pi_{t_{i}}, X^\pi_{t_{i+1}})|^2
}{k}{x}\Gamma_i\\
& + 4 \sum^{N-1}_{i=k} q \expectation{
\left|y^{R}_{i+1}(X^\pi_{t_{i+1}})- \tilde{y}^{R}_{i+1}(X^\pi_{t_i}, X^\pi_{t_{i+1}})\right|^2
}{k}{x}\Gamma_i.
\end{align*}
Again, taking expectations with respect to the simulation cloud finishes the proof. \end{proof}
\section{Numerical Experiments} \label{section_numerical}
In this section, numerical experiments are conducted for some selected examples. Before discussing these examples, we will specify the forward and backward discretization schemes used in these experiments. In particular, we introduce a more general backward scheme to show that our algorithm can be applied in general circumstances.
\subsection{Forward and Backward Scheme} In this section, we conduct our numerical experiments with the Euler-Maruyama discretization scheme, which is a common standard in the literature.
\begin{definition}[Euler-Maruyama scheme]
The Euler-Maruyama scheme is defined by
\begin{equation*}
X^\pi_{t_{k+1}} = X^\pi_{t_k} + b(t_k,X^\pi_{t_k})\Delta_k + \sigma(t_k,X^\pi_{t_k})\Delta W_k =: d(X^\pi_{t_k}, \Delta W_k).
\end{equation*}
Note that the conditional expectation $\expectation{\frac{\Delta W_{l,k}}{\Delta_k}p(X^\pi_{t_{k+1}})}{k}{x}$ can be calculated by:
\begin{align*}
\expectation{\frac{\Delta W_{l,k}}{\Delta_k}p(X^\pi_{t_{k+1}})}{k}{x}
= & \frac{1}{\sqrt{(2\pi)^q \Delta^q_k}}
\int_{\mathbb{R}^q}p(d(x,y))
\frac{\partial}{\partial y_l}\left(-\exp\left(-\frac{1}{2}\sum^q_{r=1}\frac{y^2_r}{\Delta_k}\right)\right)dy\\
= & \frac{1}{\sqrt{(2\pi)^q\Delta^q_k}}\int_{\mathbb{R}^q}\exp\left(-\frac{1}{2}\sum^q_{r=1}\frac{y^2_r}{\Delta_k}\right) \nabla p(d(x,y))\frac{\partial d(x,y)}{\partial y_l}dy\\
= & \expectation{\nabla p(X^\pi_{t_{k+1}})}{k}{x}\sigma_{l}(t_k,x),
\end{align*}
where $\sigma_l$ is the $l$-th column of the matrix $\sigma$. \end{definition}
For example, for the one-dimensional monomial $x^r$, $r \in \mathbb{N}$ and a forward process discretized by the Euler-Maruyama scheme, we have \begin{align*} \expectation{\frac{\Delta W_k}{\Delta_k}(X^\pi_{t_{k+1}})^r}{k}{x} = & \expectation{r(X^\pi_{t_{k+1}})^{r-1}}{k}{x}\sigma(t_k,x). \end{align*} The conditional expectations of polynomials are calculated directly by definition. We have \begin{align*} \mathbb{E}^x_{t_k}[(X^\pi_{t_{k+1}})^0] = & 1;\\ \mathbb{E}^x_{t_k}[(X^\pi_{t_{k+1}})^1] = & x + b(t_k,x)\Delta_k;\\ \mathbb{E}^x_{t_k}[(X^\pi_{t_{k+1}})^{2}] = & x^2 + 2 x b(t_k,x)\Delta_k + \sigma(t_k, x)^2\Delta_k + b(t_k, x)^2 \Delta^2_k ,\\ \end{align*} and so on.
For the backward discretization, we apply the theta-scheme from \cite{zhao_li_zhang_2012} and \cite{ruijter_oosterlee_2015}: \begin{align*} Y^{\pi}_{t_N} = & \Phi(X^\pi_{t_N}),\quad Z^{\pi}_{t_N} = \transpose{\left(\nabla \Phi(X^\pi_{t_N}) \sigma (t_N, X^\pi_{t_N})\right)},\\ Z^{\pi}_{t_k} = & -\theta_2^{-1} (1-\theta_2) \expectation{Z^\pi_{t_{k+1}}}{k}{} + \frac{1}{\Delta_k}\theta_2^{-1}\expectation{Y^\pi_{t_{k+1}}\Delta W_k}{k}{} \\ & + \theta_2^{-1}(1-\theta_2)\expectation{f_{k+1}(Y^\pi_{t_{k+1}}, Z^\pi_{t_{k+1}})\Delta W_k }{k}{}, \; k = N-1, \ldots, 0,\\ Y^\pi_{t_k} = & \expectation{Y^\pi_{t_{k+1}}}{k}{} + \Delta_k \theta_1 f_k(Y^\pi_{t_k}, Z^\pi_{t_k})\\ & + \Delta_k (1-\theta_1) \expectation{f_{k+1}(Y^\pi_{t_{k+1}},Z^\pi_{t_{k+1}})}{k}{},\; k= N-1,\ldots,0, \end{align*} $0 \leq \theta_1 \leq 1$ and $0 < \theta_2 \leq 1$.
By picking various parameters $(\theta_1, \theta_2)$, we can construct different types of one-step dynamic programming schemes. For example, the choice $(\theta_1, \theta_2) = (0,1)$ would result in an explicit scheme, while the choice $(\theta_1, \theta_2) = (0.5, 0.5)$ would give the Crank-Nicolson scheme. Using a general construction means that our algorithm can be applied to various types of schemes and we may adjust our algorithms towards the specific problem.
Applying the SGBM algorithm to this general scheme, we have that the approximate functions within the bundle at time $k$ are defined by: \begin{align*} & \approximant{z}{r, k}{(\theta_1, \theta_2), R}(b, x) = -\theta^{-1}_2(1-\theta_2)\expectation{p(X^\pi_{t_{k+1}})}{k}{x} \regressionparameter{\beta}{r, k+1}\\ & \hspace{80pt} + \theta_2^{-1}\expectation{
\frac{\Delta W_{r,k}}{\Delta_k}p(X^\pi_{t_{k+1}}) }{k}{x} (\regressionparameter{\alpha}{k+1}+(1-\theta_2)\Delta_k\regressionparameter{\gamma}{k+1}),\quad r= 1,\ldots,d;\\ & \approximant{y}{k}{(\theta_1, \theta_2), R, 0}(b, x) = \expectation{p(X^\pi_{t_{k+1}})}{k}{x} \regressionparameter{\alpha}{k+1},\\ & \approximant{y}{k}{(\theta_1, \theta_2), R, i}(b, x) = \Delta_k\theta_1 f(t_k, x, \approximant{y}{k}{(\theta_1, \theta_2), R,i-1}(x), \approximant{z}{k}{(\theta_{1}, \theta_2), R}(x)) + \approximant{h}{k}{}(b, x),\\ & \approximant{h}{k}{}(b, x) = \expectation{p(X^\pi_{t_{k+1}})}{k}{x} (\regressionparameter{\alpha}{k+1}+ \Delta_k(1 - \theta_1)\regressionparameter{\gamma}{k+1}), \quad i = 1, \ldots, I. \end{align*} Note that a Picard iteration is performed at each time step for each bundle if the choice of $(\theta_1, \theta_2)$ results in an implicit scheme. For further details on the application of the Picard iteration, readers may refer to \cite{gobet_lemor_warin_2005} or \cite{ruijter_oosterlee_2015} and the references therein.
Different types of backward discretizations will be considered for Example 1.
\subsection{Example 1} This example is originally from \cite{zhao_li_zhang_2012}. The considered FBSDE is given by \begin{equation*} \left\{ \begin{array}{l} dX_t = d\omega_t,\\ dY_t = -(Y_tZ_t-Z_t+2.5Y_t-\sin(t+X_t)\cos(t+X_t)-2\sin(t+X_t))dt+Z_td\omega_t. \end{array} \right. \end{equation*} We take the initial and terminal conditions $x_0 = 0$ and $Y_T = \sin(X_T+T)$.
The exact solution is given by \begin{equation*} (Y_t,Z_t)=(\sin(X_t+t),\cos(X_t+t)). \end{equation*} The terminal time is set to be $T=1$ and $(Y_0,Z_0)=(0,1)$. We use the set $\{1, x, x^2\}$ as the regression basis for this example. We apply equal partitioning bundling for all our tests with the sample paths sorted by the value of $x$. As mentioned in Section \ref{session_assumptions}, not all assumptions set in this work are necessary for the basic SGBM algorithm to work. For example, Assumption \ref{assumption:globally_lipschitz} is included to ensure the existence and uniqueness of the solution of the BSDE. In this example, even though the driver is not Lipschitz, one can check that the above solution solves the BSDE with It\^o's formula, and the SGBM algorithm still applies.
Table \ref{test_case_general} shows the tests that we have run. Basically, our test cases can be placed into two groups. Test cases 1a, 1b, 1c are tests for the explicit version of our algorithm, while test cases 1d, 1e, 1f are for the Crank-Nicolson version. Within each group, the three tests are run for identical test settings, except for the constant $L$, i.e., the pre-set limit for the Euclidean norm, so that we may check the influence of the factor $L$. Within each test, the factors $M$, $N$ and $B$ are linked to a common factor $J$ such that when $J$ tends to infinity, $N$, $B$ and $M/B$ tend to infinity as well. This setting is inspired by our observation on the error bound that all three factors should tend to infinity together to ensure the convergence of the algorithm. However, the extra ratio between the three factors is based on empirical experience.
\begin{table}[h]
\centering
\begin{tabular}{|c|ccccccc|}
\hline
Test Case & $\theta_1$ & $\theta_2$ & I & M & N & B & L\\
\hline
1a & 0 & 1 & - & $2^{2J}$ & $2^J$ & $2^J$ & $100$\\
1b & 0 & 1 & - & $2^{2J}$ & $2^J$ & $2^J$ & $10000$\\
1c & 0 & 1 & - & $2^{2J}$ & $2^J$ & $2^J$ & $-$\\
1d & 0.5 & 0.5 & 4 & $2^{2J}$ & $2^J$ & $2^J$ & $100$\\
1e & 0.5 & 0.5 & 4 & $2^{2J}$ & $2^J$ & $2^J$ & $10000$\\
1f & 0.5 & 0.5 & 4 & $2^{2J}$ & $2^J$ & $2^J$ & $-$\\
\hline
\end{tabular}
\caption{Test cases for Example 1} \label{test_case_general} \end{table}
\subsection{Example 2: Black-Scholes European option} The second example under consideration is the calculation of the price $v(t,S_t)$ of a European option under the $d$-dimensional Black-Scholes model by solving an FBSDE, which has been a classical application of BSDEs. It has been introduced in classical papers, like \cite{karoui1997backward}, and here we will provide a brief review. We consider a market where the assets satisfy: \begin{equation*} dS_{i,t} = \mu_i S_{i,t} dt + \sigma_i S_{i,t} d B_{i, t},\; 1 \leq i \leq d, \end{equation*} where $B_t$ is a correlated $d$-dimensional Wiener process, with $$dB_{i, t}dB_{j, t} = \rho_{ij} dt.$$ The parameters $\rho_{ij}$ form a symmetric non-negative matrix $\rho$, \begin{equation*} \rho = \left(\begin{array}{ccccc} 1 & \rho_{12} & \rho_{13} & \cdots & \rho_{1d}\\ \rho_{21} & 1 & \rho_{23} & \cdots & \rho_{2d}\\ \vdots & \vdots & \vdots & & \vdots\\ \rho_{d1} & \rho_{d2}& \rho_{d3} & \cdots & 1 \end{array}\right), \end{equation*} and we assume it is invertible. By performing a Cholesky decomposition on $\rho$ such that $\mathfrak{C} \mathfrak{C}^\top = \rho$, where $\mathfrak{C}$ is a lower triangular matrix with real and positive diagonal entries, we may relate the correlated and standard Brownian motions, as follows, $$B_t = \mathfrak{C} W_t.$$
Along the line of reasoning in \cite{ruijter_oosterlee_2015}, we assume the financial market is complete, there is no trading restriction and a derivative can be perfectly hedged. To derive the corresponding pricing BSDE for a European option with terminal payoff $g(S_T)$, we construct a replicating portfolio $Y_t$, containing $\omega_{i, t}$ of asset $S_{i,t}$ and bonds with risk-free return rate $r$. Applying the self-financing assumption, the portfolio follows the SDE: \begin{equation*} dY_t = -(-r Y_t - \sum^d_{i=1} \omega_{i,t}(\mu_i-r)S_{i,t})dt + \sum^d_{i=1}\omega_{i, t}\sigma_i S_{i,t} d B_{i, t}. \end{equation*} If we set $Z_t = (\omega_{1,t} \sigma_1 S_{1,t}, \ldots, \omega_{d, t}\sigma_d S_{d, t})\mathfrak{C}$, then $(Y, Z)$ solves the BSDE, \begin{equation*} \begin{cases} dY_t = -\left(-r Y_t - Z_t \mathfrak{C}^{-1}\left(\frac{\mu - r}{\sigma}\right) \right)dt + Z_t dW_t;\\ Y_T = g(S_T), \end{cases} \end{equation*} where $\left(\frac{\mu -r}{\sigma}\right) = \left(\frac{\mu_1-r}{\sigma_1}, \cdots, \frac{\mu_d - r}{\sigma_d}\right)^T$.
We test our algorithm for the next two cases.
\subsubsection{Arithmetic Basket Put Option}
In this numerical test, we use the 5-dimensional example from \cite{2007reisingerefficient}, which is designed as a tractable representation for the German stock index DAX at that time. All $\mu_i$ are assumed to be $r$ here. The volatilities are given by $$(\sigma_1, \sigma_2, \sigma_3, \sigma_4, \sigma_5) = (0.518, 0.648, 0.623, 0.570, 0.530), $$ while the correlations $\rho$ are given by \begin{equation*} \rho = \left(\begin{array}{ccccc} 1.00 & 0.79 & 0.82 & 0.91 & 0.84\\ 0.79 & 1.00 & 0.73 & 0.80 & 0.76\\ 0.82 & 0.73 & 1.00 & 0.77 & 0.72\\ 0.91 & 0.80 & 0.77 & 1.00 & 0.90\\ 0.84 & 0.76 & 0.72 & 0.90 & 1.00 \end{array}\right). \end{equation*}
We consider a European weighted basket put option for $T=1$ year, with the payoff function $g$ given by $$g(S) = \left(1 - \sum^5_{i=1}w_i S_i\right)^+,$$ where $(w_1, w_2, w_3, w_4, w_5) = (38.1, 6.5, 5.7, 27.0, 22.7)$. The risk-free interest rate is $r=0.05$ and all the stocks have starting value 0.01. The reference price is given as 0.175866 in \cite{2007reisingerefficient}.
We perform the equal-partitioning bundling technique and sort the paths in different bundles according to the ordering of the value $\sum^5_{i = 1}w_i S^m_{i, t_p}$. The regression basis is chosen to be $p_k(x) = \left(\sum^5_{i = 1}w_i x_i\right)^{k-1}$ for $k = 1, \ldots, K$.
Table \ref{test_case_arithmetic} shows the tests that we have run. In these tests, we keep most of the parameters fixed but vary the number of bundles. We test our algorithm for the explicit scheme with a second-order regression basis and the Crank-Nicolson scheme with a third-order regression basis. The change of basis is made to test the impact of the regression basis on our algorithm. We keep just these two sets of tests to demonstrate the impact of the number of bundles.
\begin{table}[h]
\centering
\begin{tabular}{|c|cccccccc|}
\hline
Test Case & $\theta_1$ & $\theta_2$ & I & M & N & B & L & K\\
\hline
2.1a & 0.5 & 0.5 & 4 & $2^{12}$ & $10$ & $2^{2J}$ & -& 3\\
2.1b & 0 & 1 & - & $2^{12}$ & $10$ & $2^{2J}$ & -& 2\\
\hline
\end{tabular}
\caption{Test cases for Example 2.1} \label{test_case_arithmetic} \end{table}
\subsubsection{Example 2.2: Geometric Basket Put Option}
Here we also consider the problem of pricing $d$-dimensional geometric basket options with initial state $S_0 = (40, \ldots, 40) \in \mathbb{R}^d$; strike $K = 40$; risk-free interest rate $r = 0.06$; volatility $\sigma_i = 0.2, i = 1, \ldots, d$; correlation $\rho_{ij} = 0.25, i,j = 1, \ldots, d, i\neq j$; and maturity $T=1.0$. The final payoff function is given by $$g(S) = \left(K - \left(\prod^d_{i = 1} S_i\right)^{\frac{1}{d}}\right)^+.$$ This is the same setting as in \cite{leitao_oosterlee_2015} but for European options instead of Bermudan options.
We again use the equal-partitioning technique and sort the paths in different bundles according to the ordering of the values $\left(\prod^d_{i=1}S^m_{i, t_p}\right)^{\frac{1}{d}}$. The regression basis is chosen to be $p_k(x) = \left(\prod^d_{i = 1} x_i\right)^{\frac{k-1}{d}}$ for $k = 1, \ldots, 3$.
Since the geometric average of geometric Brownian motions remains a geometric Brownian motion, the analytic solution can be found using the Black--Scholes formula or any other classical pricing method.
Table \ref{test_case_geometric} shows the tests that we have run. In these sets of tests, we fix all the parameters but change the number of stocks in our test. This example is used to test the {\em scalability} of our methodology. Tests are performed for both explicit and Crank-Nicolson schemes.
\begin{table}[h]
\centering
\begin{tabular}{|c|ccccccc|}
\hline
Test Case & $\theta_1$ & $\theta_2$ & I & M & N & B & L\\
\hline
2.2a & 0 & 1 & - & $2^{12}$ & 20 & 16 & - \\
2.2b & 0.5 & 0.5 & 4 & $2^{12}$ & 20 & 16 & - \\
\hline
\end{tabular}
\caption{Test cases for Example 2.2} \label{test_case_geometric} \end{table}
\subsection{Results} The results are given as the average values of 10 separate runs of the algorithm.
We first consider the results of the explicit version of our algorithm applied to Example 1, namely test cases 1a, 1b and 1c, in Table \ref{test_result_explicit}. This test can be seen as a proof of concept. As mentioned, we design the test in such a way that the number of steps $N$, the number of bundles $B$ and the ratio $M/B$ all tend to infinity. As expected, our algorithm converges under this setting. Moreover, the total variation of the absolute errors among each successful run converges with respect to $J$ too, as the reader can read from the second part of Table \ref{test_result_explicit}. It is defined as the sum of the individual differences between the Monte Carlo result of each run (which is not rejected) and the analytic solution, divided by the total number of successful runs.
\begin{table}[h]
\centering
\begin{tabular}{c|ccccccc}
\multicolumn{8}{c}{$|Y_0-y^{(\theta_1, \theta_2), R}_0(x_0)|$}\\
J & 2 & 3 & 4 & 5 & 6 & 7 & 8\\
\hline
1a & 0.023535 & 0.20392 & 0.046947 & 0.057056 & 0.026622 & 0.018172 & 0.016179\\
1b & 0.18360 & 0.17807 & 0.098821 & 0.030159 & 0.028840 & 0.019621 & 0.0057568\\
1c & 0.41648 & 0.14362 & 0.10368 & 0.04658 & 0.018068 & 0.019175 & 0.0098448\\
\hline
\multicolumn{8}{c}{Total Variation/Successful Run}\\
J & 2 & 3 & 4 & 5 & 6 & 7 & 8 \\
\hline
1a & 0.28203 & 0.20392 & 0.081031 & 0.057056 & 0.027255 & 0.018172 & 0.016179\\
1b & 0.31030 & 0.17807 & 0.098884 & 0.044555 & 0.028840 & 0.020392 & 0.0079454\\
1c & 0.60090 & 0.15673 & 0.10368 & 0.054715 & 0.019420 & 0.019175 & 0.011833\\
\hline
\end{tabular}
\caption{Test result for Example 1 with explicit scheme.} \label{test_result_explicit} \end{table}
While we have not shown the proof of convergence for the Crank–Nicolson scheme, where $\theta_1 = \theta_2 = 0.5$, our numerical tests for test cases 1d, 1e, 1f, in Table \ref{test_result_crank_nicolson}, suggest that it works well in our framework.
\begin{table}[h]
\centering
\begin{tabular}{c|ccccccc}
\multicolumn{8}{c}{$|Y_0-y^{(\theta_1, \theta_2), R}_0(x_0)|$}\\
J & 2 & 3 & 4 & 5 &6 & 7 & 8 \\
\hline
1d & $0.0053401$ & $0.032606$ & $0.18142$ & $0.025799$ & $0.0060404$ & $0.020565$ & NA \\
1e & 3.6788 & 0.24551 & 0.34892 & 0.069220 & 0.012861 & 0.0013653 & 0.0024095\\
1f & $4.6822 \times 10^8$ & $3.5241 \times 10^{137}$ & $1.0773 \times 10^{44}$ & 0.051122 & 0.0050518 & 0.011735 & 0.0030526\\
\hline
\multicolumn{8}{c}{Total Variation/Successful Run}\\
J & 2 & 3 & 4 & 5 & 6 & 7 & 8 \\
\hline
1d & $0.23450$ & $0.032606$ & $0.18142$ & $0.025799$ & $0.012630$ & $0.020565$ & NA\\
1e & 4.5732 & 0.37590 & 0.34892 & 0.075550 & 0.014571 & 0.012470 & 0.010903\\
1f & $4.6822 \times 10^8$ & $3.5241 \times 10^{137}$ & $1.0773 \times 10^{44}$ & 0.058288 & 0.020924 & 0.014260 & 0.0078873\\
\hline
\end{tabular}
\caption{Test result for Example 1 with Crank–Nicolson scheme} \label{test_result_crank_nicolson} \end{table}
A specific point of interest is the impact of the factor $L$ introduced in Section \ref{section_refined_regression} for the sample selection. It can be seen in Table \ref{test_result_crank_nicolson} that when the number of paths or bundles is small, a smaller value of $L$ preserves the stability of our algorithm. In test case 1d, where the factor $L$ is relatively small, our algorithm rejected all tests for $J=8$. One explanation is that the regression coefficients converge to the analytic projection coefficients on the basis space but the norm of these analytic coefficients is greater than $L$. The effect of the factor $L$ can actually be seen in Table \ref{test_result_explicit} too. Some runs for test case 1a were rejected when $J=8$ and the result for $J=8$ is worse than either 1b or 1c. On the contrary, if we remove the restriction on $L$ altogether, the results are unsatisfactory when the value of $J$ is low but converge when the number of time steps and samples are high enough. Heuristically, the regression coefficients should converge to the actual projection coefficients on the basis space, which results in a function that is bounded on a compact set. This in turn satisfies the conditions of the proof of convergence with respect to the regression. Although it may look like we can adjust $L$ at the same time as the other algorithm parameters in order to achieve the optimal result, we should still note that $L$ is model dependent and there is no clear way to figure out the best link of $L$ with the simulation parameters. It remains important to use $L$ as a warning system.
Next, we shall move on to the result for the more practical and higher-dimensional Example 2. The results for Example 2.1 in Table \ref{test_result_arithmetic} show that our method can be easily applied to a practical problem.
\begin{table}[h]
\centering
\centering
\begin{tabular}{c|ccccc}
\multicolumn{6}{c}{$|Y_0-y^{(\theta_1, \theta_2), R}_0(x_0)|$}\\
J & 0 & 1 & 2 & & \\
\hline
2.1a & $2.0321 \times 10^{-3}$ & $2.2567 \times 10^{-3}$ & $1.9883 \times 10^{-3}$ & & \\
2.1b & $2.9314 \times 10^{-3}$ & $1.8934 \times 10^{-3}$ & $2.2151 \times 10^{-4}$ & &
\end{tabular}
\caption{Test result for Example 2.1 } \label{test_result_arithmetic} \end{table}
With respect to the problem of dimensionality, we can check the results in Table \ref{test_result_geometric}. Since the analytic solution is known for this problem, we compare our result to the actual value. It can be seen that under our choice of bundling and regression basis, the accuracy of our method is similar across all choices of problem dimensions. This suggests that with an appropriate setting, our algorithm can easily scale up to tackle high-dimensional problems.
\begin{table}[h]
\centering
\begin{tabular}{c|ccccc}
\multicolumn{6}{c}{$|Y_0-y^{(\theta_1, \theta_2), R}_0(x_0)|$}\\
Stock dimensions & 1 & 2 & 3 & 4 & 5 \\
\hline
2.2a & $6.5482 \times 10^{-3}$ & $7.3015 \times 10^{-3}$ & $6.6827 \times 10^{-3}$ & $8.0384 \times 10^{-3}$ & $7.1308 \times 10^{-3}$\\
2.2b & $5.1918 \times 10^{-3}$ & $6.9460 \times 10^{-3}$ & $6.4038 \times 10^{-3}$ & $6.9507 \times 10^{-3}$ & $7.4937 \times 10^{-3}$\\
\hline
Stock dimensions & 6 & 7 & 8 & 9 & 10\\
\hline
2.2a & $6.9885 \times 10^{-3}$ & $7.5067 \times 10^{-3}$ & $6.9271 \times 10^{-3}$ & $6.9993 \times 10^{-3}$ & $7.5682 \times 10^{-3}$\\
2.2b & $7.2034 \times 10^{-3}$ & $7.1633 \times 10^{-3}$ & $7.0850 \times 10^{-3}$ & $7.2023 \times 10^{-3}$ & $6.7595 \times 10^{-3}$\\
\hline
Stock dimensions & 11 & 12 & 13 & 14 & 15\\
\hline
2.2a & $6.9549 \times 10^{-3}$ & $7.4005 \times 10^{-3}$ & $7.5329 \times 10^{-3}$ & $7.1437 \times 10^{-3}$ & $7.1364 \times 10^{-3}$\\
2.2b & $8.4614 \times 10^{-3}$ & $7.1430 \times 10^{-3}$ & $7.6267 \times 10^{-3}$ & $7.8998 \times 10^{-3}$ & $7.2455 \times 10^{-3}$\\
\end{tabular}
\caption{Test result for Example 2.2} \label{test_result_geometric} \end{table}
More generally, all the results from Example 2 suggest that linking the bundling criterion and the regression basis to the terminal condition can deliver an accurate algorithm. Adapting our algorithm to a specific problem to improve the performance could be a promising direction of further research. In fact, the choice of basis itself deserves further study. Even in our localised setting, regression with respect to the linear basis scheme fails to converge for Example 1. A more sophisticated way to pick the regression basis may be important to put our algorithm into actual applications.
To sum up, we have developed a new algorithm for approximating BSDEs based on SGBM and our numerical tests showed that this new algorithm can deliver accurate estimation results.
\end{document} |
\begin{document}
\date {}
\newtheorem{thm}{Theorem} \newtheorem{cor}{Corollary} \newtheorem{lem}{Lemma} \newtheorem{claim}{Claim} \newtheorem{dfn}{Definition} \newtheorem{prop}{Proposition}
\def{\cal D}{{\cal D}} \def{\cal D_A}X{{\cal D_A}X} \def{\cal D}X{{\cal D}X} \def{\cal D}F{{\cal D}F} \def{\cal D}\hat{F_i}{{\cal D}\hat{F_i}} \def{\cal H}{{\cal H}} \def\V(f) {{\rm Var(f)}} \def\varphi{\varphi} \def{\rm Var}{{\rm Var}} \def\Im(var) {{{\rm Hom}(H_n(F,\partial F),~H_n(F))}} \def\K(V) {\tilde{\pi}_0V {{\rm Diff}(F,\partial)}} \def\tilde{\pi}_0 {{\rm Diff}}(\DblF){\tilde{\pi}_0 {{\rm Diff}}(\DblF)} \def\tilde{\pi}_0 {{\rm Diff}(F,{\rm rel}~\partial)}{\tilde{\pi}_0 {{\rm Diff}(F,{\rm rel}~\partial)}} \def{G}{{G}} \def{{\rm Hom}(H_n(F,\partial F),~\G)}{{{\rm Hom}(H_n(F,\partial F),~\G)}} \def{{\rm Hom}(H_n(\DblF),~\G)}{{{\rm Hom}(H_n(\DblF),~\G)}} \def{{\rm Hom}(H_n(F_+),~\G)}{{{\rm Hom}(H_n(F_+),~\G)}} \def\tilde{\pi}_0S {{\rm Diff}(\DblF)} {\tilde{\pi}_0S {{\rm Diff}(\DblF)} } \def\mathbb N{\mathbb N} \def\mathbb Z{\mathbb Z} \def\bar{\cal F}{\bar{\cal F}} \def\tilde{\pi}_0 {\rm Diff}(DM){\tilde{\pi}_0 {\rm Diff}(DM)} \def\stackrel{\partial}{\simeq}{\stackrel{\partial}{\simeq}} \def\tilde{\pi}_0 {{\rm Diff}(F,~{\rm rel}~K)}{\tilde{\pi}_0 {{\rm Diff}(F,~{\rm rel}~K)}} \def\tilde{\pi}_0 {{\rm Diff}(M)}{\tilde{\pi}_0 {{\rm Diff}(M)}} \def\tilde{\pi}_0S {{\rm Diff}(M)} {\tilde{\pi}_0S {{\rm Diff}(M)} } \def\mathbb Z{\mathbb Z} \def\mathbb C{\mathbb C} \def\downarrow{\downarrow} \def\lefteqn{\lefteqn} \def\longrightarrow{\longrightarrow} \def\rightarrow{\rightarrow} \def\hookrightarrow{\hookrightarrow} \def\longmapsto{\longmapsto} \def{\rm Aut}~H_n(M){{\rm Aut}~H_n(M)} \def\lambda{\lambda} \def\delta{\delta} \def\epsilon{\epsilon} \def\tilde{\psi }{\tilde{\psi }} \def\tilde\jmath{\tilde\jmath} \def\tilde{\vf }{\tilde{\vf }}
\maketitle
\parskip=2mm
\begin{abstract}
{We consider a parallelizable $2n$-manifold $F$ which has the homotopy type of the wedge product of $n$-spheres and show that the group of pseudo-isotopy classes of orientation preserving diffeomorphisms that keep the boundary $\partial F$ pointwise fixed and induce the trivial variation operator is a central extension of the group of all homotopy $(2n+1)$-spheres by $H_n\bigl(F; S\pi_n(SO(n))\bigr)$. Then we apply this result to study the periodicity properties of branched cyclic covers of manifolds with simple open book decompositions and extend the previous results of Durfee, Kauffman and Stevens to dimensions 7 and 15.} \end{abstract}
\noindent {\bf Keywords}: Isotopy classes of diffeomorphisms; Cyclic branched covers \\ {\bf 2000 Mathematics Subject Classification}: 57N15; 57N37; 14J17
\section{Introduction and the Results}
An open book decomposition of a manifold $M^{m+1}$ is a presentation of this manifold as the union of the mapping torus $F_{\vf}$ and the product $\partial F\times D^2$ along the boundary $\partial F\times S^1$, where $\vf: F^m\lra F^m$ is an orientation preserving diffeomorphism which fixes the boundary $\partial F$ pointwise. Open book structures have been used in the study of various topological problems (for short historical overviews see \S2 of \cite{Quinn} or Appendix by E. Winkelnkemper in \cite{Ranicki}), and in particular in the study of the isolated complex hypersurface singularities. Let $f:~(\cn^{n+1},0)\lra
(\cn,0)$ be a polynomial mapping with the only singular point at the origin and with zero locus $V=\{z\in\cn^{n+1}|f(z)=0\}$. Consider the intersection of $V$ with a small sphere centered at the origin $K:=V\cap S_{\eps}^{2n+1}$. J. Milnor has shown in \cite{Mil2} that the mapping $$
\Phi\colon S_{\eps}^{2n+1}\setminus K\lra S^1,~~~~\Phi(z):=f(z)/|f(z)| $$ is the projection map of a smooth fibration such that the fiber $F:=\Phi^{-1}(1)$ is a smooth $(n-1)$-connected parallelizable $2n$-manifold homotopically equivalent to the wedge product of $n$-spheres and $\partial F=K$ is $(n-2)$-connected. This gives the open book structure to the sphere $$ S^{2n+1}=F_{\vf}\cup (K \times D^2). $$ Such an open book decomposition of $S^{2n+1}$ is called a simple fibered knot and the periodicity, in $k$, of the $k$-fold cyclic covers of $S^{2n+1}$ branched along $K$ has been studied by A. Durfee and L. Kauffman in \cite{DK}. Later, J. Stevens (see \cite{Stev}, Theorem 7 and Proposition 8) generalized Theorems 4.5 and 5.3 of \cite{DK} to a wider class of manifolds with simple open book decompositions $M^{2n+1}=F_{\vf}\cup (\partial F \times D^2)$ (an open book $M^{2n+1}$ is called {\it simple} if both $M$ and $F$ are $(n-1)$-connected and $M$ bounds a parallelizable manifold).
{\bf Theorem I} (Stevens). {\it Let $M_k$ denote the $k$-fold cyclic cover of $M^{2n+1}$ branched along $\partial F$, and let $n$ be odd, $n\ne 1,3,7$. If~ ${\rm Var}(\vf^d)=0$, then $M_k$ and $M_{k+d}$ are (orientation preserving) homeomorphic, while $M_k$ and $M_{d-k},~k<d$ are orientation reversing homeomorphic. Furthermore, $M_{k+d}$ is diffeomorphic to $(\sigma_d/8)\Sigma~\#~M_k$.}
\noindent Here $\sigma_k$ is the signature of a parallelizable manifold $N_k$ with the boundary $\partial N_k=M_k$, and $\Sigma$ is the generator of the finite cyclic group $bP_{2n+2}$ of homotopy $(2n+1)$-spheres that bound parallelizable manifolds. ${\rm Var}(h)$ denotes the variation homomorphism of a diffeomorphism $h: F\lra F$, which keeps the boundary $\partial F$ pointwise fixed, and is defined as follows. Let $[z]\in H_n(F,\partial F)$ be the homology class of a relative cycle $z$, then we define ${\rm Var}(h): H_n(F,\partial F)\lra H_n(F)$ by the formula ${\rm Var}(h)[z]:=[h(z)-z]$ (cf. \S 1 of \cite{Stev} or \S1.1 of \cite{AGV}).
Stevens also proved topological as well as smooth periodicity for $n$ even (see \cite{Stev}, Theorem 9):
{\bf Theorem II}. {\it If for branched cyclic covers $M_k$ of a $(2n+1)$-manifold $M$ with simple open book decomposition ${\rm Var}(\vf^d)=0$, then $M_k$ and $M_{k+2d}$ are homeomorphic and $M_k$ and $M_{k+4d}$ are diffeomorphic. Moreover, if $n=2~or~6$, then $M_k$ and $M_{k+d}$ are diffeomorphic.}
Both of the papers viewed the open book $M^{2n+1}$ as the boundary of a $(2n+2)$-manifold and used results of C.T.C. Wall \cite{W1}, on classification of $(n-1)$-connected $(2n+1)$-manifolds. Here, in the third section, we are dealing with the same periodicity problems from a different point of view which is based on results of M. Kreck \cite{Kreck} on the group of isotopy classes of diffeomorphisms of $(n-1)$-connected almost-parallelizable $2n$-manifolds. We give here different proofs of these two theorems of Stevens including the cases $n=3$ and $n=7$ (see Corollaries 2, 3, and 4 below).
As we have just mentioned, our approach is based on the results of Kreck who has computed the group of isotopy classes of diffeomorphisms of closed $(n-1)$-connected almost-parallelizable $2n$-manifolds in terms of exact sequences. In the first part of this paper we use these results to obtain a similar exact sequence for the diffeomorphisms $f$ of a parallelizable handlebody $F\in\H(2n,\mu,n),~n\geq 2$, that preserve the boundary $\partial F$ pointwise and induce the trivial variation operator $\V(f) : H_*(F,\partial F)\lra H_*(F)$. We will denote the group of pseudo-isotopy classes of such diffeomorphisms by $\K(V) $ and prove the following
\noindent {\bf Theorem 3.}~{\it If $n\geq 3$ then the following sequence is exact $$ 0\lra \Theta_{2n+1} \lra \K(V) \lra {{\rm Hom}\Bigl(H_n(F,\partial F),~S\pi_n(SO(n))\Bigr)} \lra 0 $$ If $n=2$ then $\K(V) = 0$.}
\noindent Here, by $S\pi_n(SO(n))$ we mean the image of $\pi_n(SO(n))$ in $\pi_n(SO(n+1))$ under the natural inclusion $SO(n)\hra SO(n+1)$ and by $\Theta_{2n+1}$ the group of all homotopy $(2n+1)$-spheres (see \S2.2 for the details).
\noindent \underline {Remark}: Recently D. Crowley \cite{Crowl} extended results of D. Wilkins on the classification of closed $(n-1)$-connected $(2n+1)$-manifolds, $n=3,7$. One could use these results together with the technique of Durfee, Kauffman and Stevens to complete the periodicity theorems for $n=3,7$. However our intention was to show how one can apply the higher dimensional analogs of the mapping class group in studying this kind of problem.
At the end we briefly mention the cyclic coverings of $S^3$ branched along the trefoil knot as an example which shows that there is no topological periodicity in the case $n=1$.
Let $F$ be a manifold with boundary $\partial F$ and consider two diffeomorphisms $\vf,~\psi$ of $F$ that are identities on the boundary (in this paper we consider only orientation preserving diffeomorphisms). As usual, two such diffeomorphisms are called {\it pseudo-isotopic relative to the boundary} if there is a diffeomorphism ${\cal H}:~F\times I\lra F\times I$ which satisfies the following properties: $$
1)~{\cal H}|_{F\times \{0\}}=\vf,~~~2)~{\cal H}|_{F\times
\{1\}}=\psi,~~~3)~{\cal H}|_{\partial F\times I}=id $$ We will denote the group of pseudo-isotopy classes of such diffeomorphisms by $\mcgF$. The group of pseudo-isotopy classes of orientation preserving diffeomorphisms on a closed manifold $M$ will be denoted by $\mcgM$. There is a deep result of J. Cerf \cite{Cerf} which allows one to replace pseudo-isotopy by isotopy provided that the manifold is simply connected and of dimension at least six. All our manifolds are simply connected here, so $n=2$ is the only case when we actually use pseudo-isotopy. For all other $n\geq 3$ we will use the same notations (where tilde \~ {} stands for ``pseudo'') but mean the usual isotopy. We will call these groups {\it the mapping class groups}.
If $M$ is embedded into $W$ as a submanifold, then the normal bundle of $M$ in $W$ will be denoted by $\nu(M;W)$. Integer coefficients are understood for all homology and cohomology groups, unless otherwise stated, and symbols $\simeq$ and $\cong$ are used to denote diffeomorphism and isomorphism respectively.
\noindent {\bf Acknowledgement}: We thank the referee for helpful comments and suggestions. The second author also would like to express his gratitude to Professors Matthias Kreck and Anatoly Libgober for stimulating discussions during the preparation of this paper.
\section{Kernel of the variation operator}
\subsection{Double of a pair (X,A)}
Let $(X,A)$ be a pair of CW complexes, and consider the pair $(X\times I,A\times I)$ (here and later $I=[0,1]$, and we denote the boundary of $I$ by $\partial I$).
\begin{dfn} The subspace $(X\times\partial I)\cup(A\times I)$ of $X\times I$ will be called the double of the pair $(X,A)$, and denoted by $\DblX$. \end{dfn}
We will denote the pair $(X\times\{0\},A\times\{0\})$ by $(X_0,A_0)$, the product $A\times I$ by $A_+$ and the union $(X\times\{1\})\cup A_+$ by $X_+$. Thus we can write $\DblX=X_0\cup X_+$ and $X_0\cap X_+ = A\times\{0\}$.
\noindent \underline{Remark}: If we take the pair $(X,A)$ to be a manifold with the boundary, then the double $\DblX$ will be the boundary of the product $X\times I$, which is a closed manifold with the canonically defined smooth structure (see \cite{Munkres}). In this case we will denote the double simply by ${\cal D}X$.
Now we construct a natural homomorphism $d_*: H_*(X,A)\lra H_*(\DblX)$. Consider the reduced suspensions of $X$ and $A$ (the common base point is chosen outside of $X$) and the induced isomorphism between $H_*(X,A)$ and $H_{*+1}(\Sigma X^+,\Sigma A^+)$. The excision property induces a natural isomorphism between $H_{*+1}(\Sigma X^+,\Sigma A^+)$ and $H_{*+1}(X\times I, \DblX)$, and we define the homomorphism $d_*$ as the composition of these two isomorphisms with the boundary map $\delta_{*+1}$ from the exact sequence of the pair $(X\times I,\DblX)$:
\begin{dfn} $$ d_q:=\delta_{q+1}\circ iso :~ H_q(X,A) \stackrel{\cong}{\lra} H_{q+1}(X\times I, \DblX) \stackrel{\delta_{q+1}}{\lra} H_q (\DblX) $$ \end{dfn}
The groups $H_*(X,A)$ and $H_*(\DblX,X)$ are naturally isomorphic and we can rewrite the exact sequence of the pair $(\DblX,X)$ in the following form: $$ \cdots H_{q+1}(\DblX)\to H_{q+1}(X,A) \to H_q(X) \stackrel{i_q}{\to} H_q(\DblX) \stackrel{j_q}{\to} H_q(X,A)\cdots $$
\begin{lem} For each $q\geq 1$ the homomorphism $d_q$ is a splitting homomorphism of the above exact sequence and we have the following short exact sequence that splits: $$ 0\lra H_q(X) \stackrel{i_q}{\lra} H_q(\DblX) \stackrel{j_q}{\lra} H_q(X,A)\lra 0$$ \end{lem} \begin{proof} It follows rather easily from our definition of $d_q$ that for each $q\geq 1$ the composition $j_q\circ d_q$ is the identity map of the group $H_q(X,A)$. This property entails our lemma (cf. \cite{RF}, chap. 5, \S 1.5). \end{proof}
Let us consider now a homeomorphism $f: X\lra X$ which is the identity on $A$, i.e. $f(x)=x$ for all $x\in A$. For such a map the variation homomorphism $\V(f) : H_q(X,A)\lra H_q(X)$ is defined for all $q\geq 1$ by the formula $\V(f) [z]:=[f(z)-z]$ for any relative cycle $z\in H_q(X,A)$ (cf. \S 1 of \cite{Stev} or \S1.1 of \cite{AGV}). The map $f$ also induces the map $f^{(r)}: (X,A)\lra (X,A)$ and a map $\tilde{f}: \DblX\lra \DblX$ defined as follows: $$\tilde{f}(x):=\left\{ \begin{array}{rcl} f(x)& {\rm if}& x\in X_0\\ x~& {\rm if}& x\in X_+\\ \end{array} \right. $$
\noindent If we denote the corresponding induced maps in homology by $f_*,~f^{(r)}_*,~\tilde{f_*}$ then we have the following commutative diagram: $$ \begin{CD} 0 @>>> H_q(X) @>i_q>> H_q(\DblX) @>j_q>> H_q(X,A) @>>>0\\ @. @VVf_*V @VV\tilde{f_*}V @VVf^{(r)}_*V @.\\ 0 @>>> H_q(X) @>i_q>> H_q(\DblX) @>j_q>> H_q(X,A) @>>>0 \end{CD} $$
\begin{thm} ~\\ If~ $\V(f) = 0$, then $\tilde{f_*}$ is the identity map of $H_q(\DblX)$ for all $q$. \end{thm} \begin{proof} It follows right from the definition of $\V(f) $ that $f_* - Id = \V(f) \circ l_*$ and $f_*^{(r)} - Id = l_*\circ\V(f) $, where $l_*: H_*(X)\lra H_*(X,A)$ is induced by the inclusion $(X,\emptyset) \hra (X,A)$ (cf. \S1.1 of \cite{AGV}). It is also easy to check that the homomorphisms $\tilde{f_*}$ and $d_q$ are connected with the variation homomorphism via the formula $$\tilde{f_*}\circ d_q = d_q\circ Id + i_q\circ \V(f) $$ Hence if $\V(f) = 0$, then $f_*=Id,~f_*^{(r)}=Id$ and $\tilde{f_*}\circ d_q = d_q\circ Id$. These three identities together with $j_q\circ d_q = Id$ imply the statement. \end{proof}
Now we restrict our attention to the case when $X$ is a smooth, simply connected manifold of dimension at least four and $A=\partial X$ is the boundary. Let $\varphi\in {{\rm Diff}(X,{\rm rel}~\partial)}$ and $\tilde{\varphi} \in {{\rm Diff}(\DX)}$ be the extension by the identity to the second half of the double. Define the map $\omega: {{\rm Diff}(X,{\rm rel}~\partial)}\lra {\rm Diff}(\DX)$ by the formula $\omega(\varphi):=\tilde{\varphi}$.
\begin{thm}~\\ The map $\omega$ induces a monomorphism $\tilde{\pi}_0{{\rm Diff} (X,{\rm rel}~\partial)}\lra \tilde{\pi}_0{\rm Diff}(\DX)$. \end{thm} \begin{proof} It is easy to see that $\omega$ induces a well-defined map of groups of pseudo-isotopy classes of diffeomorphisms, i.e., if $\varphi'$ is pseudo-isotopic relative to the boundary to $\varphi$ then $\omega(\varphi')$ is pseudo-isotopic to $\omega(\varphi)$. It is obvious that for any two diffeomorphisms $\varphi,\psi\in \rm Diff(X,rel~\partial)$, $\omega(\varphi\cdot\psi)=\omega(\varphi)\cdot\omega(\psi)$, that is $\omega$ induces a homomorphism which we also denote by $\omega$.
To show that $\omega$ is actually a monomorphism we use Proposition 1 of Kreck (see \cite{Kreck}, p. 650 for the details): {\it Let $A^m$ be a simply-connected manifold with $m\geq 5$ and $h\in {\rm Diff}(\partial A)$. $h$ can be extended to a diffeomorphism on $A$ if and only if the twisted double $A\cup_h -A$ bounds a 1-connected manifold $B$ such that all relative homotopy groups $\pi_k(B,A)$ and $\pi_k(B,-A)$ are zero, where $A$ and $-A$ mean the two embeddings of $A$ into the twisted double.} Suppose now that $\omega(\varphi)=\tilde{\varphi}$ is pseudo-isotopic to the identity. Then the mapping torus $\DX_{\tilde{\varphi}}$ is diffeomorphic to the product $\DX\times S^1 =\partial(X\times I\times S^1)$. On the other hand we can present $\DX_{\tilde{\varphi}}$ as the union of $X_{\varphi}$ and $-X\times S^1$ along the boundary $\partial X\times S^1$. Since $\partial(X\times D^2)=X\times S^1\bigcup -\partial X\times D^2$ we can paste together $X\times I\times S^1$ and $X\times D^2$ along the common sub-manifold $X\times S^1$ to obtain a new manifold $W$, which cobounds $X_{\varphi}\bigcup-\partial X\times D^2$. Now note that $X_{\varphi}\bigcup-\partial X\times D^2$ is diffeomorphic to the twisted double $X\times I\bigcup_h -X\times I$ where the diffeomorphism $h: \partial(X\times I)\rightarrow \partial(X\times I)$ is defined by the identities:
$\left.h\right|_{X_0}=id$, and $\left.h\right|_{X_+}=\varphi$ (cf. \cite{Kreck}, property 1) of $\tilde{W}$ on page 657). The theorem of Seifert and Van Kampen entails that $\pi_1(W)\cong\{1\}$, and hence $\pi_1(W,X\times I)\cong\{1\}$. To show that the other homotopy groups are trivial it is enough to show that $H_*(W,X)\cong \{0\}$ for all $*\ge 2$. This can be seen from the relative Mayer-Vietoris exact sequence of pairs $(X\times I\times S^1, X)$ and $(X\times D^2, X)$ where by $X$ we mean a fiber of the product $X\times S^1$: $H_*(X\times S^1,X)\stackrel{\cong}{\rightarrow} H_*(X\times I\times S^1,X)\oplus H_*(X\times D^2,X)\rightarrow H_*(W,X)$. Thus by Proposition 1 of \cite{Kreck}, there is a diffeomorphism of $X\times I$ to itself that gives the required pseudo-isotopy between $\varphi$ and $id$. \end{proof}
\subsection{$\K(V) $ as an extension}
We now let $F\in\H(2n,\mu,n)$ be a parallelizable handlebody, that is, a parallelizable manifold which is obtained by gluing $\mu$ $n$-handles to the $2n$-disk and rounding the corners: $$ F=D^{2n}\cup\bigsqcup_{i=1}^{\mu} (D_i^n\times D^n)$$ We assume here that $n\geq 2$. For the classification of handlebodies in general, see \cite{Wall1}. Obviously $F$ has the homotopy type of the wedge product of $n$-spheres and nonempty boundary $\partial F$ which is $(n-2)$-connected. The Milnor fibre of an isolated complex hypersurface singularity is an example of such a manifold.
Let us consider now $\vf\in\mcgF$ and the induced variation homomorphism $\Va(\vf): H_n(F,\partial F)\lra H_n(F)$. This correspondence gives a well defined map $$\Va: \mcgF \lra \Im(var) $$ which is a derivation (1-cocycle) with respect to the natural action of the group $\mcgF$ on $\Im(var) $ (cf. \cite{Stev}, \S2) $$ \Va(h\circ g) = \Va(h) + h_*\circ \Va(g)$$
This formula implies that the isotopy classes of diffeomorphisms that give trivial variation homomorphisms form a subgroup of $\mcgF$.
\begin{dfn} The subgroup
$$\K(V) :=\{f\in\mcgF~|~\Va(f)[z]=0,~ \forall [z]\in H_n(F,\partial F)\}$$ will be called the kernel of the variation operator. \end{dfn}
In order to describe the algebraic structure of this kernel we will use the results of Kreck \cite{Kreck} who has computed the group of isotopy classes of diffeomorphisms of closed oriented $(n-1)$-connected almost-parallelizable $2n$-manifolds in terms of exact sequences. First we note that the double of our handlebody $F$ is such a manifold.
\begin{lem} Let $F\in\H(2n,\mu,n)$ be a parallelizable handlebody $(n\geq 2)$, then the double $\DblF$ is a closed $(n-1)$-connected stably-parallelizable $2n$-manifold. \end{lem} \begin{proof} Since $F$ is simply connected and $\DblF=F_0\cup F_+$, we have $\pi_1(\DblF)=0$. Then using the exact homology sequence of the pair ($F\times I, \partial(F\times I)$) it can be easily seen that $\DblF$ is a $(n-1)$-connected manifold. Since $F$ is parallelizable the double will be stably-parallelizable. \end{proof}
Next we recall the result of Kreck \cite{Kreck}. Let $M$ be a smooth, closed, oriented $(n-1)$-connected almost-parallelizable $2n$-manifold, $n\geq 2$. Denote by $\Aut$ the group of automorphisms of $H_n(M,\int)$ preserving the intersection form on $M$ and (for $n\geq3$) commuting with the function $\alpha:~H_n(M)\lra \pi_{n-1}(SO(n))$, which is defined as follows. Represent $x\in H_n(M)$ by an embedded sphere $S^n\hookrightarrow M$. Then function $\alpha$ assigns to $x$ the classifying map of the corresponding normal bundle. Any diffeomorphism $f\in {\rm Diff}(M)$ induces a map $f_*$ which lies in $\Aut$. This gives a homomorphism $$ \kappa:~ \mcgM \lra \Aut,~~~[f]\longmapsto f_* $$
The kernel of $\kappa$ is denoted by $\kerM$ and to each element
$f$ from this kernel Kreck assigns a homomorphism $H_n(M)\lra S\pi_n(SO(n))$, where $S: \pi_n(SO(n))\lra \pi_n(SO(n+1))$ is induced by the inclusion, in the following way. Represent $x\in H_n(M)$ by an imbedded sphere $S^n\subset M$ and use an isotopy to make $f|_{S^n}=Id$. The stable normal bundle $\nu(S^n)\oplus \varepsilon^1$ of this sphere in $M$ is trivial and therefore the differential of $f$ gives an element of $\pi_n(SO(n+1))$. It is easy to see that this element lies in the image of $S$. This construction leads to a well defined homomorphism (cf. Lemma 1 of \cite{Kreck}) $$\chi:~\kerM\lra {{\rm Hom}\bigl(H_n(M),~S\pi_n(SO(n))\bigr)}$$
\noindent If $n=6$ we have $S\pi_n(SO(n))=0$, and for all other $n\geq 3$ the groups $S\pi_n(SO(n))$ are given in the following table (\cite{Kreck}, p. 644):
\parskip=5mm
\begin{tabular}{|c|c|c|c|c|c|c|c|c|} \hline $n$ (mod 8) & 0 & 1 & 2 & 3 & 4 & 5 & 6 & 7\\ \hline $S\pi_n(SO(n))$ & ~$\zint_2\oplus\zint_2$~ & ~$\zint_2$~ & ~$\zint_2$ ~ & ~$\zint$ ~ & ~ $\zint_2$~ & ~ 0 ~ & ~ $\zint_2$ ~ & ~$\zint$ \\ \hline \end{tabular}
In particular, when $n\equiv 3\pmod{4}$ the homomorphism $\chi(f)$ can be defined using the Pontryagin class $p_{\frac{n+1}{4}}(M_f)$ of the mapping torus $M_f$: Take a diffeomorphism $f\in \kerM$ and consider the projection $$ \pi:~M_f\lra \left. M_f \right/ \{0\}\times M =\Sigma M^+ $$ It is clear from the exact sequence of Wang that the map $i^*:~H^n(M_f)\lra H^n(M)$ is surjective (recall that $f_*=id$) and therefore we obtain an isomorphism $\pi^*:~H^n(M)\cong H^{n+1}(\Sigma M^+)\lra H^{n+1}(M_f)$. Next define an element $p'(f)\in H^n(M)$ by $p'(f):={\pi^*}^{-1}(p_{\frac{n+1}{4}}(M_f))$. It can be shown (cf. \cite{Kerv}) that the map $f\lmt p'(f)$ is a homomorphism and $c:=a_{\frac{n+1}{4}} (\frac{n-1}{2})!$ divides $p'(f)$, where as always, $a_m=2$ if $m$ is odd and $a_m=1$ if $m$ is even. This defines a map $$ \chi':~\kerM\lra H^n(M),~~~with~~~\chi'(f):= p'(f)/c $$
\parskip=2mm
\noindent \underline{Remark}: These two elements $\chi'(f)$ and $\chi(f)$ belong to the isomorphic groups ${{\rm Hom}(H_n(M),~\pi_n(SO))}~~{\rm and}~~{{\rm Hom}(H_n(M),~ S\pi_n(SO(n)))}$ respectively, and they are connected through $\tau^*(\chi(f))=\chi'(f)$ via the homomorphism $$ \tau^* : {{\rm Hom}\bigl(H_n(M),~S\pi_n(SO(n))\bigr)} \lra {{\rm Hom}(H_n(M),~\pi_n(SO))} $$ induced by the natural homomorphism $\tau: \pi_n(SO(n+1))\lra \pi_n(SO(n+2))$. For the details the reader is referred to Lemma 2 of \cite{Kreck}.
If $M^{2n}$ bounds a parallelizable manifold and $n\geq 3$, then Theorem 2 of \cite{Kreck} gives two short exact sequences:
\begin{equation} \label{firstseq} 0\lra \kerM \lra \mcgM \stackrel{\kappa}{\lra} \Aut \lra 0 \end{equation} \begin{equation} \label{secondseq} 0\lra \Theta_{2n+1} \stackrel{\iota}{\lra} \kerM \stackrel{\chi}{\lra} {{\rm Hom}\bigl(H_n(M),~S\pi_n(SO(n))\bigr)} \lra 0 \end{equation}
\noindent where the map $\iota$ is induced by the identification of each homotopy $(2n+1)$-sphere with the element of the mapping class group $\tilde{\pi}_0 {\rm Diff}(D^{2n},{\rm rel}~\partial)$.
If $M$ is a simply connected manifold of dimension 4, Kreck has proved that $\kappa$ is a monomorphism (\cite{Kreck}, Theorem 1).
Let $F\in\H(2n,\mu,n)$ be a parallelizable handlebody as above, and $\DblF$ be the corresponding double. First assume that $n=2$ and $\vf\in \K(V) $, then it follows from our Theorems 1 and 2 and Theorem 1 of Kreck \cite{Kreck} that $\tilde{\vf}$ is the trivial element of $\mcgDF$, and therefore $\vf$ is the identity of $\mcgF$.
\noindent \underline{Remark}: In this case, the handlebody $F$ doesn't have to be parallelizable and the kernel of the variation operator $\K(V) $ will be trivial for any simply connected 4-manifold $F$.
\noindent Next we consider the case when $n\geq 3$ and denote the group $S\pi_n(SO(n))$ by $\G$. Recall also that we can assume that $\DblF=F\cup F_+$. Since $F$ is $(n-1)$-connected and the boundary $\partial F$ is $(n-2)$-connected, the universal coefficient theorem together with the cohomology exact sequence of the pair $(\DblF,F_+)$ and the excision property give us the following short exact sequence: \begin{equation} \label{thirdseq} 0\to \Rhom \stackrel{j^*}{\to} \Dhom \stackrel{i^*}{\to} \Fhom \to 0 \end{equation} where $i:F_+\hra \DblF$, $j: (\DblF,\emptyset)\hra (\DblF,F)$ are inclusions and $i^*$ and $j^*$ are the corresponding induced maps.
\begin{lem} $i^*(\chi(\tilde{\vf}))$ is the trivial map for any $\vf\in\mcgF$. \end{lem} \begin{proof} Take any $[z]\in H_n(F_+)$, then we have $i^*(\chi(\tilde{\vf}))[z]=\chi(\tilde{\vf})[i_*(z)]$. Since $H_n(F)\cong\pi_n(F)$ we can present our $n$-cycle $[z]$ by an imbedded $S^n\hra F_+$ and we can also assume that the normal bundle of such a sphere is contained in $F_+$. We have defined $\tilde{\vf}$ as the identity on $F_+$ and this implies $\chi(\tilde{\vf})[i_*(z)]=0$ as required. \end{proof}
\noindent Now we define a homomorphism $\chi_r: \K(V) \to \Rhom$. Take any $\vf\in \K(V) $ then $\tilde{\vf}\in \kerDF$ (recall Theorem 1 above) and $\chi(\tilde{\vf})\in \Dhom$. Since $i^*(\chi(\tilde{\vf}))=0$ there exists unique $h\in \Rhom$ such that $j^*(h)=\chi(\tilde{\vf})$.
\begin{dfn} We define the map $\chi_r: \K(V) \to \Rhom $ by the formula $\chi_r(\vf):=h$. \end{dfn} \noindent It is clear that $\chi_r$ is a homomorphism. Here we also consider the map $\iota_r: \Theta_{2n+1}\lra \mcgF$ defined as in (\ref{secondseq}) above: present any homotopy $(2n+1)$-sphere $\Sigma'$ as the union of two disks via a diffeomorphism $\psi\in \tilde{\pi}_0 {\rm Diff}(S^{2n})\cong \tilde{\pi}_0 {\rm Diff}(D^{2n},{\rm rel}~\partial ) \cong \Theta_{2n+1}$ then take a disk $D^{2n}$ embedded into ${\rm int}(F)$ and define the diffeomorphism of $F$ by the formula $$ \iota_r(\Sigma')(x):=\left\{ \begin{array}{rcl} \psi(x)& {\rm if}& x\in D^{2n}\hra F\\ x~& ~ & {\rm otherwise} \\ \end{array} \right. $$
\noindent It is obvious that ${\rm Im(\iota_r)}\subset \K(V) $. Now we describe $\K(V) $ as a central extension of the group $\Theta_{2n+1}$ by $H^n(F,\partial F;\G)\cong H_n(F;\G)$.
\begin{thm} ~\\ If $n=2$ then $\K(V) = 0$, and for all $n\geq 3$ the following sequence is exact \begin{equation} \label{kervar} 0\lra \Theta_{2n+1} \stackrel{\iota_r}{\lra} \K(V) \stackrel{\chi_r}{\lra} \Rhom \lra 0 \end{equation} \end{thm} \begin{proof} We have mentioned already that if $n=2$, the kernel of the variation operator is trivial. Assume now that $n\geq 3$. It follows from Theorems 1 and 2 above that the inclusion map $\omega: {\rm Diff}(F,{\rm rel}~\partial)\to {\rm Diff} (\DblF)$ induces a monomorphism $s_{\omega}: \K(V) \to \kerDF$. Since the composition $s_{\omega}\cdot \iota_r$ coincides with the injective map $\iota$ from the exact sequence (\ref{secondseq}), we see that our $\iota_r$ is injective too. It is also clear that $\rm Im(\iota_r)\subset Ker(\chi_r)$. Consider now any $\vf\in \rm Ker(\chi_r)$, then $\chi( s_{\omega}(\vf)) = j^*(\chi_r(\vf)) = 0$, where $j^*$ is as in (\ref{thirdseq}). Thus $s_{\omega}(\vf) \in \rm Ker(\chi)\cong\Theta_{2n+1}\cong Im(\iota)$ and since $s_{\omega}$ is a monomorphism we have $\vf\in \rm Im(\iota_r)$ as required.
To prove that $\chi_r$ is an epimorphism it is enough to show that for a set of generators $\{g_1,\ldots, g_m\}$ of $\Rhom$ the group $\K(V) $ contains diffeomorphisms $\{\vf_1,\ldots, \vf_m\}$ such that $\chi_r(\vf_j)=g_j, ~ j\in\{1,\ldots,m\}$. Recall that $F=D^{2n}\cup\bigsqcup_{i=1}^{\mu} (D_i^n\times D^n)$ and $H_n(F,\partial F)\cong \zint^{\mu}$. We can choose the following embedded disks $d_i\hra F,~ i\in\{1,\ldots,\mu\}$, as a basis of this homology group: $$ d_i:= \{0\}_i\times D^n\hra D^n_i\times D^n \hra F$$ (here $\{0\}_i$ is the center of the $i^{th}$ handle core disk $D^n_i$). Take a generator $x$ of $G$ and consider the homomorphism $ g_{xi}: H_n(F,\partial F)\lra G$ defined by the formula $$ g_{xi}[d_k]:=\left\{ \begin{array}{rcl} x & {\rm if} & k=i\\ 0 & {\rm if} & k\ne i \end{array} \right. k\in\{1,\ldots,\mu\} $$ and extended linearly to the whole group. The set of such homomorphisms obviously generates $\Rhom$. Now we will use an analog of the Dehn twist in higher dimensions to construct the diffeomorphism $\vf_{xi}$ (cf. \cite{Wall1}, Lemma 12).
For each disk $d_k$ consider the ``half-handle'' $=(\frac{1}{2}D_k^n)\times D^n$ and notice that the closure of the complement to all these ``half-handles'' in $F$ $$ \CCF:=cl(F\setminus\bigsqcup_{k=1}^{\mu}(\frac{1}{2}D_k^n)\times D^n)$$ is diffeomorphic to the closed $2n$-disk $D^{2n}$, and the intersection of each ``half-handle'' with the boundary $\partial \CCF\simeq S^{2n-1}$ is $\partial (\frac{1}{2}D_k^n)\times D^n\simeq S_k^{n-1}\times D^n$. We take a smooth map $\vf_x:
(D^n,S^{n-1})\lra (SO(n),id)$ that sends a neighborhood of $S^{n-1}$ to $id$ and represents an element $[\vf_x]\in \pi_n(SO(n))$ such that $S([\vf_x])=x$ and define the diffeomorphism $\vf_{xi}|_{\bigsqcup_{k=1}^{\mu}(\frac{1}{2}D_k^n)\times D^n}$ by the formula \begin{equation} \label{twist} \vf_{xi}(t,s):=\left\{ \begin{array}{ccl} (\vf_x(s)\circ t,s) & {\rm if} & (t,s)\in (\frac{1}{2}D_i^n)\times D^n\\ (t,s) & {\rm if} & (t,s)\in (\frac{1}{2}D_k^n)\times D^n~{\rm and}~k\ne i \end{array} \right. \end{equation} In particular, this gives a diffeomorphism $\phi\in {\rm Diff}(\partial \CCF)$ which is defined on $S_i^{n-1}\times D^n\hra \partial \CCF$ by restricting $t$ to the boundary of $\frac{1}{2}D_i^n$ (see (\ref{twist}) above) and by the identity everywhere else. We will show now that $\phi$ is isotopic to the identity. Consider the handlebody $$ F_i:=D^{2n}\cup (D_i^n\times D^n)=cl(F\setminus\bigsqcup_{k=1,k\ne i }^{\mu}(\frac{1}{2}D_k^n)\times D^n) $$ and denote by $\hat{F_i}$ the manifold obtained from $F_i$ by removing the open disk
$\frac{1}{2}D^{2n}$ from $D^{2n}$. Hence $\partial \hat{F_i} \simeq \partial F_i\sqcup S^{2n-1}$. The first equation of (\ref{twist}) together with the identity map define a diffeomorphism $\Phi$ of $\hat{F_i}$ such that $\Phi|_{S^{2n-1}} = \phi$
and $\Phi|_{\partial F_i} = Id$. We use the identity again to extend this $\Phi$ to a diffeomorphism $\tilde{\Phi}$ of $\DblFi$ where $$ \DblFi:=\DblF_i\setminus\frac{1}{2}D^{2n}\simeq \hat{F_i}\bigcup_{\partial F_i}
F_i~~~~~{\rm and}~~~~\tilde{\Phi}|_{F_i}=Id,~\tilde{\Phi}|_{\hat{F_i}}=\Phi $$
Thus $\phi$ is the restriction of $\tilde{\Phi}$ to the boundary $\partial \DblFi=S^{2n-1}$ and hence can be considered as an element of the inertia group of ${\cal D}F_i$ (cf. \cite{Kreck}, Proposition 3). Now it follows from Lemma 2 above and results of Kosinski (\cite{Kos}, see \S3) and Wall \cite{W0} that $\phi$ is isotopic to the identity. In particular, we can use this isotopy on $S^{2n-1}\times [\frac{1}{2},\frac{1}{4}]\subset \frac{1}{2}D^{2n}$ to extend the diffeomorphism
$\vf_{xi}|_{\bigsqcup_{k=1}^{\mu}(\frac{1}{2}D_k^n)\times D^n}$ to a diffeomorphism of the whole handlebody $F$. Denote the result of this extension by $\vf_{xi}$. Clearly $\vf_{xi}\in \mcgF$, and we leave it to the reader to check that $\chi_r(\vf_{xi})=g_{xi}$. \end{proof}
\begin{cor}We have the following commutative diagram $$ \begin{array}{ccccccccc} &&&&0&&0&&\\ &&&&\da&&\da&&\\ 0 &\ra&\Theta_{2n+1}&\stackrel{\iota_r}{\to}&\K(V) &\stackrel{\chi_r}{\lra} &\Rhom &\ra &0\\ && \updownarrow\lt{\equiv} &&\da\lt{s_{\omega}} &&\da\lt{j^*} &\\ 0 &\ra &\Theta_{2n+1} &\stackrel{\iota}{\to} &\kerDF &\stackrel{\chi}{\lra} & \Dhom &\ra &0\\ &&&&\da\lt{i^*\cdot\chi} &&\da\lt{i^*} &\\ &&&&\Fhom &\stackrel{\equiv}{\longleftrightarrow} &\Fhom &\\ &&&&\da&&\da&&\\ &&&&0&&0&& \end{array} $$ where all horizontal and vertical sequences are exact. \end{cor} \begin{proof} The standard diagram chasing procedure is left to the reader. \end{proof}
\noindent {\tt Example:} Consider the case when $F=S^3\times D^3$. Then $\DblF=S^3\times S^3$, $\Rhom\cong G\cong \zint$, $\Theta_{7}\cong\zint_{28}$ and $\kerDF\cong{\cal H}_{28}$, that is the factor group of the group $\cal H$ (upper unitriangular $3\times 3$ matrices with integer coefficients) modulo the cyclic subgroup $28\zint$, where $\zint$ is the center of $\cal H$ (cf. \cite{Fried} or \S1.3 of \cite{Kryl}). Thus $\K(V) \cong S\pi_3(SO(3))\oplus\Theta_7\cong \zint\oplus\zint_{28}$ and the first vertical short exact sequence from the previous corollary can be written as follows $$ 0\lra S\pi_3(SO(3))\oplus\Theta_7 \lra \tilde{\pi}_0S{{\rm Diff}(S^3\times S^3)} \lra S\pi_3(SO(3)) \lra 0. $$ Such exact sequence was obtained by J. Levine (\cite{Levine}, Theorems 2.4 and 3.3) and H. Sato (\cite{Sato}, Theorem II) for the group $\tilde{\pi}_0S{{\rm Diff}(S^p\times S^p)}$.
\section{Manifolds with open book decompositions}
\subsection{Periodicity in higher dimensions}
In this section we will apply our exact sequence (\ref{kervar}) to study the periodicity of branched cyclic covers of manifolds with open book decompositions.
\begin{dfn} We will say that a smooth closed $(m+1)$-dimensional manifold $M$ has an open book decomposition if it is diffeomorphic to the union $$ M\simeq F_{\vf}\cup_r (\partial F\times D^2)$$ where $F$ is an $m$-dimensional manifold with boundary $\partial F$, $\vf \in {{\rm Diff}(F,{\rm rel}~\partial)}$ is an orientation preserving diffeomorphism of $F$ that keeps the boundary pointwise fixed, $F_{\vf}$ is the mapping torus of $\vf$ $$ F_{\vf}:= \left. F\times[0,1]\right/ (x,0)\sim (\vf(x),1) $$ and $r:\partial F_{\vf}\lra \partial F\times S^1$ is a diffeomorphism that makes the following diagram commute $$\begin{CD} \partial F_{\vf} @>in>> F_{\vf}\\ @VVrV @VV\pi V\\ \partial F\times S^1 @>p_2>> S^1 \end{CD} $$ (here $p_2$ is the projection onto the second factor and $\pi$ is the bundle projection of the mapping torus onto the base circle). \end{dfn} Such a union is also called the relative mapping torus with page $F$ and binding $\partial F$ (cf. \cite{Quinn} or \cite{Stev}). When $M$ has dimension $(2n+1)$ and $F$ has the homotopy type of an $n$-dimensional CW-complex, it is said that the page is {\it almost canonical}. The diffeomorphism $\vf$ is called the geometric monodromy and the induced map $\vf_*:~H_n(F)\ra H_n(F)$ is the (algebraic) monodromy. If instead of $\vf$ we take some positive power of this diffeomorphism, say $\vf^k$, we obtain the $k$-fold cyclic cover $M_k$ of $M$, branched along $\partial F$, i.e. $$ M_k=F_{\vf^k}\cup_r (\partial F \times D^2) $$
It was shown in \cite{DK} (Theorem 4.5) that if a fibered knot $\partial F$ is a rational homology sphere and $\vf^d=id$ for some $d>0$, then the $k$-fold cyclic covers $M_k$ of $S^{2n+1}$ branched along $\partial F$ have the periodic behavior in $d$. In case of the links of isolated complex polynomial singularities these restrictions on $\partial F$ and $\vf$ are equivalent to the condition ${\rm Var}(\vf^d)=0$.
\noindent \underline{Remarks}:\\ i) Notice that the conditions $\vf^d_*=id$ and $\partial F$ is a rational homology sphere imply that ${\rm Var}(\vf^d)=0$, but the converse is not true (see \cite{Stev}, p. 231).\\ ii) Proposition 3.3 of \cite{Kauffman} proves that an open book $M^{2n+1}$ with page $F$ and monodromy $\vf$ is a homotopy sphere if and only if ${\rm Var}(\vf)$ is an isomorphism.
In addition to the almost canonical page requirement we will need to assume more about $M$ (cf. \cite{Stev}, \S3 p.232), i.e. we assume from now on that $M$ has a {\it simple open book decomposition}. It implies, in particular, that $M$ bounds a simply connected parallelizable manifold. We will also assume that $n\geq 3$, $\pi_1(\partial F)=1$ and ${\rm Var}(\vf^d)=0$ for some $d\ge 1$ (where $\vf$ is the diffeomorphism that gives $M$ the open book structure). A parallelizable simply connected manifold bounded by $M$ will be denoted by $N$.
Before we give proofs of the periodicity theorems (Corollaries 2, 3 and 4 below) we will first obtain some auxiliary results. It is clear that $F$ is also a parallelizable manifold. Take now any $z\in H_n(F,\partial F)\cong \pi_n(F,\partial F)$ and choose an embedded disk $(D^n,\partial D^n)\hra (F,\partial F)$ that represents this class $z$. Inside of $\DblF=F_0\cup F_+$ we consider the double $\Dbl D^n= D^n_0\cup D^n_+\simeq S^n\hra \DblF$, and since the boundary $\partial D^n=S^{n-1}\subset \partial F$ has trivial normal bundle in $\partial F$ we can add to $F_0$ one $n$-handle along this sphere to obtain the manifold $F_0(z):= F_0\cup (D^n_+\times d^n)$. As we have done above, we can extend a diffeomorphism $\vf\in {{\rm Diff}(F,{\rm rel}~\partial)}$ to a diffeomorphism $\vf_z\in {{\rm Diff}(}F_0(z),{\rm rel}~\partial)$ using the identity on $D^n_+\times d^n$. Then we obviously have $\Dbl D^n\hra F_0(z)\hra
\DblF$ and $\vf_z = \tilde{\vf}|_{F_0(z)}$.
\begin{lem} The mapping torus $F_{\vf}$ of $\vf$ is framed if and only if the mapping torus ${F_0(z)}_{\vf_z}$ of $\vf_z$ is framed. \end{lem} \begin{proof} We will show that any framing of $F_{\vf}$ can be extended to a framing of ${F_0(z)}_{\vf_z}$. The other direction is trivial. Since $\vf$ is the identity on the boundary, we have $S^{n-1}\times S^1\hra \partial F_{\vf} = (\partial F)\times S^1$, where $S^{n-1}$ is the boundary of our relative homology class $z$. We can assume that $F$ has a collar $\partial F\times [0,1]$ and $\vf$ is the identity map on this collar. Now we have $D^n\hra F\hra F\cup (\partial F\times [0,1])$ and we use the disk theorem to change $\vf$ by an isotopy to a diffeomorphism $\vf'$ such that
$\vf'|_{D^{2n}}=\vf'|_{\partial F\times 1} =id$ and $D^n\subset {\rm int}(D^{2n})\subset F\cup(\partial F\times [0,\frac{1}{2}])$. Then clearly $F_{\vf'}\simeq F_{\vf}$ and $D^n\times S^1\hra F_{\vf'}$ with the trivial normal bundle. Furthermore since $S^{n-1}\times [0,1]\hra \partial F\times [0,1]$ with trivial normal bundle too, we can connect $\partial D^n\times S^1 \hra F_{\vf'}$ with $S^{n-1}\times S^1 \hra (\partial F\times 1)\times S^1=\partial (F_{\vf'})$, using the collar $(S^{n-1}\times [0,1])\times S^1$. This implies that the trivial normal bundle of $S^{n-1}\times S^1$ in $\partial (F_{\vf})$ comes from the trivial normal bundle of $D^n\times S^1$ embedded into $F_{\vf}$. Now notice that the mapping torus ${F_0(z)}_{\vf_z}$ is the union of $F_{\vf}$ and $D_+^n\times d^n\times S^1$ along $S^{n-1}\times d^n \times S^1 \hra \partial (F_{\vf})$. Therefore the restriction of the framing of $F_{\vf}$ to $S^{n-1}\times d^n \times S^1 = (\partial D^n)\times d^n \times S^1$ (where $D^n\times d^n \times S^1\hra F_{\vf}$) can be extended to a framing of ${F_0(z)}_{\vf_z}$. \end{proof}
\begin{thm} (n is odd, $\ne 1$)\\ Suppose $[\psi]\in \K(V) $ and $M^{2n+1}\simeq F_{\psi}\cup_r (\partial F\times D^2)$ bounds a parallelizable manifold $N$. Then $\chi_r(\psi) = 0$. \end{thm} \begin{proof} It is enough to show that $\chi_r(\psi)[z] = 0$ for an arbitrary relative homology class $z\in H_n(F,\partial F)$. As we just did above, we represent such a class by an embedded disk $(D^n,\partial D^n) \hra (F,\partial F)$ and take the double $\Dbl D^n = S^n\hra \DblF$. We will denote this double by $dz$ ({\it to avoid cumbersome notations we denote by $dz$ both the homology class and the embedded sphere $\Dbl D^n$ that represents this class}) and its normal bundle in $\DblF$ by $\nu(dz;\DblF)$ respectively. Note that $\nu(dz;\DblF)$ is trivial. The proof now splits into two parts.\\ 1)~ $n\geq 5$: It is clear that ${\psi_z}_* = id$ on $H_n(F_0(z))$
and we can isotope $\psi_z$ to a diffeomorphism $\psi'_z$ such that ${\psi'_z}|_{dz} = id$ (see \cite{Haef}). Extending this new diffeomorphism by the identity to the diffeomorphism ${\tilde\psi}'\in {{\rm Diff}(\DblF)}$ we obtain an element of $\kerDF$ which pointwise fixes $dz$ and maps $F_0(z)$ to itself. Now it follows from the commutative diagram of Corollary 1 that it is enough to show that $\chi({\tilde \psi}')[dz]=0$. Since by Lemma 4 the mapping torus ${F_0(z)}_{\psi'_z}$ is framed, the normal bundle $\nu(dz\times S^1;\DblF_{{\tilde\psi}'})$ is stably trivial. Since $n$ is odd, the map $\G = S\pi_n(SO(n)) \hra \pi_n(SO(n+1))\lra \pi_n(SO(n+2))$ is a monomorphism (see \cite{Wall3}) and therefore the map $$ l^*: {{\rm Hom}(H_n(\DblF),~\G)} \lra {{\rm Hom}(H_n(\DblF),~\pi_n(SO))}$$ is a monomorphism too. Hence $l^*(\chi({\tilde \psi}'))[dz]$ is the obstruction to triviality of the stable normal bundle $\nu(dz\times S^1;\DblF_{{\tilde\psi}'})$ and since this bundle is trivial we have $\chi({\tilde \psi}')[dz] =0$, as required.\\ 2)~ $n\equiv 3\pmod{8}$: Since $\partial N = M^{2n+1}\simeq F_{\psi}\cup_r (\partial F\times D^2)$ and $\partial(F\times D^2) = (\partial F \times D^2)\cup (F\times S^1)$, we can paste together the manifolds $N$ and $F\times D^2$ along the common part of the boundary $\partial F\times D^2$ (respecting orientations of course) to obtain a manifold (after smoothing the corner) $$ W^{2n+2}:= N\bigcup_{\partial F\times D^2} (F\times D^2)~~~ {\rm with}~~~ \partial W= F_{\psi}\cup (F\times S^1)\simeq {\cal D}F_{\tp }$$ We use elementary obstruction theory to show that this $W$ is stably parallelizable. Fix a frame field of the stable tangent bundle of $N\subset W$. Obstructions to the extension of this frame field over the whole manifold lie in the groups $H^{q+1}(W,N;\pi_q(SO))\cong H^{q+1}(F,\partial F;\pi_q(SO))\cong H_{2n-q-1}(F;\pi_q(SO))$. 
If $q\ne n-1$ and $q\ne 2n-1$ then $H_{2n-q-1}(F;\zint)\cong 0$ (since $F$ has the homotopy type of the wedge product of $n$-spheres). But if $q=n-1$ or $q=2n-1$ then $\pi_q(SO)\cong 0$ because $n\equiv 3\pmod{8}$ and all obstructions lie in the trivial groups. It implies that ${\cal D}F_{\tp}$ is stably parallelizable and the Pontryagin class $p_{\frac{n+1}{4}}({\cal D}F_{\tp})$ vanishes. Thus $\chi(\tp)=0$ (recall Lemma 2 of \cite{Kreck}) and hence $\chi_r(\psi)=0$. \end{proof}
Now we can prove the following theorem of Stevens including the cases when $n=3,7$ (cf. \cite{Stev}, Theorem 7).
\begin{cor} Let $M_k$ be the k-fold branched cyclic cover of a $(2n+1)$-manifold $M = F_{\vf}\cup_r (\partial F\times D^2)$ with simple open book decomposition, where n is odd, $\ne 1$. Suppose ${\rm Var}(\vf^d)=0$, then $M_k$ and $M_{k+d}$ are (orientation preserving) homeomorphic, while $M_k$ and $M_{d-k},~d>k$, are orientation reversing homeomorphic. \end{cor} \begin{proof} Since ${\rm Var}(\vf^d)=0$ and $M_d$ bounds a parallelizable manifold (see Lemma 5 of \cite{Stev}) we have $\chi_r(\vf^d)=0$ by the previous theorem. The exact sequence (\ref{kervar}) implies that $\vf^d$ is isotopic to a diffeomorphism which belongs to the image $\iota(\Theta_{2n+1})$ and therefore $F_{\vf^{d+k}}$ is diffeomorphic to $F_{\vf^k}\# \Sigma'$ (cf. Lemma 1 of \cite{Browd1}) for some $\Sigma'\in \Theta_{2n+1}$. In particular, it means that $F_{\vf^{d+k}}$ is homeomorphic (via some homeomorphism that preserves orientation) to $F_{\vf^{k}}$, and hence $M_{d+k}$ is homeomorphic to $M_k$. To see the orientation reversing case, notice that the mapping torus $F_g$ is diffeomorphic to $F_{g^{-1}}$ via an orientation reversing diffeomorphism induced, for instance, by the map $(x,t)\lmt (g(x),1-t)$ from $F\times I$ to $F\times I$. This diffeomorphism extends to an orientation reversing homeomorphism of the corresponding open books $M$ and $M_{-1}$. Hence in our situation $M_k=F_{\vf^{k}}\cup_r (\partial F\times D^2)$ is homeomorphic (orient. revers.) to $F_{\vf^{-k}}\cup_r (\partial F \times D^2)$ which is homeomorphic (orient. pres.) to $F_{\vf^{-k}}\#\Sigma'\cup_r (\partial F\times D^2)\simeq F_{\vf^{d-k}}\cup_r (\partial F\times D^2)=M_{d-k}$. \end{proof}
\noindent \underline{Remark}: If one defines $M_k := F_{\vf^k}\cup_r (\partial F\times D^2)$ for any $k\in \int$, then the first statement that $M_k$ is homeomorphic to $M_{k+d}$ remains true, and the restriction $d>k$ in the second part can be omitted.
To show diffeomorphism type periodicity we will basically use the same argument plus the fact that the homotopy sphere $\Sigma'$ bounds a parallelizable manifold. We start with proving this fact. Thus for $n\in\nat,~n\geq2$, we consider a diffeomorphism $h$ with $[h]\in\K(V) $ such that our simple open book $M^{2n+1}=F_h\cup_r (\partial F\times D^2)$ bounds a simply connected parallelizable manifold $N$ and $\chi_r(h)=0$. In particular, we can assume that $h\in {\rm Im}(\iota)$ is the identity except on a small closed disk ${\cal D}^{2n}\hra {\rm int}(F)$ embedded into the interior of $F$.
\begin{lem} The natural inclusions $i_1:F\hra F_h$ and $i_2: F_h\hra M$ induce isomorphisms $i_{1*}:H_n(F) \to H_n(F_h)$ and $i_{2*}:H_n(F_h) \to H_n(M)$ respectively, and every $[z]\in H_n(M)$ can be represented by an embedded sphere $S^n\hra M$ with trivial normal bundle $\nu(S^n;M)$. In addition, $H_n(M)\cong H_{n+1}(M)$. \end{lem} \begin{proof} That $i_{1*}$ is an isomorphism follows immediately from the Wang exact sequence, and the other two isomorphisms follow from the exact sequence of Stevens: $$ 0\lra H_{n+1}(M) \lra H_n(F,\partial F)\stackrel{\rm Var(h)}{\lra} H_n(F)\lra H_n(M) \lra 0$$ which arises from the exact sequence of the pair $(M,F)$ (see Proposition 1 of \cite{Stev}). Since the normal bundle of any $S^n\hra M$ is stable and $M$ bounds a parallelizable manifold, the bundle $\nu(S^n;M)$ must be trivial. \end{proof}
Now we would like to kill $H_n(M)$ using surgery, and as a result obtain a homotopy sphere $\Sigma_h\in \Theta_{2n+1}$ (we again assume $n\geq 3$). We will show firstly that $\Sigma_h$ belongs to $bP_{2n+2}$, and secondly that $\iota(\Sigma_h) =[h]$. Our construction will follow Kreck's construction of the isomorphism $\sigma: {\rm ker}(\chi) \lra \Theta_{2n+1}$ (see \cite{Kreck}, proof of Proposition 3).
For each generator $[z_i]\in H_n(F)$ we fix an embedding $\phi_i: S^n_i\times d^{n+1}_i\hra F\times (0,1)\hra M$ disjoint from ${\cal D}^{2n}\times (0,1)$. Then we attach handles $D^{n+1}_i\times d^{n+1}_i$ to the product $M\times I$ along these embeddings into $M\times\{1\}$ to obtain a cobordism $W$ between $M=M\times\{0\}$ and the homotopy sphere $\Sigma_h$ which is the result of these $\phi_i$-surgeries on $M$. Furthermore, we can choose the embeddings $\phi_i$ compatible with the framing of $M$ that comes from the framing of $N$ (see Lemma 6.2 of \cite{KerMil}), and hence we get $W$ as a framed manifold. Taking the union of $N$ and $W$ along $M$ we obtain a parallelizable manifold with boundary $\Sigma_h$, and hence $\Sigma_h\in bP_{2n+2}$. In the next lemma we show that $\Sigma_h$ is well defined (depends only on the isotopy class of $h$) and that $\iota(\Sigma_h)=[h]$, which implies $[h]\in \iota(bP_{2n+2})$ (cf. the properties of $W$ from \cite{Kreck}, pp. 655-656).
\begin{lem}~ \begin{enumerate} \item The manifold $W$ is parallelizable, $n$-connected and $\partial W=M\sqcup -\Sigma_h$. \item The embedding ${\cal L}: M\times\{0\}\hra W$ induces an isomorphism\\ ${\cal L}_*: H_{n+1}(M) \lra H_{n+1}(W)$ and all elements of $H_{n+1}(W)$ can be represented by embedded spheres with trivial normal bundle. \item If $W'$ is another n-connected manifold that also satisfies property 2 and $\partial W'=M\sqcup -\Sigma$ for some $\Sigma\in\Theta_{2n+1}$, then $\Sigma \simeq \Sigma_h$. \item $\iota(\Sigma_h)=[h]$ where $\iota: \Theta_{2n+1}\hra \K(V) $. \end{enumerate} \end{lem} \begin{proof} $n$-connectivity follows immediately from $(n-1)$-connectivity of $M$ and the Mayer-Vietoris exact sequence of the union $$ W:=M\times [0,1] \bigcup (\sqcup_i D^{n+1}_i\times d^{n+1}_i) $$ Hence, by the Hurewicz theorem we can represent every $[z]\in H_{n+1}(W)$ by an embedded sphere $S^{n+1}\hra W$. The same exact sequence implies that the embedding ${\cal L}: M=M\times\{0\} \hra W$ induces isomorphism between $H_{n+1}(M)$ and $H_{n+1}(W)$ and to finish part 2 we just need to show that $\nu(S^{n+1};W)$ is trivial. To see this, notice that every $[z]\in H_{n+1}(M)\cong H_{n+1}(W)$ can be represented by an embedded $S^n\times S^1\hra M$ (see the Lemma above) with trivial normal bundle in $M\times (0,1)$. Since we can take the diffeomorphism $h$ to be the identity on $\nu(S^n;F)$, it is not hard to see that there is an embedded $S^{n+1}\hra M$ which is cobordant to $S^n\times S^1$ in $M$ and hence framed cobordant in $M\times(0,1)\hra W$. This proves 2. Suppose now we have $W'$ that satisfies property 3. Take a union of $W$ and $W'$ along $M$ to obtain a n-connected cobordism $\cal W$ between $\Sigma$ and $\Sigma_h$. We will show that we can make it into an $h$-cobordism. 
Mayer-Vietoris exact sequence implies that $H_{n+1}({\cal W})\cong H_{n+1}(W)\oplus H_n(M)$, and since $H_{n+1}(W)\cong H_{n+1}(W,M)$ (Poincar\'e duality plus Universal coefficient theorem) and also $H_{n+1}(W,M)\cong H_n(M)$ we see that $H_{n+1}({\cal W})$ has the direct summand $H_{n+1}(W)$ with the properties: \\ i) ${\rm dim}(H_{n+1}(W))=\frac{1}{2}{\rm dim}(H_{n+1}({\cal W}))$\\ ii) every homology class of $H_{n+1}(W)$ can be represented by an embedded sphere $S^{n+1}\hra W$ with trivial normal bundle.\\ iii) For all $[z_1],~[z_2]\in H_{n+1}(W)$ the intersection number $z_1\circ z_2$ vanishes (this follows from 2. of this lemma). Therefore we can use surgery to kill $H_{n+1}({\cal W})$ and obtain an $h$-cobordism between $\Sigma$ and $\Sigma_h$. The last property follows from \cite{Browd1}, Lemma 1. Indeed, if $\Sigma'=D^{2n+1}_1\cup_h D^{2n+1}_2$ then by our definition $\iota(\Sigma')=[h]\in \K(V) $ and hence $M\simeq\partial(F\times D^2)\# \Sigma'$. Since $\partial(F\times D^2)$ is framed cobordant to the standard $(2n+1)$-sphere, we can use this cobordism (namely $F\times D^2$ minus an embedded disk $D^{2n+2}$) to produce a $n$-connected cobordism between $M$ and $\Sigma'$ that will satisfy property 2. As we have just seen above this means that $\Sigma'\simeq \Sigma_h$, i.e. $\iota(\Sigma_h) = [h]$. \end{proof}
Let us denote the signature of a parallelizable manifold $N_k$ with boundary $\partial N_k= M_k = F_{\vf^k}\cup_r (\partial F\times D^2)$ by $\sigma_k$, and the generator of $bP_{2n+2}$ by $\Sigma$.
\begin{cor}(cf. \cite{Stev}, Proposition 8) Let $M^{2n+1}= F_{\vf}\cup_r (\partial F\times D^2)$ be the manifold with simple open book decomposition where $n$ is odd, $\ne 1$ and $M_k$ be the $k$-fold branched cyclic cover of $M$. If ${\rm Var}(\vf^d)=0$ then $M_{k+d}$ is diffeomorphic to $(\frac{\sigma_d}{8}\cdot\Sigma)\# M_k$. \end{cor} \begin{proof} We have just seen above that $M_{k+d}\simeq \Sigma'\# M_k$ with $\Sigma'=m\cdot\Sigma\in bP_{2n+2}$ for some $m\in\nat$. Since $M_d=F_{\vf^d}\cup_r (\partial F\times D^2)\simeq \partial(F\times D^2)\# m\Sigma$ and $m\Sigma$ bounds a parallelizable manifold, say $W_m$, with signature $\sigma(W_m)=8m$ and $\partial(F\times D^2)$ bounds $F\times D^2$ (which is also parallelizable) with signature zero, the connected sum of $W_m$ and $F\times D^2$ along the boundary (cf.~\S 2~of \cite{KerMil}) will give us a parallelizable manifold $N_d := W_m\# F\times D^2$ with boundary $\partial N_d = M_d$ and signature $\sigma(N_d)=\sigma(W_m)+\sigma(F\times D^2)=8m+0$. Thus $m=\frac{\sigma(N_d)}{8}\equiv\frac{\sigma_d}{8}\bmod ({\rm order~of~}bP_{2n+2})$ and the corollary follows. \end{proof}
When $n$ is even, the periodicity of $M_k$ is more complicated. Consider the link of the singularity $z^2_0+z^2_1+\ldots +z^2_n=0$ with $n=2m$ and denote the $(4m+1)$-dimensional Kervaire sphere by $\Sigma$ and the tangent $S^n$-sphere bundle to $S^{n+1}$ by $T$. Then $M_{k+8}$ is diffeomorphic to $M_k$ and the diffeomorphism types are listed in the table (see \cite{DK}, Proposition 6.1)
\parskip=5mm
\begin{tabular}{|c|c|c|c|c|c|} \hline $M_1\simeq M_7$ & $M_2$ & $M_3\simeq M_5$ & $M_4$ & $M_6$ & $M_8$ \\ \hline $S^{2n+1}$ & ~$T$~ & ~$\Sigma$~ & ~$(S^n\times S^{n+1})\#\Sigma$~ & ~ $T\#\Sigma$~ &
~ $S^n\times S^{n+1}$\\ \hline \end{tabular}
\noindent The following result is due to Stevens (\cite{Stev}, Theorem 9).
\begin{cor} If for branched cyclic covers $M_k$ of a $(2n+1)$-manifold $M$ with simple open book decomposition ${\rm Var}(\vf^d)=0$, then $M_k$ and $M_{k+2d}$ are homeomorphic and $M_k$ and $M_{k+4d}$ are diffeomorphic. Moreover, if $n=2~or~6$, then $M_k$ and $M_{k+d}$ are diffeomorphic. \end{cor} \begin{proof} When $n=2$ the mapping class group is trivial and $[\vf^d]=Id$. If $n=6$ then $G\cong 0$ and $bP_{14}\cong 0$ (see \cite{KerMil}, Lemma 7.2) which implies that $[\vf^d]=Id$. For the other even $n$ we know that the group $G$ is isomorphic either to $\zint_2$ or $\zint_2\oplus \zint_2$ and hence $\chi_r(\vf^d)$ has order two. Therefore $\vf^{2d}\in bP_{4m+2}$, i.e. $M_k$ is homeomorphic to $M_{k+2d}$, and since the group $bP_{4m+2}$ is either trivial or $\zint_2$ (see \cite{KerMil}), $\vf^{4d}$ must be pseudo-isotopic to the identity. \end{proof}
\noindent {\tt Example 2:} (The authors are indebted to the referee for suggesting this example.) Consider again the singularity $z^2_0+z^2_1+\ldots +z^2_n=0$ with $n=2m$. Assume in addition that $m\not\equiv 0 \pmod{4}$ and that the Kervaire sphere $\Sigma \in bP_{4m+2}$ is exotic, e.g. when $4m+2\ne 2^l-2$ (see \cite{Browder}). Here the Milnor fiber $F$ is the tangent disc bundle to the sphere $S^{2m}$ and hence $\DblF\simeq S^{2m}\times S^{2m}$. It is also well known that the geometric monodromy $\vf$ of this singularity satisfies the properties: $\vf_*=-Id$, ${\rm Var}(\vf^2)=0$ and ${\rm Var}(\vf)\ne 0$ (cf. \cite{Looijenga}, Chapter 3). Since $M_0$ is not diffeomorphic to $M_2$ and $M_1$ is not diffeomorphic to $M_5$, $\chi_r([\vf^2])$ will be a generator of $\Rhom\cong\int_2$ and $[\vf^4]$ will be a generator of $bP_{4m+2}\cong\int_2$. Since $\Theta_{4m+1}\cong bP_{4m+2}\oplus {\rm Coker}(J_{4m+1})$ (cf. \cite{Brumfiel}) we see that in this case $\K(V) \cong\int_4\oplus {\rm Coker}(J_{4m+1})$ and the exact sequence (\ref{kervar}) doesn't split.
\subsection{Periodicity in dimension 3}
It is known that if the dimension of the open book $M^{2n+1}$ is three, then there is homological periodicity (see references in \cite{DK}) but there is no topological one. For the sake of completeness we illustrate this with the following classical example (cf. \cite{Rolfsen}, Chapter 10. D.). Let $f(z_0,z_1)=z_0^2+z_1^3$ be the complex polynomial which defines the curve $V=\{f(z_0,z_1)=0\}$ in $\cn^2$ with the cusp at the origin. The corresponding Milnor fibration has monodromy $\vf$ of order six, the boundary of the fiber $F$ is the trefoil knot $K$ and $\Va (\vf^6)=0$. This fibration gives the open book structure to the standard 3-sphere $S^3=M_1=F_{\vf}\cup (K\times D^2)$. We show that $M_7\ne M_1$ and $M_6\ne M_0=(F\times S^1)\cup (K\times D^2)$.
Let us first compare $\pi_1(M_0)$ with $\pi_1(M_6)$. The theorem of Seifert and Van Kampen entails that $\pi_1(M_0)\cong \pi_1(F)$ which is the free group on two generators. As for $M_6$ one can easily find using the Reidemeister-Schreier theorem a presentation for $\pi_1(F_{\vf^6})$ and then show that $\pi_1(M_6)$ admits the following presentation: $$
\left\langle Z_1,Z_2,\ldots,Z_6~|~Z_1=Z_6Z_2,~\ldots,~Z_j=Z_{j-1}Z_{j+1},~ \ldots,~ Z_6=Z_5Z_1\right\rangle $$ It takes a bit more effort to show that this group is isomorphic to the group of upper unitriangular $3\times 3$ matrices with integer coefficients (cf. \cite{Mil3}, \S8) $$ {\cal H}\cong \{ \begin{pmatrix} 1 & a & c\\ 0 & 1 & b\\ 0 & 0 & 1
\end{pmatrix}|a,b,c\in\zint \} $$
Suppose now that $M_7$ were homeomorphic to the sphere. Then we could take the union of $N_7$ and $D^4$ (recall that $N_7$ is the cyclic covering of $D^4$ branched along the fiber $(F,K)\hra (D^4,S^3)$ where $F\cap S^3=K$): $$ W^4:=N_7\bigcup_{S^3} D^4 $$ Since $N_7$ is parallelizable (see \cite{Cappell}, Theorem 5 or \cite{Kauf-book}, Chapter XII), $W^4$ would be a closed spin-manifold. Hence its signature $\sigma(W^4)=\sigma(N_7)$ must be a multiple of 16 by the theorem of Rokhlin \cite{Rokh}. But $\sigma(N_7)=-8$ as one can find using the Seifert pairing on $H_1(F)$ (cf. \cite{Cappell}; \cite{Kauf-book}), and hence $M_7\ne M_1$. Actually much more is known. Milnor in \cite{Mil3} proved that $\pi_1(M_r)$ is isomorphic to the commutator subgroup $[\Gamma,\Gamma]$ of the centrally extended triangle group $\Gamma$ which has a presentation $$ \Gamma\cong \langle
\gamma_1,~\gamma_2,~\gamma_3~|~\gamma_1^2=\gamma_2^3= \gamma_3^r=\gamma_1\cdot \gamma_2\cdot \gamma_3 \rangle $$ This group $\Gamma$ is infinite when $r\geq 6$ (see \cite{Mil3}, \S2,3) and hence $[\Gamma,\Gamma]$, that has index $r-6$, is infinite too. In particular, none of the cyclic coverings of $S^3$ branched along the trefoil knot can be simply connected.
\parskip=1mm
\noindent University of Illinois at Chicago \\ Department of Mathematics, Statistics and Computer Science\\ 851 S.Morgan st. Chicago, IL 60607
\noindent {\small E-mail address:~kauffman@uic.edu}
~
\noindent International University Bremen\\ School of Engineering \& Science\\ P.O. Box 750 561\\ 28725 Bremen, Germany
\noindent {\small E-mail address:~n.krylov@iu-bremen.de}
\end{document} |
\begin{document}
\global\long\def\overset{{\scriptscriptstyle \perp}}{\oplus}{\overset{{\scriptscriptstyle \perp}}{\oplus}} \global\long\def\oton#1#2{#1_{1},\dots,#1_{#2}} \global\long\def\text{rk}{\text{rk}} \global\long\def\text{tr}{\text{tr}} \global\long\def\text{adj}{\text{adj}} \global\long\def\text{Im}{\text{Im}} \global\long\def\text{span}{\text{span}} \global\long\def\notin{\notin} \global\long\def\text{Id}{\text{Id}} \global\long\def\text{parallel}{\text{parallel}} \global\long\def\text{meridian}{\text{meridian}} \global\long\def\text{Sgn}{\text{Sgn}} \global\long\def\text{Fix}{\text{Fix}} \global\long\def\text{Hess}{\text{Hess}} \global\long\def\ooton#1#2{#1_{0},\dots,#1_{#2}} \global\long\def\text{Otherwise}{\text{Otherwise}} \global\long\def\norm#1{\left\Vert #1\right\Vert } \global\long\def\text{Nill}{\text{Nill}} \global\long\def\text{Fix}{\text{Fix}} \global\long\def\text{Spec}{\text{Spec}} \global\long\def\text{Ind}{\text{Ind}}
\global\long\def\text{Hom}{\text{Hom}} \global\long\def\text{Ob}{\text{Ob}} \global\long\def\text{coker}{\text{coker}} \global\long\def\text{Rad}{\text{Rad}} \global\long\def\text{Supp}{\text{Supp}} \global\long\def\text{Aut}{\text{Aut}} \global\long\def\text{Gal}{\text{Gal}} \global\long\def\text{Ann}{\text{Ann}} \global\long\def\text{Mayer-Vietoris}{\text{Mayer-Vietoris}} \global\long\def\text{conv}{\text{conv}} \global\long\def\text{diam}{\text{diam}} \global\long\def\text{length}{\text{length}} \global\long\def\text{tp}{\text{tp}} \global\long\def\text{lcm}{\text{lcm}} \global\long\def\text{Core}{\text{Core}} \global\long\def\text{ad}{\text{ad}} \global\long\def\text{ord}{\text{ord}} \global\long\def\text{rank}{\text{rank}} \global\long\def\text{Inn}{\text{Inn}} \global\long\def\overline{{\scriptstyle \bigbox}}{\overline{{\scriptstyle \bigbox}}}
\title{Classes of free group extensions.} \author{Noam M.D. Kolodner} \maketitle \begin{abstract} In this paper we identify different classes of free group extensions using core graphs, by further developing machinery from \cite{KOLODNER2020}. We show that every free group extension $H\leq K\leq F$ has a base $B$ such that the associated pointed graph morphism $\Gamma_{B}\left(H\right)\to\Gamma_{B}\left(K\right)$ is onto. But if we examine graphs without base points, there is an extension $\left\langle b\right\rangle \leq\left\langle b,aba^{-1}\right\rangle <F_{\left\{ a,b\right\} }$ such that for every base of $F_{\left\{ a,b\right\} }$ the associated graph morphisms are injective. \end{abstract}
\section{Introduction}
In this paper we identify different classes of free group extensions using core graphs, by further developing machinery from \cite{KOLODNER2020}. Leveraging the theory of topological cover spaces, Stallings \cite{MR695906} established a correspondence between subgroups of a free group and labeled graphs called core graphs. Let $F$ be a free group and $B$ a base. For every subgroup $H\leq F$ we associate a pointed labeled graph $\Gamma_{B}\left(H\right)$ and for subgroups $H\leq K\leq F$ a graph morphism $\Gamma_{B}\left(H\right)\to\Gamma_{B}\left(K\right)$. Thus we realize the category of subgroups of free groups ordered by inclusion as a subcategory of the category of pointed labeled graphs. When we order subgroups by inclusion, these inclusions have no ``flavor''. But as morphisms in the category of labeled graphs they have easily detectable properties. Labeled graph morphisms can be injective or surjective, for instance. The problem is that these properties are incidental and are dependent on the arbitrary base chosen for constructing the correspondence, so if one wants to leverage these properties to study algebraic properties of free group extensions one must look at invariant properties.
The first attempt to do this was made by Miasnikov Ventura and Weil \cite{MR2395796}, who conjectured that extensions $H\leq K\leq F$, such that the morphism of the corresponding core graphs is surjective for every base, are algebraic extensions (i.e. such that $H$ is not included in any proper free factor of $K$). Puder and Parzanchevski showed this to be false for subgroups of a free group with two generators but conjectured that it is still true for free groups with more generators, or that it is true if one allows automorphisms of free extension of the ambient free group $F$. The author of this paper found a counter example for the revised conjectures \cite{KOLODNER2020} and thus showed that algebraic extensions are strictly included in extensions that are onto on all bases, which form a separate extension class. Parzanchevski and Puder \cite{MR3264763} suggested another class of extensions: one where there \emph{exists} a base for which the graph morphism is onto. They asked if this is true for all extensions. Another suggestion proposed by Verd\'{u} in his masters thesis \cite{meunpub2020} is an extension class where graph morphisms are injective for every base.
In Theorem \ref{thm:ontoallbase} we show that for every extension $H\leq K\leq F$ there exists a base such that the morphism of the corresponding labeled graphs is onto; moreover one obtains this base by conjugation. Thus the extension class where there exists a base that is onto includes all extensions and the extension class where every base is injective is empty. If instead of the category of pointed labeled graphs we look at graphs without base points, and instead of automorphisms we look at outer automorphisms we show that in this setting these classes of extensions are non-trivial. Using the methods developed in \cite{KOLODNER2020} we show that the extension $\left\langle b\right\rangle \leq\left\langle b,aba^{-1}\right\rangle $ is injective for every outer automorphism. Thus in the new setting there is a class of extension whose graph morphism is injective on every base and there is also a class of extensions s.t. there exists a base where the graph morphism is surjective. In order to prove that extension $\left\langle b\right\rangle \leq\left\langle b,aba^{-1}\right\rangle $ has the desired property we further develop in this paper the machinery from \cite{KOLODNER2020}, to deal with cases where a graph does not have stencil finiteness.
\section{\label{sec:Preliminaries}Preliminaries}
In this paper we use the machinery developed in \cite{KOLODNER2020}. We present the definitions we use in the paper, merging the language of \cite{MR1882114} and \cite{MR695906}. \begin{defn}[Graphs] We use graphs in the sense of Serre \cite{MR0476875}: A \emph{graph} $\Gamma$ is a set $V\left(\Gamma\right)$ of vertices and a set $E\left(\Gamma\right)$ of edges with a function $\iota\colon E\left(\Gamma\right)\to V\left(\Gamma\right)$ called the initial vertex map and an involution $\overline{{\scriptstyle \bigbox}}\colon E\left(\Gamma\right)\to E\left(\Gamma\right)$ with $\overline{e}\neq e$ and $\overline{\overline{e}}=e$. A \emph{graph morphism} $f\colon\Gamma\to\Delta$ is a pair of set functions $f^{E}\colon E\left(\Gamma\right)\to E\left(\Delta\right)$ and $f^{V}\colon V\left(\Gamma\right)\to V\left(\Delta\right)$ that commute with the structure functions. A \emph{path }in $\Gamma$ is a finite sequence $\oton en\in E\left(\Gamma\right)$ with $\iota\left(\overline{e}_{k}\right)=\iota\left(e_{k+1}\right)$ for every $1\leq k<n$. The path is \emph{closed or circuit} if $\iota\left(\overline{e}_{n}\right)=\iota\left(e_{1}\right)$, and \emph{reduced} if $e_{k+1}\neq\overline{e}_{k}$ for all $k$. All the graphs in the paper are assumed to be connected unless specified otherwise, namely, for every $e,f\in E\left(\Gamma\right)$ there is a path $e,\oton en,f$. \end{defn}
\begin{defn}[Labeled graphs] Let $X$ be a set and let $X^{-1}$ be the set of its formal inverses. We define $R_{X}$ to be the graph with $E\left(R_{X}\right)=X\cup X^{-1}$, $V\left(R_{X}\right)=\left\{ *\right\} $, $\overline{x}=x^{-1}$ and $\iota\left(x\right)=*.$ An \emph{$X$-labeled graph} is a graph $\Gamma$ together with a graph morphism $l\colon\Gamma\to R_{X}$. A morphism of $X$-labeled graphs $\Gamma$ and $\Delta$ is a graph morphism $f\colon\Gamma\to\Delta$ that commutes with the label functions. Let $\mathcal{P}\left(\Gamma\right)$ be the set of all the paths in $\Gamma$, let $F_{X}$ be the free group on $X$ and let $P=\oton en$ be a path. The edge part of the label function $l^{E}\colon E\left(\Gamma\right)\to E\left(R_{X}\right)$ can be extended to a function $l\colon\mathcal{P}\left(\Gamma\right)\to F_{X}$ by the rule $l\left(P\right)=l\left(e_{1}\right)\ldots l\left(e_{n}\right)$.
A \emph{pointed }$X$-labeled graph is an $X$-labeled graph that has a distinguished vertex called the base point. A morphism of a pointed labeled graph sends the base point to the base point. This constitutes a category called $X\text{-Grph}$. For a pointed $X$-labeled graph $\Gamma$ we define $\pi_{1}\left(\Gamma\right)$ to be \[
\pi_{1}(\Gamma)=\left\{ l\left(P\right)\in F_{X}\,|\,P\text{ is a closed path beginning at the base point}\right\} . \] \end{defn}
\begin{defn} Let $F_{X}$ be the free group on the set $X$. We define a category $\text{Sub}\left(F_{X}\right)$, whose objects are subgroups $H\leq F_{X}$ and there is a unique morphism in $\text{Hom}\left(H,K\right)$ iff $H\leq K$. It is easy to verify that $\pi_{1}$ is a functor from $X\text{-Grph}$ to $\text{Sub}\left(F_{X}\right)$. \end{defn}
Note: The functor $\pi_{1}$ defined above is not the fundamental group of $\Gamma$ as a topological space. Rather, if one views $\Gamma$ and $R_{X}$ as topological spaces and $l$ as a continuous function, then $\pi_{1}$ is the image of the fundamental group of $\Gamma$ in that of $R_{X}$ under the group homomorphism induced by $l$. \begin{defn}[Folding] A labeled graph $\Gamma$ is \emph{folded }if $l\left(e\right)\neq l\left(f\right)$ holds for every two edges $e,f\in E\left(\Gamma\right)$ with $\iota\left(f\right)=\iota\left(e\right)$. We notice that there is at most one morphism between two pointed folded labeled graphs. If $\Gamma$ is not folded, there exist $e,f\in E\left(\Gamma\right)$ s.t.\ $\iota\left(e\right)=\iota\left(f\right)$ and $l\left(e\right)=l\left(f\right)$; Let $\Gamma'$ be the graph obtained by identifying the vertex $\iota\left(\overline{e}\right)$ with $\iota\left(\overline{f}\right)$, the edges $e$ with $f$ and $\overline{e}$ with $\overline{f}$. We say $\Gamma'$ is the result of \emph{folding} $e$ and $f$. The label function $l$ factors through $\Gamma'$, yielding a label function $l'$ on $\Gamma'$, and we notice that $\pi_{1}\left(\Gamma\right)=\pi_{1}\left(\Gamma'\right)$. \end{defn}
\begin{defn}[Core graph] A \emph{core graph} $\Gamma$ is a labeled, folded, pointed graph s.t.\ every edge in $\Gamma$ belongs to a closed reduced path around the base point. In the case of finite graphs this is equivalent to every
$v\in V\left(\Gamma\right)$ having $\deg(v):=|\iota^{-1}(v)|>1$ except the base point which can have any degree. \end{defn}
\begin{defn} Let $X\text{-CGrph}$ be the category of connected, pointed, folded, $X$-labeled core graphs. Define a functor $\Gamma_{X}\colon\text{Sub}\left(F_{X}\right)\to X\text{-CGrph}$ that associates to the subgroup $H\leq F_{X}$ a graph $\Gamma_{X}\left(H\right)$ (which is unique up to a unique isomorphism) s.t.\ $\pi_{1}(\Gamma_{X}(H))=H$. \end{defn}
\begin{fact}[\cite{MR695906,MR1882114}] The functors $\pi_{1}$ and $\Gamma$ define an equivalence between the categories $X\text{-CGrph}$ and $\text{Sub}\left(F_{X}\right)$. \end{fact}
The correspondence between the categories of $X\text{-CGrph}$ and $\text{Sub}\left(F_{X}\right)$ follows from the theory of cover spaces. Let us sketch a proof. We regard $R_{X}$ as a topological space and look at the category of connected pointed cover spaces of $R_{X}$. This category is equivalent to $\text{Sub}\left(F_{X}\right)$, following from the fact that $R_{X}$ has a universal cover. Let $\Gamma$ be a connected folded $X$-labeled core graph, viewed as a topological space and $l$ as a continuous function. There is a unique way up to cover isomorphism to extend $\Gamma$ to a cover of $R_{X}$. There is also a unique way to associate a core graph to a cover space of $R_{X}$. This gives us an equivalence between the category of connected pointed cover spaces of $R_{X}$ and pointed connected folded $X$-labeled core graphs. \begin{defn} By uniqueness of the core graph of a subgroup we can define a functor $\text{Core}\colon\text{\ensuremath{X}-Grph}\to X\text{-CGrph}$ that associates to a graph $\Gamma$ a core graph s.t.\ $\pi_{1}\left(\text{Core}\left(\Gamma\right)\right)=\pi_{1}\left(\Gamma\right)$. \end{defn}
\begin{defn} Let $\Gamma$ be a graph with a vertex $v$ of degree one which is not the base-point. For $e=\iota^{-1}\left(v\right)$, let $\Gamma'$ be the graph with $V\left(\Gamma'\right)=V\left(\Gamma\right)\backslash\left\{ v\right\} $ and $E\left(\Gamma'\right)=E\left(\Gamma\right)\backslash\left\{ e,\overline{e}\right\} $. We say that $\Gamma'$ is the result of \emph{trimming} $e$ from $\Gamma$, and we notice that $\pi_{1}\left(\Gamma\right)=\pi_{1}\left(\Gamma'\right)$. \end{defn}
\begin{rem}
For a finite graph $\Gamma$, after both trimming and folding $\left|E\left(\Gamma'\right)\right|<\left|E\left(\Gamma\right)\right|$. If no foldings or trimmings are possible then $\Gamma$ is a core graph. This means that after performing a finite number of trimmings and foldings we arrive at $\text{Core}\left(\Gamma\right)$. It follows from the uniqueness of $\text{Core}\left(\Gamma\right)$ that the order in which one performs the trimmings and foldings does not matter. \end{rem}
\begin{defn}[Whitehead graph] A \emph{2-path} in a graph $\Gamma$ is a pair $\left(e,f\right)\in E\left(\Gamma\right)\times E\left(\Gamma\right)$ with $\iota\left(f\right)=\iota\left(\overline{e}\right)$ and $f\neq\overline{e}$. If $\Gamma$ is $X$-labeled, the set \[ W\left(\Gamma\right)=\left\{ \left\{ l\left(e\right),l\left(\overline{f}\right)\right\} \mid\left(e,f\right)\text{ is a \ensuremath{2}-path in \ensuremath{\Gamma}}\right\} \] forms the set of edges of a combinatorial (undirected) graph whose vertices are $X\cup X^{-1}$, called the \emph{Whitehead graph }of $\Gamma$. If $w\in F_{X}$ is a cyclically reduced word, the Whitehead graph of $w$ as defined in \cite{MR1575455,MR1714852} and the Whitehead graph of $\Gamma\left(\left\langle w\right\rangle \right)$ defined here coincide. Let $W_{X}=W\left(R_{X}\right)$ be the set of edges of the Whitehead graph of $R_{X}$, which we call the \emph{full Whitehead graph}. Let $x,y\in X\cup X^{-1}$ and let $\left\{ x,y\right\} \in W_{X}$ be an edge. We denote $x.y=\left\{ x,y\right\} $ (this is similar to the notation in \cite{MR1812024}). \end{defn}
\begin{defn} A homomorphism $\varphi\colon F_{Y}\to F_{X}$ is \emph{non-degenerate} if $\varphi\left(y\right)\neq1$ for every $y\in Y$. \end{defn}
\begin{defn} Let $w\in F_{X}$ be a reduced word of length $n$. Define $\Gamma_{w}$ to be the $X$-labeled graph with $V\left(\Gamma_{w}\right)=\left\{ 1,\dots,n+1\right\} $ forming a path $P$ labeled by $l\left(P\right)=w$. Notice that $\Gamma_{w}\cong\Gamma_{w^{-1}}$. \end{defn}
\begin{defn}
Let $\varphi\colon F_{Y}\to F_{X}$ be a non-degenerate homomorphism. We define a functor $\mathcal{F}_{\varphi}$ from $Y$-labeled graphs to $X$-labeled graphs by sending $y$-labeled edges to $\varphi\left(y\right)$-labeled paths. Formally, let $\Delta$ be a $Y$-labeled graph and let $E_{0}=\left\{ e\in E\left(\Delta\right)|l\left(e\right)\in Y\right\} $
be an orientation of $\Delta$, namely, $E\left(\Delta\right)=E_{0}\sqcup\{\overline{e}\,|\,e\in E_{0}\}$. For every $e\in E\left(\Delta\right)$ let $n_{e}\in\mathbb{N}$ be the length of the word $\varphi\left(l\left(e\right)\right)\in F_{X}$ plus one. We consider $V\left(\Delta\right)$ as a graph without edges, take the disjoint union of graphs $\bigsqcup_{e\in E_{0}}\Gamma_{\varphi\left(l\left(e\right)\right)}\sqcup V\left(\Delta\right)$ and for every $e\in E_{0}$ glue $1\in V\left(\Gamma_{\varphi\left(l\left(e\right)\right)}\right)$\textcolor{red}{{} }to $\iota\left(e\right)\in V\left(\Delta\right)$, and $n_{e}\in V\left(\Gamma_{\varphi\left(l\left(e\right)\right)}\right)$ to $\iota\left(\overline{e}\right)\in V\left(\Delta\right)$. Let $\Delta$ and $\Xi$ be $Y$-labeled graphs, and $f\colon\Delta\to\Xi$ a graph morphism. As for functionality, if $f\colon\Delta\to\Xi$ is a morphism of $Y$-labeled graphs, $\mathcal{F}_{\varphi}f$ is defined as follows: every edge in $\mathcal{F}_{\varphi}\left(\Delta\right)$ belongs to a path $\mathcal{F}_{\varphi}\left(e\right)$ for some $e\in E(\Delta)$, and we define $\left(\mathcal{F}_{\varphi}f\right)\left(\mathcal{F}_{\varphi}\left(e\right)\right)=\mathcal{F}_{\varphi}\left(f\left(e\right)\right)$. \end{defn}
\begin{rem} \label{rem:Let--we}For $H\leq F_{Y}$ we notice that $\text{Core}(\mathcal{F}_{\varphi}\Gamma_{Y}(H))=\Gamma_{X}\left(\varphi\left(H\right)\right)$. \end{rem}
\begin{defn}[Stencil] Let $\Gamma$ be an $Y$-labeled graph, and $\varphi\colon F_{Y}\to F_{X}$ a non-degenerate homomorphism. We say that the pair $\left(\varphi,\Gamma\right)$ is a \textit{stencil} iff $\mathcal{F}_{\varphi}\left(\Gamma\right)$ is a folded graph. Notice that if $\Gamma$ is not folded, then $\mathcal{F}_{\varphi}\left(\Gamma\right)$ is not folded for any $\varphi$. \end{defn}
\begin{defn} Let $\Gamma$ be a $Y$-labeled folded graph. An FGR object $(Y,N_{Y})$ is said to be a \textit{stencil space}\emph{ of }$\Gamma$ if $W\left(\Gamma\right)\subseteq N_{Y}$. The reason for the name is that for any object $\left(X,N_{X}\right)$ and morphism $\varphi\in\text{Hom}\left((Y,N_{Y}),\left(X,N_{X}\right)\right)$, the pair $\left(\varphi,\Gamma\right)$ is a stencil. \end{defn}
\begin{defn} Let $\tau\colon F_{X}\backslash\left\{ 1\right\} \to X\cup X^{-1}$ be the function returning the last letter of a reduced word. For reduced words $u,v$ in a free group, we write $u\cdot v$ to indicate that there is no cancellation in their concatenation, namely $\tau(u)\neq\tau(v^{-1})$. \end{defn}
\begin{defn}[FGR] The objects of the category \emph{Free Groups with Restrictions} ($\mathbf{FGR}$) are pairs $\left(X,N\right)$ where $X$ is a set of ``generators'' and $N\subseteq W_{X}$ a set of ``restrictions''. A morphism $\varphi\in\text{Hom}_{\mathbf{FGR}}\left(\left(X,N\right),\left(Y,M\right)\right)$ is a group homomorphism $\varphi\colon F_{Y}\rightarrow F_{X}$ with the following properties: \begin{enumerate} \item[(i)] For every $x\in Y$, $\varphi\left(x\right)\neq1$ ($\varphi$ is non-degenerate). \item[(ii)] For every $x\in Y$, $W(\Gamma_{\varphi\left(x\right)})\subseteq M$. \item[(iii)] For every $x.y\in N$, $\varphi(x)\cdot\varphi(y)^{-1}$ (i.e.\ $\tau\left(\varphi\left(x\right)\right)\neq\tau\left(\varphi\left(y\right)\right)$). \item[(iv)] For every $x.y\in N$, $\tau\left(\varphi\left(x\right)\right).\tau\left(\varphi\left(y\right)\right)\in M$.\footnote{Technically, (iv) implies (iii), as $M\subseteq W_{Y}$ and $x.x\notin W_{Y}$.} \end{enumerate} \end{defn}
\section{There is always a base where the graph morphism is onto}
All lemmas and propositions used here can be found in the section titled The core functor in \cite{KOLODNER2020}. \begin{prop} \label{prop:Let--a}Let $H\leq F_{Y}$ be a subgroup and let $w\in F_{Y}$ be a word. We can obtain the core graph $\Gamma_{Y}\left(wHw^{-1}\right)$ from the graph $\Gamma_{Y}\left(H\right)$ by the following process: \begin{enumerate} \item Attach a reduced path labeled $w$ to the base point in $\Gamma_{Y}\left(H\right)$. \item Set the new base point to be at the beginning of the path labeled by $w$. \item Fold and trim if necessary. \end{enumerate} \end{prop}
\begin{thm} \label{thm:ontoallbase}Let $F_{Y}$ be a free group with a finite set of generators $Y$ and let $H\leq K\leq F_{Y}$ be finitely generated subgroups. Then there is a basis $B$ of $F_{Y}$ s.t. the morphism of Stallings graphs $\Gamma_{B}\left(H\right)\to\Gamma_{B}\left(K\right)$ is onto. Moreover this basis can be obtained by conjugation. \end{thm}
\begin{proof} We will prove the following equivalent statement: there is an automorphism $\varphi\in\text{Inn}\left(F_{Y}\right)$ s.t. $\Gamma_{Y}\left(\varphi\left(H\right)\right)\to\Gamma_{Y}\left(\varphi\left(K\right)\right)$ is onto. Without loss of generality we can assume that the base point of $\Gamma_{Y}\left(H\right)$ has degree greater than or equal to two (if not this can be corrected by conjugation). Let $u\in F_{Y}$ be the label of a reduced circuit in $\Gamma_{Y}\left(K\right)$ based at the base point that traverses each edge of $\Gamma_{Y}\left(K\right)$ at least once (we are not bothered if it traverses some edges multiple times). Let $\varphi\in\text{Inn}\left(F_{Y}\right)$ be conjugation by $u$. By construction $u\in K$, therefore $\text{Core}\mathcal{F}_{\varphi}\left(\Gamma_{Y}\left(K\right)\right)=\Gamma_{Y}\left(\varphi\left(K\right)\right)=\Gamma_{Y}\left(uKu^{-1}\right)=\Gamma_{Y}\left(K\right)$. We construct $\Gamma_{Y}\left(uHu^{-1}\right)$ by the process described in Proposition \ref{prop:Let--a} but stop at stage 2 (before performing folding and trimming); we denote this graph by $\Gamma'$. By construction the graph morphism $\Gamma'\to\Gamma_{Y}\left(K\right)$ is onto. Because $\Gamma_{Y}\left(H\right)$ is folded and its base point has degree at least 2, the graph $\Gamma'$ satisfies the conditions of Lemma 2.1 from \cite{MR3211804}; this means that $\text{Core}\left(\Gamma'\right)$ is obtained without trimming. Without trimming the morphism remains onto when one takes its core (Remark $3.16$ from \cite{KOLODNER2020}). Thus we get that $\text{Core}\left(\Gamma'\to\Gamma_{Y}\left(K\right)\right)$ is onto, but $\text{Core}\left(\Gamma'\to\Gamma_{Y}\left(K\right)\right)=\Gamma_{Y}\left(\varphi\left(H\right)\right)\to\Gamma_{Y}\left(\varphi\left(K\right)\right).$ \end{proof} \begin{cor} Let $H<K\leq F_{Y}$ ($H$ strictly contained in $K$). There is a basis $B$ of $F_{Y}$ s.t. 
the graph morphism $\Gamma_{B}\left(H\right)\to\Gamma_{B}\left(K\right)$ is not injective. \end{cor}
\begin{proof} There exists a basis $B$ s.t. $\Gamma_{B}\left(H\right)\to\Gamma_{B}\left(K\right)$ is onto. Because $H$ is strictly contained in $K$ the graph morphism $\Gamma_{B}\left(H\right)\to\Gamma_{B}\left(K\right)$ cannot be an isomorphism therefore it is not injective. \end{proof}
\section{Example}
In the category of pointed labeled core graphs we can't have a free group extension s.t. for every automorphism the graph morphism is injective. We saw that the obstruction was conjugation we will show that this is the only obstruction. For this we focus our attention on labeled graphs without a base point. Miasnikov and Kapovich \cite{MR1882114} called this `` the type of a graph''. To get the type of a graph we forget the base point and trim again. We can look at the category of core $X$-labeled graphs without base points. Let $\Gamma,\Delta$ be $X$-labeled core graphs. There is a graph morphism $\Gamma\to\Delta$ iff there exists a $u\in F_{X}$ s.t. $u\pi_{1}\left(\Gamma\right)u^{-1}<\pi_{1}\left(\Delta\right)$ (technically one has to choose a base point for $\pi_{1}$ to be well defined. But it is well defined up to conjugation which is what we are using here). We notice that without a base point there is no longer a unique graph morphism between two graphs. Because we have an action of $\text{Out}\left(F_{X}\right)$ on the set of subgroups of $F_{X}$ up to conjugation this gives us an action of $\text{Out}\left(F_{X}\right)$ on morphisms $\Gamma\to\Delta$ in the category of labeled graphs. In this setting there is a graph morphism $\Gamma\to\Delta$ that is injective in its whole orbit under outer automorphism. We give the example of $\left\langle b\right\rangle <\left\langle b,aba^{-1}\right\rangle $. We will use the tools developed in \cite{KOLODNER2020}. \begin{defn} We define a functor \[ \text{Trimf}:PLCGraphs\to LCGraphs \]
from pointed labeled core graphs to labeled core graphs. The functor forgets the base point and trims the ``tail'' (trimf stands for forget then trim). It takes graph morphisms to their restrictions. This definition is indeed a legal functor: let $\Gamma\to\Delta$ be a morphism of pointed labeled core graphs. Let $v\in V\left(\Delta\right)$ be the base point of $\Delta$ and suppose it is of valency one (otherwise no trimming occurs and the definition is clearly legal). Let $w\in V\left(\Gamma\right)$ be the inverse image of $v$; it is the base point of $\Gamma$. Since $\Gamma$ is folded the morphism $\Gamma\to\Delta$ is locally injective, therefore $w$ must also be of valency one. Let $\Gamma',\Delta'$ be the graphs obtained by trimming the edges incident to $v$ and $w$ respectively. We see that the morphism $\Gamma'\to\Delta'$ obtained by restricting $\Gamma\to\Delta$ to $\Gamma'$ is well defined. By induction we can trim the whole ``tails'' of $\Gamma$ and $\Delta$. (Trimf would not be defined if we include graphs that aren't folded). \end{defn}
We denote $\Gamma=\Gamma_{\left\{ a,b\right\} }\left(\left\langle b\right\rangle \right)$ and $\Delta=\Gamma_{\left\{ a,b\right\} }\left(\left\langle b,aba^{-1}\right\rangle \right)$ and let $X$ be a countably infinite set. \begin{thm}
\label{thm:All-the-morphism}All the morphisms in the set $\left\{ \text{Trimf}\circ\text{Core}\circ\mathcal{F}_{\varphi}\left(\Gamma\to\Delta\right)|\varphi\in\text{Hom}\left(\left(\left\{ a,b\right\} ,\emptyset\right),\left(X,W_{X}\right)\right)\right\} $ are injective.
Theorem \ref{thm:All-the-morphism} shows that every morphism $\Gamma\to\Delta$ in the orbit under $\text{Out}\left(F_{\left\{ a,b\right\} }\right)$ is injective: let $u\in F_{X}$ and let $\varphi_{1},\varphi_{2}:F_{\left\{ a,b\right\} }\to F_{X}$ be non-degenerate homomorphisms s.t. $u\varphi_{1}u^{-1}=\varphi_{2}$; then clearly $\text{Trimf}\circ\text{Core}\circ\mathcal{F}_{\varphi_{1}}\left(\Gamma\to\Delta\right)=\text{Trimf}\circ\text{Core}\circ\mathcal{F}_{\varphi_{2}}\left(\Gamma\to\Delta\right)$. Without loss of generality we can assume that $\left\{ a,b\right\} \subset X$, so $\text{Hom}\left(\left(\left\{ a,b\right\} ,\emptyset\right),\left(X,W_{X}\right)\right)$ includes $\text{Aut}\left(F_{\left\{ a,b\right\} }\right)$; it also includes all automorphisms of free extensions of $F_{\left\{ a,b\right\} }$, and of non-free extensions as well. We will use the method presented in \cite{KOLODNER2020} with modifications to account for the fact that we are now interested in injective, rather than surjective, morphisms. \begin{rem} \label{rem:injective}Let $\Gamma\to\Delta$ be a morphism of $U$-labeled graphs and let $N_{U}$ be a set of restrictions. Suppose $\left(U,N_{U}\right)$ is a stencil space of $\Delta$. \begin{enumerate} \item $\left(U,N_{U}\right)$ is also a stencil space of $\Gamma$. \item If $\Gamma\to\Delta$ is injective then $\mathcal{F}_{\varphi}\left(\Gamma\to\Delta\right)$ is injective for every $\varphi\in\text{Hom}\left(\left(U,N_{U}\right),\left(X,W_{X}\right)\right)$. (This is true in general; the assumption that $\left(U,N_{U}\right)$ is a stencil space is unnecessary.) \item $\text{Core}\circ\mathcal{F}_{\varphi}\left(\Gamma\to\Delta\right)=\mathcal{F}_{\varphi}\left(\Gamma\to\Delta\right)$. \end{enumerate} \end{rem}
We can use a surjectivity problem $\left(\Gamma\to\Delta,\left(U,N_{U}\right)\right)$
from \cite{KOLODNER2020} as an injectivity problem. We say that an injectivity problem resolves positively if all morphisms in $\mathscr{P}=\left\{ \text{Trimf}\circ\text{Core}\circ\mathcal{F}_{\varphi}\left(\Gamma\to\Delta\right)|\varphi\in\text{Hom}\left(\left(U,N_{U}\right),\left(X,W_{X}\right)\right)\right\} $ are injective. We distinguish three cases: \begin{enumerate} \item $\Gamma\to\Delta$ is not injective: clearly $\mathscr{P}$ resolves negatively. \item $\Gamma\to\Delta$ is injective and $\left(U,N_{U}\right)$ is a stencil space of $\Delta$: following Remark \ref{rem:injective}, $\mathscr{P}$ resolves positively. \item $\Gamma\to\Delta$ is injective and $W\left(\Delta\right)\backslash N_{U}\neq\varnothing$: in this case we cannot resolve $\mathscr{P}$ immediately. We call this the ambiguous case. \end{enumerate} If $\mathscr{P}$ is of the ambiguous case we can split into five cases using FGR. We examine the five new cases and then split again if necessary. By Theorem $3.14$ in \cite{KOLODNER2020}, for every morphism $\varphi\in\text{Hom}\left(\left(U,N_{U}\right),\left(X,W_{X}\right)\right)$ either $\mathcal{F}_{\varphi}\left(\Gamma\to\Delta\right)$ isn't injective or it ends up in a stencil case. Therefore we try to classify all possible stencil cases that may arise in this process and determine that they are all positive. In contrast to the example in \cite{KOLODNER2020}, the graph $\Delta$ does not have stencil finiteness; therefore we end this process differently. We notice that by conjugation we can assume that $b$ is cyclically reduced, so instead of $\left(\Gamma\to\Delta,\left(\left\{ a,b\right\} ,\emptyset\right)\right)$ we consider the problem $\left(\Gamma\to\Delta,\left(\left\{ a,b\right\} ,\left\{ b.b^{-1}\right\} \right)\right)$. We perform a change of coordinates (see \cite{KOLODNER2020}).
Let $V=\left\{ a,b\right\} ,N_{V}=\left\{ b.b^{-1}\right\} $ , and \[ \sigma:F_{\left\{ a,b\right\} }\to F_{\left\{ a,b\right\} },\quad\sigma\left(\alpha\right)=b,\quad\sigma\left(\beta\right)=aba^{-1}. \] We notice that $\left\langle b,aba^{-1}\right\rangle \leq\text{Im}\sigma$. For any non-degenerate $\varphi\colon F_{\{a,b\}}\rightarrow F_{X}$, the words $\varphi\left(b\right)=\varphi\circ\sigma\left(\alpha\right)$ and $\varphi\left(aba^{-1}\right)=\varphi\circ\sigma\left(\beta\right)$ are conjugate and $b$ is cyclically reduced, hence there exist reduced words $\overline{y},\overline{u},\overline{v}\in F_{X}$ such that $\varphi(b)=\overline{u}\cdot\overline{v}$, $\varphi(aba^{-1})=\overline{y}\cdot\overline{v}\cdot\overline{u}\cdot\overline{y}^{-1}$ (in particular, $\overline{v}\overline{u}$ and $\overline{u}\overline{v}$ are cyclically reduced). By non-degeneracy we can also assume $\overline{u}\neq1$, and if $\overline{v}=1$ then $\overline{u}$ is cyclically reduced. We perform a change of coordinates according to four possible cases, with $(U_{i},N_{i})$, $\psi_{i}$ and $\sigma_{i}$ being:
{} \\ \hspace*{\fill}
\begin{tabular}{|c|c|c|c|c|c|c|c|c|} \hline \# & $\overline{y}$ & $\overline{v}$ & $U_{i}$ & $N_{i}$ & $\psi_{i}(\alpha),\psi_{i}(\beta)$ & $\sigma_{i}(a),\sigma_{i}(b)$ & $\Gamma_{i}$ & $\Delta_{i}$\tabularnewline \hline \hline 1 & $\negmedspace=\negmedspace1\negmedspace$ & $\negmedspace=\negmedspace1\negmedspace$ & $u$ & $u.u^{-1}$ & $u,u$ & $u,u$ & \includegraphics[scale=0.5]{G2. 1} & \includegraphics[scale=0.5]{G2. 1}\tabularnewline \hline 2 & $\negmedspace\neq\negmedspace1\negmedspace$ & $\negmedspace=\negmedspace1\negmedspace$ & $y,u$ & $y.u^{-1},u.y,u.u^{-1}$ & $u,yuy^{-1}$ & $y,u$ & \includegraphics[scale=0.5]{G2. 1} & \includegraphics[scale=0.5]{D2. 1}\tabularnewline \hline 3 & $\negmedspace=\negmedspace1\negmedspace$ & $\negmedspace\neq\negmedspace1\negmedspace$ & $v,u$ & $v.u^{-1},u.v^{-1}$ & $uv,vu$ & $u^{-1},uv$ & \includegraphics[scale=0.5]{G4} & \includegraphics[scale=0.5]{D3. 1}\tabularnewline \hline 4 & $\negmedspace\neq\negmedspace1\negmedspace$ & $\negmedspace\neq\negmedspace1\negmedspace$ & $v,u,y$ & $v.u^{-1},u.v^{-1},y.v^{-1},u.y$ & $uv,yvuy^{-1}$ & $yv,uv$ & \includegraphics[scale=0.5]{G4} & \includegraphics[scale=0.5]{D4}\tabularnewline \hline \end{tabular}
Case 3 is the problematic case: it splits into new cases indefinitely, which shows that $\left\langle b,aba^{-1}\right\rangle $ does not have stencil finiteness. In order to treat it we define two auxiliary cases $x$ and $x'$. Cases $x$ and $x'$ include all the subcases of case 3, but they are more general and include many more cases that are irrelevant to the original question. The advantage of using cases $x$ and $x'$ is that they split into a finite set of ambiguous cases, in contrast to case 3, which splits into new ambiguous cases indefinitely. In the table below is an analysis of all the different cases.
\begin{tabular}{|c|c|c|c|c|c|c|c|} \hline \# & FGR split & Homo. & $N_{i}$ & $\Gamma_{i}$ & $\Delta_{i}$ & $W\left(\Delta_{i}\right)\backslash N_{i}$ & Comment\tabularnewline \hline 2' & & & $\begin{array}{cc} u.y, & y.u^{-1}\\ u^{-1}.y^{-1}, & u.u^{-1} \end{array}$ & \includegraphics[scale=0.5]{G2. 1} & \includegraphics[scale=0.5]{D2. 1} & $u.y^{-1}$ & triangle rule+symmetry\tabularnewline \hline 2.1 & $u.y^{-1}.1$ & $\text{Id}$ & $\begin{array}{cc} u.y, & y.u^{-1}\\ u.y,^{-1} & u.u^{-1}\\ u^{-1}.y^{-1} \end{array}$ & \includegraphics[scale=0.5]{G2. 1} & \includegraphics[scale=0.5]{D2. 1} & $\emptyset$ & $\checkmark$\tabularnewline \hline 2.2 & $u.y^{-1}.2$ & $\begin{array}{c} u\mapsto ut\phantom{^{-1}}\\ y\mapsto t^{-1}y \end{array}$ & $\begin{array}{cc} t.y, & y.u^{-1}\\ t.u^{-1}, & u.t^{-1}\\ u.y^{-1}, & t^{-1}.y^{-1} \end{array}$ & \includegraphics[scale=0.5]{G2. 2} & \includegraphics[scale=0.5]{D2. 2} & $\emptyset$ & $\checkmark$\tabularnewline \hline 2.3 & $u.y^{-1}.3$ & $\begin{array}{c} u\mapsto uy^{-1}\\ y\mapsto y\phantom{u^{-1}} \end{array}$ & $\begin{array}{cc} u.y, & y.u^{-1}\\ u^{-1}.y^{-1}, & y.y^{-1} \end{array}$ & \includegraphics[scale=0.5]{G2. 3} & \includegraphics[scale=0.5]{D2. 3} & $\begin{array}{c} u.u^{-1}\\ u.y^{-1} \end{array}$ & $\begin{array}{c} \text{contained in case 3 via}\\ u\mapsto u\\ v\mapsto y^{-1} \end{array}$\tabularnewline \hline 2.4 & $u.y^{-1}.4$ & $\begin{array}{c} u\mapsto u\phantom{y^{-1}}\\ y\mapsto u^{-1}y \end{array}$ & $\begin{array}{cc} u.u^{-1}, & y.u^{-1}\\ u^{-1}.y,^{-1} & u.y \end{array}$ & \includegraphics[scale=0.5]{G2. 1} & \includegraphics[scale=0.5]{D2. 
1} & $u.y^{-1}$ & equivalent to case 2'\tabularnewline \hline 4' & & & $\begin{array}{cc} v.u^{-1}, & u.v^{-1}\\ y.v^{-1}, & u.y\\ u^{-1}.y^{-1}, \end{array}$ & \includegraphics[scale=0.5]{G4} & \includegraphics[scale=0.5]{D4} & $v.y^{-1}$ & triangle rule+symmetry\tabularnewline \hline 4.1 & $v.y^{-1}.1$ & $\text{Id}$ & $\begin{array}{cc} v.u^{-1}, & u.v^{-1}\\ y.v^{-1} & u.y\\ u^{-1}.y^{-1} & v.y^{-1} \end{array}$ & \includegraphics[scale=0.5]{G4} & \includegraphics[scale=0.5]{D4} & $\emptyset$ & $\checkmark$\tabularnewline \hline \end{tabular}
\begin{tabular}{|c|c|c|c|c|c|c|c|} \hline 4.2 & $v.y^{-1}.2$ & $\begin{array}{c} u\mapsto vt\phantom{^{-1}}\\ y\mapsto t^{-1}y \end{array}$ & $\begin{array}{cc} t.u^{-1}, & u.v^{-1}\\ y.v^{-1} & u^{-1}.y^{-1}\\ t^{-1}.y^{-1} & v.y^{-1}\\ v.t^{-1} \end{array}$ & \includegraphics[scale=0.5]{G4. 2} & \includegraphics[scale=0.5]{D4. 2} & $\emptyset$ & $\begin{array}{c} \text{contained in case \ensuremath{2.2} via}\\ u\mapsto v\\ t\mapsto tu \end{array}$\tabularnewline \hline 4.3 & $v.y^{-1}.3$ & $\begin{array}{c} v\mapsto vy^{-1}\\ y\mapsto y\phantom{v^{-1}} \end{array}$ & $\begin{array}{cc} y^{-1}.u^{-1}, & y.v^{-1}\\ u.v^{-1}, & v.y\\ u.y \end{array}$ & \includegraphics[scale=0.5]{G4. 3} & \includegraphics[scale=0.5]{D4. 3} & $\begin{array}{c} v.v^{-1}\\ v.u \end{array}$ & $\begin{array}{c} \text{contained in case 3 via}\\ u\mapsto y^{-1}u\\ v\mapsto v \end{array}$\tabularnewline \hline 4.4 & $v.y^{-1}.4$ & $\begin{array}{c} v\mapsto v\phantom{y^{-1}}\\ y\mapsto v^{-1}y \end{array}$ & $\begin{array}{cc} v.u^{-1}, & u.v^{-1}\\ y.v^{-1}, & u.y\\ v^{-1}.y^{-1} \end{array}$ & \includegraphics[scale=0.5]{G4} & \includegraphics[scale=0.5]{D4. 4} & $u.y^{-1}$ & $\begin{array}{c} \text{contained in case 2 via}\\ u\mapsto vu\\ y\mapsto y \end{array}$\tabularnewline \hline 3.1 & $u.v.1$ & $\text{Id}$ & $\begin{array}{cc} v.u^{-1}, & u.v^{-1}\\ u.v \end{array}$ & \includegraphics[scale=0.5]{G4} & \includegraphics[scale=0.5]{D3. 1} & $\begin{array}{c} u.u^{-1}\\ v.v^{-1}\\ v^{-1}.u^{-1} \end{array}$ & ambiguous\tabularnewline \hline 3.2 & $u.v.2$ & $\begin{array}{c} v\mapsto vt\\ u\mapsto ut \end{array}$ & $\begin{array}{cc} t.u^{-1}, & t.v^{-1}\\ v.t^{-1} & u.t^{-1}\\ u.v \end{array}$ & \includegraphics[scale=0.5]{G_3. 2} & \includegraphics[scale=0.5]{D_3. 
2} & $\begin{array}{c} v^{-1}.u^{-1}\end{array}$ & $\begin{array}{c} \text{contained in \ensuremath{x'} via}\\ x\mapsto t \end{array}$\tabularnewline \hline 3.3 & $u.v.3$ & $\begin{array}{c} v\mapsto v\phantom{u}\\ u\mapsto uv \end{array}$ & $\begin{array}{cc} v.u^{-1}, & v.v^{-1}\\ u.v^{-1} \end{array}$ & \includegraphics[scale=0.5]{G3. 1. 3} & \includegraphics[scale=0.5]{D3. 1. 3} & $\begin{array}{c} u.v\\ v^{-1}.u^{-1} \end{array}$ & $\begin{array}{c} \text{contained in \ensuremath{x} via}\\ x\mapsto v \end{array}$\tabularnewline \hline 3.4 & $u.v.4$ & $\begin{array}{c} v\mapsto vu\\ u\mapsto u\phantom{v} \end{array}$ & $\begin{array}{cc} u.u^{-1}, & u.v^{-1}\\ v.u^{-1} \end{array}$ & \includegraphics[scale=0.5]{G3. 1. 4} & \includegraphics[scale=0.5]{D3. 1. 4} & $\begin{array}{c} u.v\\ v^{-1}.u^{-1} \end{array}$ & $\begin{array}{c} \text{contained in \ensuremath{x} via}\\ x\mapsto u \end{array}$\tabularnewline \hline $x$ & & & $\begin{array}{cc} v.u^{-1}, & u.v^{-1}\\ x.v^{-1}, & x.u^{-1}\\ u.x^{-1}, & v.x^{-1} \end{array}$ & \includegraphics[scale=0.5]{G3. 4g} & \includegraphics[scale=0.5]{D3. 4g} & $\begin{array}{c} u.v\\ v^{-1}.u^{-1} \end{array}$ & \tabularnewline \hline $x'$ & & & $\begin{array}{cc} t.u^{-1}, & t.v^{-1}\\ v.t^{-1}, & u.t^{-1}\\ x.u^{-1} & x.v^{-1}\\ x^{-1}.u, & x^{-1}.v\\ u.v \end{array}$ & \includegraphics[scale=0.5]{Gx_} & \includegraphics[scale=0.5]{Dx_} & $v^{-1}.u^{-1}$ & \tabularnewline \hline \end{tabular}
\begin{tabular}{|c|c|c|c|c|c|c|c|} \hline $x.1$ & $u.v.1$ & $\text{Id}$ & $\begin{array}{cc} v.u^{-1}, & u.v^{-1}\\ x.v^{-1}, & x.u^{-1}\\ u.x^{-1}, & v.x^{-1}\\ u.v \end{array}$ & \includegraphics[scale=0.5]{G3. 4g} & \includegraphics[scale=0.5]{D3. 4g} & $u^{-1}.v^{-1}$ & ambiguous\tabularnewline \hline $x.2$ & $u.v.2$ & $\begin{array}{c} v\mapsto vt\\ u\mapsto ut \end{array}$ & $\begin{array}{cc} t.u^{-1}, & t.v^{-1}\\ x.v^{-1}, & x.u^{-1}\\ t.x^{-1}, & t.x^{-1}\\ v.t^{-1} & u.t^{-1}\\ u.v \end{array}$ & \includegraphics[scale=0.5]{G. x. 2} & \includegraphics[scale=0.5]{D. x. 2} & $u^{-1}.v^{-1}$ & $\begin{array}{c} \text{contained in \ensuremath{x'} via}\\ x\mapsto tx \end{array}$\tabularnewline \hline $x$.3 & $u.v.3$ & $\begin{array}{c} v\mapsto vu\\ u\mapsto u\phantom{v} \end{array}$ & $\begin{array}{cc} u.u^{-1}, & u.v^{-1}\\ x.v^{-1}, & x.u^{-1}\\ u.x^{-1}, & v.u^{-1} \end{array}$ & \includegraphics[scale=0.5]{G. x. 3} & \includegraphics[scale=0.5]{D. x. 3} & $\begin{array}{c} u.v\\ v^{-1}.u^{-1} \end{array}$ & $\begin{array}{c} \text{contained in \ensuremath{x} via}\\ x\mapsto ux \end{array}$\tabularnewline \hline $x.4$ & $u.v.4$ & $\begin{array}{c} v\mapsto v\phantom{u}\\ u\mapsto uv \end{array}$ & $\begin{array}{cc} v.u^{-1}, & v.v^{-1}\\ x.v^{-1}, & x.u^{-1}\\ v.x^{-1}, & u.v^{-1} \end{array}$ & \includegraphics[scale=0.5]{G. x. 4} & \includegraphics[scale=0.5]{D. x. 4} & $\begin{array}{c} u.v\\ v^{-1}.u^{-1} \end{array}$ & $\begin{array}{c} \text{contained in \ensuremath{x} via}\\ x\mapsto vx \end{array}$\tabularnewline \hline $x.5$ & $u.v.5$ & $\begin{array}{c} v\mapsto u\\ u\mapsto u \end{array}$ & $\begin{array}{cc} u.u^{-1},\\ u.x^{-1}, & x.u^{-1} \end{array}$ & \includegraphics[scale=0.5]{Dx. 5} & \includegraphics[scale=0.5]{Dx. 
5} & $\emptyset$ & $\checkmark$\tabularnewline \hline $x.1.1$ & $u^{-1}.v^{-1}.1$ & $\text{Id}$ & $\begin{array}{cc} v.u^{-1}, & u.v^{-1}\\ x.v^{-1}, & x.u^{-1}\\ u.x^{-1}, & v.x^{-1}\\ u.v & u^{-1}.v^{-1} \end{array}$ & \includegraphics[scale=0.5]{G3. 4g} & \includegraphics[scale=0.5]{D3. 4g} & $\emptyset$ & $\checkmark$\tabularnewline \hline $x.1.2$ & $u^{-1}.v^{-1}.2$ & $\begin{array}{c} v\mapsto tv\\ u\mapsto tu \end{array}$ & $\begin{array}{cc} v.t^{-1}, & u.t^{-1}\\ x.t^{-1}, & x.t^{-1}\\ u.x^{-1}, & v.x^{-1}\\ u.v, & t.u^{-1}\\ t.v^{-1}. & u^{-1}.v^{-1} \end{array}$ & \includegraphics[scale=0.5]{G. x. 1. 2} & \includegraphics[scale=0.5]{D. x. 1. 2} & $\emptyset$ & $\begin{array}{c} \text{contained in \ensuremath{x}'.1 via}\\ x\mapsto xt \end{array}$\tabularnewline \hline $x.1.3$ & $u^{-1}.v^{-1}.3$ & $\begin{array}{c} v\mapsto uv\\ u\mapsto u\phantom{v} \end{array}$ & $\begin{array}{cc} v.u^{-1}, & u.u^{-1}\\ x.u^{-1}, & u.v^{-1}\\ u.x^{-1}, & v.x^{-1}\\ u.v \end{array}$ & \includegraphics[scale=0.5]{G. x. 1. 3} & \includegraphics[scale=0.5]{D. x. 1. 3} & $u^{-1}.v^{-1}$ & $\begin{array}{c} \text{contained in \ensuremath{x}.1 via}\\ x\mapsto xu \end{array}$\tabularnewline \hline $x.1.4$ & $u^{-1}.v^{-1}.4$ & $\begin{array}{c} v\mapsto v\phantom{u}\\ u\mapsto vu \end{array}$ & $\begin{array}{cc} v.v^{-1}, & u.v^{-1}\\ x.v^{-1}, & u.x^{-1}\\ v.x^{-1}, & u.v\\ v.u^{-1} \end{array}$ & & & $u^{-1}.v^{-1}$ & $\begin{array}{c} \text{contained in \ensuremath{x}.1 via}\\ x\mapsto xv \end{array}$\tabularnewline \hline \end{tabular}
\begin{tabular}{|c|c|c|c|c|c|c|c|} \hline $x'$.1 & $v^{-1}.u^{-1}$.1 & $\text{Id}$ & $\begin{array}{cc} t.u^{-1}, & t.v^{-1}\\ v.t^{-1} & u.t^{-1}\\ u.v & x.v^{-1}\\ x.u^{-1} & u.x^{-1}\\ v.x^{-1} & v^{-1}.u^{-1} \end{array}$ & \includegraphics[scale=0.5]{Gx_} & \includegraphics[scale=0.5]{Dx_} & $v^{-1}.u^{-1}$ & $\checkmark$\tabularnewline \hline $x'$.2 & $v^{-1}.u^{-1}$.2 & $\begin{array}{c} v\mapsto sv\\ u\mapsto su \end{array}$ & $\begin{array}{cc} t.s^{-1}, & t.s^{-1}\\ v.t^{-1} & u.t^{-1}\\ u.v & x.s^{-1}\\ x.s^{-1} & u.x^{-1}\\ v.x^{-1} & v^{-1}.u^{-1}\\ s.v^{-1} & s.u^{-1} \end{array}$ & \includegraphics[scale=0.5]{G. x_. 2} & \includegraphics[scale=0.5]{D. x_. 2} & $\emptyset$ & $\begin{array}{c} \text{contained in \ensuremath{x}'.1 via}\\ x\mapsto xs\\ u\mapsto ts \end{array}$\tabularnewline \hline $x'$.3 & $v^{-1}.u^{-1}.3$ & $\begin{array}{c} v\mapsto uv\\ u\mapsto u\phantom{v} \end{array}$ & $\begin{array}{cc} t.u^{-1}, & u.v^{-1}\\ v.t^{-1} & u.t^{-1}\\ u.v & x.u^{-1}\\ v.x^{-1} & u.x^{-1} \end{array}$ & \includegraphics[scale=0.5]{G. x_. 3} & \includegraphics[scale=0.5]{D. x_. 3} & $v^{-1}.u^{-1}$ & $\begin{array}{c} \text{contained in \ensuremath{x}.1 via}\\ x\mapsto xu\\ u\mapsto tu \end{array}$\tabularnewline \hline $x'.4$ & $v^{-1}.u^{-1}.4$ & $\begin{array}{c} v\mapsto v\phantom{u}\\ u\mapsto vu \end{array}$ & $\begin{array}{cc} t.v^{-1}, & v.u^{-1}\\ v.t^{-1}, & u.t^{-1}\\ x.v^{-1}, & u.v\\ x^{-1}.u, & x^{-1}.v \end{array}$ & \includegraphics[scale=0.5]{G. x_. 4} & \includegraphics[scale=0.5]{D. x_. 4} & $v^{-1}.u^{-1}$ & $\begin{array}{c} \text{contained in \ensuremath{x}.1 via}\\ x\mapsto xv\\ v\mapsto tv \end{array}$\tabularnewline \hline \end{tabular}
\begin{tabular}{|c|c|c|c|c|c|c|c|} \hline 3.1.1 & $v^{-1}.u^{-1}.1$ & $\text{Id}$ & $\begin{array}{cc} v.u^{-1}, & u.v^{-1}\\ v.v^{-1} & u.v \end{array}$ & \includegraphics[scale=0.5]{G4} & \includegraphics[scale=0.5]{D3. 1} & $\begin{array}{c} u.u^{-1}\\ v.v^{-1} \end{array}$ & ambiguous\tabularnewline \hline 3.1.1.2 & $v.v^{-1}.2$ & $\begin{array}{c} v\mapsto t^{-1}vt\\ u\mapsto u\phantom{tt^{-1}} \end{array}$ & $\begin{array}{cc} t.u^{-1}, & u.t\\ v.v^{-1}, & v.t^{-1}\\ t^{-1}.v \end{array}$ & \includegraphics[scale=0.5]{G3. 2} & \includegraphics[scale=0.5]{D3. 2} & $u.u^{-1}$ & ambiguous\tabularnewline \hline 3.1.1.2.1 & $u.u^{-1}.1$ & $\text{Id}$ & $\begin{array}{cc} t.u^{-1}, & u.t\\ v.v^{-1}, & v.t^{-1}\\ t^{-1}.v, & u.u^{-1} \end{array}$ & & & $\emptyset$ & $\checkmark$\tabularnewline \hline 3.1.1.2.2 & $u.u^{-1}.2$ & $\begin{array}{c} v\mapsto v\phantom{ss^{-1}}\\ u\mapsto s^{-1}us \end{array}$ & $\begin{array}{cc} t.s, & v.t^{-1}\\ v.v^{-1}, & t^{-1}.v\\ u.s^{-1}, & u^{-1}.s^{-1}\\ u.u^{-1} \end{array}$ & \includegraphics[scale=0.5]{G_3. 2. 2} & \includegraphics[scale=0.5]{D_3. 2. 2} & $\emptyset$ & $\checkmark$\tabularnewline \hline \end{tabular}
\end{document}
\begin{document}
\title{Time-like Salkowski and anti-Salkowski curves in Minkowski space $\hbox{\bf E}_1^3$ } \author{ Ahmad T. Ali\\Mathematics Department\\
Faculty of Science, Al-Azhar University\\
Nasr City, 11448, Cairo, Egypt\\ email: atali71@yahoo.com}
\maketitle \begin{abstract} Salkowski \cite{salkow}, one century ago, introduced a family of curves with constant curvature but non-constant torsion (Salkowski curves) and a family of curves with constant torsion but non-constant curvature (anti-Salkowski curves) in Euclidean 3-space $\hbox{\bf E}^3$. In this paper, we adapt the definition of such curves to time-like curves in Minkowski 3-space $\hbox{\bf E}_1^3$. Thereafter, we introduce an explicit parametrization of time-like Salkowski curves and time-like anti-Salkowski curves in Minkowski space $\hbox{\bf E}_1^3$. Also, we characterize them as space curves with constant curvature or constant torsion whose normal vector makes a constant angle with a fixed line.
\end{abstract}
\emph{MSC:} 53C40, 53C50
\emph{Keywords}: Salkowski curves; constant curvature; Minkowski 3-space.
\section{Introduction}
The Minkowski 3-space $\hbox{\bf E}_1^3$ is the Euclidean 3-Space $\hbox{\bf E}^3$ provided with the standard flat metric given by $$ \langle,\rangle=-dx_1^2+dx_2^2+dx_3^2, $$ where $(x_1,x_2,x_3)$ is a rectangular coordinate system of $\hbox{\bf E}_1^3$. If $u=(u_1,u_2,u_3)$ and $v=(v_1,v_2,v_3)$ are arbitrary vectors in $\hbox{\bf E}_1^3$, we define the (Lorentzian) vector product of $u$ and $v$ as the following: $$
u\times v=\Bigg|\begin{array}{ccc}
i & -j & -k \\
u_1 & u_2 & u_3 \\
v_1 & v_2 & v_3
\end{array}\Bigg|. $$
An arbitrary vector $v\in\hbox{\bf E}_1^3$ is said space-like if $\langle v,v\rangle>0$ or $v=0$, time-like if $\langle v,v\rangle<0$, and light-like (or null) if $\langle v,v\rangle =0$ and $v\neq0$. The norm (length) of a vector $v$ is given by $\parallel v\parallel=\sqrt{|\langle v,v\rangle|}$.
Given a regular (smooth) curve $\alpha:I\subset\hbox{\bb R}\rightarrow\hbox{\bf E}_1^3$, we say that $\alpha$ is space-like (resp. time-like, light-like) if all of its velocity vectors $\alpha'(t)$ are space-like (resp. time-like, light-like). If $\alpha$ is space-like or time-like we say that $\alpha$ is a non-null curve. In such case, there exists a change of the parameter $t$, namely, $s=s(t)$, such that $\parallel\alpha'(s)\parallel=1$. We say then that $\alpha$ is parameterized by the arc-length parameter. If the curve $\alpha$ is light-like, the acceleration vector $\alpha''(t)$ must be space-like for all $t$. Then we change the parameter $t$ by $s=s(t)$ in such way that $\parallel \alpha''(s)\parallel=1$ and we say that $\alpha$ is parameterized by the pseudo arc-length parameter. In any of the above cases, we say that $\alpha$ is a unit speed curve \cite{ali,lopez}.
Given a unit speed curve $\alpha$ in Minkowski space $\hbox{\bf E}_1^3$ it is possible to define a Frenet frame $\{\hbox{\bf T}(s),\hbox{\bf N}(s),\hbox{\bf B}(s)\}$ associated for each point $s$ \cite{kuhn, walr}. Here $\hbox{\bf T}$, $\hbox{\bf N}$ and $\hbox{\bf B}$ are the tangent, normal and binormal vector field, respectively. The geometry of the curve $\alpha$ can be describe by the differentiation of the Frenet frame, which leads to the corresponding Frenet equations. Although different expressions of the Frenet equations appear depending of the causal character of the Frenet trihedron (see the next sections below), we have the concepts of curvature $\kappa$ and torsion $\tau$ of the curve. With this preparatory introduction, we give the following:
Recall that in Euclidean space $\hbox{\bf E}^3$ a general helix is a curve where the tangent lines make a constant angle with a fixed direction. Helices are characterized by the fact that the ratio $\tau/\kappa$ is constant along the curve \cite{doca}. A slant helix is a curve where the normal lines make a constant angle with a fixed direction \cite{izum}. Slant helices are characterized by the fact that the function $\dfrac{\kappa^2}{(\kappa^2+\tau^2)^{3/2}}\big(\dfrac{\tau}{\kappa}\big)$ is constant \cite{kula}. Salkowski (resp. anti-Salkowski) curves in Euclidean space $\hbox{\bf E}^3$ are the first known family of curves with constant curvature (resp. torsion) but non-constant torsion (resp. curvature) with an explicit parametrization \cite{monter, salkow}.
In Minkowski space $\hbox{\bf E}_1^3$, one defines a general helix, slant helix curves in Minkowski space as a similar way. Ferrandez et. al. \cite{ferr} proved that: helices in Minkowski space $\hbox{\bf E}_1^3$ are characterized by the constancy of the function $\tau/\kappa$ again. Ali and Lopez \cite{ali} proved that: slant helices in Minkowski space $\hbox{\bf E}_1^3$ are characterized by the constancy of the function $\dfrac{\kappa^2}{(\varepsilon_1\kappa^2+\varepsilon_2\tau^2)^{3/2}}\big(\dfrac{\tau}{\kappa}\big)$, where $\varepsilon_1, \varepsilon_2\in\{-1,1\}$.
In this paper, we define Salkowski curves and anti-Salkowski curves in Minkowski space $\hbox{\bf E}_1^3$ in a similar way as in Euclidean space $\hbox{\bf E}^3$. Also, we introduce the explicit parametrization of time-like Salkowski curves and time-like anti-Salkowski curves in Minkowski space $\hbox{\bf E}_1^3$ and we study some characterizations of such curves.
\section{Time-like Salkowski curves and some characterizations}
We suppose that $\alpha$ is a time-like curve. Then $\hbox{\bf T}^{\prime}(s)\neq 0$ is a space-like vector independent with $\hbox{\bf T}(s)$. We define the curvature of $\alpha$ at $s$ as $\kappa(s)=|\hbox{\bf T}^{\prime}(s)|$. The normal vector $\hbox{\bf N}(s)$ and the binormal $\hbox{\bf B}(s)$ are defined as $$
\hbox{\bf N}(s)=\dfrac{\hbox{\bf T}^{\prime}(s)}{\kappa(s)}=\dfrac{\psi''}{|\psi''|},\,\,\,\hbox{\bf B}(s)=\hbox{\bf T}(s)\times\hbox{\bf N}(s), $$ where the vector $\hbox{\bf B}(s)$ is unitary and space-like. For each $s$, $\{\hbox{\bf T},\hbox{\bf N},\hbox{\bf B}\}$ is an orthonormal base of $\hbox{\bf E}_1^3$ which is called the Frenet trihedron of $\alpha$. We define the torsion of $\alpha$ at $s$ as: $$ \tau(s)=\langle\hbox{\bf N}'(s),\hbox{\bf B}(s)\rangle. $$ Then the Frenet formula read \begin{equation}\label{u1}
\left[
\begin{array}{c}
\hbox{\bf T}' \\
\hbox{\bf N}' \\
\hbox{\bf B}' \\
\end{array}
\right]=\left[
\begin{array}{ccc}
0 & \kappa & 0 \\
\kappa & 0 & \tau \\
0 & -\tau & 0 \\
\end{array}
\right]\left[
\begin{array}{c}
\hbox{\bf T} \\
\hbox{\bf N} \\
\hbox{\bf B} \\
\end{array}
\right],
\end{equation} where $g(\hbox{\bf T},\hbox{\bf T})=-1, g(\hbox{\bf N},\hbox{\bf N})=g(\hbox{\bf B},\hbox{\bf B})=1, g(\hbox{\bf T},\hbox{\bf N})=g(\hbox{\bf N},\hbox{\bf B})=g(\hbox{\bf B},\hbox{\bf T})=0$.
We introduce the explicit parametrization of a time-like Salkowski curves in Minkowski space $\hbox{\bf E}_1^3$ as the following:
\begin{definition} (Time-like Salkowski curves). \label{df-1} For any $m\in\hbox{\bb R}$ with $m>1$, let us define the space curve \begin{equation}\label{u2} \begin{array}{ll} \gamma_m(t)=\dfrac{n}{4m}\Bigg(&\dfrac{1-n}{1+2n}\cosh[(1+2n)t]-\dfrac{1+n}{1-2n}\cosh[(1-2n)t]+2\cosh[t],\\ &\dfrac{1-n}{1+2n}\sinh[(1+2n)t]-\dfrac{1+n}{1-2n}\sinh[(1-2n)t]+2\sinh[t],\\ &\dfrac{1}{m}\sinh[2nt]\Bigg), \end{array} \end{equation} with $n=\dfrac{m}{\sqrt{m^2-1}}$. \end{definition} We will call this curve a time-like Salkowski curve in Minkowski space $\hbox{\bf E}_1^3$. The geometric elements of the time-like Salkowski curve $\gamma_m$ are the following:
{\bf (1):} $\langle \gamma'_m,\gamma'_m\rangle=-\dfrac{\sinh^2[nt]}{m^2-1}$, so $\|\gamma'_m\|=\dfrac{\sinh[nt]}{\sqrt{m^2-1}}$.
{\bf (2):} The arc-length parameter is $s=\dfrac{\cosh[nt]}{m}$.
{\bf (3):} The curvature $\kappa(t)=1$ and the torsion $\tau(t)=\coth[nt]$.
{\bf (4):} The Frenet's frame is \begin{equation}\label{u3} \begin{array}{ll} \hbox{\bf T}(t)=&\Big(n\cosh[t]\cosh[nt]-\sinh[t]\sinh[nt],\\ &n\sinh[t]\cosh[nt]-\cosh[t]\sinh[nt],\dfrac{n}{m}\cosh[nt]\Big),\\ \hbox{\bf N}(t)=&\dfrac{n}{m}\Big(\cosh[t],\sinh[t],m\Big),\\ \hbox{\bf B}(t)=&\Big(\sinh[t]\cosh[nt]-n\cosh[t]\sinh[nt],\\ &\cosh[t]\cosh[nt]-n\sinh[t]\sinh[nt],-\dfrac{n}{m}\sinh[nt]\Big),\\ \end{array} \end{equation} From the expression of the normal vector, see Eqs. (\ref{u3}), one can see that the normal indicatrix, or nortrix, of a time-like Salkowski curve in Minkowski space $\hbox{\bf E}_1^3$ describes a parallel of the unit sphere. The angle between the normal vector and the vector $(0,0,1)$ is constant and equal to $\phi=\mathrm{arccosh}[n]$. This fact is reminiscent of what happens with another important class of curves, the general helices in Minkowski space $\hbox{\bf E}_1^3$. Such a condition implies that the tangent indicatrix, or tantrix, describes a parallel in the unit sphere.
\begin{lemma} \label{lm-1} Let $\alpha:I\rightarrow E_1^3$ be a time-like curve parameterized by arc-length with $\kappa=1$. Its normal vectors make a constant hyperbolic angle, $\phi$, with a fixed line in space if and only if $\tau(s)=\pm\dfrac{s}{\sqrt{s^2-\tanh^2[\phi]}}$. \end{lemma}
{\bf Proof:} $(\Rightarrow)$ Let $\textbf{d}$ be the unitary space-like fixed direction which makes a constant hyperbolic angle $\phi$ with the normal vector $\hbox{\bf N}$. Therefore \begin{equation}\label{u4} \langle\hbox{\bf N},\textbf{d}\rangle=\cosh[\phi]. \end{equation} Differentiating Eq. (\ref{u4}) and using Frenet's formula, we get \begin{equation}\label{u5} \langle\hbox{\bf T}+\tau\hbox{\bf B},\textbf{d}\rangle=0. \end{equation} Therefore, \begin{equation}\label{u6} \langle\hbox{\bf T},\textbf{d}\rangle=-\tau\langle\hbox{\bf B},\textbf{d}\rangle. \end{equation} If we put $\langle\hbox{\bf B},\textbf{d}\rangle=b$, we can write \begin{equation}\label{u7} \textbf{d}=-\tau\,b\,\hbox{\bf T}+\cosh[\phi]\hbox{\bf N}+b\,\hbox{\bf B}. \end{equation} Since the vector $\textbf{d}$ is unitary, we get $b=\pm\dfrac{\sinh[\phi]}{\sqrt{\tau^2-1}}$. Therefore, the vector $\textbf{d}$ can be written as \begin{equation}\label{u8} \textbf{d}=\mp\dfrac{\tau\,\sinh[\phi]}{\sqrt{\tau^2-1}}\hbox{\bf T}+\cosh[\phi]\hbox{\bf N}\pm\dfrac{\sinh[\phi]}{\sqrt{\tau^2-1}}\hbox{\bf B}. \end{equation} If we differentiate Eq. (\ref{u5}) again, we obtain \begin{equation}\label{u9} \langle\dot{\tau}\hbox{\bf B}+(1-\tau^2)\hbox{\bf N},\textbf{d}\rangle=0. \end{equation} The Eqs. (\ref{u9}) and (\ref{u8}) lead to the differential equation \begin{equation}\label{u10} \pm\tanh[\phi]\dfrac{\dot{\tau}}{(\tau^2-1)^{3/2}}+1=0. \end{equation} By integration we get \begin{equation}\label{u11} \pm\tanh[\phi]\dfrac{\tau}{\sqrt{\tau^2-1}}+s+c=0, \end{equation} where $c$ is an integration constant. The integration constant can be subsumed thanks to a parameter change $s\rightarrow s-c$. Finally, solving (\ref{u11}) with $\tau$ as unknown, we get the desired result.
$(\Leftarrow)$ Suppose that $\tau=\pm\dfrac{s}{\sqrt{s^2-\tanh^2[\phi]}}$ and let us consider the vector \begin{equation}\label{u12} \textbf{d}=\cosh[\phi]\Big(-s\,\hbox{\bf T}+\hbox{\bf N}\mp\sqrt{s^2-\tanh^2[\phi]}\,\hbox{\bf B}\Big). \end{equation} It is easy to prove that the vector $\textbf{d}$ is constant, i.e., $\dot{\textbf{d}}=0$, and that $\langle\textbf{d},\hbox{\bf N}\rangle=\cosh[\phi]$.
Once the intrinsic or natural equations of a curve have been determined, the next step is to integrate Frenet's formula with $\kappa=1$ and \begin{equation}\label{u13} \tau=\pm\dfrac{s}{\sqrt{s^2-\tanh^2[\phi]}}=\pm\dfrac{\dfrac{s}{\tanh[\phi]}}{\sqrt{\Big(\dfrac{s}{\tanh[\phi]}\Big)^2-1}}= \pm\coth\Big[\textmd{arccosh}\big[\dfrac{s}{\tanh[\phi]}\big]\Big]. \end{equation}
\begin{theorem} \label{th-1} The time-like curves in Minkowski space $\hbox{\bf E}_1^3$ with $\kappa=1$ whose normal vectors make a constant angle with a fixed line are, up to rigid movements in space or up to the antipodal map, time-like Salkowski curves (see Definition \ref{df-1}). \end{theorem}
{\bf Proof:} As has been said after Definition \ref{df-1}, the arc-length parameter of time-like Salkowski curves is $s=\int_0^t\|\gamma'_m(u)\|du=\dfrac{1}{m}\cosh[nt]$. Thereafter, $t=\dfrac{1}{n}\textmd{arccosh}[ms]$. In terms of the arc-length, the curvature and torsion are then $$ \kappa(s)=1,\,\,\,\,\,\tau(s)=\coth[\textmd{arccosh}[ms]], $$ the same intrinsic equations, with $m=\coth[\phi]$ and $n=\dfrac{m}{\sqrt{m^2-1}}=\cosh[\phi]$ (compare with the positive case in Eq. (\ref{u13})), as the ones shown in Lemma \ref{lm-1}.
For the negative case in Eq. (\ref{u13}), let us recall that if a curve $\alpha$ has torsion $\tau_{\alpha}$, then the curve $\beta(t)=-\alpha(t)$ has as torsion $\tau_\beta(t)=-\tau_\alpha(t)$, whereas curvature is preserved.
Therefore, the fundamental theorem of curves in Minkowski space states in our situation that, up to rigid movements or up to the antipodal map, $p\rightarrow-p$, the curves we are looking for are time-like Salkowski curves.
\section{Time-like anti-Salkowski curves }
As an additional material we will show in this section how to build, from a curve in Minkowski space $\hbox{\bf E}_1^3$ of constant curvature, another curve of constant torsion.
Let us recall that a curve $\alpha:]a,b[\rightarrow\hbox{\bf E}_1^3$, is 2-regular at a point $t_0$ if $\alpha'(t_0)\neq0$ and if $\kappa_\alpha(t_0)\neq0$.
\begin{lemma} \label{lm-2} Let $\alpha:I\rightarrow E_1^3$ be a regular curve parameterized by arc-length with curvature $\kappa_\alpha$, torsion $\tau_\alpha$ and Frenet's frame $\{\hbox{\bf T}_\alpha,\hbox{\bf N}_\alpha,\hbox{\bf B}_\alpha\}$. Let us consider the curve $\beta(t)=\int_{0}^{t}\hbox{\bf T}_\alpha(u)\|\hbox{\bf B}^{'}_\alpha(u)\|\,du$. Then at a parameter $s_\alpha\in I$ such that $\tau_\alpha(s_\alpha)\neq0$, the curve $\beta$ is 2-regular at $s_\beta$ and $$ \kappa_\beta=\dfrac{\kappa_\alpha}{\tau_\alpha},\,\,\,\tau_\beta=1,\,\,\,\hbox{\bf T}_\beta=\hbox{\bf T}_\alpha,\,\,\, \hbox{\bf N}_\beta=\hbox{\bf N}_\alpha,\,\,\,\hbox{\bf B}_\beta=\hbox{\bf B}_\alpha. $$ \end{lemma}
{\bf Proof:} In order to obtain the tangent vector of $\beta$ let us compute \begin{equation}\label{u14}
\hbox{\bf T}_\beta(s_\beta)=\dot{\beta}(s_\beta)=\dfrac{d\beta}{dt}\dfrac{dt}{ds_\beta}=\hbox{\bf T}_\alpha\|\hbox{\bf B}'_\alpha(t)\|\dfrac{dt}{ds_\beta}. \end{equation} From the above equation, we get \begin{equation}\label{u15}
\dfrac{ds_\beta}{dt}=\|\hbox{\bf B}'_\alpha(t)\|=\Big\|\dfrac{d\hbox{\bf B}_\alpha}{ds_\alpha}\dfrac{ds_\alpha}{dt}\Big\|=\tau_\alpha\dfrac{ds_\alpha}{dt}, \end{equation} and \begin{equation}\label{u151} \hbox{\bf T}_\beta(s_\beta)=\hbox{\bf T}_\alpha(s_\alpha). \end{equation} Differentiating the above equation and using Frenet's Eqs. (\ref{u1}), we obtain \begin{equation}\label{u16} \dot{\hbox{\bf T}}_\beta(s_\beta)=\dfrac{d\hbox{\bf T}_\alpha}{ds_\alpha}\,\dfrac{ds_\alpha}{dt}\,\dfrac{dt}{ds_\beta}. \end{equation} Using Frenet's Eqs. (\ref{u1}) and Eq. (\ref{u15}), the above equation can be written as \begin{equation}\label{u17} \kappa_\beta\,\hbox{\bf N}_\beta(s_\beta)=\dfrac{\kappa_\alpha}{\tau_\alpha}\,\hbox{\bf N}_\alpha(s_\alpha). \end{equation} From the above equation, we get \begin{equation}\label{u18} \kappa_\beta=\dfrac{\kappa_\alpha}{\tau_\alpha}, \end{equation} and \begin{equation}\label{u181} \hbox{\bf N}_\beta(s_\beta)=\hbox{\bf N}_\alpha(s_\alpha). \end{equation} So that, we have \begin{equation}\label{u19} \hbox{\bf B}_\beta(s_\beta)=\hbox{\bf T}_\beta(s_\beta)\times\hbox{\bf N}_\beta(s_\beta)=\hbox{\bf T}_\alpha(s_\alpha)\times\hbox{\bf N}_\alpha(s_\alpha)=\hbox{\bf B}_\alpha(s_\alpha). \end{equation} Differentiating the above equation with respect to $s_\beta$ we get $\tau_\beta=1$.
Let us apply the previous result to the time-like Salkowski curve $\gamma_m$ defined in Eq. (\ref{u2}); we then obtain the explicit parametrization of a time-like anti-Salkowski curve as the following: \begin{equation}\label{u111} \begin{array}{ll} \beta_m(t)=\dfrac{n}{4m}\Bigg(&\dfrac{n-1}{2n+1}\sinh[(1+2n)t]-\dfrac{n+1}{2n-1}\sinh[(1-2n)t]+2n\sinh[t],\\ &\dfrac{n-1}{2n+1}\cosh[(1+2n)t]-\dfrac{n+1}{2n-1}\cosh[(1-2n)t]+2n\cosh[t],\\ &-\dfrac{1}{m}(\sinh[2nt]+2nt)\Bigg), \end{array} \end{equation} where, as for time-like Salkowski curves, $n=\dfrac{m}{\sqrt{m^2-1}}$ and $m>1$. We call these curves time-like anti-Salkowski curves. The presence of the non-trigonometric term $2nt$ in the third component of $\beta_m$ implies that the change of variable studied in Section 2 for time-like Salkowski curves does not work for anti-Salkowski curves.
Applying Lemma \ref{lm-2} we get the following proposition:
\begin{proposition} \label{pr-1} The curves $\beta_m$ in Eq. (\ref{u111}) are curves of constant torsion equal to $1$ and non-constant curvature equal to $\tanh[nt]$. \end{proposition}
Finally, we state here the following lemma:
\begin{lemma} \label{lm-3} Let $\alpha:I\rightarrow E_1^3$ be a regular curve parameterized by arc-length with curvature $\kappa_\alpha$, torsion $\tau_\alpha$ and Frenet's frame $\{\hbox{\bf T}_\alpha,\hbox{\bf N}_\alpha,\hbox{\bf B}_\alpha\}$. Let us consider the curve $\beta(t)=\int_{0}^{t}\hbox{\bf T}_\alpha(u)\|\hbox{\bf T}^{\,\prime}_\alpha(u)\|\,du$. Then at a parameter $s_\alpha\in I$ such that $\kappa_\alpha(s_\alpha)\neq0$, the curve $\beta$ is 2-regular at $s_\beta$ and $$ \kappa_\beta=1,\,\,\,\tau_\beta=\dfrac{\tau_\alpha}{\kappa_\alpha},\,\,\,\hbox{\bf T}_\beta=\hbox{\bf T}_\alpha,\,\,\, \hbox{\bf N}_\beta=\hbox{\bf N}_\alpha,\,\,\,\hbox{\bf B}_\beta=\hbox{\bf B}_\alpha. $$ \end{lemma}
{\bf Proof:} The proof of this Lemma is the same as the proof of Lemma \ref{lm-2}.
\begin{theorem} \label{th-2} The space curves with $\tau=1$ whose normal vectors make a constant angle with a fixed line are the anti-Salkowski curves defined in Eq. (\ref{u111}). \end{theorem}
{\bf Proof:} Let $\alpha$ be a curve with $\tau=1$ and let $\beta(t)=\int_{0}^{t}\hbox{\bf T}_\alpha(u)\|\hbox{\bf T}'_\alpha(u)\|\,du$. By Lemma \ref{lm-3}, $\beta$ is a curve with constant curvature $\kappa=1$, non-constant torsion $\tau=\dfrac{1}{\kappa_\alpha}$ and with the same normal vector. Therefore, $\beta$ is a Salkowski curve and $\alpha$ is an anti-Salkowski curve.
\end{document} |
\begin{document}
\title{Lindel\" of hypothesis and the order of the mean-value of $|\zeta(s)|^{2k-1}$ in the critical strip}
\author{Jan Moser}
\address{Department of Mathematical Analysis and Numerical Mathematics, Comenius University, Mlynska Dolina M105, 842 48 Bratislava, SLOVAKIA}
\email{jan.mozer@fmph.uniba.sk}
\keywords{Riemann zeta-function}
\begin{abstract}
The main subject of this paper is the mean-value of the function $|\zeta(s)|^{2k-1}$ in the critical strip. On Lindel\" of hypothesis we give a solution to this question for some class of disconnected sets. This paper is an English version of our paper \cite{5}. \end{abstract}
\maketitle
\section{Introduction}
\subsection{}
E.C. Titchmarsh began the study of the mean-value of the function \begin{displaymath}
\left|\zeta\left(\sigma+it\right)\right|^\omega,\ \frac 12<\sigma\leq 1,\ 0<\omega , \end{displaymath} where $\omega$ is a non-integer number, \cite{6} (comp. \cite{2}, p. 278). Next, Ingham and Davenport obtained the following result (see \cite{1}, \cite{2}, comp. \cite{7}, pp. 132, 133) \begin{equation} \label{1.1}
\frac 1T \int_1^T \left|\zeta\left(\sigma+it\right)\right|^{2\omega}{\rm d}t= \sum_{n=1}^\infty \frac{d^2_{\omega}(n)}{n^{2\sigma}}+\mathcal{O}(1),\ \omega\in (0,2],\ T\to\infty. \end{equation} Let us remind that: \begin{itemize}
\item[(a)] for $\omega\in\mathbb{N}$ the symbol $d_\omega(n)$ denotes the number of decompositions of $n$ into
$\omega$-factors ,
\item[(b)] in the case $\omega$ is not an integer, we define $d_\omega(n)$ as the coefficient of $n^{-s}$ in the
Dirichlet series for the function $\zeta^\omega(s)$ converging for all $\sigma>1$. \end{itemize}
\subsection{}
Next, for \begin{displaymath} \omega=\frac 12,\frac 32 \end{displaymath} it follows from (\ref{1.1}) that the orders of mean-values \begin{displaymath}
\frac 1T \int_1^T \left|\zeta\left(\sigma+it\right)\right|{\rm d}t,\
\frac 1T \int_1^T \left|\zeta\left(\sigma+it\right)\right|^3{\rm d}t \end{displaymath} are determined. But a question about the order of mean-value of \begin{displaymath}
|\zeta(\sigma+it)|^{2l+1},\ l=2,3,\dots,\ \frac 12<\sigma<1 \end{displaymath} remains open.
In this paper we give a solution to this open question on the assumption of truth of the Lindel\" of hypothesis for some infinite class of disconnected sets. In a particular case we obtain the following result: on Lindel\" of hypothesis we have \begin{equation} \label{1.2} \begin{split}
& 1-|o(1)|<\frac 1H\int_T^{T+H}|\zeta(\sigma+it)|^{2k-1}{\rm d}t < \\
& < \sqrt{F(\sigma,2k-1)}+|o(1)|,\quad H=T^\epsilon,\ k=1,2,\dots,\ 0<\epsilon , \end{split} \end{equation} where \begin{equation} \label{1.3} F(\sigma,\omega)=\sum_{n=1}^\infty \frac{d^2_{\omega}(n)}{n^{2\sigma}}, \end{equation} and $\epsilon$ is an arbitrarily small number.
The proof of our main result is based on our method (see \cite{4}) for the proof of a new mean-value theorem for the Riemann zeta-function \begin{displaymath} Z(t)=e^{i\vartheta(t)}\zf \end{displaymath} with respect to two infinite classes of disconnected sets.
\section{Main formulas}
We use the following formula: on Lindel\" of hypothesis \begin{equation} \label{2.1} \begin{split}
& \zeta^k(s)=\sum_{n\leq t^\delta}\frac{d_k(n)}{n^s}+\mathcal{O}(t^{-\lambda}),\ \lambda=\lambda(k,\delta,\sigma)>0, \\
& s=\sigma+it,\ \frac 12<\sigma<1,\ t>0 \end{split} \end{equation} (see \cite{7}, p. 277) for every natural number $k$, where $\delta$ is any given positive number less than $1$. Let us remind that \begin{equation} \label{2.2} d_k(n)=\mathcal{O}(n^\eta) , \end{equation} where $0<\eta$ is an arbitrarily small number. Of course, (see (\ref{2.1}), (\ref{2.2})) \begin{equation} \label{2.3} \begin{split}
& \zeta^k(s)=\mathcal{O}\left(\sum_{n\leq t^\delta}d_k(n)n^{-\sigma}\right)=
\mathcal{O}\left( t^{\delta\eta+\delta(1-\sigma)}\right)= \\
& = \mathcal{O}\left( t^{(\eta+1/2)\delta}\right). \end{split} \end{equation} Let \begin{equation} \label{2.4} t\in [T,T+H],\ H=T^{\epsilon},\ 2\delta\eta+2\delta<\epsilon. \end{equation} Since \begin{displaymath} \sum_{T^\delta\leq n\leq (T+H)^\delta}1=\mathcal{O}(T^{\delta+\epsilon-1}) \end{displaymath} then \begin{equation} \label{2.5} \begin{split}
& \sum_{T^\delta\leq n\leq t^{\delta}}\frac{d_k(n)}{n^s}=
\mathcal{O}\left( T^{\delta\eta-\delta\sigma}\cdot \sum_{T^\delta\leq n\leq (T+H)^\delta} 1 \right)= \\
& = \mathcal{O}(T^{\delta\eta-\delta\sigma+\delta+\epsilon-1})=\mathcal{O}(T^{-\lambda_1}), \end{split} \end{equation} where \begin{equation} \label{2.6} \lambda_1=1-\delta-\epsilon+\delta\sigma-\delta\eta>0 , \end{equation} (of course, for sufficiently small $\epsilon$ the inequality (\ref{2.6}) holds true). Next, for \begin{equation} \label{2.7} \lambda_2=\lambda_2(k,\delta,\sigma,\epsilon,\eta)=\min \{\lambda,\lambda_1\}>0 \end{equation} the following formula (see (\ref{2.1}), (\ref{2.5}) -- (\ref{2.7})) \begin{equation} \label{2.8} \zeta^k(s)=\sum_{n<T^\delta}\frac{d_k(n)}{n^s}+\mathcal{O}(T^{-\lambda_2}),\ t\in [T,T+H] \end{equation} holds true. Since \begin{equation} \label{2.9} \zeta^k(s)=U_k(\sigma,t)+iV_k(\sigma,t) \end{equation} then - on Lindel\" of hypothesis - we obtain from (\ref{2.8}) the following main formula \begin{equation} \label{2.10} \begin{split}
& U_k(\sigma,t)=1+\sum_{2\leq n < T^\delta}\frac{d_k(n)}{n^\sigma}\cos(t\ln n)+\mathcal{O}(T^{-\lambda_2}), \\
& V_k(\sigma,t)=-\sum_{2\leq n < T^\delta}\frac{d_k(n)}{n^\sigma}\sin(t\ln n)+\mathcal{O}(T^{-\lambda_2}), \\
& t\in [T,T+H]. \end{split} \end{equation}
\section{The first class of lemmas}
Let us denote by \begin{displaymath} \{ t_\nu(\tau)\} \end{displaymath} an infinite set of sequences that we defined (see \cite{4}, (1)) by the condition \begin{equation} \label{3.1} \vartheta[t_\nu(\tau)]=\pi\nu+\tau,\ \nu=1,\dots,\ \tau\in [-\pi,\pi], \end{equation} of course, \begin{displaymath} t_\nu(0)=t_\nu, \end{displaymath} where (see \cite{7}, pp. 220, 329) \begin{equation} \label{3.2} \begin{split}
& \vartheta(t)=-\frac t2\ln\pi+\mbox{Im}\ln\Gamma\left(\frac 12+i\frac t2\right), \\
& \vartheta'(t)=\frac 12\ln\frac{t}{2\pi}+\mathcal{O}\left(\frac 1t\right), \\
& \vartheta''(t)\sim \frac{1}{2t}. \end{split} \end{equation}
\subsection{}
The following lemma holds true.
\begin{mydef51} If \begin{displaymath} 2\leq m,n < T^{\delta} \end{displaymath} then \begin{equation} \label{3.3} \sum_{T\leq t_\nu\leq T+H}\cos\{ t_\nu(\tau)\ln n\}=\mathcal{O}\left(\frac{\ln T}{\ln n}\right), \end{equation} \begin{equation} \label{3.4} \sum_{T\leq t_\nu\leq T+H}\cos\{ t_\nu(\tau)\ln (mn)\}=\mathcal{O}\left(\frac{\ln T}{\ln (mn)}\right), \end{equation} \begin{equation} \label{3.5} \sum_{T\leq t_\nu\leq T+H}\cos\left\{ t_\nu(\tau)\ln \frac mn\right\}= \mathcal{O}\left(\frac{\ln T}{\ln \frac mn}\right), \ m>n, \end{equation} where the $\mathcal{O}$-estimates are valid uniformly for $\tau\in [-\pi,\pi]$. \end{mydef51}
\begin{proof} We use the van der Corput's method. Let (see (\ref{3.3})) \begin{displaymath} \varphi_1(\nu)=\frac{1}{2\pi}t_\nu(\tau)\ln n. \end{displaymath} Next, (see (\ref{2.4}), (\ref{3.1}), (\ref{3.2})) \begin{displaymath} \begin{split}
& \varphi_1'(\nu)=\frac{\ln n}{2\vartheta'[t_\nu(\tau)]}, \\
& \varphi_1''(\nu)=-\frac{\pi\ln n}{2\{ \vartheta'[t_\nu(\tau)]\}^2}\vartheta''\{ t_\nu(\tau)\}<0, \\
& 0< A\frac{\ln n}{\ln T}\leq \varphi_1'(\nu)=
\frac{\ln n}{\ln\frac{t_\nu(\tau)}{2\pi}+\mathcal{O}(\frac 1t)}=\frac{\ln n}{\ln\frac{T}{2\pi}+\mathcal{O}(\frac HT)}< \\
& < \delta\frac{\ln T}{\ln\frac{T}{2\pi}+\mathcal{O}(\frac HT)}<\frac 14, \end{split} \end{displaymath} ($A>0$, since $\delta$ may be sufficiently small). Hence, (see \cite{7}, p. 65 and p. 61, Lemma 4.2) \begin{displaymath} \begin{split}
& \sum_{T\leq t_\nu\leq T+H}\cos\{ t_\nu(\tau)\ln n\}= \\
& = \int_{T\leq t_x\leq T+H} \cos\{ 2\pi\varphi_1(x)\}{\rm d}x+\mathcal{O}(1)=
\mathcal{O}\left(\frac{\ln T}{\ln n}\right), \end{split} \end{displaymath} i.e. the estimate (\ref{3.3}) holds true. The estimates (\ref{3.4}) and (\ref{3.5}) follow by the similar way. \end{proof}
\subsection{}
The following lemma holds true.
\begin{mydef52} On Lindel\" of hypothesis we have \begin{equation} \label{3.6} \sum_{T\leq t_\nu\leq T+H} U_k[\sigma,t_\nu(\tau)]=\frac{1}{2\pi}H\ln\frac{T}{2\pi}+\mathcal{O}(H). \end{equation} \end{mydef52}
\begin{proof} Let us remind that \begin{equation} \label{3.7} \sum_{T\leq t_\nu\leq T+H} 1=\frac{1}{2\pi}H\ln\frac{T}{2\pi}+\mathcal{O}(1), \end{equation} (see \cite{3}, (23)). Next, (see (\ref{2.10}), (\ref{3.7})) \begin{equation} \label{3.8} \begin{split}
& \sum_{T\leq t_\nu\leq T+H} U_k[\sigma,t_\nu(\tau)]=\frac{1}{2\pi}H\ln\frac{T}{2\pi}+\mathcal{O}(1)+\\
& + \mathcal{O}(T^{-\lambda_2}H\ln T)+\sum_{2\leq n<T^\delta}\frac{d_k(n)}{n^\sigma}\cdot
\sum_{T\leq t_\nu\leq T+H}\cos\{ t_\nu(\tau)\ln n\}= \\
& = \frac{1}{2\pi}H\ln\frac{T}{2\pi}+\mathcal{O}(1)+\mathcal{O}(T^{-\lambda_2}H\ln T)+w_1. \end{split} \end{equation} Since (see (\ref{2.2}), (\ref{2.4}), (\ref{3.3})) \begin{displaymath} \begin{split}
& w_1=\mathcal{O}\left(T^{\delta\eta}\ln T\sum_{2\leq n\leq T^\delta}\frac{1}{\sqrt{n}\ln n}\right)= \\
& = \mathcal{O}\left\{ T^{\delta\eta}\ln T\left( \sum_{2\leq n<T^{\delta/2}} \ + \
\sum_{T^{\delta/2}\leq n<T^\delta}\right)\frac{1}{\sqrt{n}\ln n}\right\}= \\
& = \mathcal{O}(T^{\delta\eta+\delta/2})=\mathcal{O}(H), \end{split} \end{displaymath} then from (\ref{3.8}) the formula (\ref{3.6}) follows. \end{proof}
\section{Theorem 1}
\subsection{}
Next, we define the following class of disconnected sets (comp. \cite{4}, (3)): \begin{equation} \label{4.1} G(x)=\bigcup_{T\leq t_\nu\leq T+H}\{ t:\ t_\nu(-x)<t<t_\nu(x)\},\ 0<x\leq \frac{\pi}{2}. \end{equation} Let us remind that (see \cite{4}, (7)) \begin{equation} \label{4.2} \begin{split}
& t_\nu(x)-t_\nu(-x)=\frac{4x}{\ln\frac{T}{2\pi}}+\mathcal{O}\left(\frac{xH}{T\ln^2T}\right), \\
& t_\nu(-x),t_\nu(x)\in [T,T+H]. \end{split} \end{equation} Of course, \begin{equation} \label{4.3} m\{ G(x)\}=\frac{2x}{\pi}H+\mathcal{O}(x), \end{equation} (see (\ref{3.7}), (\ref{4.2})), where $m\{ G(x)\}$ stands for the measure of $G(x)$.
\subsection{}
The following theorem holds true.
\begin{mydef11} On Lindel\" of hypothesis \begin{equation} \label{4.4} \int_{G(x)} U_k(\sigma,t){\rm d}t=\frac{2x}{\pi}H+o\left(\frac{xH}{\ln T}\right). \end{equation} \end{mydef11}
First of all, we obtain from (\ref{4.4}) by (\ref{4.3}) the following
\begin{mydef41} On Lindel\" of hypothesis \begin{equation} \label{4.5} \frac{1}{m\{ G(x)\}}\int_{G(x)} U_k(\sigma,t){\rm d}t=1+o\left(\frac{1}{\ln T}\right). \end{equation} \end{mydef41}
Next, we obtain from (\ref{4.4}) the following
\begin{mydef42} On Lindel\" of hypothesis \begin{equation} \label{4.6}
\int_{G(x)}|U_k(\sigma,t)|{\rm d}t\geq \frac{2xH}{\pi}\{ 1-|o(1)|\}. \end{equation} \end{mydef42}
Since (see (\ref{2.9})) \begin{displaymath}
|\zeta(s)|^{2k-1}=\sqrt{U^2_{2k-1}+V^2_{2k-1}}\geq |U_{2k-1}| \end{displaymath} then we obtain from (\ref{4.6}), $k\longrightarrow 2k-1$, the following
\begin{mydef43} On Lindel\" of hypothesis \begin{equation} \label{4.7}
\int_{G(x)}|\zeta(\sigma+it)|^{2k-1}{\rm d}t\geq \frac{2xH}{\pi}\{ 1-|o(1)|\}. \end{equation} \end{mydef43}
\subsection{}
In this part we shall give the
\begin{proof} of the Theorem 1. Since (see (\ref{3.1}), (\ref{3.2})) \begin{displaymath} \begin{split} & \left(\frac{{\rm d}t_\nu(\tau)}{{\rm d}\tau}\right)^{-1}=\ln P_0+\mathcal{O}\left(\frac HT\right),\\ & t_\nu(\tau)\in [T,T+H],\ P_0=\sqrt{\frac{T}{2\pi}}, \end{split} \end{displaymath} then we obtain by using of the substitution \begin{displaymath} t=t_\nu(\tau) , \end{displaymath} and by estimates (\ref{2.3}) that \begin{displaymath} \begin{split}
& \int_{-\pi}^\pi U_k[\sigma,t_\nu(\tau)]{\rm d}\tau=\int_{-\pi}^\pi U_k[\sigma,t_\nu(\tau)]
\left(\frac{{\rm d}t_\nu(\tau)}{{\rm d}\tau}\right)^{-1}\cdot \frac{{\rm d}t_\nu(\tau)}{{\rm d}\tau}
{\rm d}\tau= \\
& = \ln P_0\int_{-\pi}^\pi U_k[\sigma,t_\nu(\tau)]\frac{{\rm d}t_\nu(\tau)}{{\rm d}\tau}{\rm d}\tau+ \\
& + \mathcal{O}\left( x\max\{|\zeta^k|\}\frac HT\max\left\{\frac{{\rm d}t_\nu(\tau)}{{\rm d}\tau}\right\}\right)= \\
& = \ln P_0\int_{t_\nu(-x)}^{t_\nu(x)}U_k(\sigma,t){\rm d}t+
\mathcal{O}\left( x\frac{T^{\delta\eta+\delta/2+\epsilon-1}}{\ln T}\right), \end{split} \end{displaymath} where the $\max$ is taken with respect to the segment $[T,T+H]$. Consequently, (see (\ref{2.3}), (\ref{3.7}), (\ref{4.1}) and (\ref{4.2})) \begin{displaymath} \begin{split}
& \sum_{T\leq t_\nu\leq T+H}\int_{-\pi}^\pi U_k[\sigma,t_\nu(\tau)]{\rm d}\tau= \\
& = \ln P_0 \int_{G(x)}U_k(\sigma,t){\rm d}t+\mathcal{O}(xT^{\delta\eta+\delta/2+2\epsilon-1})+ \\
& + \mathcal{O}\left(\frac{xT^{(\eta+1/2)\delta}}{\ln T}\right). \end{split} \end{displaymath} Now, the integration (\ref{3.6}) by \begin{displaymath} \tau\in [-\pi,\pi] \end{displaymath} gives the formula \begin{displaymath} \begin{split}
& \ln P_0\int_{G(x)} U_k(\sigma,t){\rm d}t+\mathcal{O}(xT^{\delta\eta+\delta/2+2\epsilon-1})= \\
& = \frac x\pi H\ln\frac{T}{2\pi}+\mathcal{O}(xT^{\delta\eta+\delta/2}) \end{split} \end{displaymath} and from this by (\ref{2.4}) the formula (\ref{4.4}) follows immediately (here $\epsilon$ is arbitrarily small number). \end{proof}
\section{The second class of lemmas}
\subsection{}
Let \begin{equation} \label{5.1} \begin{split}
& S_1(t)=\sum_{2\leq n\leq T^\delta}\frac{d_k(n)}{n^\sigma}\cos(t\ln n), \\
& w_2(t)=\{ S_1(t)\}^2. \end{split} \end{equation} The following lemma holds true.
\begin{mydef53} \begin{equation} \label{5.2} \begin{split}
& \sum_{T\leq t_\nu\leq T+H} w_2[t_\nu(\tau)]= \\
& = \{ F(\sigma,k)-1\}\cdot \frac{1}{4\pi} H\ln\frac{T}{2\pi}+o(H), \end{split} \end{equation} (on $F(\sigma,k)$ see (\ref{1.3})). \end{mydef53}
\begin{proof} First of all we have \begin{equation} \label{5.3} \begin{split}
& w_2(t)=\sum_m\sum_n \frac{d_k(m)d_k(n)}{(mn)^\sigma}\cos(t\ln m)\cos(t\ln n)= \\
& = \frac 12\sum_m\sum_n\frac{d_k(m)d_k(n)}{(mn)^\sigma}\cos\{t\ln(mn)\}+\\
& + \ssum_{n<m}\frac{d_k(m)d_k(n)}{(mn)^\sigma}\cos\left(t\ln\frac mn\right)+
\frac 12\sum_n\frac{d_k^2(n)}{n^{2\sigma}}= \\
& = w_{21}(t)+w_{22}(t)+w_{23}(t). \end{split} \end{equation} Now we have:
by (\ref{2.2}), (\ref{2.4}) and (\ref{3.4}) \begin{equation} \label{5.4} \begin{split}
& \sum_{T\leq t_\nu\leq T+H}w_{21}[t_\nu(\tau)]=
\mathcal{O}\left( T^{\delta\eta}\ln T\cdot \ssum_{2\leq m,n<T^\delta}\frac{1}{\sqrt{mn}\ln(mn)}\right)= \\
& = \mathcal{O}(T^{2\delta\eta+\delta}\ln T)=o(H); \end{split} \end{equation} by (\ref{2.2}), (\ref{2.4}) and (\ref{3.5}) and by \cite{7}, p. 116, Lemma, ($T\longrightarrow T^\delta$), \begin{equation} \label{5.5} \begin{split}
& \sum_{T\leq t_\nu\leq T+H}w_{22}[t_\nu(\tau)]=
\mathcal{O}\left( T^{\delta\eta}\ln T\cdot \ssum_{2\leq n<m<T^\delta}\frac{1}{\sqrt{mn}\ln\frac mn}\right)= \\
& = \mathcal{O}(T^{2\delta\eta+\delta}\ln^2T)=o(H); \end{split} \end{equation} by (\ref{1.3}), $\omega\longrightarrow k$, and by (\ref{2.2}) \begin{equation}\label{5.6} \begin{split}
& w_{23}=\frac 12\sum_{n=2}^\infty \frac{d_k^2(n)}{n^{2\sigma}}-\frac 12\sum_{n\geq T^{\delta}}
\frac{d_k^2(n)}{n^{2\sigma}}= \\
& = \frac 12\{ F(\sigma,k)-1\}+\mathcal{O}\left(\int_{T^\delta}^\infty x^{\eta-2\sigma}{\rm d}x\right)= \\
& = \frac 12\{ F(\sigma,k)-1\}+\mathcal{O}(T^{-\delta(2\sigma-1-\eta)}); \end{split} \end{equation} (of course, $2\sigma-1-\eta>0$ since $\eta$ is arbitrarily small). Finally, by (\ref{2.4}), (\ref{3.7}) and (\ref{5.6}) we obtain \begin{equation} \label{5.7} \sum_{T\leq t_\nu\leq T+H}w_{23}=\{ F(\sigma,k)-1\}\frac{1}{4\pi}H\ln\frac{T}{2\pi}+o(H). \end{equation} Hence, from (\ref{5.3}) by (\ref{5.4}) -- (\ref{5.7}) the formula (\ref{5.2}) follows. \end{proof}
Next, the following lemma holds true.
\begin{mydef54} On Lindel\" of hypothesis \begin{equation} \label{5.8} \sum_{T\leq t_\nu\leq T+H}U_k^2[\sigma,t_\nu(\tau)]=\{ F(\sigma,k)+1\}\frac{1}{4\pi}H\ln\frac{T}{2\pi}+ o(H). \end{equation} \end{mydef54}
\begin{proof} Since (see (\ref{2.10}), (\ref{5.1})) \begin{displaymath} U_k(\sigma,t)=1+S_1+\mathcal{O}(T^{-\lambda_2}) , \end{displaymath} then \begin{equation} \label{5.9}
U_k^2(\sigma,t)=1+w_2+2S_1+\mathcal{O}(|S_1|T^{-\lambda_2})+\mathcal{O}(T^{-2\lambda_2}). \end{equation} Now we have:
by (\ref{2.4}) \begin{displaymath} \sum_{T\leq t_\nu\leq T+H}S_1[t_\nu(\tau)]=\mathcal{O}(T^{\delta\eta+\delta/2})=o(H); \end{displaymath} by(\ref{2.4}), (\ref{3.7}) and (\ref{5.2}) \begin{displaymath} \begin{split}
& \sum_{T\leq t_\nu\leq T+H}|S_1|T^{-\lambda_2}= \\
& = \mathcal{O}\left\{ T^{-\lambda_2}\sqrt{H\ln T}
\left(\sum_{T\leq t_\nu\leq T+H}w_2[t_\nu(\tau)]\right)^{1/2}\right\}=\\
& = \mathcal{O}(T^{-\lambda_2}H\ln T)=o(H). \end{split} \end{displaymath} Consequently, from (\ref{5.9}) by (\ref{3.7}) the formula (\ref{5.8}) follows. \end{proof}
\subsection{}
Let \begin{equation} \label{5.10} \begin{split} & S_2(t)=\sum_{2\leq n<T^\delta}\frac{d_k(n)}{n^\sigma}\sin(t\ln n), \\ & w_3(t)=\{ S_2(t)\}^2. \end{split} \end{equation} The following lemma holds true.
\begin{mydef55} \begin{equation} \label{5.11} \sum_{T\leq t_\nu\leq T+H} w_3[t_\nu(\tau)]=\{ F(\sigma,k)-1\}\frac{1}{4\pi}H\ln\frac{T}{2\pi}+o(H). \end{equation} \end{mydef55}
\begin{proof} Since (comp. (\ref{5.3})) \begin{equation} \label{5.12} \begin{split}
& w_3(t)=\ssum_{m,n}\frac{d_k(m)d_k(n)}{(mn)^\sigma}\sin(t\ln m)\sin(t\ln n)= \\
& = -\frac 12\ssum_{m,n}\frac{d_k(m)d_k(n)}{(mn)^\sigma}\cos\{ t\ln(mn)\}+ \\
& + \ssum_{n<m} \frac{d_k(m)d_k(n)}{(mn)^\sigma}\cos\left( t\ln\frac mn\right)+ \\
& + \frac 12 \sum_n \frac{d_k^2(n)}{n^{2\sigma}}=w_{31}(t)+w_{32}(t)+w_{33}(t), \end{split} \end{equation} then we obtain by the way (\ref{5.3}) -- (\ref{5.7}) our formula (\ref{5.11}). \end{proof}
Next, the following lemma holds true
\begin{mydef56} On Lindel\" of hypothesis \begin{equation} \label{5.13} \begin{split} & \sum_{T\leq t_\nu\leq T+H} V_k^2[\sigma,t_\nu(\tau)]= \\ & = \{ F(\sigma,k)-1\}\frac{1}{4\pi}H\ln\frac{T}{2\pi}+o(H). \end{split} \end{equation} \end{mydef56}
\begin{proof} Since (see (\ref{2.10}), (\ref{5.10})) \begin{displaymath} V_k(\sigma,t)=-S_2+\mathcal{O}(T^{-\lambda_2}), \end{displaymath} then \begin{equation} \label{5.14}
V_k^2(\sigma,t)=w_3+\mathcal{O}(T^{-\lambda_2}|S_2|)+\mathcal{O}(T^{-2\lambda_2}). \end{equation} Consequently, the proof may be finished in the same way as it was done in the case of our Lemma 4 (comp. (\ref{5.12}), (\ref{5.14})). \end{proof}
Since (see (\ref{2.9})) \begin{displaymath}
|\zeta(s)|^{2k}=U_k^2+V_k^2 , \end{displaymath} then by (\ref{5.8}), (\ref{5.11}) we obtain the following.
\begin{mydef57} On Lindel\" of hypothesis \begin{equation} \label{5.15}
\sum_{T\leq t_\nu\leq T+H} |\zeta[\sigma+it_\nu(\tau)]|^{2k}=\frac{1}{2\pi}F(\sigma,k)H\ln\frac{T}{2\pi}+o(H). \end{equation} \end{mydef57}
\section{Theorem 2 and main Theorem}
Now we obtain from (\ref{5.15}), in a way very similar to the one used in the proof of Theorem 1, the following.
\begin{mydef12} On Lindel\" of hypothesis \begin{equation} \label{6.1}
\int_{G(x)}|\zeta(\sigma+it)|^{2k}{\rm d}t=\frac{2x}{\pi}F(\sigma,k)H+o\left(\frac{xH}{\ln T}\right). \end{equation} \end{mydef12}
Further, from (\ref{6.1}) we obtain
\begin{mydef44} On Lindel\" of hypothesis \begin{equation} \label{6.2}
\int_{G(x)}|\zeta(\sigma+it)|^{2k-1}{\rm d}t<\frac{2xH}{\pi}\sqrt{F(\sigma,2k-1)}\cdot \{1+|o(1)|\}. \end{equation} \end{mydef44}
Indeed, by (\ref{4.3}), (\ref{6.1}) we have \begin{displaymath} \begin{split}
& \int_{G(x)}|\zeta(\sigma+it)|^{2k-1}{\rm d}t< \\
& < \sqrt{m\{ G(x)\}}\left( \int_{G(x)}|\zeta(\sigma+it)|^{4k-2}{\rm d}t\right)^{1/2}< \\
& < \frac{2xH}{\pi}\sqrt{F(\sigma,2k-1)}\cdot\{ 1+|o(1)|\}. \end{split} \end{displaymath}
Finally, from (\ref{4.7}), (\ref{6.2}) we obtain our main result:
\begin{mydef1} On Lindel\" of hypothesis \begin{equation} \label{6.3} \begin{split}
& 1-|o(1)|<\frac{1}{m\{ G(x)\}}\int_{G(x)}|\zeta(\sigma+it)|^{2k-1}{\rm d}t< \\
& < \sqrt{F(\sigma,2k-1)}+|o(1)|. \end{split} \end{equation} \end{mydef1}
\begin{remark} The question about the order of the mean-value of the function \begin{displaymath}
|\zeta(\sigma+it)|^{2k-1},\ k=1,2,\dots \end{displaymath} defined on the infinite class of disconnected sets $\{ G(x)\}$ is answered by the inequalities (\ref{6.3}). \end{remark}
\begin{remark} Inequalities (\ref{1.2}) follow from (\ref{6.3}) as a special case for $x=\pi/2$, (see (\ref{2.3}), (\ref{2.4}), (\ref{4.3})). \end{remark}
\thanks{I would like to thank Michal Demetrian for helping me with the electronic version of this work.}
\end{document} |
\begin{document}
\title{Supplementary Information: ``Simulating Molecular Spectroscopy with Circuit Quantum Electrodynamics''} \author{L. H$^*$} \author{Y. C. Ma$^*$} \author{Y. Xu} \author{W. Wang} \author{Y. Ma} \author{K. Liu} \affiliation{Center for Quantum Information, Institute for Interdisciplinary Information Sciences, Tsinghua University, Beijing 100084, China} \author{M.-H. Yung$^\dagger$} \email{yung@sustc.edu.cn} \affiliation{Institute for Quantum Science and Engineering and Department of Physics, South University of Science and Technology of China, Shenzhen 518055, China} \author{L.~Sun$^\dagger$} \email{luyansun@tsinghua.edu.cn} \affiliation{Center for Quantum Information, Institute for Interdisciplinary Information Sciences, Tsinghua University, Beijing 100084, China}
\maketitle
\tableofcontents
{\section{Key ideas of the molecular spectroscopy simulation}} The main idea of this work is related to the problem of simulating the absorption spectrum of molecules associated with the Huang-Rhys factor $D$. The molecular spectrum can be obtained by applying the Fourier transformation on the temporal correlation function of an electronic transition dipole operator. Therefore the key part of the experiment involves a simulation of the correlation function through a quantum circuit.
As shown in Fig.~1b in the main text, the core of the quantum circuit consists of three components:
\begin{enumerate} \item Hadamard gate, \begin{equation}
H \equiv \frac{1}{\sqrt{2}}(\ket{g}+\ket{e})\bra{g}+\frac{1}{\sqrt{2}}(\ket{g}-\ket{e})\bra{e}, \end{equation} applied to the ancilla qubit at the beginning of our quantum circuit. Here $\ket{g}$ represents the ground state and $\ket{e}$ the excited state of the qubit.
\item A controlled unitary operation \begin{equation} U_{\text{ctrl}}=\ket{g}\bra{g}\otimes U+\ket{e}\bra{e}\otimes I, \end{equation} where $U\equiv e^{iH_gt/\hbar}e^{-iH_et/\hbar}$ is the ``Forward and Reversal'' time-evolution operator and $I$ is the identity operator.
\item $\sigma_y$ and $\sigma_x$ measurements through $\pi/2$ rotations along the $X$ and $Y$ axes: \begin{eqnarray}\label{eq:xy_rot}
R_X &\equiv& \frac{1}{\sqrt{2}}(\ket{g}-i\ket{e})\bra{g}+\frac{1}{\sqrt{2}}(\ket{e}-i\ket{g})\bra{e},\\
R_Y &\equiv& \frac{1}{\sqrt{2}}(\ket{g}+\ket{e})\bra{g}+\frac{1}{\sqrt{2}}(\ket{e}-\ket{g})\bra{e}. \end{eqnarray}
Similar to DQC1, we get the real and imaginary parts of the correlation function ${C_{\mu \mu }}(t) = {\left\langle \psi \right|U(t)\left| \psi \right\rangle} = {\left\langle \sigma_x\right\rangle} + i{\left\langle \sigma_y\right\rangle}$, where $\ket{\psi}$ is the phonon initial state. Then the absorption spectrum of the molecule can be obtained by a Fourier transformation of $C_{\mu\mu}(t)$. \end{enumerate}
\section{Temporal Correlation function} Under the Condon approximation, the dipole operator is given by, \begin{equation} \mu = \ket{e}\bra{g} + \ket{g}\bra{e} = \sigma_x \ , \end{equation} where $\ket{g}$ and $\ket{e}$ represent the ground and excited electronic wave function. Given a molecular Hamiltonian, \begin{equation} H = H_g \ket{g} \bra{g} + H_e \ket{e} \bra{e} \ , \end{equation} the time-correlation function $C_{\mu\mu}(t)$ for the dipole operator is given by \begin{equation} C_{\mu\mu}(t) \equiv {\rm Tr}(\bra{g}\sigma_x(t)\sigma_x(t=0)\ket{g}) \ , \end{equation}
where ${\sigma _x}\left( t \right) = {e^{iHt/\hbar }}{\sigma _x}{e^{ - iHt/\hbar }}$, and ${\sigma _x}\left( {t = 0} \right) = {\sigma _x}$. Since ${\sigma _x}\left| g \right\rangle = \left| e \right\rangle $, we can write \begin{eqnarray} C_{\mu\mu}(t) &=& {\rm Tr}(\bra{g}e^{iH_g/\hbar\ket{g}\bra{g}t}\sigma_xe^{-iH_e/\hbar\ket{e}\bra{e}t}\ket{e}) \nonumber \\ &=& {\rm Tr}(e^{iH_gt/\hbar}e^{-iH_et/\hbar}). \end{eqnarray} Here ${{\rm Tr}}(\cdot)$ represents the expectation of the operator in the nuclear space. For simplicity, ${\rm Tr}(\cdot)$ is replaced by $\braket{\cdot}$ hereafter.
\section{Harmonic approximation} In terms of the creation and annihilation operators, \begin{equation} p = i\sqrt{m\hbar\omega_0/2}(a^{\dagger}-a) \quad {\rm and} \quad q = \sqrt{\hbar/2m\omega_0}(a+a^{\dagger}) \ . \end{equation} The Hamiltonians of the ground and excited electronic states are given by \begin{eqnarray} H_g &=& \hbar\omega_0 a^{\dagger}a \ , \\ H_e &=& \hbar\omega_{eg} + \hbar\omega_0 a^{\dagger}a + \tilde{d}(a+a^{\dagger}) + \hbar\omega_0\tilde{d}^2 \nonumber \\ &=& \hbar\omega_{eg} + \hbar\omega_0 {\cal D}(-\tilde{d})a^{\dagger}a{\cal D}(\tilde{d}) \ . \end{eqnarray} Here ${\cal D}(\tilde{d}) = e^{\tilde{d}a^{\dagger}-\tilde{d}^{\star}a}$ is the displacement operator for a single mode. The Huang-Rhys parameter $D$ is defined as $D=\tilde d^2$.
With ${\cal D}(-\tilde{d})a{\cal D}(\tilde{d}) = a + \tilde{d}$, we have \begin{eqnarray} C_{\mu\mu}(t) &=& e^{-i\omega_{eg}t}\braket{e^{i\omega_0 a^{\dagger}at}{\cal D}(-\tilde{d})e^{-i\omega_0 a^{\dagger}at}{\cal D}(\tilde{d})}\nonumber\\ &=& e^{-i\omega_{eg}t}\braket{e^{\tilde{d}(a^{\dagger}e^{i\omega_0 t}-ae^{-i\omega_0 t})}{\cal D}(\tilde{d})} \nonumber \\ &=& e^{-i\omega_{eg}t}\braket{{\cal D}(\tilde{d}e^{i\omega_0 t}){\cal D}(-\tilde{d})} \nonumber \\ &=& e^{-i\omega_{eg}t-i\tilde{d}^2\sin\omega_0 t}\braket{{\cal D}(\tilde{d}(e^{i\omega_0 t}-1))} \ . \end{eqnarray}
\section{Correlation function of Fock states} The states in the nuclear space can be written as linear superpositions of $\ket{n},n=0,1,2,\cdots$. Note that \begin{eqnarray*} e^{-\alpha^* a}\ket{n} = \ket{n}+\sqrt{n}(-\alpha^*)\ket{n-1} + \sqrt{n(n-1)}(\alpha^*)^2/2!\ket{n-2} + \cdots + \sqrt{n!}(-\alpha^*)^n/n!\ket{0}. \end{eqnarray*} Therefore, for Fock states, if we define $\alpha=\tilde{d}(e^{i\omega_0 t}-1)$, we have \begin{eqnarray} C_{\mu\mu}(t) &=& e^{-i\omega_{eg}t-i\tilde{d}^2\sin\omega_0 t}\bra{n}{\cal D}(\alpha)\ket{n} \nonumber \\
&=& e^{-i\omega_{eg}t-i\tilde{d}^2\sin\omega_0 t}\bra{n}e^{-\frac{|\alpha|^2}{2}}e^{\alpha a^{\dagger}}e^{-\alpha^* a}\ket{n} \nonumber \\
&=&e^{-i\omega_{eg}t-i\tilde{d}^2\sin\omega_0 t} e^{-\frac{|\alpha|^2}{2}}\sum_{j=0}^{n}(-1)^j\frac{n(n-1)\cdots(n-j+1)}{(j!)^2}|\alpha|^{2j} \end{eqnarray} For $\ket{n} = \ket{1}$, \begin{eqnarray} C_{\mu\mu}(t) = e^{-i\omega_{eg}t}e^{\tilde{d}^2(e^{-i\omega_0 t}-1)}(1-4\tilde{d}^2\sin^2(\omega_0 t/2)). \end{eqnarray} For $\ket{n} = \ket{0}$ (the equilibrium state at zero temperature), \begin{eqnarray} C_{\mu\mu}(t) = e^{-i\omega_{eg}t}e^{\tilde{d}^2(e^{-i\omega_0 t}-1)}. \end{eqnarray}
\section{Evolution Process in experiment}\label{sec:exp}
Our quantum simulator simulates the absorption spectra of a molecule in which electronic states (initialized at the ground state $\ket{g}$) coupled with a single nuclear mode (denoted by $\ket{\psi}$). In the controlled unitary gate $U_{\text{ctrl}} = \left| g \right\rangle \left\langle g \right| \otimes U + \left| e \right\rangle \left\langle e \right| \otimes I$, we have \begin{eqnarray} U\equiv e^{iH_gt/\hbar}e^{-iH_et/\hbar}=e^{-i\omega_{eg}t-i\tilde{d}^2\sin\omega_0 t}{\cal D}(\tilde{d}(e^{i\omega_0 t}-1)). \end{eqnarray}
The standard state evolution steps of our quantum simulator are \begin{eqnarray*} \ket{g}\ket{\psi} &\xrightarrow{\text{Hadamard}}& \frac{1}{\sqrt{2}} (\ket{e}\ket{\psi}+ \ket{g}\ket{\psi})\\ &\xrightarrow{U_{\text{ctrl}} \text{ Gate}}& \frac{1}{\sqrt{2}}(\ket{e}\ket{\psi}+ e^{-i\omega_{eg}t-i\tilde{d}^2\sin \omega_0 t}\ket{g}{\cal D} (\tilde{d} (e^{i\omega_0 t} - 1))\ket{\psi}) \ . \end{eqnarray*} By measuring the expectation of Pauli operators $\sigma_x$ and $\sigma_y$, we get \begin{eqnarray} \braket{\sigma_x} &=& Re(e^{- i \omega_{eg} t - i \tilde{d}^2 \sin
\omega_0 t} {\cal D} (\tilde{d} (e^{i \omega_0 t} - 1))) \ ,\\ \braket{\sigma_y} &=& Im(e^{- i \omega_{eg} t - i \tilde{d}^2 \sin
\omega_0 t} {\cal D} (\tilde{d} (e^{i \omega_0 t} - 1))) \ . \end{eqnarray}
In order to simulate molecular spectra with the phonon mode initialized in a thermal state, $\rho \equiv {e^{ - {\hbar\omega _0}{a^\dag }a/kT}}/{\text{Tr}}({e^{ - {\hbar\omega _0}{a^\dag }a/kT}})$, it is not practical to increase the physical temperature, as the performance of the experimental system would decrease significantly. To overcome this challenge, we can modify the above procedure at the first step: instead of an equal superposition {(after a Hadamard gate)}, the qubit is initialized to $e^{ - i\phi (t)}\sin{\frac{\gamma(t)}{2}}\ket{g}+ \cos{\frac{\gamma(t)}{2}}\ket{e}$, where $\phi \left( t \right) \equiv {\omega _{eg}}t + {{\tilde d}^2}\sin {\omega _0}t$, the angle $\gamma(t)$ is chosen such that $\sin\gamma=e^{2 \tilde{d}^2\bar{n} (\cos \omega_0 t - 1)}$, and $\bar{n}=(e^{\hbar\omega_0 /kT}-1)^{-1}$.
Then we can re-calculate $\braket{\sigma_x}$ and $\braket{\sigma_y}$ to get $C_{\mu\mu}^{thm}(t)={\left\langle \sigma_x\right\rangle} + i{\left\langle \sigma_y\right\rangle}$ as follows. \begin{eqnarray}
\braket{\sigma_x} &=& \sin \gamma Re(e^{- i \omega_{eg} t - i\tilde{d}^2 \sin \omega_0 t - | \tilde{d} (e^{i \omega_0 t} - 1) |^2 / 2}) \nonumber \\
&=& \sin \gamma Re(e^{- i \omega_{eg} t + \tilde{d}^2 (e^{-i \omega_0 t} - 1)}) \nonumber \\ &=& Re(e^{- i \omega_{eg} t + 2 \tilde{d}^2 \bar{n} (\cos \omega_0 t- 1) + \tilde{d}^2 (e^{- i \omega_0 t} - 1)}) \nonumber \\ &=& Re (e^{- i \omega_{eg} t + \tilde{d}^2 [(\bar{n} +1) (e^{- i \omega_0 t} - 1) + \bar{n} (e^{i \omega_0 t} - 1)]})\\ \braket{\sigma_y} &=& Im(e^{- i \omega_{eg} t + \tilde{d}^2 [(\bar{n} +1) (e^{- i \omega_0 t} - 1) + \bar{n} (e^{i \omega_0 t} - 1)]}) \end{eqnarray}
This result is consistent with the correlation function for a real thermal equilibrium state (also see \cite{mukamel1999principles}) \begin{eqnarray} C_{\mu\mu}^{thm}(t) = e^{- i \omega_{eg} t + \tilde{d}^2 [(\bar{n} +1) (e^{- i \omega_0 t} - 1) + \bar{n} (e^{i \omega_0 t} - 1)]}. \end{eqnarray}
\section{Simulating correlation function with damping} The method to mimic the influence of the environment is similar to the case of a thermal equilibrium state except for $\sin\gamma(t) = e^{- t / \tau}$, where $\tau$ is the characteristic time which describes the decay time of the correlation function. This method works for any nuclear state $\ket{\psi}$ \begin{eqnarray} \ket{g}\ket{\psi} &\Rightarrow& \cos \frac{\gamma(t)}{2} \ket{e}\ket{\psi} + e^{- i \omega_{eg} t - i \tilde{d}^2 \sin\omega_0 t}\sin\frac{\gamma(t)}{2} \ket{g}\ket{\psi} \end{eqnarray}
It is easy to verify that \begin{equation} C_{\mu\mu}^{\text{damp}}(t) = \braket{\sigma_x} + i \braket{\sigma_y} = e^{- t / \tau} C_{\mu \mu} (t) \end{equation} where $C_{\mu\mu}^{\text{damp}}(t)~(C_{\mu \mu} (t))$ is the correlation function with (without) damping.
{\section{Device and Readout properties}} The transmon qubit in our experiment is fabricated using the standard Dolan technique~\cite{Dolan} with a double-angle evaporation of aluminum after a single electron-beam lithography step on a $c$-plane sapphire substrate. The experiment is performed in a cryogen-free dilution refrigerator with a base temperature of about 10~mK. A Josephson parametric amplifier (JPA)~\cite{Hatridge,Roy} is connected to the output of the readout cavity at the base temperature as the first stage of amplification before the high-electron-mobility-transistor amplifier at 4K. The JPA is operated in a pulsed double-pumped mode~\cite{Kamal,Murch} to minimize pump leakage to the readout cavity. The readout pulse is calibrated to contain only a few photons (see the calibration session below), enough for a high-fidelity single-shot readout of the qubit state. The schematic of the measurement setup can be found in Ref.~\onlinecite{Liu2016}.
The readout property of the qubit is shown in Fig.~\ref{fig:readoutproperty}. The readout histogram is clearly bimodal and well separated. A threshold $V_{th}=0$ is chosen to digitize the readout signal to $+1$ and $-1$ for the ground state $\ket{g}$ and the excited state $\ket{e}$, respectively. The qubit has an excited state population of $3.9\%$ in the steady state, presumably due to stray infrared photons or other background noise leaking into the cavity, although the exact source remains unknown. We use measurement-based post-selection to purify the qubit to $\ket{g}$ state. This requires the measurement to be high quantum non-demolition (QND). Inset of Fig.~\ref{fig:readoutproperty} shows the qubit readout matrix with the cavity left in vacuum and the corresponding experiment sequence. After the first measurement and post-selection of $\ket{g}$, the subsequent qubit measurement shows that the probability of the qubit being populated in $\ket{g}$ is as high as 0.999 (dashed histogram), demonstrating the high QND property of the qubit readout. If we post-select the qubit at $\ket{e}$ state, the subsequent measurement finds that the qubit remains in $\ket{e}$ with a fidelity of 0.953. These errors dominantly come from the $T_1$ process during the waiting time after the initialization measurement (250~ns) and during the readout time (240~ns).
Figure~\ref{fig:pi_pulse_property} shows the properties of non-selective $\pi$ pulses with a coherent state $\ket{\alpha}$ in the cavity. In our simulation, there is always a coherent state present in the storage cavity (see Fig.~\ref{fig:Fock1_pre}). The associated measurement pulses are shown in Fig.~\ref{fig:pi_pulse_property}a. The absence or presence of a $\pi$ pulse at the beginning together with a measurement M1 is used to prepare the qubit's initial state $\ket{g}$ or $\ket{e}$ by a post-selection. The numbers outside the parentheses correspond to a $\pi$ pulse with $\sigma=6$~ns for $\alpha=1$ ($\bar{n}=1$) in the storage cavity. The numbers inside the parentheses correspond to a $\pi$ pulse with $\sigma=2$~ns for $\alpha=4$ ($\bar{n}=16$) in the storage cavity. The fidelity loss mainly comes from two parts: the qubit $T_1$ process during the measurement time and the waiting time between the two consecutive measurements, and the qubit frequency shifts due to photon number occupations in the cavity.
\begin{figure*}
\caption{Qubit readout property. Histogram of the readout with (the blue dashed line) and without (the red solid line) purification. The ground state $\ket{g}$ and the excited state $\ket{e}$ are well-separated and a threshold $V_{th}=0$ is chosen to digitize the readout result. Without purification, $\ket{e}$ takes up to $3.9\%$ occupation in the steady state, presumably due to stray infrared photons or other background noise leaking into the cavity. Inset is the basic qubit readout matrix and the corresponding experimental protocol with the storage cavity left in vacuum. After the purification to $\ket{g}$, the subsequent qubit measurement has 99.9\% probability of measuring $\ket{g}$ again, demonstrating the high QND nature of the qubit readout. If $\ket{e}$ state is post-selected, the subsequent qubit measurement has 95.3\% probability of measuring $\ket{e}$ again. The errors dominantly come from the $T_1$ process during the waiting time after the initialization measurement (250~ns) and during the readout time (240~ns).}
\label{fig:readoutproperty}
\end{figure*}
\begin{figure*}
\caption{Properties of non-selective qubit $\pi$ pulses with a coherent state $\ket{\alpha}$ in the cavity. \textbf{(a)} Calibration protocol. The absence or presence of a $\pi$ pulse at the beginning together with M1 is used to prepare the qubit's initial state $\ket{g}$ or $\ket{e}$ by a post-selection. \textbf{(b)} Readout matrix. The numbers outside the parenthesis correspond to a $\pi$ pulse with $\sigma=6$~ns for $\alpha=1$ ($\bar{n}=1$). The numbers inside the parenthesis correspond to a $\pi$ pulse with $\sigma=2$~ns for $\alpha=4$ ($\bar{n}=16$).}
\label{fig:pi_pulse_property}
\end{figure*}
{\section{Measurement pulse sequence}} The pulse sequence for the experiment is shown in Fig.~\ref{fig:Fock1_pre}a. Here we use the case of the nuclear state in a Fock state $\ket{1}$ as an example. The whole experiment can be divided into three main parts: 1) initialization of the system to $\ket{g,0}$ by post-selecting results of the qubit measurement and the cavity parity measurement; 2) creation of $\ket{1}$ state by first displacing the cavity to a coherent state $\ket{\alpha=1}$, then applying a selective $\pi$ rotation ($\sigma=360$~ns) of the qubit corresponding to $N=1$ photon in the cavity, and finally post-selecting the excited state of the qubit; 3) simulation as described in the main text.
The classical microwave pulse to displace the cavity state has either a square envelope with a width of 100~ns or a Gaussian envelope with $\sigma=335$~ns. The amplitudes have been calibrated carefully as shown in the section of ``calibration of driving amplitude". All qubit drive pulses have a Gaussian envelope pulses truncated to $\pm 2\sigma$. To eliminate the possible qubit leakage to higher qubit levels, we also apply the so-called ``derivative removal by adiabatic gate" technique~\cite{Motzoi} for pulses with $\sigma=$2, 4, and 6~ns.
Figure~\ref{fig:Fock1_pre}b shows the Wigner tomography of the created $\ket{1}$ state and Fig.~\ref{fig:Fock1_pre}c shows the moduli of the reconstructed density matrix $\rho$ by least-square regression using a maximum likelihood estimation~\cite{Smolin2012,VlastakisBell}. The measured fidelity of this state is $F=\left\langle 1\right|\rho\left|1\right\rangle=0.94$. The Wigner tomography of the cavity state is performed by a cavity's displacement operation ${\cal D}(-\beta)$ followed by a parity measurement~\cite{Lutterbach1997,Bertet2002,Vlastakis,SunNature,Liu2016}. The parity measurement is achieved in a Ramsey-type measurement of the qubit with a conditional cavity $\pi$ phase shift $C(\pi)$ sandwiched in between two unconditional qubit rotations $R_{Y}(\pi/2)$ followed by a projective measurement of the qubit.
\begin{figure*}
\caption{\textbf{(a)} Schematic of the pulse sequence for simulating a nuclear initial state $\ket{1}$. The process contains three parts: 1) initialization of the system to $\ket{g,0}$ by post-selecting results of the qubit measurement and the cavity parity measurement; 2) creation of $\ket{1}$ state by first displacing the cavity to $\ket{\alpha=1}$ state, then performing a selective $\pi$ rotation ($\sigma=360$~ns) of the qubit corresponding to $N=1$ photon in the cavity, and finally post-selecting the excited state of the qubit; 3) simulation as described in the main text. In order to minimize the cavity state's Kerr rotation associated with the qubit in the $\ket{e}$ state, the time interval between the last two $\pi/2$ pulses is set to $4\pi/\chi_{qs}$. The last $R_{X \text{ or } Y}(\pi/2)$ pulse needs to be as narrow as possible to be non-selective. \textbf{(b)} Wigner function of the created $\ket{1}$ state with a fidelity $F=\left\langle 1\right|\rho\left|1\right\rangle=0.94$. \textbf{(c)} Moduli of the reconstructed density matrix $\rho$ from the Wigner function in \textbf{(b)} by least-square regression using a maximum likelihood estimation. }
\label{fig:Fock1_pre}
\end{figure*}
\section{Calibration of driving amplitude} To have a high-QND and high-fidelity single-shot readout is essential for any experiment that requires post-selections. There should be enough readout photons for a high signal-to-noise ratio of the readout, but not too many to take a long time for those readout photons to leak out, otherwise slowing down the subsequent operations. In Fig.~\ref{fig:readout_pulse_nbar}, the readout photon number is calibrated through a measurement-induced dephasing process. Figure~\ref{fig:readout_pulse_nbar}a shows the measurement pulse sequence which is a Ramsey-type measurement of the qubit inserted by a readout pulse with various amplitudes at a fixed width $T_m$. Figure~\ref{fig:readout_pulse_nbar}b shows the decaying signal, coming from the dephasing due to readout photons, as a function of the measurement pulse amplitude. An exponential fit gives a calibration of the readout pulse. In our experiment, we typically read out the qubit with about $\bar{n}=5$ photons in the cavity and wait for nearly six times the photon decay time to make sure the average remaining photon number in the readout cavity is only about 1\%.
The calibration of the controlled cavity displacement ${\cal D}^g(\alpha)$ (Gaussian envelope with $\sigma=335$~ns) is critical for our simulation scheme. This calibration is realized by measuring the probability of the first nine Fock states $N = 0,1,2,...8$ through a selective $\pi$ pulse (Gaussian envelope with $\sigma=360$~ns) as a function of the displacement pulse amplitude, as shown in Fig.~\ref{fig:calibrate_nbar}. A nearly perfect global fit to the measurement results with a Poisson distribution not only gives the required calibration of the cavity displacement amplitude, but also indicates good control of the coherent state in the cavity. The calibration of the displacement ${\cal D}(1)$ with a 100~ns square envelope is performed by measuring the photon number parity instead as a function of displacement pulse amplitude (inset of Fig.~\ref{fig:calibrate_nbar}).
Figure~\ref{fig:Poisson_Dist} shows the typical spectral peak values for the nuclear system at vacuum and at zero temperature (Fig.~2 of the main text). The peaks are all normalized by dividing a constant reduction factor $f=0.83$ (mainly due to the system decoherence), and are in good agreement with the expected Poisson distribution.
\begin{figure*}
\caption{Calibration of $\bar{n}$ in the readout pulse. \textbf{(a)} A Ramsey-type measurement sequence for the qubit inserted by a readout pulse with various amplitudes at a fixed width $T_m$. The second $\pi/2$ pulse has a rotating phase relative to the first one in order to have an oscillating interference signal. \textbf{(b)} The probability of the qubit at the ground state as a function of the measurement pulse amplitude. The Ramsey amplitude $A\sim e^{-T_m\Gamma_m}$ gives a calibration of $\bar{n}$ in the readout pulse, where the measurement-induced dephasing rate $\Gamma_m=2\bar{n}\kappa \sin^2(\tan^{-1}(\chi_{qr}/\kappa))$, $\kappa$ is the readout cavity's decay rate, and $\chi_{qr}$ is the dispersive frequency shift between the readout cavity and the qubit.}
\label{fig:readout_pulse_nbar}
\end{figure*}
\begin{figure*}
\caption{Calibration of the controlled cavity displacement ${\cal D}^g(\alpha)$ (Gaussian envelope with $\sigma=335$~ns). The probability of the first nine Fock states $N=0,1,...,8$ is measured as a function of the displacement pulse amplitude through a selective $\pi$ pulse ($\sigma=360$~ns) on the qubit at each resonant transition frequency corresponding to a specific photon number $N$ in the cavity. There is no normalization of the measurement data and the loss of probability dominantly comes from the $T_1$ process in the long duration of the $\pi$ pulse. Lines are from a global fit with a Poisson distribution $\displaystyle P(\ket{\alpha},N)=A|\alpha|^{2N}e^{-|\alpha|^2}/N!$, where $A$ is a scaling factor accounting for the probability loss. The excellent agreement indicates good control of the coherent state in the cavity, giving a calibration DAC level=326 corresponding to $\alpha=1$. Inset: calibration of the displacement ${\cal D}(1)$ with a 100~ns square envelope. This calibration is performed by measuring the photon number parity instead as a function of displacement amplitude, giving DAC level=2649 for $\alpha=1$. The loss of parity measurement contrast mainly comes from the qubit decoherence during the parity measurement.}
\label{fig:calibrate_nbar}
\end{figure*}
\begin{figure*}
\caption{Poisson distribution of spectral peaks for the nuclear system at vacuum and at zero temperature. Dots are typical spectral peak values in Fig.~2 of the main text, all normalized by dividing a constant reduction factor $f=0.83$. Lines are from a global fit with a Poisson distribution. The experimental peaks are nearly perfectly Poisson distributed.}
\label{fig:Poisson_Dist}
\end{figure*}
\begin{thebibliography}{14}
\makeatletter
\providecommand \@ifxundefined [1]{
\@ifx{#1\undefined}
}
\providecommand \@ifnum [1]{
\ifnum #1\expandafter \@firstoftwo
\else \expandafter \@secondoftwo
\fi
}
\providecommand \@ifx [1]{
\ifx #1\expandafter \@firstoftwo
\else \expandafter \@secondoftwo
\fi
}
\providecommand \natexlab [1]{#1}
\providecommand \enquote [1]{``#1''}
\providecommand \bibnamefont [1]{#1}
\providecommand \bibfnamefont [1]{#1}
\providecommand \citenamefont [1]{#1}
\providecommand \href@noop [0]{\@secondoftwo}
\providecommand \href [0]{\begingroup \@sanitize@url \@href}
\providecommand \@href[1]{\@@startlink{#1}\@@href}
\providecommand \@@href[1]{\endgroup#1\@@endlink}
\providecommand \@sanitize@url [0]{\catcode `\\12\catcode `\$12\catcode
`\&12\catcode `\#12\catcode `\^12\catcode `\_12\catcode `\%12\relax}
\providecommand \@@startlink[1]{}
\providecommand \@@endlink[0]{}
\providecommand \url [0]{\begingroup\@sanitize@url \@url }
\providecommand \@url [1]{\endgroup\@href {#1}{\urlprefix }}
\providecommand \urlprefix [0]{URL }
\providecommand \Eprint [0]{\href }
\providecommand \doibase [0]{http://dx.doi.org/}
\providecommand \selectlanguage [0]{\@gobble}
\providecommand \bibinfo [0]{\@secondoftwo}
\providecommand \bibfield [0]{\@secondoftwo}
\providecommand \translation [1]{[#1]}
\providecommand \BibitemOpen [0]{}
\providecommand \bibitemStop [0]{}
\providecommand \bibitemNoStop [0]{.\EOS\space}
\providecommand \EOS [0]{\spacefactor3000\relax}
\providecommand \BibitemShut [1]{\csname bibitem#1\endcsname}
\let\auto@bib@innerbib\@empty
\bibitem [{\citenamefont {Mukamel}(1999)}]{mukamel1999principles}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {S.}~\bibnamefont
{Mukamel}},\ }\href@noop {} {\emph {\bibinfo {title} {Principles of nonlinear
optical spectroscopy}}},\ \bibinfo {number} {6}\ (\bibinfo {publisher}
{Oxford University Press on Demand},\ \bibinfo {year} {1999})\BibitemShut
{NoStop}
\bibitem [{\citenamefont {Dolan}(1977)}]{Dolan}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {G.~J.}\ \bibnamefont
{Dolan}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Appl.
Phys. Lett.}\ }\textbf {\bibinfo {volume} {37}},\ \bibinfo {pages} {337}
(\bibinfo {year} {1977})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Hatridge}\ \emph {et~al.}(2011)\citenamefont
{Hatridge}, \citenamefont {Vijay}, \citenamefont {Slichter}, \citenamefont
{Clarke},\ and\ \citenamefont {Siddiqi}}]{Hatridge}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {M.}~\bibnamefont
{Hatridge}}, \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont {Vijay}},
\bibinfo {author} {\bibfnamefont {D.~H.}\ \bibnamefont {Slichter}}, \bibinfo
{author} {\bibfnamefont {J.}~\bibnamefont {Clarke}}, \ and\ \bibinfo {author}
{\bibfnamefont {I.}~\bibnamefont {Siddiqi}},\ }\href {\doibase
10.1103/PhysRevB.83.134501} {\bibfield {journal} {\bibinfo {journal} {Phys.
Rev. B}\ }\textbf {\bibinfo {volume} {83}},\ \bibinfo {pages} {134501}
(\bibinfo {year} {2011})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Roy}\ \emph {et~al.}(2015)\citenamefont {Roy},
\citenamefont {Kundu}, \citenamefont {Chand}, \citenamefont {M.},
\citenamefont {Ranadive}, \citenamefont {Nehra}, \citenamefont {Patankar},
\citenamefont {Aumentado}, \citenamefont {Clerk},\ and\ \citenamefont
{Vijay}}]{Roy}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {T.}~\bibnamefont
{Roy}}, \bibinfo {author} {\bibfnamefont {S.}~\bibnamefont {Kundu}}, \bibinfo
{author} {\bibfnamefont {M.}~\bibnamefont {Chand}}, \bibinfo {author}
{\bibfnamefont {V.~A.}\ \bibnamefont {M.}}, \bibinfo {author} {\bibfnamefont
{A.}~\bibnamefont {Ranadive}}, \bibinfo {author} {\bibfnamefont
{N.}~\bibnamefont {Nehra}}, \bibinfo {author} {\bibfnamefont {M.~P.}\
\bibnamefont {Patankar}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont
{Aumentado}}, \bibinfo {author} {\bibfnamefont {A.~A.}\ \bibnamefont
{Clerk}}, \ and\ \bibinfo {author} {\bibfnamefont {R.}~\bibnamefont
{Vijay}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Appl.
Phys. Lett.}\ }\textbf {\bibinfo {volume} {107}},\ \bibinfo {pages} {262601}
(\bibinfo {year} {2015})}\BibitemShut {NoStop}
\bibitem [{\citenamefont {Kamal}\ \emph {et~al.}(2009)\citenamefont {Kamal},
\citenamefont {Marblestone},\ and\ \citenamefont {Devoret}}]{Kamal}
\BibitemOpen
\bibfield {author} {\bibinfo {author} {\bibfnamefont {A.}~\bibnamefont
{Kamal}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Marblestone}},
\ and\ \bibinfo {author} {\bibfnamefont {M.~H.}\ \bibnamefont {Devoret}},\
}\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Phys. Rev. B}\
}\textbf {\bibinfo {volume} {79}},\ \bibinfo {pages} {184301} (\bibinfo
{year} {2009})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Murch}\ \emph {et~al.}(2013)\citenamefont {Murch},
\citenamefont {Weber}, \citenamefont {Macklin},\ and\ \citenamefont
{Siddiqi}}]{Murch} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {K.~W.}\ \bibnamefont
{Murch}}, \bibinfo {author} {\bibfnamefont {S.~J.}\ \bibnamefont {Weber}},
\bibinfo {author} {\bibfnamefont {C.}~\bibnamefont {Macklin}}, \ and\
\bibinfo {author} {\bibfnamefont {I.}~\bibnamefont {Siddiqi}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Nature}\ }\textbf {\bibinfo
{volume} {502}},\ \bibinfo {pages} {211} (\bibinfo {year}
{2013})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Liu}\ \emph {et~al.}(2016)\citenamefont {Liu},
\citenamefont {Xu}, \citenamefont {Wang}, \citenamefont {Shi-Biao},
\citenamefont {Tanay}, \citenamefont {Suman}, \citenamefont {Madhavi},
\citenamefont {Ranadive}, \citenamefont {Vijay}, \citenamefont {Song},
\citenamefont {Duan},\ and\ \citenamefont {Sun}}]{Liu2016} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {K.}~\bibnamefont
{Liu}}, \bibinfo {author} {\bibfnamefont {Y.}~\bibnamefont {Xu}}, \bibinfo
{author} {\bibfnamefont {W.}~\bibnamefont {Wang}}, \bibinfo {author}
{\bibfnamefont {Z.}~\bibnamefont {Shi-Biao}}, \bibinfo {author}
{\bibfnamefont {R.}~\bibnamefont {Tanay}}, \bibinfo {author} {\bibfnamefont
{K.}~\bibnamefont {Suman}}, \bibinfo {author} {\bibfnamefont
{C.}~\bibnamefont {Madhavi}}, \bibinfo {author} {\bibfnamefont
{A.}~\bibnamefont {Ranadive}}, \bibinfo {author} {\bibfnamefont
{R.}~\bibnamefont {Vijay}}, \bibinfo {author} {\bibfnamefont {Y.~P.}\
\bibnamefont {Song}}, \bibinfo {author} {\bibfnamefont {L.-M.}\ \bibnamefont
{Duan}}, \ and\ \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Sun}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {arXiv:1608.04908}\ } (\bibinfo {year} {2016})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Motzoi}\ \emph {et~al.}(2009)\citenamefont {Motzoi},
\citenamefont {Gambetta}, \citenamefont {Rebentrost},\ and\ \citenamefont
{Wilhelm}}]{Motzoi} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {F.}~\bibnamefont
{Motzoi}}, \bibinfo {author} {\bibfnamefont {J.~M.}\ \bibnamefont
{Gambetta}}, \bibinfo {author} {\bibfnamefont {P.}~\bibnamefont
{Rebentrost}}, \ and\ \bibinfo {author} {\bibfnamefont {F.~K.}\ \bibnamefont
{Wilhelm}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Phys.
Rev. Lett.}\ }\textbf {\bibinfo {volume} {103}},\ \bibinfo {pages} {110501}
(\bibinfo {year} {2009})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Smolin}\ \emph {et~al.}(2012)\citenamefont {Smolin},
\citenamefont {Gambetta},\ and\ \citenamefont {Smith}}]{Smolin2012} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {J.~A.}\ \bibnamefont
{Smolin}}, \bibinfo {author} {\bibfnamefont {J.~M.}\ \bibnamefont
{Gambetta}}, \ and\ \bibinfo {author} {\bibfnamefont {G.}~\bibnamefont
{Smith}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Phys.
Rev. Lett.}\ }\textbf {\bibinfo {volume} {108}},\ \bibinfo {pages} {070502}
(\bibinfo {year} {2012})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Vlastakis}\ \emph {et~al.}(2015)\citenamefont
{Vlastakis}, \citenamefont {Petrenko}, \citenamefont {Ofek}, \citenamefont
{Sun}, \citenamefont {Leghtas}, \citenamefont {Sliwa}, \citenamefont {Liu},
\citenamefont {Hatridge}, \citenamefont {Blumoff}, \citenamefont {Frunzio},
\citenamefont {Mirrahimi}, \citenamefont {Jiang}, \citenamefont {Devoret},\
and\ \citenamefont {Schoelkopf}}]{VlastakisBell} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {B.}~\bibnamefont
{Vlastakis}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Petrenko}},
\bibinfo {author} {\bibfnamefont {N.}~\bibnamefont {Ofek}}, \bibinfo {author}
{\bibfnamefont {L.}~\bibnamefont {Sun}}, \bibinfo {author} {\bibfnamefont
{Z.}~\bibnamefont {Leghtas}}, \bibinfo {author} {\bibfnamefont
{K.}~\bibnamefont {Sliwa}}, \bibinfo {author} {\bibfnamefont
{Y.}~\bibnamefont {Liu}}, \bibinfo {author} {\bibfnamefont {M.}~\bibnamefont
{Hatridge}}, \bibinfo {author} {\bibfnamefont {J.}~\bibnamefont {Blumoff}},
\bibinfo {author} {\bibfnamefont {L.}~\bibnamefont {Frunzio}}, \bibinfo
{author} {\bibfnamefont {M.}~\bibnamefont {Mirrahimi}}, \bibinfo {author}
{\bibfnamefont {L.}~\bibnamefont {Jiang}}, \bibinfo {author} {\bibfnamefont
{M.~H.}\ \bibnamefont {Devoret}}, \ and\ \bibinfo {author} {\bibfnamefont
{R.~J.}\ \bibnamefont {Schoelkopf}},\ }\href@noop {} {\bibfield {journal}
{\bibinfo {journal} {Nat. Commun.}\ }\textbf {\bibinfo {volume} {6}}
(\bibinfo {year} {2015})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Lutterbach}\ and\ \citenamefont
{Davidovich}(1997)}]{Lutterbach1997} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {L.~G.}\ \bibnamefont
{Lutterbach}}\ and\ \bibinfo {author} {\bibfnamefont {L.}~\bibnamefont
{Davidovich}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal}
{Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {78}},\ \bibinfo {pages}
{2547} (\bibinfo {year} {1997})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Bertet}\ \emph {et~al.}(2002)\citenamefont {Bertet},
\citenamefont {Auffeves}, \citenamefont {Maioli}, \citenamefont {Osnaghi},
\citenamefont {Meunier}, \citenamefont {Brune}, \citenamefont {Raimond},\
and\ \citenamefont {Haroche}}]{Bertet2002} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {P.}~\bibnamefont
{Bertet}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Auffeves}},
\bibinfo {author} {\bibfnamefont {P.}~\bibnamefont {Maioli}}, \bibinfo
{author} {\bibfnamefont {S.}~\bibnamefont {Osnaghi}}, \bibinfo {author}
{\bibfnamefont {T.}~\bibnamefont {Meunier}}, \bibinfo {author} {\bibfnamefont
{M.}~\bibnamefont {Brune}}, \bibinfo {author} {\bibfnamefont {J.~M.}\
\bibnamefont {Raimond}}, \ and\ \bibinfo {author} {\bibfnamefont
{S.}~\bibnamefont {Haroche}},\ }\href@noop {} {\bibfield {journal} {\bibinfo
{journal} {Phys. Rev. Lett.}\ }\textbf {\bibinfo {volume} {89}},\ \bibinfo
{pages} {200402} (\bibinfo {year} {2002})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Vlastakis}\ \emph {et~al.}(2013)\citenamefont
{Vlastakis}, \citenamefont {Kirchmair}, \citenamefont {Leghtas},
\citenamefont {Nigg}, \citenamefont {Frunzio}, \citenamefont {Girvin},
\citenamefont {Mirrahimi}, \citenamefont {Devoret},\ and\ \citenamefont
{Schoelkopf}}]{Vlastakis} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {B.}~\bibnamefont
{Vlastakis}}, \bibinfo {author} {\bibfnamefont {G.}~\bibnamefont
{Kirchmair}}, \bibinfo {author} {\bibfnamefont {Z.}~\bibnamefont {Leghtas}},
\bibinfo {author} {\bibfnamefont {S.~E.}\ \bibnamefont {Nigg}}, \bibinfo
{author} {\bibfnamefont {L.}~\bibnamefont {Frunzio}}, \bibinfo {author}
{\bibfnamefont {S.~M.}\ \bibnamefont {Girvin}}, \bibinfo {author}
{\bibfnamefont {M.}~\bibnamefont {Mirrahimi}}, \bibinfo {author}
{\bibfnamefont {M.~H.}\ \bibnamefont {Devoret}}, \ and\ \bibinfo {author}
{\bibfnamefont {R.~J.}\ \bibnamefont {Schoelkopf}},\ }\href@noop {} {\bibfield {journal} {\bibinfo {journal} {Science}\ }\textbf {\bibinfo
{volume} {342}},\ \bibinfo {pages} {607} (\bibinfo {year}
{2013})}\BibitemShut {NoStop} \bibitem [{\citenamefont {Sun}\ \emph {et~al.}(2014)\citenamefont {Sun},
\citenamefont {Petrenko}, \citenamefont {Leghtas}, \citenamefont {Vlastakis},
\citenamefont {Kirchmair}, \citenamefont {Sliwa}, \citenamefont {Narla},
\citenamefont {Hatridge}, \citenamefont {Shankar}, \citenamefont {Blumoff},
\citenamefont {Frunzio}, \citenamefont {Mirrahimi}, \citenamefont {Devoret},\
and\ \citenamefont {Schoelkopf}}]{SunNature} \BibitemOpen \bibfield {author} {\bibinfo {author} {\bibfnamefont {L.}~\bibnamefont
{Sun}}, \bibinfo {author} {\bibfnamefont {A.}~\bibnamefont {Petrenko}},
\bibinfo {author} {\bibfnamefont {Z.}~\bibnamefont {Leghtas}}, \bibinfo
{author} {\bibfnamefont {B.}~\bibnamefont {Vlastakis}}, \bibinfo {author}
{\bibfnamefont {G.}~\bibnamefont {Kirchmair}}, \bibinfo {author}
{\bibfnamefont {K.~M.}\ \bibnamefont {Sliwa}}, \bibinfo {author}
{\bibfnamefont {A.}~\bibnamefont {Narla}}, \bibinfo {author} {\bibfnamefont
{M.}~\bibnamefont {Hatridge}}, \bibinfo {author} {\bibfnamefont
{S.}~\bibnamefont {Shankar}}, \bibinfo {author} {\bibfnamefont
{J.}~\bibnamefont {Blumoff}}, \bibinfo {author} {\bibfnamefont
{L.}~\bibnamefont {Frunzio}}, \bibinfo {author} {\bibfnamefont
{M.}~\bibnamefont {Mirrahimi}}, \bibinfo {author} {\bibfnamefont {M.~H.}\
\bibnamefont {Devoret}}, \ and\ \bibinfo {author} {\bibfnamefont {R.~J.}\
\bibnamefont {Schoelkopf}},\ }\href@noop {} {\bibfield {journal} {\bibinfo
{journal} {Nature}\ }\textbf {\bibinfo {volume} {511}},\ \bibinfo {pages}
{444} (\bibinfo {year} {2014})}\BibitemShut {NoStop} \end{thebibliography}
\end{document} |
\begin{document}
\subjclass[2010]{Primary 42A45. Secondary 42C10, 42A38, 33C45.}
\title[Multipliers of Laplace Transform Type for Laguerre and Hermite\dots]{Multipliers of Laplace Transform Type for Laguerre and Hermite Expansions}
\author{Pablo L. De N\'apoli} \address{Departamento de Matem\'atica \\ Facultad de Ciencias Exactas y Naturales \\ Universidad de Buenos Aires \\ Ciudad Universitaria \\ 1428 Buenos Aires, Argentina} \email{pdenapo@dm.uba.ar}
\author{Irene Drelichman} \address{Departamento de Matem\'atica \\ Facultad de Ciencias Exactas y Naturales \\ Universidad de Buenos Aires \\ Ciudad Universitaria \\ 1428 Buenos Aires, Argentina} \email{irene@drelichman.com}
\author{Ricardo G. Dur\'an} \address{Departamento de Matem\'atica \\ Facultad de Ciencias Exactas y Naturales \\ Universidad de Buenos Aires \\ Ciudad Universitaria \\ 1428 Buenos Aires, Argentina} \email{rduran@dm.uba.ar}
\thanks{Supported by ANPCyT under grant PICT 01307, by Universidad de Buenos Aires under grants X070 and X837 and by CONICET under grants PIP 11420090100230 and PIP 11220090100625. The first and third authors are members of CONICET, Argentina.}
\begin{abstract} We present a new criterion for the weighted $L^p-L^q$ boundedness of multiplier operators for Laguerre and Hermite expansions that arise from a Laplace-Stieltjes transform. As a special case, we recover known results on weighted estimates for Laguerre and Hermite fractional integrals with a unified and simpler approach. \end{abstract}
\keywords{Laguerre expansions, Hermite expansions, harmonic oscillator, fractional integration, multipliers}
\maketitle
\section{Introduction}
The aim of this paper is to obtain weighted estimates for multipliers of Laplace transform type for Laguerre and Hermite orthogonal expansions. To explain our results, consider the system of Laguerre functions, for fixed $\alpha>-1$, given by \begin{equation*}
l_k^\alpha(x)= \left( \frac{k!}{\Gamma(k+\alpha+1)} \right)^{\frac12} e^{-\frac{x}{2}} L_k^\alpha(x) \ ,\quad k \in \mathbb{N}_0 \end{equation*} where $L_k^\alpha(x)$ are the Laguerre polynomials. The $l_k^\alpha(x)$ are eigenfunctions with eigenvalues $\lambda_{\alpha,k}= k + (\alpha+1)/2$ of the differential operator \begin{equation} \label{laguerre} L= -\left( x \frac{d^2}{dx^2} + (\alpha+1) \frac{d}{dx} - \frac{x}{4} \right) \end{equation} and are an orthonormal basis in $L^2(\mathbb{R}_{+},x^\alpha dx)$. Therefore, for $\gamma<p(\alpha+1)-1$ we can associate to any $f \in L^p(\mathbb{R}_+, x^\gamma \, dx)$ its Laguerre series: \begin{equation*} f(x) \sim \sum_{k=0}^\infty a_{\alpha,k}(f) l_k^\alpha(x), \quad a_{\alpha,k}(f)= \int_0^\infty f(x) l_k^\alpha(x) x^\alpha dx \label{Laguerre-series} \end{equation*} and, given a bounded sequence $\{m_k\}$, we can define a multiplier operator by \begin{equation}
M_{\alpha,m} f(x) \sim \sum_{k=0}^\infty a_{\alpha,k}(f) m_k l_k^\alpha(x). \label{multiplier-operator}
\end{equation}
The main example of the kind of multipliers we are interested in is the Laguerre fractional integral, introduced by G. Gasper, K. Stempak and W. Trebels in \cite{GST} as an analogue in the Laguerre setting of the classical fractional integral of Fourier analysis, and given by \begin{equation*} I_\sigma f(x) \sim \sum_{k=0}^\infty (k+1)^{-\sigma} a_{\alpha,k} l_k^\alpha(x). \end{equation*}
In \cite{GST} the aforementioned authors obtained weighted estimates for this operator that were later improved by G. Gasper and W. Trebels in \cite{GT} using a completely different proof. In this work we recover some of the ideas of the original method of \cite{GST}, but simplifying the proof in many technical details and extending it to obtain a better range of exponents that, in particular, give the same result of \cite{GT} for the Laguerre fractional integral. Moreover, we show that our proof applies to a wide class of multipliers, namely multipliers arising from a Laplace-Stieltjes transform, which are of the form \eqref{multiplier-operator} with $m_k=m(k)$ given by the Laplace-Stieltjes transform of some real-valued function $\Psi(t)$, that is, \begin{equation} m(s) = \mathfrak{L}\Psi(s) := \int_0^\infty e^{-st} d\Psi(t).
\label{Laplace.transform}
\end{equation}
We will assume that $\Psi$ is of bounded variation in $\mathbb{R}_+$, so that the Laplace transform converges absolutely in the half plane $\hbox{Re}(s)\geq 0$ (see \cite[Chapter 2]{Widder}) and the definition of the operator $M_{\alpha,m}$ makes sense.
Multipliers of this kind are quite natural to consider and, indeed, a slightly different definition is given by E. M. Stein in \cite{Stein} and was previously used in the unweighted setting by E. Sasso in \cite{S}. More recently, B. Wr\'obel \cite{W} has obtained weighted $L^p$ estimates for both the kind of multipliers considered in \cite{Stein} and the ones considered here when $\alpha \in \{-\frac12\} \cup [\frac12,\infty)$, by proving that they are Calder\'on-Zygmund operators (see Section 4 below for a precise comparison of results). Also, let us mention that T. Mart\'inez has considered multipliers of Laplace transform type for ultraspherical expansions in \cite{Martinez}.
Other kinds of multipliers for Laguerre expansions have also been considered, see, for instance, \cite{GST, Stempak-Trebels, Thangavelu} where boundedness criteria are given in terms of difference operators. In our case, we will only require minimal assumptions on the function $\Psi$, which are more natural in our context, and easier to verify in the case of the Laguerre fractional integral and in other examples that we will consider later. Indeed, the main theorem we will prove for multipliers for Laguerre expansions reads as follows:
\begin{theorem} Assume that $\alpha>-1$ and that $M_{\alpha,m}$ is a multiplier of Laplace transform type for Laguerre expansions, given by \eqref{multiplier-operator} and \eqref{Laplace.transform}, such that: \begin{enumerate}
\item[(H1)] \begin{equation*}\int_0^\infty |d\Psi|(t)< +\infty; \end{equation*} \item[(H2)] there exist $\delta>0$, $0 < \sigma < \alpha+1$, and $C>0$ such that
$$ |\Psi(t)| \leq C t^{\sigma} \quad \hbox{for} \; 0 \le t \leq \delta .$$ \end{enumerate}
Then $M_{\alpha,m}$ can be extended to a bounded operator such that
$$ \| M_{\alpha,m} f \|_{L^q(\mathbb{R}_+, x^{(\alpha-bq)})} \leq C \| f \|_{L^p(\mathbb{R}_+, x^{(\alpha+ap)})} $$ provided that the following conditions hold: \begin{equation*} 1 < p \leq q < \infty \quad , \quad a < \frac{\alpha+1}{p^\prime}\quad , \quad b < \frac{\alpha+1}{q} \end{equation*} and \begin{equation*} \label{cond19}
\left( \frac{1}{q} - \frac{1}{p} \right) \left(\alpha+\frac12\right) \le a+b \le \left(\frac{1}{q}-\frac{1}{p}\right)(\alpha+1) + \sigma. \end{equation*} \label{main-result} \end{theorem}
Besides the system $\{l_k^\alpha\}_{k\ge 0}$, other families of Laguerre functions have been considered in the literature, and using an idea due to I. Abu-Falah, R. A. Mac\'ias, C. Segovia and J. L. Torrea \cite{AMST} we will show that analogues of Theorem \ref{main-result} hold for those families with appropriate changes in the exponents (see Section 3 for the precise statement of results).
Finally, the well-known connection between Laguerre and Hermite expansions will allow us to extend the above result to an analogous result for Laplace type multipliers for Hermite expansions. To make this precise, recall that, given $f \in L^2(\mathbb{R})$, we can consider its Hermite series expansion \begin{equation*} f \sim \sum_{k=0}^\infty c_k(f) h_k , \quad c_{k}(f)= \int_{-\infty}^\infty f(x) h_k(x) dx
\label{Hermite-series}. \end{equation*} where $h_k$ are the Hermite functions given by \begin{equation*} h_k(x)= \frac{(-1)^k}{(2^k k! \pi^{1/2})^{1/2}} H_k(x) e^{-\frac{x^2}{2}}, \end{equation*} which are the normalized eigenfunctions of the Harmonic oscillator operator
$$H=-\frac{d^2}{dx^2} + |x|^2. $$
As before, given a bounded sequence $\{m_k\}$ we can define a multiplier operator by \begin{equation} \label{hermite-multiplier} M_{H,m} f \sim \sum_{k=0}^\infty c_k(f) m_k h_k \end{equation} and we say that it is a Laplace transform type multiplier if equation \eqref{Laplace.transform} holds. Then, we have the following analogue of Theorem \ref{main-result}, which, in the case of the Hermite fractional integral (that is, for $m_k= (2k+1)^{-\sigma}$), gives the same result of \cite[Theorem 2.5]{Nowak-Stempak} in the one-dimensional case:
\begin{theorem} \label{teorema-hermite} Assume that $M_{H,m}$ is a multiplier of Laplace transform type for Hermite expansions, given by \eqref{hermite-multiplier} and \eqref{Laplace.transform}, such that: \begin{enumerate}
\item[(H1h)] $$ \int_0^\infty |d\Psi|(t) < +\infty;$$ \item[(H2h)] there exist $\delta>0$, $0 < \sigma < \frac12$, and $C>0$ such that
$$ |\Psi(t)| \leq C t^{\sigma} \quad \hbox{for} \; 0 \le t \leq \delta.$$ \end{enumerate}
Then $M_{H,m}$ can be extended to a bounded operator such that
$$ \| M_{H,m} f \|_{L^q(\mathbb{R}, x^{-bq})} \leq C \| f \|_{L^p(\mathbb{R}, x^{ap})} $$ provided that the following conditions hold: \begin{equation*} 1 < p \leq q < \infty \quad , \quad a<\frac{1}{p'} \quad , \quad b<\frac{1}{q} \end{equation*} and \begin{equation*} 0\le a+b \le \frac{1}{q} -\frac{1}{p}+ 2\sigma. \label{escalah} \end{equation*}
\end{theorem}
The remainder of this paper is organized as follows. In Section 2 we prove Theorem \ref{main-result}. For the case $\alpha \ge 0$ the proof relies on the representation of the operator as a twisted generalized convolution, already used in \cite{GST} for the Laguerre fractional integral. However, instead of using the method of that paper to obtain weighted bounds, we give a simpler proof based on the use of Young's inequality in the multiplicative group $(\mathbb{R}_+, \cdot)$, which allows us to obtain a wider range of exponents. Moreover, we obtain an estimate for the convolution kernel which simplifies and generalizes Lemma 2.1 from \cite{GST}. For the case $-1 < \alpha < 0$ the result is obtained from the previous case by means of a weighted transplantation theorem from \cite{Garrigos}. A similar idea was used by Y. Kanjin and E. Sato in \cite{KS} to prove unweighted estimates for the Laguerre fractional integral using a transplantation theorem from \cite{K}. In Section 3 we obtain the analogues of Theorem \ref{main-result} for other Laguerre systems using an idea from \cite{AMST}. In Section 4 we exploit the relation between Laguerre and Hermite expansions to derive Theorem \ref{teorema-hermite} from Theorem \ref{main-result}.
Finally, in Section 5 we present some examples of operators covered by the two main theorems and make some further comments.
\section{Proof of the theorem in the Laguerre case}
In this section we prove Theorem \ref{main-result}. We will divide the proof in three steps: \begin{enumerate} \item We write the operator as a twisted generalized convolution and obtain the estimate for the convolution kernel when $\alpha \ge 0$. This part of the proof follows essentially the ideas of \cite{GST}, but in the more general setting of multipliers of Laplace transform type. In particular, we provide an easier proof of the analogue of \cite[Lemma 2.1]{GST} in this setting (see Lemma \ref{lemma-g} below). \item We complete the proof of the theorem in the case $\alpha \ge 0$ by proving weighted estimates for the generalized euclidean convolution. \item We extend the results to the case $-1<\alpha < 0$ using the case $\alpha \ge 0$ and a weighted transplantation theorem from \cite{Garrigos} (Lemma \ref{lema-garrigos} below). \end{enumerate}
\subsection{Step 1: representing the multiplier operator as a twisted generalized convolution when $\alpha \ge 0$}
Following \cite{Mc,A} we define the twisted generalized convolution of $F$ and $G$ by $$ F \times G(x) := \int_0^\infty \tau_x F(y) \, G(y) \, y^{2\alpha+1} \, dy$$ where the twisted translation operator is defined by $$ \tau_x F(y)= \frac{\Gamma(\alpha+1)}{\pi^{1/2} \Gamma(\alpha+1/2)} \int_0^\pi F((x,y)_\theta) \mathcal{J}_{\alpha-1/2}(xy \sin \theta) (\sin \theta )^{2\alpha} \; d\theta $$ with $$\mathcal{J}_\beta(x)= \Gamma(\beta+1) J_\beta(x)/(x/2)^\beta $$
$J_\beta(x)$ being the Bessel function of order $\beta$ and $$ (x,y)_\theta= (x^2 + y^2 - 2xy \cos \theta)^{1/2}.$$ Then, we have (formally) that \begin{equation} \label{malfa} M_{\alpha,m} f(x^2)= F \times G \end{equation} where $$ F(y)=f(y^2)\quad , \quad G(y) = g(y^2) $$ and \begin{equation} g(x) \sim \frac1{\Gamma(\alpha+1)} \sum_{k=0}^\infty m_k L_k^\alpha(x) e^{-\frac{x}{2}}. \label{series-g} \end{equation}
Recalling that $|\mathcal{J}_\beta(x)| \leq C_\beta$ if $\beta \geq -\frac12$, we have that: \begin{equation}
| F \times G | \le C (|F| \star |G|) \label{convolution-bound}
\end{equation} where $\star$ denotes the generalized Euclidean convolution which is defined by \begin{equation} \label{gen-eucl} F \star G(x) := \int_0^\infty \tau^E_x F(y) \, G(y) \, y^{2\alpha+1} \, dy \end{equation} with \begin{equation} \label{gen-trans}
\tau^E_x F(y):= \frac{\Gamma(\alpha+1)}{\pi^{1/2} \Gamma(\alpha+1/2)} \int_0^\pi F((x,y)_\theta) (\sin \theta )^{2\alpha} \; d\theta. \end{equation}
As a consequence of \eqref{malfa} and \eqref{convolution-bound}, the operator $M_{\alpha,m}$ is pointwise bounded by a generalized euclidean convolution with the kernel $G$ (with respect to the measure $x^{2\alpha+1} \, dx$). Therefore, we need to obtain an appropriate estimate for $G(x)=g(x^2)$, that essentially is:
$$ |g(x)| \leq C x^{\sigma-\alpha-1} \; \hbox{for} \; \alpha \geq 0 \; \hbox{and} \; 0 < \sigma < \alpha +1 $$ (see Lemma \ref{lemma-g} below for a precise statement).
This generalizes the result given in \cite[Lemma 2.1]{GST} but, while in that paper the proof of the corresponding estimate is based on delicate pointwise estimates for the Laguerre functions, our proof is based on the following generating function for the Laguerre polynomials (see, for instance, \cite{Thangavelu}): \begin{equation} \label{generating-function}
\sum_{k=0}^\infty L_k^\alpha(x) w^k = (1-w)^{-\alpha-1} e^{-\frac{xw}{1-w}} := Z_{\alpha,x}(w) \quad (|w|<1). \end{equation}
To explain our ideas, we point out that if the series in \eqref{series-g} were convergent (this need not be the case) we would have: \begin{align*} g(x) &= \frac1{\Gamma(\alpha+1)} \sum_{k=0}^\infty m_k L_k^\alpha(x) e^{-\frac{x}{2}} \\ & = \frac1{\Gamma(\alpha+1)} \sum_{k=0}^\infty \left( \int_0^\infty e^{-kt} d\Psi(t) \right) L_k^\alpha(x) e^{-\frac{x}{2}} \\ & = \frac1{\Gamma(\alpha+1)} e^{-\frac{x}{2}} \int_0^\infty Z_{\alpha,x}(e^{-t}) \; d\Psi(t). \end{align*}
The main advantage of this formula is that it yields a rather explicit expression for $g$ in which, thanks to \eqref{generating-function}, the Laguerre polynomials do not appear.
However, in general it is not clear if the series in \eqref{series-g} is convergent (not even in the special case of the Laguerre fractional integral $m(t)=t^{\sigma-1}$). Moreover, the integration of the series in $Z_{\alpha,x}(w)$ is difficult to justify since it is not uniformly convergent in the interval $[0,1]$ (because $Z_{\alpha,x}(w)$ is not analytic at $w=1$).
Nevertheless, we will see that the formal manipulations above can be given a rigorous meaning if we agree in understanding the convergence of the series in $\eqref{series-g}$ in the Abel sense. For this purpose, we introduce a regularization parameter $\rho \in (0,1)$, we consider the regularized function \begin{equation}
g_{\rho}(x) = \frac1{\Gamma(\alpha+1)} \sum_{k=0}^\infty m_k \rho^k L_k^\alpha(x) e^{-\frac{x}{2}} \label{series-g-rho} \end{equation} and recall that the series in \eqref{series-g} is summable in Abel sense to the limit $g(x)$ if there exists the limit \begin{equation*}
g(x) = \lim_{\rho \to 1} g_{\rho}(x).
\end{equation*}
With this definition in mind, we can give a rigorous meaning to the heuristic idea described above. More precisely, we will prove the following:
\begin{lemma} \label{lemma-g} Let $ g_{\rho}$ be defined by \eqref{series-g-rho}. Then:
(1) For $0<\rho<1$ the series \eqref{series-g-rho} converges absolutely.
(2) The following representation formula holds: \begin{equation} \label{rep-grho} g_{\rho}(x) = \frac{1}{\Gamma(\alpha+1)} e^{-\frac{x}{2}} \int_0^\infty Z_{\alpha,x}(\rho e^{-t}) \; d\Psi(t). \end{equation}
(3) If we define $g(x)$ by setting $\rho=1$ in this representation formula, $g(x)$ is well defined and the series \eqref{series-g} converges to $g(x)$ in the Abel sense.
(4) If $\alpha>0$, $0 <\rho_0 < \rho \leq 1$ and $0 < \sigma < \alpha +1$, then
$$ |g_\rho (x)| \leq C x^{\sigma-\alpha-1}, $$ with a constant $C=C(\alpha,\sigma)$ independent of $\rho$. \end{lemma}
\begin{proof}
(1) Observe first that hypothesis $(H1)$ implies that $(m_k)$ is a bounded sequence. Indeed, $$
|m_k| \leq \int_0^\infty e^{-kt} |d\Psi|(t) \leq \int_0^\infty |d\Psi|(t) = C < +\infty. $$
Now recall that (\cite[Lemma 1.5.3]{Thangavelu}), if $\nu=\nu(k)= 4k + 2\alpha +2$, $$
|l_k^\alpha(x)| \leq C (x\nu)^{-\frac14} \quad \hbox{if } \frac{1}{\nu} \leq x \leq \frac{\nu}{2}. $$
Therefore, if we fix $x$, for $k \geq k_0$, $x$ is in the region where this estimate holds (since $\nu \to +\infty$ when $k \to +\infty$), and from Stirling's formula we deduce that $$
\frac{k!}{\Gamma(k+\alpha+1)} = \frac{\Gamma(k+1)}{\Gamma(k+\alpha+1)} = O(k^{-\alpha}).
$$
Then we have the following estimate for the terms of the series in \eqref{series-g-rho} $$
|m_k \rho^k L^\alpha_k(x)| e^{-\frac{x}{2}} \leq C(x) \rho^k k^{-\sigma} \; \hbox{for} \; k \geq k_0, $$ and, since $\rho<1$, this implies that the series converges absolutely.\footnote{K. Stempak has observed that this result can be also justified by observing that, for fixed $x$, $L^\alpha_k(x)$ has at most polynomial growth with $k\to \infty$ (see, for instance, (7.6.9) and (7.6.10) in \cite{Sz}). Hence, the polynomial growth of $L^\alpha_k(x)$ versus the exponential decay of $\rho^k$, with $m_k$ disregarded as a bounded sequence, produce an absolutely convergent series.}
(2) First, observe that $Z_{\alpha,x}(w)$ is continuous as a function of a real variable for $w \in [0,1]$ (if we define
$Z_{\alpha,x}(1)=0$) and, therefore, it is bounded, say \begin{equation*}
|Z_{\alpha,x}(w)| \leq C = C(\alpha,x) \; \hbox{for} \; w \in [0,1]. \label{Z-bound}
\end{equation*}
Hence, using hypothesis $(H1)$ we see that the integral in the representation formula is convergent for any $\rho \in [0,1]$. Moreover, from our assumptions we have that, for $\rho<1$, \begin{align} \nonumber g_{\rho}(x) & = \frac{1}{\Gamma(\alpha+1)} \sum_{k=0}^\infty m_k \rho^k L_k^\alpha(x) e^{-\frac{x}{2}} \\ \nonumber & = \frac{1}{\Gamma(\alpha+1)} \sum_{k=0}^\infty \left( \int_0^\infty \rho^k e^{-kt} d\Psi(t) \right) L_k^\alpha(x) e^{-\frac{x}{2}} \\ \nonumber & = \lim_{N \to +\infty} \frac{1}{\Gamma(\alpha+1)} \sum_{k=0}^N \left( \int_0^\infty \rho^k e^{-kt} d\Psi(t) \right) L_k^\alpha(x) e^{-\frac{x}{2}} \\ & = \lim_{N \to +\infty} \frac{1}{\Gamma(\alpha+1)} e^{-\frac{x}{2}} \int_0^\infty Z_{\alpha,x}^{(N)}(\rho e^{-t}) \; d\Psi(t) \label{limN} \end{align} where $$ Z_{\alpha,x}^{(N)}(w) = \sum_{k=0}^N L_k^\alpha(x) w^k $$ denotes a partial sum of the series for $Z_{\alpha,x}(w)$. Now, since $\rho<1$, that series converges uniformly in the interval $[0,\rho]$, so that given $\varepsilon>0$ there exists $N_0=N_0(\varepsilon)$ such that $$
|Z_{\alpha,x}(w)-Z_{\alpha,x}^{(N)}(w)| < \varepsilon \; \hbox{if} \; N \geq N_0.
$$
Using this estimate and hypothesis $(H1)$, we obtain \begin{align*}
& \left| \int_0^\infty Z_{\alpha,x}(\rho e^{-t}) \; d\Psi(t)
- \int_0^\infty Z_{\alpha,x}^{(N)}(\rho e^{-t}) \; d\Psi(t) \right| \\ & \leq \int_0^\infty
|Z_{\alpha,x}(\rho e^{-t})-Z_{\alpha,x}^{(N)}(\rho e^{-t})| \; |d\Psi|(t) \\ &\leq C \varepsilon \end{align*}
from which we conclude that \begin{equation} \label{limZN} \lim_{N \to +\infty} \int_0^\infty Z_{\alpha,x}^{(N)}(\rho e^{-t}) \; d\Psi(t) = \int_0^\infty Z_{\alpha,x}(\rho e^{-t}) \; d\Psi(t) \end{equation} and, replacing \eqref{limZN} into \eqref{limN} we obtain \eqref{rep-grho}.
(3) We have already observed that the integral in \eqref{rep-grho} is convergent for $\rho=1$. Moreover, the bound we have proved above for $Z_{\alpha,x}$, and $(H1)$ imply that we can apply the Lebesgue bounded convergence theorem to this integral (with a constant majorant function, which is integrable with respect to $|d\Psi|(t)$ by $(H1)$), to conclude that $g(x)=\lim_{\rho \to 1}g_\rho(x)$.
(4) Let $\delta$ be as in $(H2)$ and observe that \begin{align*} \Gamma(\alpha+1) g_\rho(x) &= e^{-\frac{x}{2}} \int_0^\infty Z_{\alpha,x}(\rho e^{-t}) d\Psi(t) \\ &= e^{-\frac{x}{2}} \int_0^\delta Z_{\alpha,x}(\rho e^{-t}) d\Psi(t) + e^{-\frac{x}{2}} \int_\delta^\infty Z_{\alpha,x}(\rho e^{-t}) d\Psi(t) \\ & = \underbrace{ e^{-\frac{x}{2}} \int_0^\delta Z_{\alpha,x}^\prime (\rho e^{-t}) \rho e^{-t} \Psi(t) \, dt}_{(i)} + \underbrace{ e^{-\frac{x}{2}} Z_{\alpha,x}(\rho e^{-\delta}) \Psi(\delta)}_{(ii)} \\ & \quad - \underbrace{e^{-\frac{x}{2}} Z_{\alpha,x}(\rho) \Psi(0)}_{(iii)} + \underbrace{ e^{-\frac{x}{2}} \int_\delta^\infty Z_{\alpha,x}(\rho e^{-t}) d\Psi(t) }_{(iv)} \end{align*}
Since $|Z_{\alpha,x}(\rho e^{-\delta})|\le (1-\rho e^{-\delta})^{-\alpha-1} \le C_\delta$, $\Psi(0)=0$, and $\sigma-\alpha-1<0$, clearly $(ii) \le C x^{\sigma-\alpha-1}$ and $(iii)$ vanishes.
To bound $(iv)$, notice that if $\omega= \rho e^{-t}$ and $t>\delta$, $0\le Z_{\alpha,x}(\omega) \le M_\delta$. Therefore, using $(H1)$ and the fact that $\sigma-\alpha-1<0$ we obtain \begin{equation*}
(iv) \le e^{-\frac{x}{2}} M_\delta \int_\delta^\infty |d \Psi|(t) \le C x^{\sigma-\alpha-1}. \end{equation*}
Now, observing that \begin{equation*} Z_{\alpha,x}^{\prime}(\omega) = (\alpha+1) Z_{\alpha+1,x}(\omega) - x Z_{\alpha+2,x}(\omega). \end{equation*} and using $(H2)$, we obtain \begin{align*} (i) & \le C e^{-\frac{x}{2}} \int_0^\delta Z_{\alpha+1,x}(\rho e^{-t}) \rho e^{-t} t^{\sigma} \, dt \\ & \quad + e^{-\frac{x}{2}} \int_0^\delta x Z_{\alpha+2,x}(\rho e^{-t}) \rho e^{-t} t^{\sigma} \, dt \end{align*} and the wanted estimates in this case follow by a direct application of the following lemma. \end{proof}
\begin{lemma} In the conditions of Lemma \ref{lemma-g}(4), if $$I(x) = e^{-\frac{x}{2}} \int_0^\delta Z_{\beta,x}(\rho e^{-t}) \rho e^{-t} t^{\sigma} \, dt,$$
and $\beta=\alpha+1$ or $\beta=\alpha+2$ then, $|I(x)| \le C x^{\sigma-\beta}$ with $C=C(\beta, \sigma, \delta, \rho_0)$. \end{lemma}
\begin{proof} Making the change of variables $w= \rho e^{-t}$, and recalling the definition of $Z_{\beta,x}(w)$ given by \eqref{generating-function}, we see that \begin{align*} I(x) &
= e^{-\frac{x}{2}} \int_{\rho e^{-\delta}}^\rho (1-w)^{-\beta-1} e^{-\frac{xw}{1-w}} \log^{\sigma}\left(\frac{\rho}{w}\right) \, dw \end{align*}
Making a further change of variables $u=\frac12 + \frac{w}{1-w}$ and setting $c_\delta = e^{-\delta}$ this is \begin{align} \nonumber I(x) & = \int_{\frac12 + \frac{c_\delta \rho}{1-c_\delta \rho}}^{\frac12 + \frac{\rho}{1-\rho}} \left(u+\frac12\right)^{\beta+1} e^{-ux} \left[ \log\left( \rho \frac{u+\frac12}{u-\frac12} \right)\right]^{\sigma} \frac{1}{\left( u+\frac12 \right)^2} \, du \\ & \le C \int_{\frac12 + \frac{c_\delta \rho}{1-c_\delta \rho}}^{\frac12 + \frac{\rho}{1-\rho}} u^{\beta-1} e^{-ux} \left( u - \frac12 \right)^{-\sigma} \underbrace{\left[u(\rho-1)+\frac12 (\rho+1)\right]^{\sigma}}_{:= \tilde u(\rho)} \, du \label{just} \end{align} where in \eqref{just} we have used that, since $$ \rho \frac{u+\frac12}{u-\frac12} = 1+\frac{u(\rho-1) + \frac12 (\rho+1)}{u-\frac12},$$ then $$\log\left( \rho \frac{u+\frac12}{u-\frac12} \right) \le \frac{u(\rho-1) + \frac12 (\rho+1)}{u-\frac12}.$$
Since $\frac12 < u\le \frac12 + \frac{\rho}{1-\rho}$, it is immediate that $$0 \le u(\rho-1)+ \frac12(\rho+1)\le \rho,$$
which, using that $\sigma \ge 0$, implies $ \tilde u(\rho)\le 1.$
Also, since
$$ u \ge \frac12 + \frac{c_\delta \rho_0}{1- c_\delta \rho_0} > \frac12 $$
we have that
$$
\left(u-\frac12 \right)^{-\sigma} \le C u^{-\sigma}
$$
where the constant depends only on $\rho_0$ and $\delta$.
Therefore, \begin{align} \nonumber I(x) & \le C \int_0^\infty u^{\beta-\sigma-1} e^{-ux} \, du \\ & = C x^{-\beta+\sigma} \int_0^\infty v^{\beta-\sigma-1} e^{-v} \, dv \label{r1} \\ & \le C x^{-\beta+\sigma} \label{r2} \end{align} where in \eqref{r1} we have made the change of variables $v=ux$, and in \eqref{r2} we have used that $\beta-\sigma-1> -1$ because $\beta=\alpha+1$ or $\beta=\alpha+2$.
\end{proof}
\subsection{Step 2: weighted estimates for the generalized Euclidean convolution}
Following the idea of the previous section, we define a regularized multiplier operator $M_{\alpha,m,\rho}$ by: \begin{equation} \label{Mamr}
M_{\alpha,m,\rho} f(x):= \sum_{k=0}^\infty m_k \rho^k a_{\alpha,k}(f) l_k^\alpha(x) \end{equation}
In this section we will obtain the estimate \begin{equation} \label{acotacion}
\left( \int_0^\infty |M_{\alpha,m,\rho}(f)|^q x^{\alpha-bq} \; dx \right)^{\frac{1}{q}} \leq C \left( \int_0^\infty |f|^p x^{\alpha+ap} \; dx \right)^{\frac{1}{p}} \end{equation} for $f \in L^p(\mathbb{R}_+, x^{\alpha +ap})$ with a constant $C$ independent of the regularization parameter $\rho$ and appropriate $a,b$ (see Theorem \ref{teo-convolucion}).
Indeed, the operator can be expressed as before as a twisted generalized convolution with kernel $G_{\rho}(y)=g_\rho(y^2)$ (in place of $G$), and by Lemma \ref{lemma-g}, if $F(y)=f(y^2)$, we have the pointwise bound
$$ |M_{\alpha,m,\rho} f(x^2)| \leq C (|F| \star |G_\rho|)(x) \leq C ( |F| \star |x^{2(\sigma-\alpha-1)}|)(x).$$ Therefore, \eqref{acotacion} will follow from a weighted inequality for the generalized Euclidean convolution with kernel $K_\sigma := x^{2(\sigma-\alpha-1)}$ (Theorem \ref{teo-convolucion}).
Once we have \eqref{acotacion}, Theorem \ref{main-result} will follow by a standard density argument. Indeed, if we consider the space $$ E= \{ f(x)=p(x) e^{-\frac{x}{2}} : 0 \leq x, \, p(x) \mbox{ a polynomial} \}, $$ any $f \in E$ has only a finite number of non-vanishing Laguerre coefficients. In that case, it is straightforward that $ M_{\alpha,m} f(x)$ is well-defined and: $$ M_{\alpha,m} f(x) = \lim_{\rho \to 1} M_{\alpha,m,\rho} f(x). $$
Then, by Fatou's lemma, $$
\int_0^\infty |M_{\alpha,m}(f)|^q x^{\alpha-bq} \; dx
\le \liminf_{\rho \to 1} \int_0^\infty |M_{\alpha,m,\rho}(f)|^q x^{\alpha-bq} \; dx $$ and, therefore, we obtain
$$ \left( \int_0^\infty |M_{\alpha,m}(f)|^q x^{\alpha-bq} \; dx \right)^{\frac{1}{q}} \leq
C \left( \int_0^\infty |f|^p x^{\alpha+ap} \; dx \right)^{\frac{1}{p}} \; \forall f \in E.$$
Since $E$ is dense in $L^p(\mathbb{R}_+,x^{\alpha+a p})$, we deduce that $M_{\alpha,m}$ can be extended to a bounded operator from $L^p(\mathbb{R}_+, x^{\alpha +ap})$ to $L^q(\mathbb{R}_+, x^{\alpha -bq})$. Moreover, the extended operator satisfies:
$$ M_{\alpha,m} f = \lim_{\rho \to 1} M_{\alpha,m,\rho} f.$$
This means that the formula \eqref{multiplier-operator} is valid for $f\in L^p(\mathbb{R}_+, x^{\alpha +ap})$ if the summation is interpreted in the Abel sense with convergence in $L^q(\mathbb{R}_+, x^{\alpha -bq})$. Therefore, to conclude the proof of Theorem \ref{main-result} in the case $\alpha\ge 0$ it is enough to see that the following result holds:
\begin{theorem} \label{teo-convolucion} Let $\alpha \ge 0, 0<\sigma<\alpha+1$ and $M_{\alpha,m,\rho}$ be given by \eqref{Mamr} such that it satisfies $(H1)$ and $(H2)$. Then, for all $f\in L^p(\mathbb{R}_+, x^{\alpha+ap})$, the following estimate holds \begin{equation*}
\|M_{\alpha,m,\rho}f(x^2) x^{-2b} \|_{L^q(\mathbb{R}_+, x^{2\alpha+1})}
\leq C \| f(x^2) x^{2a} \|_{L^p(\mathbb{R}_+, x^{2\alpha+1})} \end{equation*}
provided that \begin{equation*} a<\frac{\alpha+1}{p'} \quad , \quad b<\frac{\alpha+1}{q} \end{equation*} and that \begin{equation*}
\left(\frac{1}{q}-\frac{1}{p} \right)\left(\alpha +\frac12 \right) \le a+b \le \left( \frac{1}{q} -\frac{1}{p}\right) (\alpha+1) + \sigma \end{equation*} \end{theorem}
\begin{proof} First, notice that if condition $(H2)$ holds for a certain $0<\sigma_0<\alpha+1$, then it also holds for any $0<\sigma < \sigma_0$. Therefore, it suffices to prove the theorem in the case $a+b = (\frac{1}{q} - \frac{1}{p})(\alpha+1) + \sigma$ which in turn, by the conditions above, implies $\sigma \ge -\frac12 \left( \frac{1}{q}-\frac{1}{p}\right)$.
Let $K_\sigma (x) := x^{2(\sigma-\alpha-1)}$, $F(y)=f(y^2)$ and recall that
$$|M_{\alpha,m,\rho}f(x^2)| \le C (|F|\star |K_\sigma|)(x)$$
where $\star$ denotes the generalized euclidean convolution defined by \eqref{gen-eucl}.
We begin by computing the generalized Euclidean translation of $K_\sigma$ given by \eqref{gen-trans}. Making the change of variables $$ t=\cos \theta \Rightarrow dt = - \sin \theta \, d\theta = - \sqrt{1-t^2} \, d\theta $$ we see that $$ \tau_x^E K_\sigma(y)= C(\alpha) \int_{-1}^1 (x^2+y^2-2xyt)^{\sigma-\alpha-1} (1-t^2)^{\alpha-\frac12} \; dt .$$ Following the notation of our previous work \cite{ddd}, if we let $$ I_{\gamma,k}(r):= \int_{-1}^1 \frac{(1-t^2)^k}{(1-2rt+r^2)^{\frac{\gamma}{2}}}\; dt, $$ then $$ \tau_x^E K_\sigma(y)= C(\alpha) y^{2(\sigma-\alpha-1)} I_{2(1+\alpha-\sigma), \alpha-\frac12}\left( \frac{x}{y} \right)$$ and, therefore, \begin{align} \nonumber K_\sigma \star F(x) & = C \int_0^\infty y^{2(\sigma-\alpha-1)} I_{2(1+\alpha-\sigma),\alpha-\frac12}\left( \frac{x}{y} \right) F(y) y^{2\alpha+1} dy \\ & = C \int_0^\infty y^{2\sigma} I_{2(1+\alpha-\sigma),\alpha-\frac12}\left( \frac{x}{y} \right) F(y) \frac{dy}{y} \label{star} \end{align}
Now, \begin{align*}
\|M_{\alpha,m,\rho}f(x^2) x^{-2b} \|_{L^q(\mathbb{R}_+, x^{2\alpha+1})} & \le C \| [ K_\sigma \star F(x)] x^{-2b} \|_{L^q(\mathbb{R}_+,x^{2\alpha+1})}
\\ & = C \left( \int_0^\infty | K_\sigma \star F(x) x^{-2b} |^q x^{2\alpha+1} \; dx \right)^{\frac{1}{q}}
\\ & = C \left( \int_0^\infty \left| K_\sigma \star F(x) x^{\frac{2\alpha+2}{q}-2b}
\right|^q\; \frac{dx}{x} \right)^{\frac{1}{q}} \end{align*} but, by \eqref{star}, \begin{align*}
[ K_\sigma & \star F(x) ] x^{\frac{2\alpha+2}{q}-2b} \\ & = C \int_0^\infty y^{2\sigma} x^{\frac{2\alpha+2}{q}-2b} I_{2(1+\alpha-\sigma),\alpha-\frac12}\left( \frac{x}{y} \right) F(y) \frac{dy}{y} \\ & = C \int_0^\infty \left(\frac{y}{x}\right)^{-[\frac{2\alpha+2}{q}-2b]} I_{2(1+\alpha-\sigma),\alpha-\frac12}\left( \frac{x}{y} \right) F(y) y^{2\sigma+\frac{2\alpha+2}{q}-2b} \frac{dy}{y} \\ & = [ y^{\frac{2\alpha+2}{q}-2b} I_{2(1+\alpha-\sigma),\alpha-\frac12}(y) *
F(y) y^{2\sigma+\frac{2\alpha+2}{q}-2b} ](x)
\end{align*} where $*$ denotes the convolution in $\mathbb{R}_+$ with respect to the Haar measure $\frac{dx}{x}$.
Then, by Young's inequality: \begin{align*}
\|M_{\alpha,m,\rho} & f(x^2) x^{-2b} \|_{L^q(\mathbb{R}_+, x^{2\alpha+1})}
\\ & \leq C \| F(x) x^{2\sigma+\frac{2\alpha+2}{q}-2b}
\|_{L^p\left(\frac{dx}{x} \right)}
\| x^{\frac{2\alpha+2}{q}-2b} I_{2(1+\alpha-\sigma),\alpha-\frac12}(x)
\|_{L^{s,\infty}(\frac{dx}{x})} \end{align*}
provided that: \begin{equation} \label{youngl}
\frac{1}{p}+\frac{1}{s}=1+\frac{1}{q}. \end{equation}
Since we are assuming that $a+b=\left( \frac{1}{q}-\frac{1}{p}\right)(\alpha +1)+\sigma$, we have that \begin{align*}
\| F(x) x^{2\sigma+\frac{2\alpha+2}{q}-2b} \|_{L^p\left(\frac{dx}{x} \right)} & = \left( \int_0^\infty | F(x) x^{2\sigma+\frac{2\alpha+2}{q}-2b} |^p \; \frac{dx}{x}\right)^{\frac{1}{p}}
\\ & = \left( \int_0^\infty | F(x) x^{2a+\frac{2\alpha+2}{p}} |^p \; \frac{dx}{x}\right)^{\frac{1}{p}}
\\ & = \| F(x) x^{2a} \|_{L^p(\mathbb{R}_+,x^{2\alpha+1})}
\\ & = \| f(x^2) x^{2a} \|_{L^p(\mathbb{R}_+,x^{2\alpha+1})} \end{align*} whence, to conclude the proof of the theorem it suffices to see that
$$\| x^{\frac{2\alpha+2}{q}-2b} I_{2(1+\alpha-\sigma),\alpha-\frac12}(x)
\|_{L^{s,\infty}(\frac{dx}{x})} < +\infty. $$
For this purpose, we shall use the following lemma, which is a generalization of our previous result \cite[Lemma 4.2]{ddd}. The first part of the proof is the same as in that lemma, but it is included here for the sake of completeness:
\begin{lemma} Let $$ I_{\gamma,k}(r)= \int_{-1}^1 \frac{(1-t^2)^k}{(1-2rt+r^2)^{\frac{\gamma}{2}}}\; dt $$
Then, for $r \sim 1$ and $k >-1$, we have that
$$ |I_{\gamma,k}(r)| \leq
\left\{ \begin{array}{lclcc} C_{\gamma,k} & \mbox{if} & \gamma<2k+2 \\
C_{\gamma,k} \log\frac{1}{|1-r|}& \mbox{if} & \gamma=2k+2 \\
C_{\gamma,k} |1-r|^{-\gamma+2k+2} & \mbox{if} & \gamma > 2k+2\\ \end{array} \right.
$$
\end{lemma}
\begin{proof} Assume first that $k \in\mathbb{N}_0$ and $-\frac{\gamma}{2}+k > -1$. Then, $$ I_{\gamma,k}(1)\sim\int_{-1}^1 \frac{(1-t^2)^k}{(2-2t)^{\frac{\gamma}{2}}} \, dt \sim C \int_{-1}^1 \frac{(1-t)^k}{(1-t)^{\frac{\gamma}{2}}} \, dt. $$ Therefore, $I_{ \gamma,k}$ is bounded.
If $-\frac{\gamma}{2}+k =-1$, then $$ I_{\gamma,k}(r)\sim\int_{-1}^1 (1-t^2)^k \frac{d^k}{dt^k}\left\{(1-2rt+r^2)^{-\frac{\gamma}{2}+k}\right\}\, dt. $$ Integrating by parts $k$ times (the boundary terms vanish), $$
I_{\gamma,k}(r)\sim\left|\int_{-1}^1 \frac{d^k}{dt^k}\left\{(1-t^2)^k\right\}
(1-2rt+r^2)^{-\frac{\gamma}{2}+k}\, dt\right|. $$ But $\frac{d^k}{dt^k}\left\{(1-t^2)^k\right\}$ is a polynomial of degree $k$ and therefore is bounded in $[-1,1]$ (in fact, it is up to a constant the classical Legendre polynomial). Therefore, $$ I_{ \gamma,k}(r) \sim \frac{1}{2r} \log\left(\frac{1+r}{1-r}\right)^2 \le C \log
\frac{1}{|1-r|}. $$
Finally, if $-\frac{\gamma}{2}+k <-1$, then integrating by parts as before, $$ I_{\gamma,k}(r)\le C_k\int_{-1}^{1}(1-2rt+r^2)^{-\frac{\gamma}{2}+k}\, dt. $$
Thus, $$
I_{ \gamma,k}(r) \sim (1-2rt+r^2)^{-\frac{\gamma}{2}+k+1}|_{t=-1}^{t=1}
\le C_{k,\gamma} |1-r|^{-\gamma+2k+2}. $$
This finishes the proof if $k\in \mathbb{N}_0$.
Consider now the case $k=m+\nu$ with $m\in \mathbb{N}_0$ and $0<\nu<1$. Then, \begin{align*} I_{\gamma, k}(r) & = \int_{-1}^1 (1-t^2)^{\nu(m+1)+(1-\nu)m} (1-2rt + r^2)^{-\frac{\nu\gamma}{2}-\frac{(1-\nu)\gamma}{2}} \, dt \\ & \le I_{\gamma, m+1}^\nu(r) I_{\gamma, m}^{1-\nu}(r), \end{align*} where in the last line we have used H\"older's inequality with exponent $\frac{1}{\nu}$.
If $\gamma<2m+2$, by the previous calculation $$
|I_{\gamma,k}(r)|\le C. $$
If $\gamma > 2(m+1)+2$, then, by the previous calculation \begin{align*}
|I_{\gamma,k}(r)| &\le C |1-r|^{\nu(-\gamma+2(m+1)+2)} |1-r|^{(1-\nu)(-\gamma+2m+2)}
\\ &= C |1-r|^{-\gamma+2k+2}. \end{align*}
For the case $2m+2 < \gamma < 2m+4 $, notice that we can always assume $r<1$, since $I_{ \gamma,k}(r) = r^{-\gamma} I_{\gamma,k}(r^{-1})$. Then, as before, we can prove that $$ I_{\gamma,k}'(r) \le \gamma (1-r) I_{\gamma+2,k}(r) $$ But now we are in the case $\gamma + 2 > 2(m+1)+2$ and, thus, $$
|I_{\gamma+2,k}(r)| \le C |1-r|^{-\gamma+2k}. $$
Therefore, if $-\gamma+2k+1\neq -1$ \begin{align*} I_{\gamma,k}(r) &= \int_0^r I'_{\gamma,k}(s) \, ds \\ & \le C \int_0^r (1-s)^{-\gamma+2k+1} \, ds
\\ & \le C |1-r|^{-\gamma+2k+2}, \end{align*} and if $-\gamma+2k+1= -1$ \begin{align*} I_{\gamma,k}(r) &\le C \int_0^r \frac{1}{1-s} \, ds
\\ &= C \log \frac{1}{|1-r|}. \end{align*}
It remains to check the case $k \in (-1, 0)$. For this purpose, write $$ I_{ \gamma,k}(r) = \underbrace{\int_{-1}^0 \frac{(1-t^2)^{k}}{(1-2rt+r^2)^\frac{\gamma}{2}} \, dt}_{(i)} + \underbrace{\int_0^1 \frac{(1-t^2)^{k}}{(1-2rt+r^2)^\frac{\gamma}{2}} \, dt}_{(ii)} $$
Since $\gamma>0$ and $k+1>0$, $$
(i) \le \int_{-1}^0 (1+t)^{k} \, dt = C $$ \begin{align*}
(ii) & \le \int_0^1 \frac{(1-t)^{k}}{(1-2rt+r^2)^{\frac{\gamma}{2}}} \, dt \\ & = -\frac{1}{k+1} \int_0^1 \frac{\frac{d}{dt}[(1-t)^{k+1}]}{(1-2rt+r^2)^{\frac{\gamma}{2}}} \, dt
\\ & = \frac{2r}{k+1} \int_0^1 \frac{(1-t)^{k+1}}{(1-2rt+r^2)^{\frac{\gamma}{2}+1}} \, dt
\\ & \le C I_{\gamma+2, k+1}(r). \end{align*} and, since now $k+1>0$, $I_{\gamma, k}$ can be bounded as before. This concludes the proof of the lemma. \end{proof}
Now we are ready to conclude the proof of Theorem \ref{teo-convolucion}. Remember that we need to see that \begin{equation}
\| x^{\frac{2\alpha+2}{q}-2b} I_{2(1+\alpha-\sigma),\alpha-\frac12}(x)
\|_{L^{s,\infty}(\frac{dx}{x})} < +\infty. \label{norma} \end{equation}
Using the previous lemma, it is clear that when $x\to 1$ and $2(\alpha+1-\sigma)\le 2(\alpha - \frac12)$ the norm in \eqref{norma} is bounded.
In the case $2(\alpha+1-\sigma)> 2(\alpha - \frac12)$ (that is, $\sigma<\frac32$), the integrability condition is $$-s \left[ 2(\alpha+1-\sigma)-2 \left(\alpha-\frac12\right)-2 \right] \ge -1.$$ But, using \eqref{youngl}, we see that this is equivalent to $\sigma \ge -\frac12 \left( \frac{1}{q}-\frac{1}{p}\right)$, which holds by our assumption on $a+b$.
When $x=0$, the integrability condition is $$ \frac{2\alpha+2}{q}-2b > 0 $$ which holds because $b<\frac{\alpha+1}{q}$.
Finally, when $x\to\infty$, since $I_{2(\alpha+1-\sigma), \alpha-\frac12}(x)\sim x^{-2(\alpha+1-\sigma)}$, the condition we need to fulfill is $$ \frac{2\alpha+2}{q}-2b-2(\alpha+1-\sigma)<0 $$ which, by our assumption on $a+b$, is equivalent to $a<\frac{\alpha+1}{p'}$. \end{proof}
\subsection{Extension to the case $-1<\alpha <0$ and end of proof of Theorem \ref{main-result}}
As before, we may assume that $a+b= \left(\frac{1}{q}-\frac{1}{p}\right)(\alpha+1) +\sigma.$ In this case, to extend our result to the case $-1<\alpha <0$ let us consider $-1<\alpha<\beta$, where $\beta\ge 0$, and use a transplantation result from \cite{Garrigos}, that we recall here as a lemma for the sake of completeness:
\begin{lemma}[\cite{Garrigos}, Corollary 6.19 (ii)] \label{lema-garrigos} Let $1<q<\infty$. Given $\alpha, \beta >-1$, we define the transplantation operator $$ \mathbb{T}_\beta^\alpha f = \sum_{k=0}^\infty \left( \int_0^\infty f(y) l_k^\alpha(y) y^{\alpha} \, dy \right) l_k^\beta. $$ Then, if $\sigma_0 \in \mathbb{R}$ and $\sigma_1 = \sigma_0 + (\alpha -\beta)(\frac{1}{p} - \frac12)$, $\mathbb{T}_\beta^\alpha : L_{\sigma_0}^q (\mathbb{R}_+, x^\alpha \, dx) \to L_{\sigma_1}^q (\mathbb{R}_+, x^\beta \, dx)$ and $\mathbb{T}_\alpha^\beta : L_{\sigma_1}^q (\mathbb{R}_+, x^\beta \, dx) \to L_{\sigma_0}^q (\mathbb{R}_+, x^\alpha \, dx)$ are bounded operators if and only if $$ -\frac{1+\alpha}{q}<\sigma_0 <\frac{1+\alpha}{q^\prime}. $$ \end{lemma}
Using this lemma, we can write \begin{align*}
\|M_{\alpha,m}f |x|^{-b}\|_{L^q(\mathbb{R}_+,x^{\alpha} \, dx)} & = \|\mathbb{T}_\alpha^\beta (M_{\beta,m}(\mathbb{T}_\beta^\alpha f)) |x|^{-b}\|_{L^q(\mathbb{R}_+, x^\alpha \, dx)}
\\ & \le C \|M_{\alpha,m, \beta}(\mathbb{T}_\beta^\alpha f) |x|^{-\tilde b}\|_{L^q(\mathbb{R}_+, x^{\beta} \, dx)} \end{align*} provided that \begin{equation} -1<\alpha<\beta \end{equation} \begin{equation} \label{btilde} -\tilde b = -b +(\alpha-\beta)\left(\frac{1}{q}-\frac12\right), \end{equation} and \begin{equation} \label{balfaq} -\frac{1+\alpha}{q}<-b<\frac{1+\alpha}{q'}, \end{equation} and, using Theorem \ref{teo-convolucion} for $M_{\beta,m}$ with $\beta \ge 0$, $$
\|M_{\alpha,m, \beta}(\mathbb{T}_\beta^\alpha f) |x|^{-\tilde b}\|_{L^q(\mathbb{R}_+, x^{\beta} \, dx)}
\le C\|\mathbb{T}_\beta^\alpha f |x|^{\tilde a}\|_{L^p(\mathbb{R}_+, x^{\beta} \, dx)} $$ provided that \begin{equation*} 0<\sigma<\beta+1 \quad, \quad \tilde a < \frac{\beta+1}{p'} \quad , \quad \tilde b <\frac{\beta+1}{q}, \end{equation*} \begin{equation} \label{desigtilde} \left(\frac{1}{q}-\frac{1}{p}\right)\left(\beta+\frac12\right) \le \tilde a +\tilde b \end{equation} and that \begin{equation} \label{atildebtilde}
\tilde a +\tilde b = \left(\frac{1}{q}-\frac{1}{p}\right)(\beta+1) + \sigma. \end{equation}
Finally, using Lemma \ref{lema-garrigos} again, we obtain \begin{equation}
\|M_{\alpha,m}f |x|^{-b}\|_{L^q(\mathbb{R}_+,x^{\alpha} \, dx)} \le C \|f |x|^a\|_{L^p(\mathbb{R}_+, x^\alpha \, dx)} \end{equation} provided that \begin{equation} \label{atilde2} \tilde a=a+(\alpha-\beta)\left(\frac{1}{p}-\frac{1}{2}\right) \end{equation} and that \begin{equation} \label{aalfap} -\frac{1+\alpha}{p}<a<\frac{1+\alpha}{p'}. \end{equation}
Now, replacing \eqref{btilde} and \eqref{atilde2} into \eqref{desigtilde} and \eqref{atildebtilde} we obtain \begin{equation*}
\left(\frac{1}{q}-\frac{1}{p}\right)\left(\alpha+\frac12\right) \le a + b
\end{equation*}
and
\begin{equation}
\label{res}
\quad a+b= \left(\frac{1}{q}-\frac{1}{p}\right)(\alpha+1) + \sigma. \end{equation} To conclude the proof of the theorem we need to see that the restrictions $a> -\frac{1+\alpha}{p}$ in \eqref{aalfap} and $b>-\frac{1+\alpha}{q'}$ in \eqref{balfaq} are redundant. Indeed, the first one follows from \eqref{res} and $b<\frac{\alpha+1}{q}$, while the second one follows from \eqref{res} and $a<\frac{\alpha+1}{p'}$.
\section{Multipliers for related Laguerre systems}
In this section we show how the results for multipliers for expansions in the Laguerre system $\{l^\alpha_k\}_{k\ge 0}$ can be extended to other related systems, using a transference result from I. Abu-Falah, R. A. Mac\'ias, C. Segovia and J. L. Torrea \cite{AMST}. To this end, for fixed $\alpha>-1$, we consider the orthonormal systems: \begin{enumerate} \item $\{\mathcal{L}_k^\alpha(y) := y^{\frac{\alpha}{2}} l_k^\alpha(y)\}_{k\ge 0}$ in $L^2(\mathbb{R}_+)$ \item $\{\varphi_k^\alpha(y) := \sqrt 2 y^{\alpha+\frac12} l_k^\alpha(y^2)\}_{k\ge 0}$ in $L^2(\mathbb{R}_+)$ \item $\{\psi_k^\alpha (y) := \sqrt 2 l_k^\alpha(y^2)\}_{k\ge 0}$ in $L^2(\mathbb{R}_+, y^{2\alpha+1} \, dy)$ \end{enumerate} which are eigenvectors of certain modifications of the Laguerre differential operator \eqref{laguerre}.
Then, following the notations in \cite{AMST}, if we let $W^\alpha, V,$ and $Z^\alpha$ be the operators defined by \begin{equation*} W^\alpha f(y)=y^{-\frac{\alpha}{2}} f(y), \quad Vf(y)= (2y)^\frac12 f(y^2) , \quad \text{and} \quad Z^\alpha f(y)= \sqrt 2 y^{-\alpha} f(y^2) \end{equation*} it is immediate that $W^\alpha \mathcal{L}_k^\alpha = l_k^\alpha$, $V \mathcal{L}_k^\alpha = \varphi_k^\alpha$, and $Z^\alpha \mathcal{L}_k^\alpha = \psi_k^\alpha$. Moreover, for $f$ a measurable function with domain in $\mathbb{R}_+$, the following result holds:
\begin{lemma}[\cite{AMST}, Lemma 3.22] \label{lema-cambio} Let $\alpha>-1$. \begin{enumerate}
\item Let $\delta = \rho -\alpha(\frac{p}{2}-1)$, then $\|W^\alpha f \|_{L^p(\mathbb{R}_+, y^{\rho+\alpha})}= \|f \|_{L^p(\mathbb{R}_+, y^\delta)}$
\item Let $2\delta = \gamma +\frac{p}{2}-1$, then $\|Vf\|_{L^p(\mathbb{R}_+, y^\gamma)}= 2^{\frac12 -\frac{1}{p}}\|f \|_{L^p(\mathbb{R}_+,y^\delta)}$
\item Let $\delta = \frac{\eta}{2} -\alpha(\frac{p}{2}- 1)$, then $\|Z^\alpha f \|_{L^p(\mathbb{R}_+, y^{\eta+2\alpha+1})}= 2^{\frac12 -\frac{1}{p}}\|f\|_{L^p(\mathbb{R}_+, y^\delta)}$ \end{enumerate} \end{lemma}
In analogy to what we have done for the system $\{l_k^\alpha\}_{k\ge 0}$, we can also define multipliers of Laplace transform type for the orthonormal systems listed above. For instance, in the case of the system $\{\mathcal{L}_k^\alpha\}_{k\ge 0}$, if \begin{equation*}
f(x) \sim \sum_{k=0}^\infty b_{\alpha,k}(f) \mathcal{L}_k^\alpha(x), \quad b_{\alpha,k}(f)= \int_0^\infty f(x) \mathcal{L}_k^\alpha(x) dx \end{equation*} given a bounded sequence $\{m_k\}_{k\ge 0}$ we may define the multiplier \begin{equation*}
M_{\alpha,m}^\mathcal{L} f(x) \sim \sum_{k=0}^\infty b_{\alpha,k}(f) m_k \mathcal{L}_k^\alpha(x),
\end{equation*} and we say that $M_{\alpha,m}^\mathcal{L}$ is a multiplier of Laplace transform type if $m_k=m(k)$ is given by \eqref{Laplace.transform} for some real-valued function $\Psi(t)$. Similar definitions can be given for the systems $\{\varphi_k^\alpha\}_{k\ge 0}$ and $\{\psi_k^\alpha\}_{k\ge 0}$; we will denote the corresponding multipliers by $M_{\alpha,m}^\varphi$ and $ M_{\alpha,m}^\psi$. Then, the following analogue of Theorem \ref{main-result} holds:
\begin{theorem} \label{teo31} Assume that $\alpha>-1$. \begin{enumerate} \item If $M_{\alpha,m}^{\mathcal{L}}$ is a multiplier of Laplace transform type for the system $\{\mathcal{L}_k^\alpha\}_{k\ge 0}$ such that $(H1)$ and $(H2)$ hold, then \begin{equation*}
\|M_{\alpha,m}^{\mathcal{L}} f\|_{L^q(\mathbb{R}_+, x^{-Bq})} \le C \|f\|_{L^p(\mathbb{R}_+, x^{Ap})} \end{equation*} provided that \begin{equation*} 1<p \le q<\infty \quad, \quad A< \frac{\alpha}{2}+\frac{1}{p'} \quad , \quad B<\frac{\alpha}{2}+\frac{1}{q}, \end{equation*}
and that \begin{equation*}
\left( \frac{1}{q}-\frac{1}{p}\right)(\alpha+1)<A+B \le \sigma \left(\frac{1}{q}-\frac{1}{p} \right). \end{equation*}
\item If $M_{\alpha,m}^\varphi$ is a multiplier of Laplace transform type for the system $\{\varphi_k^\alpha\}_{k\ge 0}$ such that $(H1)$ and $(H2)$ hold, then \begin{equation*}
\|M_{\alpha,m}^\varphi f\|_{L^q(\mathbb{R}_+, x^{-Dq})} \le C \|f\|_{L^p(\mathbb{R}_+, x^{Cp})} \end{equation*} provided that \begin{equation*} 1<p \le q<\infty \quad, \quad C< \alpha + \frac{1}{p'}+\frac12 \quad , \quad D< \alpha+\frac{1}{q}+\frac12 \end{equation*} and that \begin{equation*}
\left(\frac{1}{q}-\frac{1}{p}\right)(2\alpha+1) <C+D\le (2\sigma-1) \left(\frac{1}{q}-\frac{1}{p}\right). \end{equation*}
\item If $M_{\alpha,m}^\psi$ is a multiplier of Laplace transform type for the system $\{\psi_k^\alpha\}_{k\ge 0}$ such that $(H1)$ and $(H2)$ hold, then \begin{equation*}
\|M_{\alpha,m}^\psi f\|_{L^q(\mathbb{R}_+, x^{-Fq})} \le C \|f\|_{L^p(\mathbb{R}_+, x^{Ep})} \end{equation*} provided that \begin{equation*} 1<p \le q<\infty \quad, \quad E< 2\alpha + 1+ \frac{1}{p'} \quad , \quad F< \frac{1}{q} \end{equation*} and that \begin{equation*}
\left(\frac{1}{q}-\frac{1}{p}\right)(2\alpha+1) <E+F\le (2\sigma-1) \left(\frac{1}{q}-\frac{1}{p}\right). \end{equation*}
\end{enumerate} \end{theorem} \begin{proof} We explain how to prove (1), since the other cases are analogous. From the fact that $W^\alpha \mathcal{L}_k^\alpha =l_k^\alpha$ and by Lemma \ref{lema-cambio}(1), we have the following diagram
\begin{equation*} \begin{array}{cccc}
& L^p(\mathbb{R}_+, x^{ap+\alpha}) &\stackrel{M_{\alpha,m}}\longrightarrow & L^q(\mathbb{R}_+ , x^{-bq+\alpha})
\\ & (W^\alpha)^{-1} \Big\downarrow & & \Big\uparrow W^\alpha
\\ & L^p(\mathbb{R}_+, x^{Ap}) & \stackrel{M_{\alpha,m}^\mathcal{L}}\longrightarrow & L^q(\mathbb{R}_+ , x^{-Bq}) \end{array} \end{equation*} provided that \begin{equation} \label{condicionAa} Ap = ap - \alpha \left( \frac{p}{2}-1 \right) \quad \mbox{and} \quad -Bq = -bq - \alpha\left( \frac{q}{2}-1\right). \end{equation} and $M_{\alpha, m} = W^\alpha M_{\alpha,m}^\mathcal{L} (W^\alpha)^{-1}$. Therefore, the identities \eqref{condicionAa} together with the conditions on $a,b$ given by Theorem \ref{main-result} imply the desired result. \end{proof}
\section{Proof of Theorem \ref{teorema-hermite}}
In this section we exploit the well-known relation between Hermite and Laguerre polynomials to obtain an analogous result to that of Section 2 in the Hermite case. Indeed, recalling that \begin{align*} H_{2k}(x) & = (-1)^k 2^{2k} k! L_k^{-\frac12} (x^2) \\ H_{2k+1}(x) & = (-1)^k 2^{2k+1} k! x L_k^{\frac12} (x^2) \end{align*} it is immediate that \begin{align*} h_{2k}(x) &= l^{-1/2}_k(x^2) \\ h_{2k+1}(x) & = x l^{\frac12}_k(x^2) \end{align*}
It is then natural to decompose $f=f_0+f_1$ where $$ f_0(x)=\frac{f(x)+f(-x)}{2} \quad , \quad f_1(x)= \frac{f(x)-f(-x)}{2} $$ and, clearly, when $k=2j$, if we let $g_0(y)= f_0(\sqrt{y})$ we obtain: $$ c_k(f) = \langle f_0, h_k \rangle = 2 \int_0^\infty f_0(x) l^{-\frac12}_j(x^2) \; dx = a_{-\frac12,j}(g_0) $$ while if $k=2j+1$, and we let $g_1(y)= \frac{1}{\sqrt{y}} f_1(\sqrt{y})$ we have:
$$ c_k(f) = \langle f_1, h_k \rangle = 2 \int_0^\infty f_1(x) x l^{\frac12}_j(x^2) \; dx = a_{\frac12,j}(g_1) $$
Then, \begin{align*} M_{H,m} f (x) & = \sum_{j=0}^\infty m_{2j} a_{-\frac12,j}(g_0 ) l^{-\frac12}_j(x^2) + \sum_{j=0}^\infty m_{2j+1} a_{\frac12,j}( g_1) x l^{\frac12}_j(x^2) \\ & = M_{-\frac12,m_0} g_0(x^2) + x M_{\frac12, m_1} g_1(x^2) \end{align*} where $(m_0)_k=m_{2k}$ and $(m_1)_k = m_{2k+1}$.
To apply Theorem \ref{main-result} to this decomposition, we need to check first that $m_0$ and $m_1$ are Laplace-Stieltjes functions of certain functions $\Psi_0$ and $\Psi_1$. Indeed, notice that $m_{2k}= \mathfrak{L}\Psi_0(k)$ where $$\Psi_0(u)=\frac12 \Psi(\frac{u}{2})$$
and $m_{2k+1}= \mathfrak{L} \Psi_1(k)$ where $$\Psi_1(u)= \frac12 \int_0^\frac{u}{2} e^{-\tau} d\Psi(\tau).$$
It is also easy to see that $\Psi_0$ satisfies the hypotheses of Theorem \ref{main-result} for $\alpha=-\frac12$ whereas $\Psi_1$ satisfies the hypotheses for $\alpha=\frac12$ (in this case condition $(H2)$ follows after an integration by parts).
Then, \begin{align}
\nonumber \|M_{H,m}f |x|^{-b}\|_{L^q(\mathbb{R})} & = \left( \int_{\mathbb{R}} |M_{H,m}f(x)|^q |x|^{-bq} \, dx\right)^{\frac{1}{q}}
\\ & = C \left( \int_{\mathbb{R}} \left|M_{-\frac12,m_0} g_0(x^2) + x M_{\frac12, m_1} g_1(x^2) \right|^q |x|^{-bq} \, dx\right)^{\frac{1}{q}} \label{Mh} \end{align}
Using Minkowski's inequality and making the change of variables $y=x^2, dx= \frac12 y^{-\frac12} \, dy$, we see that \begin{align*}
\eqref{Mh} &\sim \left( \int \left|M_{-\frac12,m_0} g_0(y)\right|^q |y|^{-\frac{bq}{2}-\frac12} \, dy \right)^{\frac{1}{q}} + \left( \int \left|M_{\frac12, m_1} g_1(y)\right|^q |y|^{\frac{(-b+1)q}{2}-\frac12} \, dy\right)^{\frac{1}{q}}
\\ & = \| M_{-\frac12,m_0} g_0(y) |y|^{-\frac{b}{2}}\|_{L^q(\mathbb{R}, x^{-\frac12} \, dx)} + \|M_{\frac12, m_1} g_1(y) |y|^{\frac{-b+1}{2}-\frac{1}{q}} \|_{L^q(\mathbb{R}, x^\frac12 \, dx)}
\\ &\le C \|g_0(y) |y|^{\tilde a}\|_{L^p(\mathbb{R}, x^{-\frac12} \, dx)} + C \|g_1(y) |y|^{\hat a}\|_{L^p(\mathbb{R}, x^{\frac12} \, dx)} \end{align*} where the last inequality follows from Theorem \ref{main-result} provided that:
\begin{equation*}
\tilde a < \frac{1}{2p'} \quad , \quad b < \frac{1}{q}
\end{equation*}
\begin{equation}
\label{resc1} 0\le \tilde a +\frac{b}{2} \le \frac12 \left(\frac{1}{q}-\frac{1}{p} \right)+\sigma
\end{equation}
\begin{equation*}
\hat a < \frac{3}{2p'}
\end{equation*}
and
\begin{equation}
\label{resc2} \left( \frac{1}{q}-\frac{1}{p} \right) \le \hat a + \frac{1}{q} -\frac{1-b}{2} \le \frac32 \left( \frac{1}{q}-\frac{1}{p}\right)+ \sigma. \end{equation} Therefore, \begin{align*}
\|M_{H,m}f |x|^{-b}\|_{L^q(\mathbb{R})} & \le C \left( \int |g_0(x)|^p |x|^{\tilde a p-\frac12} \, dx \right)^{\frac{1}{p}} + C \left( \int |g_1(x)|^p |x|^{\hat a p + \frac12} \, dx\right)^{\frac{1}{p}}
\\ &= C \left( \int |f_0(\sqrt{x})|^p |x|^{\tilde a p-\frac12} \, dx \right)^{\frac{1}{p}} + C \left( \int |f_1(\sqrt{x})|^p |x|^{\hat a p + \frac12-\frac{p}{2}} \, dx\right)^{\frac{1}{p}}
\\ & = C \left( \int |f_0(x)|^p |x|^{2\tilde a p} \, dx \right)^{\frac{1}{p}} + C \left( \int |f_1(x)|^p |x|^{2\hat a p + 2-p} \, dx\right)^{\frac{1}{p}}
\\ & \le C \|f(x) |x|^a\|_{L^p(\mathbb{R})} \end{align*} provided that \begin{equation} \label{aatildeahat} a = 2\tilde a = 2 \hat a + \frac{2}{p}-1. \end{equation}
Therefore, by \eqref{aatildeahat} and the conditions on $ \tilde a, \hat a$, there must hold $$ a<\frac{1}{p'} $$ while, by \eqref{aatildeahat}, \eqref{resc1} and \eqref{resc2} are equivalent to $$ 0 \le a+b \le \frac{1}{q} - \frac{1}{p}+ 2\sigma. $$
\begin{remark} It follows from the proof of Theorem \ref{teorema-hermite} that a better result holds if the function $f$ is odd. \end{remark}
\section{Examples and further remarks}
First, we should point out that it is clear that, since a Stieltjes integral of a continuous function with respect to a function of bounded variation can be thought of as an integral with respect to the corresponding Lebesgue-Stieltjes measure, we could equivalently have formulated all our results in terms of integrals with respect to signed Borel measures in $\mathbb{R}_+$. However, we have found it convenient to use the framework of Stieltjes integrals since many of the classical references on Laplace transforms are written in that framework (for instance \cite{Widder}), and leave the details of a possible restatement of the theorems in the case of regular Borel measures to the reader.
We also recall that the Laplace-Stieltjes transform contains as particular cases both the ordinary Laplace transform of (locally integrable) functions (when $\Psi(t)$ is absolutely continuous), and Dirichlet series (see below). In particular, if $\Psi$ is absolutely continuous and $\phi(t)=\Psi^\prime(t)$ (defined almost everywhere), the assumptions $(H1)$ and $(H2)$ of Theorem \ref{main-result} can be replaced by: \begin{itemize}
\item[(H1ac)] $$ \int_0^\infty |\phi(x)| \; dx < +\infty \quad \hbox{i.e.} \; \phi \in L^1(\mathbb{R}_+) $$ \item[(H2ac)] there exist $\delta>0$, $0 < \sigma < \alpha+1$, and $C>0$ such that
$$ \left|\int_0^t \phi(x) \; dx \right| \leq C t^{\sigma} \quad \hbox{for} \; 0 < t \leq \delta. $$ \end{itemize} In particular, assumption $(H2ac)$ holds if $\phi(t)=O(t^{\sigma-1})$ when $t \to 0$.
As we have already mentioned in the introduction, B. Wr\'obel \cite[Corollary 2.7]{W} has recently proved that Laplace type multipliers for the system $\{\varphi_k^\alpha\}_{k\ge 0}$ are bounded on $L^p(\mathbb{R}^d,\omega)$, $1<p<\infty$, for all $\omega \in A_p$ and $\alpha \in (\{-\frac12\} \cup [\frac12, \infty))^d$. In the case of power weights in one dimension this means that $\omega(x)=|x|^\beta$ must satisfy $-1<\beta<p-1$, while taking $p=q$ and letting the weight be $|x|^\beta$ on both sides, Theorem \ref{teo31}(2) can easily be seen to imply $-1-p\left(\alpha+\frac12 \right)<\beta<p-1+ p\left(\alpha +\frac12\right)$.
Also, weighted estimates had been obtained before for the case of some particular operators for the system $\{l_k^\alpha\}_{k\ge 0}$. Indeed, recall that one of the main examples of the kind of multipliers we are considering is the Laguerre fractional integral introduced in \cite{GST}, which corresponds to the choice $m_k=(k+1)^{-\sigma}$.
In \cite[Theorem 4.2]{Nowak-Stempak}, A. Nowak and K. Stempak considered multi-dimensional Laguerre expansions and used a slightly different definition of the fractional integral operator, given by the negative powers of the differential operator \eqref{laguerre}.
As they point out, their theorem contains as a special case the result of \cite{GST} (in the one dimensional case). To see that both operators are indeed equivalent, they rely on a deep multiplier theorem \cite[Theorem 1.1]{Stempak-Trebels}.
Instead, we can see that Theorem \ref{main-result} is applicable to both definitions by choosing: $$ m_k=(k+c)^{-\sigma}, \quad \phi(t)= \frac{1}{\Gamma(\sigma)} t^{\sigma-1} e^{-ct} \quad (c>0) $$
The case $c=1$ corresponds to the definition in \cite{GST}, whereas the choice $c=\frac{\alpha+1}{2}$ corresponds to the definition in \cite{Nowak-Stempak}. Therefore, Theorem \ref{main-result}, applied to these choices, coincides in the first case with the result of \cite[Theorem 1]{GT} (which is an improvement of \cite[Theorem 3.1]{GST}) and improves in the second case the one-dimensional result of \cite[Theorem 4.2]{Nowak-Stempak}.
The same choice of $m_k$ and $\phi$ in Theorem \ref{teorema-hermite} gives a two-weight estimate for the Hermite fractional integral, which corresponds to the one-dimensional version of \cite[Theorem 2.5]{Nowak-Stempak}.
Another interesting example is the operator $(L^2+I)^{-\frac{\alpha}{2}}$, where $L$ is given by \eqref{laguerre}. In this case, Theorem \ref{main-result} with hypotheses $(H1ac)$ and $(H2ac)$ instead of $(H1)$ and $(H2)$ applies with $\alpha= \sigma$ and $$ \phi(t)= \frac{1}{C_\alpha} e^{-\frac{\alpha+1}{2}t}J_{\frac{\alpha-1}{2}} (t) t^{\frac{\alpha-1}{2}} $$ since, by \cite[formula 5, p. 386]{Watson}, $$ \int_0^\infty e^{-st} J_{\frac{\alpha -1}{2}}(t) t^{\frac{\alpha-1}{2}} \, dt = C_\alpha (s^2+1)^{-\frac{\alpha}{2}} $$ and, when $t\to 0$, $J_{\frac{\alpha-1}{2}} (t) t^{\frac{\alpha-1}{2}} \sim t^{\alpha-1}$.
A further example is obtained by choosing $\Psi(t)=e^{-s_0t} H(t-\tau)$ with $s_0=\frac{\alpha+1}{2}$, where $H$ is the Heaviside unit step function: $$ H(t) = \left\{ \begin{array}{rcl} 1 & \hbox{if} & t \geq 0 \\ 0 & \hbox{if} & t < 0 \\ \end{array} \right. $$ and we see that Theorem \ref{main-result} is applicable to the Heat diffusion semigroup (considered for instance in \cite{Stempak-heat-diffusion} and \cite{MST}) $$ M_{\tau} = e^{-\tau L} $$
associated to the operator $L$ for any $\sigma>0$. More generally, the same conclusion holds for $$\Psi(t)= \sum_{n=1}^\infty a_n e^{-s_0t} H(t-\tau_n ) $$
provided that the Dirichlet series $$ F(s)= \sum_{n=1}^\infty a_n e^{-\tau_n s}, \quad 0 < \tau_1 < \tau_2 < \ldots $$
converges absolutely for $s=s_0$ (which corresponds to hypothesis $(H1)$).
As a final comment, we remark that finding a function $\Psi$ of bounded variation such that $m_k = \mathfrak{L} \Psi(k)$ holds (see \eqref{Laplace.transform}) is equivalent to solving the classical Hausdorff moment problem (see \cite[Chapter III]{Widder}).
{\bf Acknowledgements.} We wish to thank Professor K. Stempak for bringing to our attention the connection between the generalized euclidean convolution and our previous results on fractional integrals of radially symmetric functions, and for helpful comments and corrections.
We are also indebted to Professor J. L. Torrea for pointing to us that our results could be transferred from one Laguerre system to the others, and for giving us reference \cite{AMST}.
\end{document}
\begin{document}
\title{Regularized Riesz energies of submanifolds}
\author{Jun O'Hara\footnote{The first author was supported by JSPS KAKENHI Grant Number 25610014.} and Gil Solanes\footnote{The second author is a Serra H\'unter Fellow and was supported by FEDER-MINECO grants MTM2012-34834,IEDI-2015-00634, PGC2018-095998-B-I00.}}
\date{}
\maketitle
\begin{abstract}Given a closed submanifold, or a compact regular domain, in euclidean space, we consider the Riesz energy defined as the double integral of some power of the distance between pairs of points. When this integral diverges, we compare two different regularization techniques (Hadamard's finite part and analytic continuation), and show that they give essentially the same result. We prove that some of these energies are invariant under M\"obius transformations, thus giving a generalization to higher dimensions of the M\"obius energy of knots. \end{abstract}
{\small {\it Keywords:} Riesz potential, energy, Hadamard regularization, analytic continuation, fractional perimeter.}
{\small 2010 {\it Mathematics Subject Classification:} 53C65, 53C40, 46T30. }
\section{Introduction}
Let $M\subset\mathbb R^n $ be either a smooth compact submanifold, or a compact regular domain with smooth boundary. We are interested in the {\em Riesz $z$-energy} \begin{equation}\label{def_Riesz_energy}
E_M(z)=\int_{M\times M}|x-y|^z\,dxdy, \end{equation}where $dx,dy$ denote the volume element of $M$. This integral is well-defined if $z>-\dim M$ and diverges otherwise. In the latter case we apply two techniques from the theory of generalized functions to regularize the divergent integral: {\em Hadamard's finite part} and {\em analytic continuation}. After showing that these two procedures give essentially the same result, we study the properties of the energies thus obtained. In particular, we show that $E_M(-2m)$ is M\"obius invariant if $M$ is a closed submanifold of odd dimension $m$, and also if $M$ is a regular domain in an even dimensional Euclidean space $\mathbb R^m$.
To put our results in perspective let us review some background. The first author introduced the {\em energy of a knot} $K$ in \cite{O1}, with the aim to produce a canonical representative (the energy minimizer) in each knot type. This energy is given by
\begin{eqnarray} E(K)&=&
\displaystyle \lim_{\varepsilon\to 0^+}\left(\int_{K\times K\setminus \Delta_\varepsilon}\frac{dxdy}{|x-y|^2}-\frac{2L(K)}\varepsilon\right),\label{def_energy_knot} \end{eqnarray}
where \begin{equation*}
\Delta_\varepsilon=\{(x,y)\in\mathbb{R}^n\times\mathbb{R}^n\,:\,|x-y|\le\varepsilon\}. \end{equation*}
This can be viewed as Hadamard's finite part of the divergent integral $\int_{K\times K}|x-y|^{-2}\,dxdy$. Indeed, Hadamard's regularization can be carried out as follows. First one restricts the integration to the complement of some $\varepsilon$-neighborhood of the set where the integrand blows up. Then one expands the result in a Laurent series in $\varepsilon$ and finally takes the constant term in the series as the {\em finite part} of the integral. Hadamard's finite part can be considered as a generalization of Cauchy's principal value; e.g. they coincide for $\int_{-1}^1\frac{1}{x} dx$ (cf. also \cite[eq. II.2.29]{schwartz}).
Another approach to $E(K)$ was used by Brylinski \cite{B} who defined the {\em beta function} $B_K(z)$ of a knot $K$ by means of a different regularization method. First, given a knot (closed curve) $K\subset\mathbb R^3$, he considered the complex function
\[B_K(z)= \int_{K\times K}|x-y|^z\,dxdy,\qquad z\in\mathbb C\] which is holomorphic on the domain $\Re\mathfrak{e} \, z>-1$. He then extended this function analytically to a meromorphic function on the whole complex plane with simple poles at $z=-1,-3,-5,\dots$. Finally, Brylinski showed that $B_K(-2)=E(K)$.
It turns out that $E(K)$ is invariant under M\"obius transformations (cf. \cite{FHW}), and it is thus often called {\em M\"obius energy}. This motivated the search of similar functionals on higher dimensional submanifolds (see \cite{AS, KS}). For closed surfaces $M$ in $\mathbb{R}^3$, Auckly and Sadun (\cite{AS}) defined the following functional
\begin{eqnarray} E_{AS}(M)&=&
\lim_{\varepsilon\to0^+}\left(\int_{M\times M\setminus\Delta_\varepsilon}|x-y|^{-4}dxdy-\frac{\pi A(M)}{\varepsilon^2}+\frac{\pi\log\varepsilon}8\int_M(\kappa_1-\kappa_2)^2dx\right) \label{Hadamard_reg_surface_energy} \\
&&+\frac{\pi}{16}\int_M(\kappa_1-\kappa_2)^2\log (\kappa_1-\kappa_2)^2dx +\frac{\pi^2}2\chi(M), \nonumber \end{eqnarray} where $\kappa_1$ and $\kappa_2$ are principal curvatures of $M$ at $x$, and $\chi(M)$ is the Euler characteristic.
The right hand side of \eqref{Hadamard_reg_surface_energy} is Hadamard's finite part of $\int_{M\times M} |x-y|^{-4}\,dxdy$. The additional term $(\pi/16)\int_M(\kappa_1-\kappa_2)^2\log(\kappa_1-\kappa_2)^2\,dx$ was added to make the resulting energy M\"obius invariant, but it is not the only possible choice for this purpose, as was pointed out in \cite{AS}.
On the other hand, Fuller and Vemuri (\cite{FV}) generalized Brylinski's beta function to closed surfaces and closed submanifolds of Euclidean space in general. For a closed surface $M$, they extended the domain of $B_M(z)=\int_M|x-y|^zdxdy$ by analytic continuation to get a meromorphic function on the whole complex plane with simple poles at $z=-2,-4,-6,\dots$. They showed moreover that the residues of these poles are expressible as integrals of contractions of the second fundamental form of $M$. As for M\"obius invariance, while the integrand $|y-x|^zdxdy$ is a M\"obius invariant $2m$-form for $z=-2m$, it was unclear whether the regularized integral $B_M(-2m)$ would be invariant under M\"obius transformations.
In this paper we begin by showing that Hadamard's finite part of the Riesz energy $E_M(z)$ coincides with the meromorphic function $B_M(z)$ where this function is defined. At the poles, Hadamard's finite part exists and equals the beta function $B_M(z)$ with the pole {\em removed} (see \eqref{Riesz_energy_Hadamard=analytic_continuation}). This extends Brylinski's result to any exponent $z$ and to general dimensions. We also give a simple alternative description of the residues of $B_M(z)$ in terms of the volumes of extrinsic spheres of $M$.
Finally, we show that when $m=\dim M$ is odd, the energy $E_M(-2m)=B_M(-2m)$ is invariant under M\"obius transformations. This gives the desired generalization of the M\"obius energy in the case of odd dimensional submanifolds. For even dimensional submanifolds, we conjecture that none of the energies $E_M(z)$ is M\"obius invariant. We prove this conjecture in the case of two-dimensional surfaces.
The results mentioned so far deal with closed submanifolds, but it makes sense to consider \eqref{def_Riesz_energy} also in the case where $M$ is a compact submanifold with boundary. In particular, we are interested in the case where $M=\Omega$ is a compact domain with smooth boundary. For convex domains, the Riesz energy has been considered in \cite{HR} in connection with the statistics of electromagnetic wave propagation inside a domain. Besides, the Riesz energy is closely related to the so-called {\em fractional perimeter} of the domain (cf. e.g. \cite{CRS,L}).
In the last part of the paper, we use the techniques mentioned before to regularize the Riesz energy of a smooth regular domain $\Omega\subset\mathbb R^n$. In particular we obtain a meromorphic function $B_\Omega(z)$ which at the same time is an analytic continuation of the Riesz energy and of the fractional perimeter (except for a sign). We compute some residues of $B_\Omega(z)$ and give some explicit expressions for small dimensions. Finally, we prove that $B_\Omega(-2n)$ is invariant under M\"obius transformations if (and only if) the dimension $n$ is even. This generalizes the results obtained by the authors in the planar case (cf. \cite{OS}).
The present version is an integration of the original version, which appeared in Math. Nachr. 291 (2018), 1356-1373, and the errata that gives a corrected proof of Theorem \ref{thm4.11}.
Acknowledgement: The authors would like to thank Professors Yoshihiro Sawano and Kazushi Yositomi for helpful suggestions. Thanks are also due to the anonymous referees of Mathematische Nachrichten for useful comments.
\section{Regularization of divergent integrals}
Let us recall two techniques in the theory of generalized functions (or distributions) that are used in the regularization of divergent integrals. The reader is referred to \cite{schwartz,GS} for more details.
Consider the integral \begin{equation}\label{example_regularization} \int_0^dt^z\,dt,\qquad z\in\mathbb{C} \end{equation} where $d$ is a positive constant. It converges for $\Re\mathfrak{e} \, z>-1$.
\begin{enumerate} \item[(i)] For a small positive number $\varepsilon$ we have \[ \int_\varepsilon^dt^z\,dt=\left\{ \begin{array}{lr} \displaystyle \frac{d^{z+1}}{z+1}-\frac{\varepsilon^{z+1}}{z+1}, & \qquad z\ne-1,\\[4mm] \displaystyle \log d-\log\varepsilon, &z=-1. \end{array} \right. \] {\em Hadamard's finite part} of \eqref{example_regularization} is defined for every $z\in\mathbb{C}$ as \[ \textrm{Pf.}\int_0^dt^z\,dt =\left\{ \begin{array}{ll} \displaystyle \lim_{\varepsilon\to0^+}\left(\int_\varepsilon^dt^z\,dt+\frac{\varepsilon^{z+1}}{z+1}\right)=\frac{d^{z+1}}{z+1} & \>\>\> (z\ne-1),\\[4mm] \displaystyle \lim_{\varepsilon\to0^+}\left(\int_\varepsilon^d\frac{dt}{t}+\log\varepsilon\right)=\log d& \>\>\> (z=-1). \end{array} \right. \]
\item[(ii)] Consider the complex function \[
f(z)=\int_0^dt^z\,dt, \] which is well defined and holomorphic on $\{z\in\mathbb{C}\colon\Re\mathfrak{e} \, z>-1\}$. It extends by analytic continuation to the meromorphic function $f(z)=d^{z+1}/(z+1)$ on the whole complex plane with a simple pole at $z=-1$ with residue $\mbox{\rm Res}(f,-1)=1$. \end{enumerate}
The relation between these two methods is given by \begin{equation}\label{zneq1} f(z)=\mathrm{Pf.}\int_0^d t^z dt\qquad z\neq -1 \end{equation}
\begin{equation}\label{pole_remove} \lim_{z\to-1}\left(f(z)-\frac1 {z+1}\right) =\lim_{z\to -1}\frac{d^{z+1}-1}{z+1}=\log d =\textrm{Pf.}\int_0^dt^{-1}\,dt. \end{equation}
More generally, let $\varphi(t)$ be a smooth function, and consider \[F(z)=\int_0^d t^z\varphi(t)dt\] which is well defined if $\Re\mathfrak{e} \, z> -1$. For any natural number $k$, the previous integral can be extended to $\Re\mathfrak{e} \, z>-k-1$ as follows. Put \begin{eqnarray} \varphi_{k-1}(t)&=&\displaystyle \sum_{j=0}^{k-1}\frac{\varphi^{(j)}(0)}{j!}t^j, \nonumber \\[4mm] h_{z,k}(t)&=&t^{z}\varphi(t)-t^{z}\varphi_{k-1}(t) \displaystyle =t^z\left[\varphi(t)-\varphi(0)-\varphi'(0)t-\dots -\frac{\varphi^{(k-1)}(0)}{(k-1)!}\,t^{k-1}\right]. \nonumber \end{eqnarray} Since $h_{z,k}(t)$ can be estimated by $t^{z+k}$, it is integrable on $[0,d]$ when $\Re\mathfrak{e} \, z>-k-1$. Therefore, the regularization can be reduced to that of \begin{equation}\label{regularizationpart} \int_0^dt^z\varphi_{k-1}(t)\,dt=\sum_{j=1}^{k}\int_0^d\frac{\varphi^{(j-1)}(0)}{(j-1)!}\,t^{z+j-1}\,dt. \end{equation}
By setting that the finite part of a convergent integral equals the integral itself, and by linearity, we arrive at the following definition of Hadamard's finite part (cf. \cite[(II,2;26)]{schwartz}) \begin{align} \textrm{Pf.}\int_0^d t^{z}\varphi(t)dt&=\int_0^dh_{z,k}(t)dt+\textrm{Pf.}\int_0^d t^{z}\varphi_{k-1}(t)dt \nonumber \\ &=\lim_{\varepsilon\to 0}\left[\int_\varepsilon^dt^{z}\varphi(t)dt+\sum_{j=1}^{k}\frac{\varphi^{(j-1)}(0)}{(j-1)!}\frac{\varepsilon^{z+j}}{z+j}\right]. \label{Pf} \end{align} If $z$ is a negative integer then $\varepsilon^0/0$ appears above and is to be replaced by $\log\varepsilon$.
On the other hand, since $\int_0^d h_{z,k}(t)dt$ is holomorphic on $z$, equality \eqref{regularizationpart} shows that the integral $F(z)$ can be analytically continued to a meromorphic function on the complex plane which we denote again by $F(z)$. On each half-plane $\Re\mathfrak{e} \, z>-k-1$, it is given by \begin{equation}F(z) = \int_0^d h_{z,k}(t)dt
+\sum_{j=1}^{k}\frac{\varphi^{(j-1)}(0)\,d^{z+j}}{(j-1)!\,(z+j)}. \label{GS} \end{equation} \noindent This function has (possible) poles at negative integers. The corresponding residues are \begin{equation}\label{basic_residues}\mbox{\rm Res}(F,-j)=\frac{\varphi^{(j-1)}(0)}{(j-1)!}.\end{equation}
The relation between these two regularizations can be obtained from \eqref{zneq1}, \eqref{pole_remove}, and \eqref{regularizationpart}. When $z$ is not a negative integer, \begin{equation}
\label{residue_continuation}\textrm{Pf.}\int_0^dt^z\varphi(t)\,dt=F(z). \end{equation} When $z$ is a negative integer $-k$,
\begin{equation}\label{Hadamard=analytic_continuation} \textrm{Pf.}\int_0^d{t^{-k}\varphi(t)}\,dt=\lim_{z\to-k}\left(F(z)-\frac{\varphi^{(k-1)}(0)}{(k-1)!(z+k)}\right). \end{equation}
Note that a $\log$ term appears in \eqref{Pf} exactly when $F(z)$ has a pole at $z$.
Finally, given an integrable compactly supported function $\varphi\colon[0,\infty)\to\mathbb R$ which is smooth in some interval $[0,d]$, one defines \[
\mathrm{Pf.}\int_0^\infty t^z\varphi(t)dt= \mathrm{Pf.}\int_0^d t^z\varphi(t)dt+\int_d^\infty t^z \varphi(t) dt. \] In particular, the integral $\int_0^\infty t^z\varphi(t)dt$, which converges for $\Re\mathfrak{e} \, z>-1$, can be extended to a meromorphic function.
\section{Riesz energies of closed submanifolds}
Let $M$ be a closed (i.e. compact without boundary) submanifold of dimension $m$ in $\mathbb{R}^n$. We are interested in the following integral \begin{equation}\label{riesz}
\int_{M\times M}|x-y|^z\,dxdy \end{equation} which is absolutely convergent for $\Re\mathfrak{e} \, z>-m$. It was shown, first by Brylinski in the case $m=1$, and then by Fuller and Vemuri for general $m$, that \eqref{riesz} can be extended by analytic continuation to a meromorphic function $B_M(z)$ on the complex plane, called the {\em beta function of $M$}, with possible poles at $z=-m-2j$ where $j\in\mathbb Z, j\geq 0$. It was shown that the residues of these poles are expressible as integrals of complete contractions of the second fundamental form of $M$. Here we provide an alternative, somewhat more concrete, interpretation of these residues.
Furthermore, we compare the analytic continuation $B_M(z)$ with the following alternative regularization of \eqref{riesz}, based on Hadamard's finite part regularization. When the integral \eqref{riesz} diverges, one can expand \begin{equation*}\label{Riesz_energy_Delta_e}
\int_{M\times M\setminus\Delta_\varepsilon}|x-y|^z\,dxdy \end{equation*} in a Laurent series (possibly with a $\log$ term) of $\varepsilon$. The constant term in the series will be called {\em Hadamard's finite part} of \eqref{def_Riesz_energy}. In case $M=K$ is a knot, the first author used this method to introduce the so-called {\em energy of a knot} (or {\em M\"obius energy}) $E(K)$ (see \eqref{def_energy_knot} or \cite{O1}). It was shown by Brylinski that $E(K)=B_K(-2)$. Here we show similar relations for the other values of the beta function, not only in the case of knots, but also for submanifolds of any dimension.
Furthermore, we show that, for odd dimensional submanifolds, taking $z=-2m$ gives an energy that is invariant under M\"obius transformations. This generalizes the fact that the energy of knots $E(K)$ is M\"obius invariant (cf. \cite{FHW}).
\subsection{Analytic continuation} Our approach to Riesz energies depends on a careful analysis of the following functions. Define $\psi_{M,x}(t)$ by \begin{equation*}\label{def_varphi} \psi_{M,x}(t)=\textrm{vol}(M\cap B_t(x)),\qquad t\geq 0, \end{equation*} where $B_t(x)$ is the ball of center $x$ and radius $t$. The sets $M\cap B_t(x)$ are usually called {\em extrinsic balls} (cf. e.g. \cite{KP}).
\begin{proposition}\label{even_odd} (i) There exists $d>0$ such that, for each $x\in M$ the function \[ \psi_{M,x}(t)=\mathrm{vol}(M\cap B_t(x)), \qquad 0\leq t< d \] extends to a smooth function $\varphi(t)$ defined for $t\in(-d,d)$ such that $\varphi(-t)=(-1)^m\varphi(t)$ and $\varphi^{(i)}(0)=0$ for $i<m$.
(ii) More generally, given a smooth function $\rho$ on $M\times M$, the same conclusion as above holds for \[
\psi_{\rho,x}(t)=\int_{M\cap B_t(x)} \rho(x,y)dy. \] Moreover, if $(\rho_i)_{i=1}^\infty$ is a sequence of smooth functions with derivatives of all orders converging uniformly to $0$, then $\psi_{\rho_i,x}$ and its derivatives also converge uniformly to $0$. \end{proposition}
\begin{proof} $(i)$ It is clear that $\psi_{M,x}(t)$ is smooth at any $t\neq 0$ such that $\partial B_t(x)$ is transverse to $M$. Since $M$ is compact, there is some $d>0$ such that $\partial B_t(x)$ is transverse to $M$ for every $x\in M$ and any $t\in(0,d)$. Given $x\in M$, take $\varphi(t)=\psi_{M,x}(t)$ for $t\geq 0$, and $\varphi(t)=(-1)^m\psi_{M,x}(-t)$ for $t<0$. We need to show that $\varphi(t)$ is smooth at $t=0$.
Let us assume for simplicity that $x=0$. Let $\phi\colon\mathbb{R}^m\to M$ be a coordinate chart with $\phi(0)=x=0$, and let \[
\overline \phi(u,r)=\left(\frac{r}{|r|}\frac{\phi(ru)}{\|\phi(ru)\|},\frac{r}{|r|}\|\phi(ru)\|\right),\qquad u\in S^{m-1}, r\in\mathbb R\setminus\{0\}. \] It is shown in \cite{blowup}, that $\overline\phi$ extends to a smooth map $\overline\phi\colon S^{m-1}\times\mathbb{R}\to S^{n-1}\times \mathbb{R}$. For each $u\in S^{m-1}$ let $g_u\colon\mathbb{R}\to \mathbb{R}$ be the second coordinate of $\overline\phi(u,\cdot)$. Since $g_u'(0)\neq 0$, the function $g_u$ has a smooth inverse in a neighborhood of $r=0$. Now, for small $t\geq 0$ one has \[ \psi_{M,x}(t) =\int_{S^{m-1}}\int_0^{g_u^{-1}(t)}\mathrm{jac}(\phi)_{r\cdot u}r^{m-1}\,dr du. \] The right hand side defines a smooth function of $t$ in a neighborhood of $t=0$, and it coincides with $\varphi(t)=(-1)^m\psi_{M,x}(-t)$ for small negative $t$, since $g_{u}(-r)=-g_{-u}(r)$, and thus $g_{u}^{-1}(-t)=-g_{-u}^{-1}(t)$. Therefore $\varphi(t)$ is smooth at $t=0$ and hence on $(-d,d)$. Moreover, if $1\leq j\leq m-1$, then \[
\frac{d^j}{dt^j}\int_0^{g_u^{-1}(t)}\mathrm{jac}(\phi)_{r\cdot u}r^{m-1}dr=\eta_j(t)(g_u^{-1}(t))^{m-j} \] for some smooth function $\eta_j$. Hence, $\varphi^{(j)}(0)=0$.
$(ii)$ The same arguments as in the previous case give \[
\psi_{\rho,x}(t)=\int_{S^{m-1}}\int_0^{g_u^{-1}(t)}\rho(x,\phi(r\cdot u))\mathrm{jac}(\phi)_{r\cdot u}r^{m-1}\,dr du. \] Hence, the previous proof applies to $\psi_{\rho,x}(t)$ as well. The last part of the statement follows by uniform convergence. \end{proof}
Notice that, by the previous proof, for $\psi_{\rho,x}(t)$ to be smooth around $t=0$ it is in fact enough that $\rho(x,y)|y-x|^{m-1}$ be smooth. Note also that $\psi_{M,x}(t)$ may not be globally smooth, as the case $M=S^{n-1}$ shows.
In the following we denote by $b_{M,k}(x)$ the coefficients of the Taylor series of $\psi_{M,x}(t)$ around $t=0$; i.e. \[
b_{M,k}(x)=\left.\frac{1}{k!}\frac{d^{k}}{dt^k}\right|_{t=0} \psi_{M,x}(t).\]
\begin{corollary}\label{coro_odd} If $k-m$ is odd, then $b_{M,k}(x)=0$. \end{corollary} For small $k$, the coefficients $b_{M,k}(x)$ were given in \cite{KP}. For instance, if $M$ is a knot (closed curve) in $\mathbb{R}^n$, then \begin{eqnarray*} \psi_{M,x}(t)&=&\displaystyle 2t+\frac{\kappa^2}{12}t^3+O(t^5), \label{varphi_knot} \end{eqnarray*}where $\kappa$ is the curvature of $M$ at $x$. If $M$ is a closed surface in $\mathbb{R}^n$, then \begin{eqnarray}\label{kp_surfaces}
\psi_{M,x}(t)&=&\displaystyle \pi t^2+\frac\pi{32}(2\|B\|^2-\|H\|^2)t^4+O(t^6), \label{varphi_surface}
\end{eqnarray}where $\|B\|$ denotes the Hilbert-Schmidt norm of the second fundamental form $B(X,Y)=(\nabla_XY)^\bot$, and $H=\mathrm{tr}B$ is the mean-curvature vector. In particular, for $n=3$ \[
2\|B\|^2-\|H\|^2=(\kappa_1-\kappa_2)^2 \] where $\kappa_1,\kappa_2$ denote the principal curvatures of $M$ at $x$.
Let \[
\psi_M(t)=\int_{M}\psi_{M,x}(t)\,dx=\int_{(M\times M)\cap \Delta_t} dxdy, \] and more generally, given an integrable function $\rho$ on $M\times M$, let \[
\psi_\rho(t)=\int_{M}\psi_{\rho,x}(t)\,dx=\int_{(M\times M)\cap \Delta_t}\rho(x,y) dxdy. \]
\begin{proposition}\label{prop_coarea} The function $\psi_\rho(t)$ has derivative almost everywhere and \[
\psi_\rho(t)=\int_0^t\psi'_\rho(s)ds. \] For $\Re\mathfrak{e} \, z> -m$, \begin{equation} \label{coarea_rho}
\int_{M\times M}|x-y|^z\rho(x,y)dxdy =\int_0^\infty t^z\psi_{\rho}'(t)dt. \end{equation} \end{proposition}
\begin{proof}
By the coarea formula applied to the function $u(x,y)=|y-x|$ defined on $M\times M$ we have \begin{align}
\psi_{\rho}(s)&=\int_0^s \left(\int_{u^{-1}(t)}\frac{\rho(x,y)}{|\nabla u(x,y)|}d\mu \right)dt \nonumber \\
\int_{M\times M}|x-y|^z\rho(x,y)dxdy&=\int_0^\infty t^z\left(\int_{u^{-1}(t)}\frac{\rho(x,y)}{|\nabla u(x,y)|} d\mu \right)dt, \nonumber \end{align} where $\nabla$ stands for the gradient in $M\times M$, and $d\mu$ denotes the Riemannian volume element on $u^{-1}(t)$. Note that, by Sard's theorem, $u^{-1}(t)$ is smooth for almost every $t$, and the inner integrals define a function which is continuous at almost every $t$. Differentiating the first equation with respect to $s$ yields the result. \end{proof}
We deduce that \eqref{riesz} extends to a meromorphic function $B_M(z)$ on the complex plane with possible poles at $z=-m-2j$ with $j\in\mathbb Z,j\geq 0$, as shown first by Brylinski and Fuller-Vemuri. The following description of the residues of these poles is new. \begin{proposition}\label{residues_beta} The meromorphic function $B_M(z)$ has the following residue at $z=-m-2j$ \begin{equation}\label{residue_Riesz_energy} R_M(-m-2j)=(m+2j)\int_M b_{M,m+2j}(x)dx, \hspace{1cm}j\in\mathbb Z,\ j\ge 0. \end{equation} If $\int_M b_{M,k}(x)dx =0$, then $B_M(z)$ has no pole at $z=-k$. \end{proposition} \proof This follows at once from \eqref{basic_residues}.\endproof
\begin{example}\label{spheres} \rm The beta function of the $n$-dimensional unit sphere is given by (cf. \cite{B,FV}) \[ B_{S^n}(z)=2^{z+n-1}o_{n-1}o_{n}B\left(\frac{z+n}2,\frac{n}2\right), \] where $o_k$ is the volume of the unit $k$-sphere in $\mathbb{R}^{k+1}$, and $B(x,y)$ is Euler's beta function. It follows that if $n$ is odd then $B_{S^n}$ has infinitely many poles at $z=-n, -n-2, -n-4, \dots$, and if $n$ is even then $B_{S^n}$ has exactly $n/2$ poles at $z=-n, -n-2, \dots, -2n+2$. \end{example}
\subsection{Hadamard's finite part} Next we compare $B_M(z)$ with Hadamard's regularization.
\begin{definition}\rm For any $z\in\mathbb C$ we define \[ \E{M}{z}
=\mathrm{Pf.}\int_{M\times M}|x-y|^z dydx=\mathrm{Pf.}\int_0^\infty t^z\psi_{M}'(t)dt \]and call it the {\em regularized $z$-energy} of $M$. \end{definition}
Equivalently, if $z$ is not a negative integer, and $\Re\mathfrak{e} \, z> -k-1$ for some $k\in\mathbb Z$, then by \eqref{Pf} \begin{equation*}\label{Hadamard_E_M}
E_M(z)=\lim_{\varepsilon\to0^+}\left(\int_{M\times M\setminus\Delta_\varepsilon}{|x-y|^{z}}{dxdy} -\sum_{j=1}^{k}\frac{j}{(-z-j)\varepsilon^{-z-j}}\int_M b_{M,j}(x)dx \right). \end{equation*}
For $z=-k\in\mathbb Z$, by \eqref{Pf} and the explanation after that, \begin{equation*}\label{Hadamard_E_M2} \begin{array}{l} \displaystyle E_M(-k)=
\lim_{\varepsilon\to0^+}\left(\int_{M\times M\setminus\Delta_\varepsilon}\frac{dxdy}{|x-y|^{k}}
-\sum_{j=1}^{k-1}\frac{j}{(k-j)\varepsilon^{k-j}}\int_M b_{M,j}(x)dx +k\log\varepsilon\int_M b_{M,k}(x)dx\right). \end{array} \end{equation*}
Remark that $b_{M,k}(x)=0$ if $k<m$ by Proposition \ref{even_odd}.
The relation between Hadamard's finite part and regularization by analytic continuation is given next. \begin{proposition}\mbox{} \begin{enumerate} \item[(i)] $B_M(z)$ can have poles only at $z=-m-2i$ with $i\in\mathbb Z, i\geq0$. \item[(ii)] Away from the poles of $B_M(z)$, analytic continuation and Hadamard's finite part coincide: \[ \E{M}{z} =B_M(z),\qquad z\neq -m,-m-2,-m-4,\ldots \] and the same holds for $z=-k$ if $\int_M b_{M,k}(x)dx=0$.
\item[(iii)] If $B_M(z)$ has a pole at $z=-k$, then\begin{equation}\label{Riesz_energy_Hadamard=analytic_continuation} \E{M}{-k}=\lim_{z\to-k}\left(B_M(z) -\frac{1}{z+k}R_M(-k)\right)=\lim_{z\to-k}\left(B_M(z) -\frac{k}{z+k}\int_M b_{M,k}(x)dx\right). \end{equation} \end{enumerate} \end{proposition} \begin{proof}
(i) follows from Corollary \ref{coro_odd} and Proposition \ref{residues_beta}. (ii) is a consequence of \eqref{residue_continuation}. (iii) follows from \eqref{Hadamard=analytic_continuation} and Proposition \ref{residues_beta}. \end{proof}
Note in particular that $B_M(-k)=E_M(-k)$ if $k-m$ is odd. Next we summarize the situation for knots and surfaces. \begin{proposition} Let $K\subset\mathbb R^n$ be a smooth knot (i.e. closed curve). Then Brylinski's beta function has simple poles at negative odd integers. The residues at $z=-1,-3$ are $$R_K(-1)=2 L(K),\qquad R_K(-3)=\frac14\int_K\kappa(x)^2dx.$$ The regularized $z$-energies for $z=-1,-3$ are given by \[ \begin{array}{rcl} \displaystyle \E{K}{-1}
&=&\displaystyle \lim_{\varepsilon\to0^+}\left(\int_{K\times K\setminus\Delta_\varepsilon}|x-y|^{-1}dxdy+2L(K)\log\varepsilon\right) =\displaystyle \lim_{z\to-1}\left(B_K(z)-\frac{2L(K)}{z+1}\right),\\[4mm]
\displaystyle \E{K}{-3}
&=&\displaystyle \lim_{\varepsilon\to0^+}\left(\int_{K\times K\setminus\Delta_\varepsilon}|x-y|^{-3}dxdy-\frac{L(K)}{\varepsilon^2}+\frac{\log\varepsilon}4\int_K\kappa(x)^2dx\right)\\[4mm]
&=&\displaystyle \lim_{z\to-3}\left(B_K(z)-\frac{1}{4(z+3)}\int_K\kappa(x)^2dx\right). \end{array} \]
For $\Re\mathfrak{e} \, z> -5, z\neq -1,-3$, it is \begin{align*}
E_K(z)&=\lim_{\varepsilon\to 0^+}\left(\int_{K\times K\setminus\Delta_\varepsilon}|x-y|^{z}dxdy-\frac{2L(K)}{(-z-1)\varepsilon^{-z-1}}-\frac{1}{4(-z-3)\varepsilon^{-z-3}}\int_K \kappa(x)^2dx\right)\\&=B_K(z). \end{align*} \end{proposition}
The residues of $B_K(z)$ for $z=-1,-3,-5$ were computed by Brylinski in \cite{B} (here we took the opportunity to correct the coefficient of $R_K(-3)$ given there) for knots in $\mathbb{R}^3$.
A similar Hadamard regularization was used in \cite{O1,O2} to define energy functionals on knots. The approach in this paper is slightly different since our regularized integrals are with respect to $t$, the extrinsic distance, whereas intrinsic distance (i.e. arc-length) was used in \cite{O1,O2}. Still, the resulting energies are closely related. For instance, when $K$ has length 1, the functionals $E^{(\alpha)}(K)$ in \cite[Section 2.2]{O2} are related to $E_K(-\alpha)$ by \[
E^{(\alpha)}(K)=E_K(-\alpha)+\frac{2^\alpha}{\alpha-1},\qquad 1<\alpha<3 \] \[ E^{(3)}(K) =E_K(-3)+\left(\frac{\log 2}{4}+\frac{1}{12}\right)\int_K\kappa^2(x)dx+4. \] The first equality follows from equation (2.17) and Remark 2.2.1 of \cite{O2}, and the second one follows from the last formula in Remark 2.2.1. When $\alpha>3$, the relation is more complicated but can be obtained in a similar way.
\begin{proposition} Let $M\subset \mathbb R^3$ be a smooth closed surface. The beta function $B_M(z)$ has simple poles at negative even integers. The residues at $z=-2,-4$ are \[
R_M(-2)=2\pi A(M),\qquad R_M(-4)=\frac\pi 8\int_M(\kappa_1-\kappa_2)^2dx. \] The regularized $z$-energy for $z=-2,-4$ is given by \[ \begin{array}{rcl} \E{M}{-2}
&=&\displaystyle \lim_{\varepsilon\to0^+}\left(\int_{M\times M\setminus\Delta_\varepsilon}|x-y|^{-2}dxdy+2\pi A(M)\log\varepsilon\right) =\displaystyle \lim_{z\to-2}\left(B_M(z)-\frac{2\pi A(M)}{z+2}\right),\\[4mm]
\E{M}{-4}
&=&\displaystyle \lim_{\varepsilon\to0^+}\left(\int_{M\times M\setminus\Delta_\varepsilon}|x-y|^{-4}dxdy-\frac{\pi A(M)}{\varepsilon^2}+\frac{\pi\log\varepsilon}8\int_M(\kappa_1-\kappa_2)^2dx\right)\\[4mm] &=&\displaystyle \lim_{z\to-4}\left(B_M(z)-\frac\pi{8(z+4)}\int_M(\kappa_1-\kappa_2)^2dx\right). \end{array} \] For $\Re\mathfrak{e} \, z> -6, z\neq -2,-4$, they are \begin{align*} \E{M}{z}
&=\displaystyle \lim_{\varepsilon\to0^+}\left(\int_{M\times M\setminus\Delta_\varepsilon}|x-y|^{z}dxdy-\frac{2\pi A(M)}{(-z-2)\varepsilon^{-z-2}}-\frac{\pi}{8(-z-4)\varepsilon^{-z-4}}\int_M(\kappa_1-\kappa_2)^2dx\right)\\
&=\displaystyle B_M(z).\end{align*} \end{proposition} The residues of $B_M(z)$ for $z=-2,-4,-6$ were obtained by Fuller and Vemuri in \cite{FV} (we corrected the coefficient of $R_M(-4)$). Using their expression for $R_M(-6)$ one can extend the previous formulas to $\Re\mathfrak{e} \, z> -8$.
\subsection{M\"obius invariance} Here we study the M\"obius invariance of these energies. We first discuss scale-invariance.
\begin{lemma}\label{lemma_residue_homothety} Under a homothety $x\mapsto cx$ $(c>0)$, the residues of the beta function behave as follows \begin{equation*}\label{residue_homothety} R_{cM}(-k)=c^{2m-k} R_M(-k) \hspace{0.7cm}(k\ge m). \end{equation*} \end{lemma}
\begin{proof} We have the following Taylor series expansions \[ \begin{array}{l} \displaystyle \textrm{Vol}(cM\cap B_{ct}(cx))\sim\sum_{k\ge m} b_{cM,k}(cx)\cdot(ct)^k ,\\[2mm] \displaystyle c^m \textrm{Vol}(M\cap B_{t}(x)) \sim\sum_{k\ge m} c^mb_{M,k}(x) t^k, \end{array} \] which implies $b_{cM,k}(cx)=c^{m-k}b_{M,k}(x)$. The conclusion follows from \eqref{residue_Riesz_energy}. \end{proof}
\begin{proposition}\label{proposition_energy_homothety} Under a homothety $x\mapsto cx$ $(c>0)$, the regularized $z$-energy behaves as follows \begin{equation*}\label{Riesz-energy_homothety} \E{cM}{z} =c^{2m+z}\left(\E{M}{z}+(\log c) R_M(z)\right), \end{equation*}
where $R_M(z)$ is the residue at $z$ if $B_M$ has a pole there, and $R_M(z)=0$ otherwise. Hence the regularized $z$-energy of $m$-dimensional submanifolds is not scale invariant if $z\ne -2m$. The regularized $(-2m)$-energy is scale invariant if and only if $R_M(-2m)$ vanishes for any $m$-dimensional $M$. \end{proposition}
\begin{proof} Lemma \ref{lemma_residue_homothety} implies \[ \begin{array}{rcl} \E{cM}{z_0} &=&\displaystyle \lim_{z\to z_0}\left(B_{cM}(z)-\frac{R_{cM}(z_0)}{z-z_0}\right) \\[4mm]
&=&\displaystyle \lim_{z\to z_0}\left(c^{2m+z}\,B_M(z)-\frac{c^{2m+z_0}R_{M}(z_0)}{z-z_0}\right)\\[4mm]
&=&\displaystyle \lim_{z\to z_0}c^{2m+z}\left(B_M(z)-\frac{R_{M}(z_0)}{z-z_0}+\frac{c^{z_0-z}-1}{z_0-z}R_{M}(z_0)\right).\\[4mm] \end{array} \] Since $\lim_{w\to 0}{(c^w-1)}/w=\log c$, the conclusion follows. \end{proof} In particular, if $M\subset \mathbb{R}^3$ is a surface, then \begin{equation}\label{regularized_-4energy_homothety} \E{cM}{-4}=\E{M}{-4} +\frac{\pi\log c}8\int_M(\kappa_1-\kappa_2)^2dx, \end{equation}
and similarly for surfaces in $\mathbb R^n$ (cf. \eqref{kp_surfaces}). Hence $E_M(-4)$ is not scale invariant unless $M$ is a sphere. This corrects a statement in the conclusion of \cite{FV}.
However, if $M$ is a closed submanifold of odd dimension $m$, then $E_{M}(-2m)$ is scale invariant. In fact it is M\"obius invariant as we show next.
\begin{proposition} \label{moebius_invariance} If $m=\dim M$ is odd, then $E_M(-2m)=E_{I(M)}(-2m)$ for any M\"obius transformation $I$ such that $I(M)$ remains compact. \end{proposition}
\begin{proof} Since $E_M(-2m)$ is translation and scale invariant, we can suppose $0\not \in M$, and we only need to prove the statement when $I$ is an inversion in the unit sphere. Let $\widetilde M=I(M), \tilde x, \tilde y$ denote the images by $I$ of $M,x,y$ respectively. Since \[
|\tilde x-\tilde y|=\frac{|x-y|}{|x|\,|y|},\quad d\tilde x=\frac{dx}{|x|^{2m}}, \quad d\tilde y=\frac{dy}{|y|^{2m}}, \] we have for $\Re\mathfrak{e} \, z>-m$ \[
\int_{\tilde M\times\tilde M}|\tilde x-\tilde y|^z d\tilde xd\tilde y
=\int_{M\times M}|x-y|^z \frac1{|x|^{z+2m}|y|^{z+2m}} dxdy. \] Hence, for $\Re\mathfrak{e} \, z>-m$, and using \eqref{coarea_rho} \begin{align}\notag
B_{\widetilde M}(z)- B_{ M}(z)
&= \int_{M\times M}|x-y|^z \left[\left(\frac1{|x|\,|y|}\right)^{z+2m}-1\right] dxdy\\
&=\int_0^d t^z \psi_{\rho_z}'(t) dt+\int_{M\times M\setminus \Delta_d} |x-y|^z \rho_{z}(x,y)dxdy,\label{diffBbigz} \end{align}
where $\rho_z(x,y)=\left(\frac1{|x|\,|y|}\right)^{z+2m}-1$, and $d>0$ is such that the spheres $\partial B_t(x)$ are transverse to $M$ for all $x$ in $M$ and all $t\in (0,d]$.
Let \[
\Psi_z(t)=\psi_{\rho_z}(t)=\int_{M\times M\cap \Delta_t}\left[\left(\frac1{|x|\,|y|}\right)^{z+2m}-1\right]\,dxdy, \] which is smooth in $[0,d]$.
By putting $\varphi(t)=\Psi_z'(t)$ and $k=2m$ in \eqref{GS}, we can extend the domain of \eqref{diffBbigz} to $\Re\mathfrak{e} \, z>-2m-1$ to obtain
\begin{align}\label{Psi} B_{\widetilde M}(z)- B_{ M}(z)&=\displaystyle \int_0^d t^z\left[\Psi_z'(t)-\sum_{j=0}^{2m-1}\frac{\Psi_z^{(j+1)}(0)}{j!}\,t^j \right]\,dt +\sum_{j=1}^{2m}\frac{\Psi_z^{(j)}(0)\, d^{z+j}}{(j-1)!\,(z+j)}\\
&+\int_{M\times M\setminus \Delta_d} |x-y|^z \rho_{z}(x,y)dxdy. \nonumber \end{align}
We show next that the three terms in the right hand side of the previous equality converge to 0 as $z$ approaches $-2m$. For the last term, this is true since $\rho_z$ converges uniformly to $0$ on $M\times M$ as $z\to-2m$.
Since all the derivatives of $\rho_z$ also converge uniformly to $0$ as $z\to-2m$, by Proposition \ref{even_odd} we have \begin{equation}\label{eq_lim_sup}
\lim_{z\to-2m} \sup_{0\le t\le d}|\Psi_z^{(i)}(t)|=0,\qquad \forall i. \end{equation} Since $m$ is odd, we know that $\Psi_z^{(2m)}(0)=0$ for any $z$, so the sum in the last term of line \eqref{Psi} runs over $1\le j\le 2m-1$. By \eqref{eq_lim_sup}, we deduce that this sum goes to $0$ as $z\to-2m$.
Finally, as \[
\left|\int_0^d t^z\left[\Psi_z'(t)-\sum_{j=0}^{2m-1}\frac{\Psi_z^{(j+1)}(0)}{j!}\,t^j
\right]\,dt\right| \displaystyle\leq \sup_{0\le t\le d}\left|\Psi_z^{(2m+1)}(t)\right|\frac{1}{(2m)!}\int_0^d\,t^{z+2m}\,dt, \] using \eqref{eq_lim_sup} once more, we see that the first term on the right hand side of \eqref{Psi} goes to $0$ as $z$ approaches $-2m$. This completes the proof. \end{proof}
\begin{conjecture}The regularized energy $E_M(-2m)$ is not scale invariant if $m=\dim M$ is even; i.e. there exists $M$ such that $R_M(-2m)\ne0$ if $m$ is even. \end{conjecture} In particular we conjecture that $E_M(z)$ is a M\"obius invariant only if $z=-2m$ and $m=\dim M$ is odd. Note that the case of spheres discussed in Example \ref{spheres} does not help in proving this conjecture. The conjecture holds for surfaces in $\mathbb{R}^3$ by \eqref{regularized_-4energy_homothety}.
\section{Energy of regular domains}
Next, we study the Riesz energies of compact domains with smooth boundary. As before, we regularize when necessary to get a meromorphic function which we call the beta function of the domain. We compute some residues and give some explicit presentations in low dimensions. Finally we prove that M\"obius invariant regularized Riesz energies exist in even dimensional spaces. \subsection{Riesz energies} Let $\Omega$ be a compact domain in $\mathbb{R}^n$ with smooth boundary $M=\partial\Omega$, and $\uon{x}$ the outer unit normal to $\Omega$ at a point $x$ in $ M$. For $z>-n$, we consider
\[E_\Omega(z)=\int_{\Omega\times\Omega}|x-y|^z\,dxdy.\] A closely related quantity is \[
P_\Omega(z)=\int_{\Omega\times\Omega^c}|x-y|^z\,dxdy. \] This integral converges for $-n-1<\Re\mathfrak{e} \, z<-n$, and is called {\em fractional perimeter} especially when $z\in\mathbb{R}$ (cf. \cite{CRS}).
\begin{lemma}\label{lemma_Riesz_energy_compact_bodies_boundary_integral} For $\Re\mathfrak{e} \, z>-n$ and $z\neq -2$, the Riesz $z$-energy can be expressed by a double integral over the boundary: \begin{equation}\label{eq_compact_bodies_z-energy_boundary_integral} E_\Omega(z)
=\frac{-1}{(z+2)(z+n)}\int_{ M\times M}|x-y|^{z+2}\langle\uon{x},\uon{y}\rangle\,dxdy. \end{equation} \end{lemma}
\begin{proof} Since \[ \begin{array}{rcl}
\textrm{div} _x\left[\,|x-y|^z(x-y)\,\right]
=\displaystyle \sum_{i=1}^n\frac{\partial}{\partial x_i}\left[\,(x_i-y_i)|x-y|^z\,\right]
=(z+n)|x-y|^z, \end{array} \] we have \begin{align}
\int_{\Omega\times\Omega}|x-y|^z\,dxdy
&=\displaystyle \frac1{z+n}\int_{\Omega}\int_{ M} \langle x-y,\uon{x}\rangle |x-y|^zdx dy\label{first_step}. \nonumber \end{align} Similarly, since \[
\textrm{div} _y\left[|x-y|^{z+2}\uon{x}\right]
=\langle \nabla_y|x-y|^{z+2}, \uon{x}\rangle+|x-y|^{z+2} \textrm{div} _y\uon{x}
=(z+2)\langle |x-y|^{z}(y-x), \uon{x}\rangle, \] we find \begin{align}
\int_{\Omega\times \Omega}|x-y|^z\,dxdy&= \frac1{z+n}\int_{ M}\int_{\Omega} \langle x-y,\uon{x}\rangle |x-y|^zdydx \nonumber \\
&= \frac{-1}{(z+2)(z+n)}\int_{ M}\int_{ M}|x-y|^{z+2}\langle\uon{x},\uon{y}\rangle dy dx. \nonumber \end{align} \end{proof}
With a similar argument one shows that for $-n-1<\Re\mathfrak{e} \, z<-n$, \[
P_\Omega(z)=\frac{1}{(z+2)(z+n)}\int_{ M}\int_{ M}|y-x|^{z+2}\langle\uon{x},\uon{y}\rangle dy dx, \] i.e., the right hand side of \eqref{eq_compact_bodies_z-energy_boundary_integral} gives $-P_\Omega(z)$ if $-n-1<\Re\mathfrak{e} \, z<-n$ and $E_\Omega(z)$ when $\Re\mathfrak{e} \, z>-n$.
\subsection{Regularization} In order to extend $E_\Omega(z)$ to the whole complex plane we follow a similar but not identical procedure as in the case of closed submanifolds.
We will use the following elementary fact. \begin{lemma}
Let $A,C\subset N$ be compact domains with regular boundary in a smooth orientable manifold $N$. Suppose that $\partial A,\partial C$ are transverse hypersurfaces. Let $X$ be a complete vector field in $N$ with associated flow $\phi\colon\mathbb{R}\times N\to N$, and denote $\phi_t=\phi(t,\cdot)$. Then, for any differential form $\omega$ of top degree in $N$, we have
\begin{equation}\label{eq_lemma}
\left.\frac{d}{dt}\right|_{t=0}\int_{C\cap \phi_t(A)}\omega=\int_{C\cap\partial A} X\lrcorner\, \omega. \end{equation} \end{lemma}
\begin{proof}Let $I=(-\epsilon,\epsilon)$ such that $\phi_t(\partial A)$ is transverse to $\partial C$ for all $t\in I$. Consider the vector field $\overline X=(1,X)$ on $I\times N$, which has associated flow ${\overline\phi(t,h,x)=}\overline\phi_t(h,x)=(t+h,\phi_t(x))$. Clearly $\overline X$ is tangent to the hypersurface ${\overline\phi}(I{\times\{0\}}\times\partial A)\subset I\times N$.
Let us take a vector field on $I\times N$ of the form $\overline Y=(1,Y)$, and such that $\overline Y$ is tangent to $I\times \partial C$ and also to ${\overline\phi}(I{\times\{0\}}\times\partial A)$. This is possible because these hypersurfaces are transverse.
Given $\epsilon>0$, there is some field $\overline Z_\epsilon=(1,Z_\epsilon)$, tangent to ${\overline\phi}(I{ \times\{0\}}\times\partial A)$, and such that \[
Z_\epsilon(t,x)=Y(t,x),\quad \forall x\in \partial C,\qquad Z_\epsilon(t,x)=X(x),\quad \forall x\notin {(\partial C)_\epsilon } \] where ${(\partial C)_\epsilon}$ is the set of points at distance $\le\epsilon$ from ${\partial}C$, with respect to an auxiliary riemannian metric. The flow $\psi_t^\epsilon\colon I\times N\to I\times N$ associated to $\overline Z_\epsilon$ fulfills \[
\psi_t^\epsilon(0,A)=(t,\phi_t(A)),\quad \psi_t^\epsilon(0,C)=(t,C). \] Hence, \begin{align*}
\left.\frac{d}{dt}\right|_{t=0}\int_{\phi_t(A)\cap C} \omega&=\left.\frac{d}{dt}\right|_{t=0}\int_{\psi_t^\epsilon(0,A\cap C)} \omega=\left.\frac{d}{dt}\right|_{t=0}\int_{(0,A\cap C)} (\psi_t^\epsilon)^*\omega=\int_{(0,A\cap C)} \mathcal L_{\overline Z_\epsilon}\omega\\ &=\int_{(0,A\cap C)} d(\overline Z_\epsilon\lrcorner \omega)=\int_{(0,C\cap\partial A)} \overline Z_\epsilon\lrcorner \omega+\int_{(0,A\cap\partial C)} \overline Z_\epsilon\lrcorner \omega. \end{align*} where $\omega$ denotes also its pull-back to $I\times N$, and $\mathcal L$ is the Lie derivative. {The fourth equality follows from Cartan's formula and the fact that $d\omega=0$. } The second term in the last expression vanishes, since $Z_\epsilon$ is tangent to $\partial C$. Finally, we can assume $Z_\epsilon$ is uniformly bounded and thus \[ \lim_{\epsilon\to 0} \int_{(0,C\cap\partial A)} \overline Z_\epsilon\lrcorner \omega= \int_{(0,C\cap\partial A)} \overline Z_0\lrcorner \omega=\int_{C\cap\partial A} X\lrcorner \omega . \] \end{proof}
Given $\rho\in C(\Omega\times\Omega)$, let \[
\Psi_\rho(t)=\int_{(\Omega\times\Omega)\cap\Delta_t} \rho(x,y)dxdy,
\]where $dx,dy$ are volume elements in $\mathbb{R}^n$, and $\Delta_t=\{(x,y)\colon |y-x|\leq t\}$.
Put \begin{align*} D_t&=\{(x,v)\in \Omega\times S^{n-1}\colon x+tv\in\Omega\},\\ E_t&=\{(x,v)\in M\times S^{n-1}\colon x+tv\in\Omega\}. \end{align*}
From here on, let $d>0$ be such that $\partial B_t(x)$ is transverse to $M$ whenever $x\in M$ and $0<t \leq d$. For $0<t<d$, the set $E_t$ is diffeomorphic to the product of $M$ and a closed hemisphere, and $D_t$ is the intersection of two domains with regular boundary such that the two boundaries intersect transversely. To see the latter, consider the involution $i_t(x,v)=(x+tv,-v)$ on $\mathbb{R}^n\times S^{n-1}$, and note that $D_t=(\Omega\times S^{n-1})\cap i_t(\Omega\times S^{n-1})$.
Given $f\in C(\Omega\times S^{n-1}\times [0,d\,])$, put \[
\Phi_f(t)=\int_{D_{t}} f(x,v,t)dxdS^{n-1}_v,\qquad \Xi_f(t)=\int_{E_t} f(x,v,t)dM_xdS^{n-1}_v \] where $dM$ and $dS^{n-1}$ are the volume elements in $M$ and $S^{n-1}$ respectively.
\begin{proposition}\label{variation} Let $\rho\in C^\infty(\Omega\times\Omega)$ and $f\in C^\infty(\Omega\times S^{n-1}\times [0,d\,])$. For $t_0\in(0,d\,]$, \begin{align} \label{var1} \Psi_\rho'(t_0)=&\int_{D_{t_0}} \rho(x,x+t_0v)t_0^{n-1}dxdS^{n-1}_v\\ \label{var2} \Phi_f'(t_0)=&\int_{D_{t_0}} \frac{\partial f}{\partial t}(x,v,t_0)dxdS^{n-1}_v+\int_{E_{t_0}} \langle \uon{x},v\rangle f(x+t_0v,-v,t_0) dM_xdS^{n-1}_v\\ \label{var3} \Xi_f'(t_0)=&\int_{E_{t_0}} \frac{\partial f}{\partial t}(x,v,t_0)dM_xdS^{n-1}_v \\
\label{var4} &-\left.\frac{d}{dt}\right|_{t=t_0}\int_{(M\times M)\cap(\Delta_t \setminus\Delta_0)} \frac{\langle \uon{y},{y-x}\rangle}{|y-x|^{n}} \,f \!\left(x,\frac{y-x}{|y-x|},|y-x|\right) dM_xdM_y, \end{align} where $\uon{x}$ is the outer unit normal to $M$ at $x$. \end{proposition}
\begin{proof} Equation \eqref{var1} follows from Fubini's theorem using polar coordinates for $y$ around $x$, and noting that $$(\Omega\times\Omega)\cap\Delta_t=\{(x, x+sv) \colon 0\le s\le t, (x,v)\in D_s\}.$$
By the chain rule, to prove \eqref{var2} and \eqref{var3} we only need to consider the case $f(x,v,t)\equiv f(x,v,t_0)$. Consider the vector field $X(x,v)=(-v,0)$ on $\mathbb{R}^n\times S^{n-1}$ and its associated flow $\phi_t(x,v)=(x-tv,v)$.
Since \[ D_{t+s}=(\Omega\times S^{n-1})\cap\phi_s(i_t(\Omega\times S^{n-1})) \] and \[ (\Omega\times S^{n-1})\cap \partial(i_t(\Omega\times S^{n-1})) =(\Omega\times S^{n-1})\cap i_t(M\times S^{n-1}) =i_t(E_t), \] the previous lemma implies \[\begin{array}{rcl} \displaystyle \frac{d}{dt}\int_{D_t} f(x,v,t_0)dxdS^{n-1}_v
&=&\displaystyle \left.\frac{d}{ds}\right|_{s=0}\int_{D_{t+s}} f(x,v,t_0)dxdS^{n-1}_v \\[4mm]
&=&\displaystyle \int_{(\Omega\times S^{n-1})\cap \partial(i_t(\Omega\times S^{n-1}))} f(x,v,t_0) X\lrcorner (dx\wedge dS^{n-1}_v) \\[4mm]
&=&\displaystyle \int_{i_t(E_t)} f(x,v,t_0) X\lrcorner (dx\wedge dS^{n-1}_v), \end{array}\]
where $dx\wedge dS^{n-1}$ is the differential form corresponding to the measure $dxdS^{n-1}$ with the product orientation.
Since $i_t^*(dx\wedge dS^{n-1})=(-1)^n dx\wedge dS^{n-1}$, and ${i_t}_*(X)=-X$, taking suitable orientations, the previous integral equals \[ \int_{E_t} f(x+tv,-v,t_0) X\lrcorner (dx\wedge dS^{n-1}_v)=\int_{E_t} \langle \uon{x},v\rangle f(x+tv,-v,t_{0}) dM_xdS^{n-1}_v, \] which yields \eqref{var2}.
To prove \eqref{var3}, let $\pi\colon (M\times M)\setminus \Delta_0\to M\times S^{n-1}$ be given by $\pi(x,y)=(x,\frac{y-x}{|y-x|})$. A simple computation shows \[
\pi^*(dM\wedge dS^{n-1})_{(x,y)}=\frac{1}{|y-x|^{n-1}} \langle \uon{y},\frac{y-x}{|y-x|}\rangle dM_x\wedge dM_y. \] On the other hand, given $(x,v)\in M\times S^{n-1}$ we have \[
\sum_{(x,y)\in \pi^{-1}(x,v){\cap(\Delta_{t+h}\setminus\Delta_t)}} \mathrm{sgn}\langle y-x,\uon{y}\rangle=\mathbf{1}_{E_t}(x,v)-\mathbf{1}_{E_{t+h}}(x,v). \] Therefore, \begin{multline*}
\int_{(M\times M)\cap(\Delta_{t+h}\setminus\Delta_t)} \frac{\langle \uon{y},{y-x}\rangle}{|y-x|^{n}} f(x,\frac{y-x}{|y-x|},|y-x|) dM_xdM_y\\
=\int_{E_{t}}f(x,v,t)dM_x\wedge dS_v^{n-1}-\int_{E_{t+h}}f(x,v,t)dM_x\wedge dS_v^{n-1}. \end{multline*} This yields \eqref{var3}. \end{proof}
Given an $n$-dimensional manifold $X$, and $k\in\mathbb N\cup\{\infty\}$, we take on $C^k(X)$ the structure of locally convex topological vector space defined by the family of seminorms $\| \cdot \|_{\alpha,\phi,K}$ given by \[
\| f \|_{\alpha,\phi,K}=\sup_{x\in K}\left|D^\alpha (f\circ \phi)(x)\right| \] where $\alpha=(a_1,\ldots,a_n), a_i\in \mathbb N,$ \[
D^\alpha= \frac{\partial^{|\alpha|} }{\partial_{x_1}^{a_1}\cdots \partial_{x_n}^{a_n}},\qquad |\alpha|=a_1+\cdots +a_n\leq k, \] and $(U,\phi)$ is a local chart with $K\subset U$ compact. By \cite[Chapter III \S1.1]{schaefer}, a linear map $L\colon C^\infty(X)\to C^k([0,d\,])$, is continuous if and only if for every $r\in \mathbb N, r\le k$, there exist $c>0$, local charts $\{(U_i,\phi_i)\}_{i=1}^m$, compact sets $K_i\subset U_i$, and index sets $\alpha_i$ such that \[
\sup_{t\in[0,d]}\left|\frac{d^rL(f)}{dt^r} (t)\right|<c \sum_{i=1}^m \|f\|_{\alpha_i, \phi_i,K_i} \] for all $f\in C^\infty(X)$.
\begin{proposition}\label{smooth}
Given $h\in C^\infty(M\times S^{n-1}\times [0,d\,])$, consider
\[
\Lambda_h(t)=\int_{(M\times M)\cap \Delta_t} h(x,\frac{y-x}{|y-x|},|y-x|) dM_xdM_y.
\] Then $\Lambda_h(t)$ is smooth on $[0,d\,]$. Moreover, the map $h\mapsto \Lambda_h$ is continuous with respect to the $C^\infty$-topologies. \end{proposition}
\begin{proof} Away from $t=0$, the statement is easy. In the following we assume $t$ small enough. By compactness, there exist $\epsilon,\delta>0$, and a finite collection of local charts $\phi_i\colon U_i\to M$, and open sets $V_i\subset U_i$ such that $\bigcup_i \phi_i(V_i)=M$, \[
B_p(\epsilon)\subset U_i,\qquad\mbox{and}\qquad B_{\phi_{i}(p)}(\delta)\cap M\subset \phi_{i}(B_p(\epsilon)),\qquad\forall p\in V_i, \, {\forall i}. \] Using partitions of unity, we can assume that $h$ has support inside $\phi_{i}(V_i)\times S^{n-1}\times [0,d\,]$ for some $i$. {Put $\phi=\phi_i, U=U_i$ and $V=V_i$ in what follows. } Let \[
F(p,u,r)=\frac{\phi(p+ru)-\phi(p)}{|\phi(p+ru)-\phi(p)|},\quad g(p,u,r)=|\phi(p+ru)-\phi(p)|,
\] which extend to smooth mappings on ${V}\times S^{n-2}\times [0,\epsilon)$ (cf. \cite{blowup} {Cor. 2.6}). Since $\frac{\partial g}{\partial r}(p,u,0)> 0$, there exists a smooth function $s(p,u,\tau)$ such that $g(p,u,s(p,u,t))=t$ defined for small $t\geq 0$. Hence, for $0 \leq t \leq\delta$, \begin{equation}\label{integrals}
\Lambda_h(t)= \int_{K}\int_{S^{n-2}}\int_0^{s(p,u,t)} (\mathrm{jac}\phi)_p(\mathrm{jac}\phi)_{p+ru} h(\phi(p),F(p,u,r),g(p,u,r))r^{n-2} dr dS^{n-2}_u dp, \end{equation} where $K\subset V$ is the inverse image by $\phi$ of the projection to $M$ of the support of $h$. Since $K$ is compact, the integrand and all its derivatives are uniformly bounded. It follows that the innermost integral defines a smooth function of $t$, and all its derivatives are uniformly bounded. Therefore $\Lambda_h$ is smooth. Finally, the continuity follows using again partitions of unity and \eqref{integrals}. \end{proof}
\begin{proposition}\label{coro}
Given $f\in C^\infty(M\times S^{n-1}\times [0,d\,])$, the function
\[
\Gamma_f(t)=\int_{(M\times M)\cap\Delta_t} \frac{\langle \uon{y},{y-x}\rangle}{|y-x|^{n}} f(x,\frac{y-x}{|y-x|},|y-x|) dM_xdM_y
\] is smooth on $[0,d\,]$. The operator $f\mapsto \Gamma_f\in C^\infty([0,d\,])$ is continuous. \end{proposition}
\begin{proof} Given $f$, take $h(x,v,r)=\langle \uon{x+{rv}},v\rangle f(x,v,r)$ (after suitably extending $\uon{}$ to a neighborhood of $M$). Arguing as in Proposition~\ref{prop_coarea} we have \begin{equation}\label{preparation}
\Gamma_f(t)=\int_{(M\times M)\cap\Delta_t} {|y-x|^{1-n}} h(x,\frac{y-x}{|y-x|},|y-x|) dM_xdM_y= \int_0^t {\tau}^{1-n} \Lambda'_h({ \tau})\,d{\tau .} \end{equation}
By \eqref{integrals}, and using $\langle \uon{y},\frac{y-x}{|y-x|}\rangle=O(|y-x|)$, we have $\Lambda_h(t)=O(t^{n})$. It follows by Proposition \ref{smooth} that $\Gamma_f(t)$ is smooth in $[0,d]$. The continuity is also clear by Proposition \ref{smooth}. \end{proof}
\begin{proposition}\label{uniform} Given $f\in C^\infty(\Omega\times S^{n-1}\times [0,d\,])$, the functions $\Psi_f$, $\Phi_f$, and $\Xi_f$ are smooth on $[0,d\,]$. The linear maps $f\mapsto \Psi_f,\Phi_f,\Xi_f$ are continuous with respect to the $C^\infty$-topologies. \end{proposition} \begin{proof}We will proceed by induction. First, as for $\Xi_f$, it is clear that $\Xi_f(t)$ is continuous on $[0,d\,]$, and that $f\mapsto \Xi_f\in C([0,d\,])$ is continuous. Assume for every $k\leq k_0$ and for any $f\in C^\infty(\Omega\times S^{n-1}\times [0,d\,])$ that $\Xi_f\in C^k([0,d])$ and the map $f \mapsto \Xi_f$ from $C^\infty(\Omega\times S^{n-1}\times [0,d\,])$ to $C^k([0,d\,])$ is continuous with the given topologies.
By \eqref{var3} and Proposition \ref{coro} applied to \eqref{var4}, it follows that the same holds for $k=k_0+1$, and by induction for all $k$.
Proceeding analogously and using \eqref{var2}, one proves that $\Phi_f(t)$ is smooth and $f\mapsto \Phi_f$ is continuous. The stated properties of $\Psi_f$ follow by \eqref{var1}. \end{proof}
\begin{corollary} The function $$\psi_\Omega(t)=\int_{(\Omega\times\Omega)\cap\Delta_t} dxdy$$ is smooth on $[0,d]$. Moreover, \begin{equation}\label{expansion_psi}
\psi_\Omega(t)=\frac{o_{n-1}}{n}t^{n}V(\Omega)-\frac{o_{n-2}}{(n+1)(n-1)}t^{n+1} A( M)+O(t^{n+3}). \end{equation} \end{corollary} \begin{proof}
The smoothness follows from Proposition \ref{uniform}, while the given expansion is a consequence of \eqref{var1} and \eqref{var2}. \end{proof}
Given any $z\in\mathbb{C}$, by the coarea formula, one shows as in Proposition \ref{prop_coarea}, \[
\int_{\Omega\times\Omega\setminus\Delta_\varepsilon}|x-y|^z dxdy=\int_\varepsilon^\infty t^z\psi_{\Omega}'(t) dt,\qquad \varepsilon>0. \] \begin{definition} For any $z\in\mathbb{C}$, the {\em regularized $z$-energy} of a domain $\Omega\subset\mathbb R^n$ with smooth boundary is \begin{equation}\label{eq_def_E_dom}
E_\Omega(z)=\mathrm{Pf.}\int_0^\infty t^z\psi'_\Omega(t) dt= \lim_{\varepsilon \to 0}\left(\int_{\Omega\times\Omega\setminus\Delta_\varepsilon} |x-y|^z dxdy+\sum_{j=0}^{k-1}\frac{\psi_\Omega^{(j+1)}(0)}{j!}\frac{\varepsilon^{z+j+1}}{z+j+1}\right), \end{equation} where $k\in\mathbb N$ is such that $\Re\mathfrak{e} \, z>-k-1$, and ${\varepsilon^0}/{0}$ is to be replaced by $\log \varepsilon$ in case $z\in\mathbb Z,z <0$. \end{definition}
As before, there is a meromorphic function $B_\Omega(z)$ which coincides with $E_\Omega(z)$ away from its poles, which are located at the negative integers $z=-k$ such that $\psi_\Omega^{(k)}(0)\neq 0$. We call $B_\Omega$ the {\em beta function} of $\Omega$. As before, \begin{equation}\label{pole_remove_domain}
E_\Omega(-k)=\lim_{z\to -k}\left(B_\Omega(z)-\frac{1}{z+k}\mbox{\rm Res}(B_\Omega,-k)\right) \end{equation} if $-k$ is a pole of $B_\Omega$. Furthermore, the coefficients in \eqref{eq_def_E_dom} coincide with the residues of $B_\Omega(z)$. Indeed, by \eqref{basic_residues}, \begin{equation}\label{basic_res_dom}
\mbox{\rm Res}(B_\Omega,-k)=\frac{\psi_\Omega^{(k)}(0)}{(k-1)!}. \end{equation}
In order to compute these residues, the following alternative approach, based on \eqref{eq_compact_bodies_z-energy_boundary_integral}, will be useful. Let $M=\partial\Omega$, and $\rho\in C^\infty(M\times M)$ be given by $\rho(x,y)=\langle \uon{x},\uon{y}\rangle$. For $z\neq -n,-2$ \begin{equation}\label{second_approach}
E_\Omega(z)=\frac{-1}{(z+2)(z+n)}\mathrm{Pf.}\int_0^\infty t^{z+2} \psi'_{\rho}(t) dt. \end{equation} Indeed, for $\Re\mathfrak{e} \, z>-n$ this is \eqref{eq_compact_bodies_z-energy_boundary_integral}. For $z$ not a negative integer, the equality follows by analytic continuation. Finally, for $z\in \mathbb Z, {z<0}$, it follows from \eqref{pole_remove_domain}.
Note also that $B_\Omega(z)=-P_\Omega(z)$ for $-n-1<\Re\mathfrak{e} \, z<-n$, so the beta function is the analytic continuation of both the Riesz energy and minus the fractional perimeter.
Another consequence of \eqref{second_approach}, combined with Proposition \ref{even_odd} (ii), is the following: \begin{proposition}\label{prop_residue_B_Omega} The beta function $B_\Omega(z)$ can have poles only at $z=-n$ and $z=-n-2j-1$ with $j\in\mathbb Z, j\geq 0$. \end{proposition}
\subsection{Residues} Next we compute the residues of the beta function, and we derive some explicit presentations of $E_\Omega(z)$ in low dimensions. \begin{lemma} For $n>2$, the pole of $B_\Omega(z)$ at $z=-n$ is simple and has residue \begin{equation}
\displaystyle \mbox{\rm Res}(B_\Omega,-n)= \frac{1}{n-2}\int_{ M\times M}|x-y|^{2-n}\langle\uon{x},\uon{y}\rangle\,dxdy , \label{formula_volume} \end{equation}where $o_k$ is the volume of the unit $k$-sphere in $\mathbb{R}^{k+1}$. For $n=2$ this pole is simple with residue \begin{equation}
\mbox{\rm Res}(B_\Omega,-2)=-\displaystyle \int_{ M\times M}\log|x-y|\,\langle\uon{x},\uon{y}\rangle\,dxdy . \label{formula_area} \end{equation} \end{lemma}
\begin{proof} Equality \eqref{formula_volume} follows from Lemma \ref{lemma_Riesz_energy_compact_bodies_boundary_integral}. Let us prove \eqref{formula_area}. Since $\int_{ M}\langle \uon{x},\uon{y}\rangle dy$ vanishes, we have \[ \begin{array}{rcl} \mbox{\rm Res}(B_\Omega,-2)&=&\displaystyle \lim_{z\to-2}(z+2)B_\Omega(z)\\[2mm]
&=&\displaystyle \lim_{z\to-2}\left(-\int_{ M\times M}\frac{|x-y|^{z+2}}{z+2}\langle\uon{x},\uon{y}\rangle dxdy \right)\\[4mm]
&=&\displaystyle -\lim_{z\to-2}\int_{ M\times M}\frac{|x-y|^{z+2}-1}{z+2}\langle\uon{x},\uon{y}\rangle dxdy \\[4mm]
&=&\displaystyle -\int_{ M\times M}\log|x-y|\langle\uon{x},\uon{y}\rangle dxdy, \end{array} \]
by dominated convergence (as $\log|y-x|$ is integrable on $M\times M$). \end{proof}
\begin{corollary} When $n>2$ the volume of a compact domain $\Omega$ with boundary $M=\partial\Omega$ is given by \begin{equation*}\label{formula_volume_boundary_integral}
\text{\rm Vol}\,(\Omega)=\frac{1}{(n-2)o_{n-1}}\int_{ M\times M}|x-y|^{2-n}\langle\uon{x},\uon{y}\rangle\,dxdy. \end{equation*}
When $n=2$ the area of a compact domain $\Omega$ with boundary $M=\partial\Omega$ is given by \begin{equation*}\label{formula_area_boundary_integral}
A(\Omega)=-\frac1{2\pi} \int_{M\times M}\log|x-y|\,\langle\uon{x},\uon{y}\rangle\,dxdy. \end{equation*} \end{corollary}
\begin{proof} The equations \eqref{basic_res_dom} and \eqref{expansion_psi} imply \begin{equation*}\label{residue_domain_volume} \mbox{\rm Res}(B_\Omega,-n)={o_{n-1}}\text{\rm Vol}\,(\Omega) \end{equation*} for any $n\ge2$. The two formulae in the corollary follow from \eqref{formula_volume} and \eqref{formula_area}.
{We remark that the two formulae can also be proved directly by application of the Stokes theorem and Hadamard-type regularization at $M$. } \end{proof}
By \eqref{second_approach}, the other residues are given by
\begin{equation*}\label{residues_domain}
\mbox{\rm Res}(B_\Omega,-n-2j-1)=\frac{-1}{(n+2j-1)!\,(2j+1)}\int_{ M} \psi^{(n+2j-1)}_{\rho,x}(0)\, dx. \end{equation*} \begin{proposition}Let $\Omega\subset \mathbb R^n$ be a compact domain bounded by a smooth hypersurface $ M$. Given $x\in M$, let $\rho(y)=\langle \uon{x},\uon{y}\rangle$. Then \begin{equation}\label{eq_psi_rho}
\psi_{\rho,x}(t)=\frac{o_{n-2}t^{n-1}}{n-1}\left(1-\frac{t^2}{8(n+1)}\left(3(n-1)^2H^2-4K\right) +O(t^4)\right), \end{equation} where $H=\frac{1}{n-1}\sum_i k_i$ is the mean curvature, $K=\sum_{i<j}k_ik_j$ is the scalar curvature, and $k_1,\ldots,k_{n-1}$ are the principal curvatures of $ M$. Hence \begin{align*} \label{residues_j01} &\mbox{\rm Res}(B_\Omega,-n)=o_{n-1}{\rm Vol}(\Omega),\\
&\mbox{\rm Res}(B_\Omega,-n-1)=-\frac{o_{n-2}}{n-1}\textrm{\rm Vol}( M),
\\ &\mbox{\rm Res}(B_\Omega,-n-3)=\frac{o_{n-2}}{24(n^2-1)}\int_{ M} (3(n-1)^2H^2-4K) dx. \end{align*} \end{proposition}
\begin{proof}We can choose orthogonal coordinates $(v_1,\ldots,v_n)$ so that $x$ is the origin and $M=\partial\Omega$ coincides locally with the graph of a smooth function $g(v_1,\ldots,v_{n-1})$ and $\uon{x}=(0,\ldots,0,1)$. Using polar coordinates $(r,u)\in \mathbb R_{\geq 0}\times S^{n-2}$ in the domain of $g$, we parametrize the points $y\in M$ around $x$ by \[y=h(r,u)=\left(r u_1, \ldots, r u_{n-1}, g(r\cdot u)\right)= \left(r u_1, \ldots, r u_{n-1}, -\frac{r^2}{2}k_n(u)+O(r^3)\right), \] where $k_n(u)=\sum_{i=1}^{n-1}k_iu_i^2$ is the normal curvature in the direction $u$.
It is geometrically clear that \[ h^\ast\left(\langle\uon{x},\uon{y}\rangle dy\right)=dv_1\cdots dv_{n-1}=r^{n-2}drdu. \] On the other hand, the distance between $x$ and $y$ is given by \[
t=t(r,u)=\sqrt{r^2+\frac{r^4}{4}k_n(u)^2+O(r^5)}=r\left(1+\frac{k_n(u)^2}{8}r^2+O(r^3)\right). \]
Then, it follows that $r=r(t,u)$ can be expanded as a power series in $t$ as \begin{equation*}\label{formula_r-t} r=t\left(1-\frac18k_n(u)^2t^2+O(t^3)\right). \end{equation*}
Now, using $(t,u)$ as coordinates instead of $(r,u)$, the area element of the plane $\{v_n=0\}$ can be expressed as \begin{align*} r^{n-2}drdu&= t^{n-2}\left(1-\frac18k_n(u)^2t^2+O(t^3)\right)^{n-2}\left(1-\frac38k_n(u)^2t^2+O(t^3)\right)dtdu \\
&= t^{n-2}\left(1-\frac{n+1}{8}k_n^2(u)t^2+O(t^3)\right)dtdu. \end{align*}
Therefore \begin{align*} \psi_{\rho,x}(\varepsilon)&=\int_{ M\cap B_\varepsilon(x)}\langle\uon{x},\uon{y}\rangle dy \\
&= \int_0^\varepsilon\int_{S^{n-2}} t^{n-2}\left(1-\frac{n+1}{8}\left(k_n(u)\right)^2t^2+O(t^3)\right)du dt\\ &= \frac{o_{n-2}\,\varepsilon^{n-1}}{n-1}-\frac{\varepsilon^{n+1}}{8}\int_{S^{n-2}} k_n(u)^2du +O(\varepsilon^{n+2}).
\end{align*} By Proposition \ref{even_odd} we know that $\psi_{\rho,x}$ extends to an even (resp. odd) function when $n-1$ is even (resp. odd), so the latter $O(\varepsilon^{n+2})$ is in fact $O(\varepsilon^{n+3})$.
Finally, using e.g. \cite[Formula (A.5)]{gray} one gets \[
\int_{S^{n-2}}k_n(u)^2 du=\frac{o_{n-2}}{(n-1)(n+1)}\left(3\sum_{i=1}^{n-1} k_i^2+2\sum_{1\leq i<j\leq n-1}k_ik_j\right) \] where $k_1,\ldots, k_{n-1}$ are the principal curvatures of $M$ at $x$. Equation \eqref{eq_psi_rho} follows. \end{proof}
\begin{theorem}Let $\Omega\subset\mathbb R^n$ be a compact domain with smooth boundary $\partial \Omega$. The first three poles (along the negative real axis) of $B_{\Omega}(z)$ have the following residues
\begin{enumerate} \item For $n=2$ \[ R_\Omega(-2)=2\pi A(\Omega),\> R_\Omega(-3)=-2L( \partial \Omega),\> R_\Omega(-5)=\frac1{12}\int_{ \partial\Omega}\kappa^2\,dx, \] where $L$ and $A$ denote length and area respectively, and $\kappa$ denotes the curvature of $\partial\Omega$.
\item For $n=3$ \begin{equation*}\label{residues_compact_body_dim3} R_\Omega(-3)=4\pi V(\Omega), \> R_\Omega(-4)=-\pi A(\partial \Omega), \> R_\Omega(-6)=\frac{\pi}{24}\int_{ M}(3H^2-K)dx, \end{equation*}
where $V$ and $A$ denote volume and area, respectively, and $H,K$ are the mean and the Gauss curvatures of $ M$.
\item For $n=4$ \begin{equation*}\label{residues_compact_body_dim4} R_\Omega(-4)={2}\pi^2 V_4(\Omega), \> R_\Omega(-5)=-\frac43\pi V_3(\partial\Omega), \> R_\Omega(-7)=\frac{\pi}{90}\int_{\partial\Omega}(27H^2-4K)dx, \end{equation*} where $V_k$ denotes $k$-dimensional volume, and $H,K$ are the mean and scalar curvatures of $\partial \Omega$.
\end{enumerate} \end{theorem}
The previous formulas allow to describe explicitly the $z$-energy for $\Re\mathfrak{e} \, z> -n-5$ in dimensions $n=2,3,4$ using \eqref{eq_def_E_dom} and \eqref{basic_res_dom}. Next we carry this out for $z=-2n$. \begin{corollary}Let $\Omega\subset \mathbb{R}^n$ be a compact domain with smooth boundary. \begin{enumerate}
\item For $n=2$, the regularized $(-4)$-energy is \[ E_\Omega(-4)=B_\Omega(-4)
=\lim_{\varepsilon\rightarrow 0}\left(\int_{\Omega\times\Omega\setminus\Delta_\varepsilon}\frac{dxdy}{|x-y|^4}-\frac{\pi}{\varepsilon^2}A(\Omega)+\frac{2}{\varepsilon}L(\partial\Omega)\right). \] \item For $n=3$ the regularized $(-6)$-energy is \[ \begin{array}{rcl} \displaystyle \E{\Omega}{-6} &=& \displaystyle \lim_{z\to-6}\left(B_\Omega(z)-\frac\pi{24(z+6)}\int_{ \partial\Omega}(3H^2-K)dx\right) \\[4mm]
&=&\displaystyle \lim_{\varepsilon\rightarrow 0}\left(\int_{\Omega\times\Omega\setminus\Delta_\varepsilon}\frac{dxdy}{|x-y|^6}
-\frac{4\pi }{3\varepsilon^3}\textrm{\rm Vol}(\Omega)+\frac{\pi }{2\varepsilon^2}A( \partial\Omega)+\frac{\pi\log\varepsilon}{24}\int_{ \partial\Omega}(3H^2-K)dx \right). \end{array} \] \item For $n=4$, the regularized $(-8)$-energy is \[\begin{array}{l}
E_\Omega(-8)=B_\Omega(-8) \\
\displaystyle =\lim_{\varepsilon\rightarrow 0}\left(\int_{\Omega\times\Omega\setminus\Delta_\varepsilon}\frac{dxdy}{|x-y|^8}-\frac{\pi^2}{{2}\varepsilon^4} {V_4(\Omega)} +\frac{4\pi}{9\varepsilon^3} {V_3( \partial\Omega)} -\frac{\pi}{90\varepsilon}\int_{ \partial\Omega}(27H^2-4K)dx\right). \end{array} \] \end{enumerate} \end{corollary} In \cite{OS}, we introduced an energy $E(\Omega)$ for planar compact domains $\Omega\subset\mathbb R^2$. This energy is related to $E_\Omega(-4)$ by $E(\Omega)=E_\Omega(-4)+\frac{\pi^2}{4}\chi(\Omega)$. Indeed by \cite[Definition 3.11 and Proposition 3.13]{OS}, one has
\begin{equation*}\label{E_OS}
{{E}}(\Omega)=\lim_{\varepsilon\rightarrow 0}\left(\int_{\Omega\times\Omega\setminus\Delta_\varepsilon}\frac{dxdy}{|x-y|^4}-\frac{\pi}{\varepsilon^2}A(\Omega)+\frac{2}{\varepsilon}L(\partial\Omega)\right)+\frac{\pi^2}{4}\chi(\Omega). \end{equation*}
It was shown in \cite{OS}, that this energy is M\"obius invariant. In the next section we prove the analogous result for any even dimension.
\subsection{M\"obius invariance}
\begin{proposition}\label{last_proposition} Under a homothety $x\mapsto cx$ $(c>0)$, the residues and the regularized $z$-energy of the Riesz energy behave as follows. \[ \begin{array}{rcl} R_{c\Omega}(-k)&=&\displaystyle c^{2n-k} R_\Omega(-k) \hspace{0.7cm}(k\ge n). \\[2mm] \E{c\Omega}{z}&=&\displaystyle c^{2n+z}\left(E_\Omega(z)+(\log c) R_\Omega(z)\right). \end{array} \] Hence the regularized $z$-energy is not scale invariant if $z\ne -2n$. The regularized $(-2n)$-energy is scale invariant if and only if $R_\Omega(-2n)$ vanishes for any $\Omega$. \end{proposition}
\proof The arguments in Lemma \ref{lemma_residue_homothety} and Proposition \ref{proposition_energy_homothety} go parallel here. \endproof
\begin{example} \rm Let $\Omega=B^n$ be the $n$-dimensional unit ball. Using Lemma \ref{lemma_Riesz_energy_compact_bodies_boundary_integral} one easily gets the following expression (which appears also in \cite{Mi,HR})
\begin{eqnarray} B_{\Omega}(z)&=&\frac{2^{z+n}o_{n-1}o_{n-2}}{(n-1)(z+n)}\,B\!\left(\frac{z+n+1}2,\frac{n+1}2\right) \nonumber \\
&=&\displaystyle \left\{ \begin{array}{ll} \displaystyle \frac{2^{z+n+1}\,\pi^{n-\frac12}\,\Gamma\left(\frac{z}2+\frac{n+1}2\right)}{(z+n)\left(\frac{n}2-1\right)!\,\Gamma\left(\frac{z}2+{n+1}\right)} & \hspace{0.5cm}(\mbox{ if $n$ is even}) \\[6mm]
\displaystyle \frac{2^{z+2n+1}\,\pi^{n-1}}{(z+n)\,(n-2)!!\,(z+n+1)(z+n+3)\cdots(z+2n)} & \hspace{0.5cm}(\mbox{ if $n$ is odd}), \end{array} \right.\nonumber \label{unit_ball_even_odd} \end{eqnarray}
where $(n-2)!!=(n-2)\cdot(n-4)\cdots3\cdot1$. Hence, the beta function of a ball has infinitely many poles at $z=-n, -n-1, -n-3, -n-5, \dots$ when $n$ is even, and exactly $(n+3)/2$ poles at $z=-n, -n-1, -n-3, \dots, -2n$ when $n$ is odd. \end{example}
\begin{theorem}\label{thm4.11} The regularized $z$-energy $E_\Omega(z)$ is a M\"obius invariant if and only if $n=\dim\Omega$ is even and $z=-2n$. \end{theorem}
\begin{proof} The regularized $z$-energy is scale invariant only if $z=-2n$ by Proposition \ref{last_proposition}. The example above shows that the regularized $(-2n)$-energy is not scale-invariant if $n$ is odd. Propositions \ref{prop_residue_B_Omega} and \ref{last_proposition} show that $E_\Omega(-2n)$ is scale invariant if $n$ is even. Therefore, we have only to show that $E_\Omega(-2n)=E_{I(\Omega)}(-2n)$ if $n$ is even, $I$ is an inversion with respect to the unit sphere, and $\Omega$ is a compact domain in $\mathbb{R}^n$ with smooth boundary that does not contain the origin.
For $\mathfrak{Re}(z)>-n$, and denoting $\widetilde\Omega=I(\Omega)$ we have \[
E_{\widetilde\Omega}(z)-E_\Omega(z)=\int_{\Omega\times \Omega} |x-y|^z\rho_z(x,y) dxdy \]
where $\rho_z(x,y)=|x|^{-z-2n}|y|^{-z-2n} -1$. By Proposition \ref{uniform}, we have $\Psi_{\rho_z}\in C^\infty([0,d\,])$ and all derivatives $\Psi^{(k)}_{\rho_z}$ converge uniformly to $0$ as $z\to -2n$. Arguing as in Proposition~\ref{prop_coarea}, \begin{align*}
E_{\widetilde\Omega}(z)-E_\Omega(z)=\int_0^d t^z\Psi'_{\rho_z}(t)dt+\int_{(\Omega\times \Omega)\setminus \Delta_d} |x-y|^z\rho_z(x,y)dxdy \end{align*} for $\mathfrak{Re}(z)>-n$. For $\mathfrak{Re}\,z>-2n-1$ we have \begin{equation}\label{F_threeterms} \begin{array}{rl} E_{\widetilde\Omega}(z)-E_\Omega(z)=&\displaystyle \int_0^d t^z\left[\Psi_{\rho_z}'(t)-\sum_{j=0}^{2n-1}\frac{\Psi_{\rho_z}^{(j+1)}(0)}{j!}\,t^j \right]\,dt +\sum_{k=1}^{2n}\frac{\Psi_{\rho_z}^{(k)}(0)\, d^{z+k}}{(k-1)!\,(z+k)}\\[6mm]
&\displaystyle +\int_{(\Omega\times \Omega)\setminus \Delta_d} |x-y|^z \rho_{z}(x,y)dxdy. \end{array} \end{equation}
The third term of the right hand side of \eqref{F_threeterms} goes to $0$ as $z$ tends to $-2n$ since $\rho_z(x,y)$ goes to $0$ uniformly.
The {modulus} of the first term goes to $0$ as $z$ tends to $-2n$ since it is bounded above by \[
\frac1{(2n)!}\sup_{0\le t\le d}\left|\Psi_{\rho_z}^{(2n+1)}(t)\right|\left|\int_0^d t^{z+2n}\,dt\right|
=\frac{ |d^{z+2n+1} |}{(z+2n+1)(2n)!}\sup_{0\le t\le d}\left|\Psi_{\rho_z}^{(2n+1)}(t)\right|, \] which tends to $0$ as $z$ goes to $-2n$ by Proposition \ref{uniform}.
By Proposition~\ref{prop_residue_B_Omega}, the function $E_{\widetilde\Omega}(z)-E_{\Omega}(z)$ has possible poles at $z=-n$ and $z=-n-(2j+1)$ with $j\in\mathbb Z, j\geq 0$. Since $n$ is even, it does not have a pole at $z=-2n$. Hence, the term $k=2n$ in \eqref{F_threeterms} must vanish identically. It follows by Proposition \ref{uniform} that the sum over $k$ in \eqref{F_threeterms}, and therefore $E_{\widetilde\Omega}(z)-E_\Omega(z)$, tends to $0$ as $z$ approaches $-2n$.
This completes the proof of the M\"obius invariance of $E_\Omega(-2n)$. \end{proof}
\end{document} |
\begin{document}
\title{Characterizing Real-Valued Multivariate Complex Polynomials and Their Symmetric Tensor Representations}
\author{ Bo JIANG \thanks{Research Center for Management Science and Data Analytics, School of Information Management and Engineering, Shanghai University of Finance and Economics, Shanghai 200433, China. Email: isyebojiang@gmail.com} \and Zhening LI \thanks{Department of Mathematics, University of Portsmouth, Portsmouth, Hampshire PO1 3HF, United Kingdom. Email: zheningli@gmail.com} \and Shuzhong ZHANG \thanks{Department of Industrial and Systems Engineering, University of Minnesota, Minneapolis, MN 55455, USA. Email: zhangs@umn.edu}}
\date{\today}
\maketitle
\begin{abstract}
In this paper we study multivariate polynomial functions in complex variables and their corresponding symmetric tensor representations. The focus is to find conditions under which such complex polynomials always take real values. We introduce the notion of symmetric conjugate forms and general conjugate forms, characterize the conditions for such complex polynomials to be real-valued, and present their corresponding tensor representations.
New notions of eigenvalues/eigenvectors for complex tensors are introduced, extending similar properties from the Hermitian matrices. Moreover,
we study a property of the symmetric tensors, namely that the {largest eigenvalue (in the absolute value sense)} of a symmetric real tensor is equal to its largest singular value; this result is also known as Banach's theorem. We show that a similar result holds for the complex case as well. Finally, we discuss some applications of the new notion of eigenvalues/eigenvectors for the complex tensors.
\noindent {\bf Keywords:} symmetric complex tensor; conjugate complex polynomial; tensor eigenvalue; tensor eigenvector; nonnegative polynomial.
\noindent {\bf Mathematics Subject Classification:} 15A69, 15A18, 15B57, 15B48.
\end{abstract}
\section{Introduction}\label{sec:introduction}
In this paper we set out to study the functions in multivariate complex variables which however always take real values. Such functions are frequently encountered in engineering applications arising from signal processing~\cite{ADJZ12}, electrical engineering, and control theory~\cite{TO98}. It is interesting to note that such complex functions are usually not studied by conventional complex analysis, since they are typically not even analytic because the Cauchy-Riemann conditions will never be satisfied unless the function in question is trivial. There has been a surge of research attention to solve optimization models related to such kind of complex functions~\cite{ADJZ12,SBD12,SBD13,JLZ14,JMZ14}. Sorber et al.~\cite{SBD13b} developed a MATLAB toolbox for optimization problems in complex variables, where the complex function in question is either {\it pre-assumed} to be always real-valued~\cite{SBD12}, or it is the modulus/norm of a complex function~\cite{ADJZ12,SBD13}. An interesting question thus arises: {\em Can such real-valued complex functions be characterized?} Indeed there does exist a class of special complex functions that always take real values: the Hermitian quadratic form $\boldsymbol{x}^{\textnormal{H}}A\boldsymbol{x}$ where $A$ is a Hermitian matrix. In this case, the quadratic structure plays a key role. This motivates us to search for more general complex polynomial functions with the same property.
Interestingly, such complex polynomials can be completely characterized, as we will present in this paper.
As is well-known, polynomials can be represented by tensors. The same question can be asked about complex tensors. In fact, there is a considerable amount of recent research attention on the applications of complex tensor optimization. For instance, Hilling and Sudbery~\cite{HS10} formulated a quantum entanglement problem as a complex multilinear form optimization under the spherical constraint, and Zhang and Qi~\cite{ZQ12} and Ni et al.~\cite{NQB14} discussed quantum eigenvalue problems, which arose from the geometric measure of entanglement of a multipartite symmetric pure state in the complex tensor space. Examples of complex polynomial optimization include Aittomaki and Koivunen~\cite{AK09} who formulated the problem of beam-pattern synthesis in array signal processing as complex quartic polynomial minimization, and Aubry et al.~\cite{ADJZ12} who modeled a radar signal processing problem by complex polynomial optimization. Solution methods for complex polynomial optimization can be found in, e.g., \cite{SBD12,JLZ14,JMZ14}. As mentioned before, polynomials and tensors are known to be related. In particular in the real domain, homogeneous polynomials (or forms) are bijectively related to {\em symmetric} tensors; i.e., the components of the tensor are invariant under the permutation of its indices. This important class of tensors generalizes the concept of symmetric matrices. Just as symmetric matrices play a central role in matrix theory and quadratic optimization, symmetric tensors have a profound role to play in tensor eigenvalue problems and polynomial optimization. A natural question can be asked about complex tensors: {\em What is the higher order complex tensor generalization of the Hermitian matrix?} In this paper, we manage to identify two classes of symmetric complex tensors, both of which include Hermitian matrices as a special case when the order of the tensor is two.
In recent years, eigenvalues of tensors have become a topic of intensive research interest. {Perhaps a first attempt to generalize eigenvalue decomposition of matrices can be traced back to 2000 when De Lathauwer et al.~\cite{DDV00} introduced the so-called higher-order eigenvalue decomposition. Shortly after that, Kofidis and Regalia~\cite{KR02} showed that blind deconvolution can be formulated as a nonlinear eigenproblem.} A systematic study of eigenvalues of tensors was pioneered by Lim~\cite{L05} and Qi~\cite{Q05} independently in 2005. Various applications of tensor eigenvalues and the connections to polynomial optimization problems have been proposed;
cf.~\cite{Q07,NQWW07,ZQ12,CS13,NQB14} and the references therein. We refer the interested reader to the survey paper~\cite{Q12} for more details on the spectral theory of tensors and various applications of tensors. Computation of tensor eigenvalues is an important source for polynomial optimization~\cite{HLZ10,LHZ12}. Essentially the problem is to maximize or minimize a homogeneous polynomial under the spherical constraint, which can also be used to test the (semi)-definiteness of a symmetric tensor.
In this paper we are primarily interested in complex polynomials/tensors that arise in the context of optimization. By nature of optimization, we are interested in the complex polynomials that always take real values. However, it is easy to see that if no {\em conjugate} term is involved, then the only class of real-valued complex polynomials is the set of real constant functions\footnote{This should be differentiated from the notion of real-symmetric complex polynomial, sometimes also called real-valued complex polynomial in abstract algebra, i.e., $\overline{f(\boldsymbol{x})}=f(\overline{\boldsymbol{x}})$.}. Therefore, the conjugate terms are necessary for a complex polynomial to be real-valued. Hermitian quadratic forms mentioned earlier belong to this category, which is an active area of research in optimization; see e.g.~\cite{LMSYZ10,ZH06,SZY07}.
In the aforementioned papers~\cite{Q07,NQWW07,CS13} on eigenvalues of complex tensors, the associated complex polynomials however are not real-valued. The aim of this paper is different. We target for a systematic study on
the nature of symmetricity for higher order complex tensors which will lead to the property that the associated polynomials always take real values.
The main contribution of this paper is to give a full characterization of the real-valued conjugate complex polynomials and to identify two classes of symmetric complex tensors, which have already shown potential in algorithm design~\cite{ADJZ12,JLZ14,JMZ14}. We also propose two new types of tensor eigenvalues/eigenvectors for the new classes of complex tensors.
This paper is organized as follows. We start with the preparation of various notations and terminologies in Section~\ref{sec:preparation}. In particular, two types of conjugate complex polynomials are defined and their symmetric tensor representations are discussed. Section~\ref{sec:condition} presents the necessary and sufficient condition for real-valued conjugate complex polynomials, based on which two types of symmetric complex tensors are defined, corresponding to the two types of real-valued conjugate complex polynomials.
As an important result in this paper, we then present the definitions and properties of eigenvalues and eigenvectors for two types of symmetric complex tensors in Section~\ref{sec:eigenvalue}. In Section~\ref{sec:Banach}, we discuss Banach's theorem, which states that the {largest eigenvalue (in the absolute value sense)} of a symmetric real tensor is equal to its largest singular value, and extend it to the two new types of symmetric complex tensors. Some application examples are discussed in Section~\ref{sec:application} to show the significance in practice of the theoretical results in this paper. Finally, we conclude this paper by summarizing our main findings and outlining possible future work in Section~\ref{sec:conclusion}.
\section{Preparation}\label{sec:preparation}
Throughout this paper we use usual lowercase letters, boldface lowercase letters, capital letters, and calligraphic letters to denote scalars, vectors, matrices, and tensors, respectively. For example, a scalar $a$, a vector $\boldsymbol{x}$, a matrix $Q$, and a tensor $\mathcal{F}$. We use subscripts to denote their components, e.g. $x_i$ being the $i$-th entry of a vector $\boldsymbol{x}$, $Q_{ij}$ being the $(i,j)$-th entry of a matrix $Q$ and $\mathcal{F}_{ijk}$ being the $(i,j,k)$-th entry of a third order tensor $\mathcal{F}$. As usual, the field of real numbers and the field of complex numbers are denoted by $\mathbb{R}$ and $\mathbb{C}$, respectively.
For any complex number $z=a+\mbox{\bf i} b\in\mathbb{C}$ with $a,b\in\mathbb{R}$, its real part and imaginary part are denoted by $\textnormal{Re}\, z:=a$ and $\textnormal{Im}\, z:=b$, respectively. Its modulus is denoted by $|z|:=\sqrt{\overline{z}z}=\sqrt{a^2+b^2}$, where $\overline{z}:=a-\mbox{\bf i} b$ denotes the conjugate of $z$. For any vector $\boldsymbol{x}\in\mathbb{C}^n$, {we let $\boldsymbol{x}^{\textnormal{H}}:=\overline{\boldsymbol{x}}^{\textnormal{T}}$ be the transpose of its conjugate, and we define it analogously for matrices}. Throughout this paper we uniformly use the 2-norm for vectors, matrices and tensors in general, which is the usual Euclidean norm. For example, the norm of a vector $\boldsymbol{x}\in\mathbb{C}^n$ is defined as $\|\boldsymbol{x}\|:=\sqrt{\boldsymbol{x}^{\textnormal{H}}\boldsymbol{x}}$, and the norm of a $d$-th order tensor $\mathcal{F}\in\mathbb{C}^{n_1 \times \dots\times n_d}$ is defined as $$
\|\mathcal{F}\|:= \sqrt{\sum_{i_1=1}^{n_1}\dots\sum_{i_d=1}^{n_d}\overline{\mathcal{F}_{i_1 \dots i_d}} \cdot \mathcal{F}_{i_1 \dots i_d}}. $$
\subsection{Complex forms and their tensor representations}
A multivariate complex polynomial $f(\boldsymbol{x})$ is a polynomial function of variable $\boldsymbol{x}\in\mathbb{C}^n$ whose coefficients are complex, e.g.\ $f(x_1,x_2)=x_1+(1-\mbox{\bf i}){x_2}^2$. A multivariate {\em conjugate} complex polynomial (sometimes abbreviated by conjugate polynomial in this paper) $f_C(\boldsymbol{x})$ is a polynomial function of variables $\boldsymbol{x},\overline{\boldsymbol{x}}\in\mathbb{C}^n$, which is differentiated by the subscript $C$, standing for `conjugate', e.g. $f_C(x_1,x_2) = x_1 + \overline{x_2} + \overline{x_1}x_2+ (1-\mbox{\bf i}){x_2}^2$. In particular, a general $n$-dimensional $d$-th degree conjugate complex polynomial can be explicitly written as summation of monomials $$ f_C(\boldsymbol{x}):=\sum_{\ell=0}^d \sum_{k=0}^\ell\,\sum_{1\le i_1\le \dots \le i_k \le n}\,\sum_{1\le j_1 \le \dots \le j_{\ell-k} \le n} a_{i_1\dots i_k,j_1\dots j_{\ell-k}}\overline{x_{i_1}\dots x_{i_k}}x_{j_1} \dots x_{j_{\ell-k}}. $$
{In the above notation for a monomial $a_{i_1\dots i_k,j_1\dots j_{\ell-k}}\overline{x_{i_1}\dots x_{i_k}}x_{j_1} \dots x_{j_{\ell-k}}$, the indices of the coefficient $a_{i_1\dots i_k,j_1\dots j_{\ell-k}}$ are always partitioned by a `,' to separate those of the conjugate variables from those of the regular variables. In particular, the coefficient of a monomial that only has conjugate variables such as $\overline{x_{i_1}}\overline{x_{i_2}}$ will be written as $a_{i_1i_2,}$.} In this definition, it is obvious that complex polynomials are a subclass of conjugate complex polynomials. Remark that a pure complex polynomial can never take only real values unless it is a constant. This observation follows trivially from the fundamental theorem of algebra.
Given a $d$-th order complex tensor $\mathcal{F}\in\mathbb{C}^{n_1 \times \dots\times n_d}$, its associated multilinear form is defined as $$ \mathcal{F}(\boldsymbol{x}^1,\dots,\boldsymbol{x}^d):=\sum_{i_1=1}^{n_1}\dots\sum_{i_d=1}^{n_d} \mathcal{F}_{i_1 \dots i_d} x^1_{i_1}\dots x^d_{i_d}, $$ where $\boldsymbol{x}^k\in \mathbb{C}^{n_k}$ for $k=1,\dots,d$. A complex tensor $\mathcal{F}\in\mathbb{C}^{n_1\times \dots\times n_d}$ is called {\em symmetric} if $n_1=\dots=n_d \, (=n)$ and every component $\mathcal{F}_{i_1 \dots i_d}$ is invariant under all permutations of the indices $\{i_1,\dots,i_d\}$. We remark that conjugation is not involved here when speaking of symmetricity for complex tensors. Closely related to a symmetric tensor $\mathcal{F}\in\mathbb{C}^{n^d}$ is a general $d$-th degree complex homogeneous polynomial function $f(\boldsymbol{x})$ (or complex form) of variable $\boldsymbol{x}\in \mathbb{C}^n$, i.e., \begin{equation}\label{eq:symmetric} f(\boldsymbol{x}):=\mathcal{F}(\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_d)=\sum_{i_1=1}^{n}\dots\sum_{i_d=1}^{n} \mathcal{F}_{i_1 \dots i_d} x_{i_1}\dots x_{i_d}. \end{equation} In fact, symmetric tensors (either in the real domain or in the complex domain) are bijectively related to homogeneous polynomials; see~\cite{CGLM08}. In particular, for any $n$-dimensional $d$-th degree complex form $$
f(\boldsymbol{x})= \sum_{1\le i_1 \le \dots \le i_d\le n} a_{i_1\dots i_d}x_{i_1}\dots x_{i_d}, $$ there is a uniquely defined $n$-dimensional $d$-th order symmetric complex tensor $\mathcal{F}\in\mathbb{C}^{n^d}$ with $$
\mathcal{F}_{i_1\dots i_d}=\frac{a_{i_1\dots i_d}}{|\Pi(i_1\dots i_d)|}, \quad\forall\, 1\le i_1\le \dots\le i_d\le n, $$ satisfying~\eqref{eq:symmetric}, where $\Pi(i_1\dots i_d)$ is the set of all distinct permutations of the indices $\{i_1,\dots, i_d\}$. On the other hand, in light of formula~\eqref{eq:symmetric}, a complex form $f(\boldsymbol{x})$ is easily obtained from the symmetric multilinear form $\mathcal{F}(\boldsymbol{x}^1,\dots,\boldsymbol{x}^d)$ by letting $\boldsymbol{x}^1=\dots=\boldsymbol{x}^d=\boldsymbol{x}$.
\subsection{Symmetric conjugate forms and their tensor representations}\label{sec:cform}
To discuss higher order conjugate complex forms and complex tensors, let us start with the {well-established} properties of Hermitian matrices. Let $A \in \mathbb{C}^{n^2}$ with $A^{\textnormal{H}}=A$, which is not symmetric in the usual sense because $A^{\textnormal{T}}\neq A$ in general. The following conjugate quadratic form $$\boldsymbol{x}^{\textnormal{H}}A\boldsymbol{x}=\sum_{i=1}^{n}\sum_{j=1}^{n}A_{ij}\overline{x_i}x_j$$ always takes real values for any $\boldsymbol{x}\in\mathbb{C}^n$. In particular, we notice that each monomial in the above form is the product of one `conjugate' variable $\overline{x_i}$ and one usual (non-conjugate) variable $x_j$.
To extend the above form to higher degrees, let us consider the following special class of conjugate polynomials: \begin{definition} \label{def:sform} A symmetric conjugate form of the variable $\boldsymbol{x}\in\mathbb{C}^n$ is defined as \begin{equation}\label{eq:sform} f_S(\boldsymbol{x}):=\sum_{1\le i_1 \le \dots \le i_d \le n}\, \sum_{1\le j_1 \le \dots \le j_d \le n} a_{i_1\dots i_d,j_1\dots j_d}\overline{x_{i_1}\dots x_{i_d}}x_{j_1} \dots x_{j_d}. \end{equation} \end{definition}
Essentially, $f_S(\boldsymbol{x})$ is the summation of all the possible $2d$-th degree monomials that consist of $d$ conjugate variables and $d$ usual variables. Here the subscript `$S$' stands for `symmetric'. The following example is a special case of~\eqref{eq:sform}.
\begin{example} Given a $d$-th degree complex form $h(\boldsymbol{x}) = \sum_{1\le i_1 \le \dots \le i_d \le n } c_{i_1\dots i_d} x_{i_1}\dots x_{i_d}$, the function \begin{align*}
|h(\boldsymbol{x})|^2&=\left(\sum_{1\le i_1 \le \dots \le i_d \le n } \overline{c_{i_1\dots i_d} x_{i_1}\dots x_{i_d}}\right) \left(\sum_{1\le j_1 \le \dots \le j_d \le n } c_{j_1\dots j_d} x_{j_1}\dots x_{j_d}\right)\\ &= \sum_{1\le i_1 \le \dots \le i_d \le n} \, \sum_{1\le j_1 \le \dots \le j_d \le n } \left(\overline{c_{i_1\dots i_d}}\cdot c_{j_1\dots j_d}\right)\overline{ x_{i_1}\dots x_{i_d}} x_{j_1}\dots x_{j_d} \end{align*} is a $2d$-th degree symmetric conjugate form. \end{example}
Notice that $|h(\boldsymbol{x})|^2$ is actually a real-valued conjugate polynomial. Later in Section~\ref{sec:condition} we shall show that a symmetric conjugate form $f_S(\boldsymbol{x})$ in~\eqref{eq:sform} always takes real values if and only if the coefficients of any pair of conjugate monomials $\overline{ x_{i_1}\dots x_{i_d}} x_{j_1}\dots x_{j_d}$ and $\overline{x_{j_1}\dots x_{j_d}}x_{i_1}\dots x_{i_d}$ are conjugate to each other, i.e., $$ a_{i_1\dots i_d,j_1\dots j_d}=\overline{a_{j_1\dots j_d,i_1\dots i_d}}, \quad\forall \,1\le i_1\le \dots \le i_d \le n,\, 1\le j_1 \le \dots \le j_d \le n. $$
As any complex form uniquely defines a symmetric complex tensor and vice versa, we now identify a class of tensors that can represent symmetric conjugate forms. \begin{definition} \label{def:partial-symmetric}
An even order tensor $\mathcal{F}\in\mathbb{C}^{n^{2d}}$ is called partial-symmetric if for {every} $1\le i_1\le\dots\le i_{d}\le n,\, 1\le i_{d+1}\le\dots\le i_{2d}\le n$ \begin{equation}\label{eq:partial-symmetric} \mathcal{F}_{j_1\dots j_d j_{d+1}\dots j_{2d}} = \mathcal{F}_{i_1\dots i_d i_{d+1}\dots i_{2d}}, \quad\forall \, (j_1\dots j_d) \in \Pi (i_1\dots i_d),\, (j_{d+1}\dots j_{2d})\in\Pi(i_{d+1}\dots i_{2d}). \end{equation} \end{definition}
We remark that the so-called partial-symmetricity was studied earlier in algebraic geometry by Carlini and Chipalkatti~\cite{CC03}, and was also studied in polynomial optimization~\cite{HLZ13} in the framework of mixed polynomial forms, i.e., for any fixed values of the first $d$ indices of the tensor, it is symmetric with respect to its last $d$ indices, and vice versa. It is clear that partial-symmetricity~\eqref{eq:partial-symmetric} is weaker than the usual symmetricity for tensors.
Let us formally define the bijection $\mathbf{S}$ (taking the first initial of symmetric conjugate forms) between symmetric conjugate forms and partial-symmetric complex tensors, as follows:\\ (i) $\mathbf{S}(\mathcal{F})=f_S$: Given a partial-symmetric tensor $\mathcal{F}\in\mathbb{C}^{n^{2d}}$ with its associated multilinear form $\mathcal{F}(\boldsymbol{x}^1,\dots,\boldsymbol{x}^{2d})$, the symmetric conjugate form is defined as
$$
f_S(\boldsymbol{x})=\mathcal{F}(\underbrace{\overline{\boldsymbol{x}},\dots,\overline{\boldsymbol{x}}}_d,\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_d)
= \sum_{i_1=1}^n\dots\sum_{i_{2d}=1}^n \mathcal{F}_{i_1\dots i_d i_{d+1}\dots i_{2d}}\overline{x_{i_1}\dots x_{i_d}}x_{i_{d+1}} \dots x_{i_{2d}}.
$$ (ii) $\mathbf{S}^{-1}(f_S)=\mathcal{F}$: Given a symmetric conjugate form $f_S$~\eqref{eq:sform}, the components of the partial-symmetric tensor $\mathcal{F}\in\mathbb{C}^{n^{2d}}$ are defined by
\begin{equation}
\mathcal{F}_{j_1\dots j_d j_{d+1}\dots j_{2d}}=\frac{a_{i_1\dots i_d ,i_{d+1}\dots i_{2d}}} {|{\Pi}(i_1\dots i_d)| \cdot |{\Pi}(i_{d+1}\dots i_{2d})|}\label{eq:sinverse}
\end{equation}
for all $1\le i_1\le\dots\le i_{d}\le n,\,
1\le i_{d+1}\le\dots\le i_{2d}\le n,\,
(j_1\dots j_d) \in \Pi (i_1\dots i_d)$ and $(j_{d+1}\dots j_{2d})\in\Pi(i_{d+1}\dots i_{2d})$.
\begin{example}
Given a bivariate fourth degree symmetric conjugate form $f_S(\boldsymbol{x})=(1-\mbox{\bf i}){\overline{x_1}}^2{x_1}^2 +4 \overline{x_1}\overline{x_2}x_1x_2+ 6 \overline{x_1}\overline{x_2}{x_2}^2 $, the corresponding partial-symmetric tensor $\mathcal{F}=\mathbf{S}^{-1}(f_S)\in\mathbb{C}^{2^4}$ satisfies that $\mathcal{F}_{1111}=1-\mbox{\bf i},\,\mathcal{F}_{1212}=\mathcal{F}_{1221}=\mathcal{F}_{2112}=\mathcal{F}_{2121}=1,\,\mathcal{F}_{1222}=\mathcal{F}_{2122}=3$ and other entries are zeros. Conversely, $f_S(\boldsymbol{x})$ can be obtained from $\mathcal{F}\big(\binom{\,\overline{x_1}\,}{\overline{x_2}}, \binom{\,\overline{x_1}\,}{\overline{x_2}}, \binom{x_1}{x_2}, \binom{x_1}{x_2}\big)$. \end{example}
According to the mappings defined previously, the following result readily follows. \begin{lemma}\label{thm:tensorS} The bijection $\mathbf{S}$ is well-defined, i.e., any $n$-dimensional $2d$-th order partial-symmetric tensor $\mathcal{F}\in\mathbb{C}^{n^{2d}}$ uniquely defines an $n$-dimensional $2d$-th degree symmetric conjugate form, and vice versa. \end{lemma}
\subsection{General conjugate forms and their tensor representations}\label{sec:gform}
In \eqref{eq:sform}, for each monomial the numbers of conjugate variables and the usual variables are always equal. This restriction can be relaxed further. \begin{definition} \label{def:gform} A general conjugate form of the variable $\boldsymbol{x}\in\mathbb{C}^n$ is defined as \begin{equation}\label{eq:gform} f_G(\boldsymbol{x})=\sum_{k=0}^d \, \sum_{1\le i_1\le \dots \le i_k \le n}\, \sum_{1\le j_1 \le \dots \le j_{d-k} \le n} a_{i_1\dots i_k,j_1\dots j_{d-k}}\overline{x_{i_1}\dots x_{i_k}}x_{j_1} \dots x_{j_{d-k}}. \end{equation} \end{definition}
Essentially, $f_G(\boldsymbol{x})$ is the summation of all the possible $d$-th degree monomials, allowing any number of conjugate variables as well as the usual variables in each monomial. Here the subscript `$G$' stands for `general'.
Obviously $f_S(\boldsymbol{x})$ is a special case of $f_G(\boldsymbol{x})$, and $f_G(\boldsymbol{x})$ is a special case of $f_C(\boldsymbol{x})$.
In Section~\ref{sec:condition} we shall show that a general conjugate form $f_G(\boldsymbol{x})$ will always take real values for all $\boldsymbol{x}$ if and only if the coefficients of each pair of conjugate monomials are conjugate to each other.
To this end, below we shall explicitly treat the conjugate variables as new variables:\\ (i) $\mathbf{G}(\mathcal{F})=f_G$: Given a symmetric tensor $\mathcal{F}\in\mathbb{C}^{(2n)^d}$ with its associated multilinear form $\mathcal{F}(\boldsymbol{x}^1,\dots,\boldsymbol{x}^d)$, the general conjugate form of $\boldsymbol{x}\in\mathbb{C}^n$ is defined as
\begin{equation}\label{eq:tensor-gform}
f_G(\boldsymbol{x})=\mathcal{F}\bigg(\underbrace{\dbinom{\overline\boldsymbol{x}}{\boldsymbol{x}},\dots,\dbinom{\overline\boldsymbol{x}}{\boldsymbol{x}}}_d\bigg).
\end{equation} (ii) $\mathbf{G}^{-1}(f_G)=\mathcal{F}$: Given a general conjugate form $f_G$ of $\boldsymbol{x}\in\mathbb{C}^n$ as~\eqref{eq:gform}, the components of the symmetric tensor $\mathcal{F}\in\mathbb{C}^{(2n)^d}$ are defined as follows: for any $1\le j_1,\dots,j_d\le 2n$, sort these $j_\ell$'s in a nondecreasing order as $1\le j_{i_1}\le\dots\le j_{i_d}\le 2n$ and let $k={\arg\max}_{1\le \ell\le d}\{j_{i_\ell}\le n\}$, then
\begin{equation}\label{eq:tensor-gform-1}
\mathcal{F}_{j_1\dots j_d} = \frac{a_{j_{i_1}\dots j_{i_k},(j_{i_{k+1}}-n)\dots (j_{i_d}-n)}}{|\Pi(j_1\dots j_d)|}.
\end{equation} \begin{example}
{Given} a symmetric second order tensor (matrix) $F=\left( \begin{smallmatrix} \mbox{\bf i} & 0 & 1 & 0\\ 0 & 0 & 2 & 0\\ 1 & 2 & 0 & 0 \\ 0 & 0 &0 & 3 \end{smallmatrix} \right)\in\mathbb{C}^{4^2}$, the corresponding general conjugate form is $$f_G(\boldsymbol{x})=(\overline{x_1},\overline{x_2},x_1,x_2)F(\overline{x_1},\overline{x_2},x_1,x_2)^{\textnormal{T}}=\mbox{\bf i}{\overline{x_1}}^2 + 2\overline{x_1}x_1 + 4 \overline{x_2}x_1 + 3 {x_2}^2.$$
Conversely, $F=\mathbf{G}^{-1}(f_G)$ can be obtained component-wise by~\eqref{eq:tensor-gform-1}. \end{example}
Similarly to Lemma~\ref{thm:tensorS}, the following is easily verified; we leave its proof to the interested reader. \begin{lemma}\label{thm:tensorG} The bijection $\mathbf{G}$ is well-defined, i.e., any $2n$-dimensional $d$-th order symmetric tensor $\mathcal{F}\in\mathbb{C}^{(2n)^d}$ uniquely defines an $n$-dimensional $d$-th degree general conjugate form, and vice versa. \end{lemma}
To conclude this section we remark that a partial-symmetric tensor (representation for a symmetric conjugate form) is less restrictive than a symmetric tensor (representation for a general conjugate form), while a symmetric conjugate form is a special case of a general conjugate form. One should note that the dimensions of these two tensor representations are actually different.
\section{Real-valued conjugate forms and their tensor representations}\label{sec:condition}
In this section, we study the two types of conjugate complex forms introduced in Section~\ref{sec:preparation}: symmetric conjugate forms and general conjugate forms.
\subsection{Real-valued conjugate polynomials} Let us first focus on polynomials, and present {the following general characterization of real-valued conjugate complex polynomials}.
\begin{theorem}\label{thm:realvalue}
A conjugate complex polynomial function is real-valued if and only if the coefficients of any pair of its conjugate monomials are conjugate to each other, i.e., any two monomials $au_C(\boldsymbol{x})$ and $bv_C(\boldsymbol{x})$ with $a$ and $b$ being their coefficients satisfying $\overline{u_C(\boldsymbol{x})}=v_C(\boldsymbol{x})$ must have that $\overline{a}=b$. \end{theorem}
The above condition actually implies that the coefficient of any self-conjugate monomial must be real. Applying Theorem~\ref{thm:realvalue} to the two classes of conjugate forms that we just introduced, the conditions for them to always take real values can now be characterized: \begin{corollary}\label{thm:condition} A symmetric conjugate form $$ f_S(\boldsymbol{x})=\sum_{1\le i_1 \le \dots \le i_d \le n}\, \sum_{1\le j_1 \le \dots \le j_d \le n} a_{i_1\dots i_d,j_1\dots j_d}\overline{x_{i_1}\dots x_{i_d}}x_{j_1} \dots x_{j_d} $$ is real-valued if and only if \begin{equation}\label{eq:sform-condition} a_{i_1\dots i_d,j_1\dots j_d}=\overline{a_{j_1\dots j_d,i_1\dots i_d}}, \quad\forall\,1\le i_1\le \dots \le i_d \le n, \,1\le j_1 \le \dots \le j_d \le n. \end{equation} A general conjugate form $$ f_G(\boldsymbol{x})=\sum_{k=0}^d \, \sum_{1\le i_1\le \dots \le i_k \le n}\, \sum_{1\le j_1 \le \dots \le j_{d-k} \le n} a_{i_1\dots i_k,j_1\dots j_{d-k}}\overline{x_{i_1}\dots x_{i_k}}x_{j_1} \dots x_{j_{d-k}} $$ is real-valued if and only if
$$ a_{i_1\dots i_k,j_1\dots j_{d-k}}= \overline{a_{j_1\dots j_{d-k},i_1\dots i_k}}, \quad\forall\,1\le i_1 \le \dots \le i_k \le n, \,1\le j_1 \le \dots \le j_{d-k} \le n,\, 0\leq k \leq d. $$
\end{corollary}
Let us now prove Theorem~\ref{thm:realvalue}. We first show the `if' part of the theorem, which is quite straightforward. To see this, for any pair of conjugate monomials (including self-conjugate monomial as a special case) of a conjugate complex polynomial: $au_C(\boldsymbol{x})$ and $b\overline{u_C(\boldsymbol{x})}$ with $a,b\in\mathbb{C}$ being their coefficients, if $\overline{a}=b$, then $$
\overline{au_C(\boldsymbol{x})+b\overline{u_C(\boldsymbol{x})}} = \overline{au_C(\boldsymbol{x})+\overline{a}\overline{u_C(\boldsymbol{x})}} = au_C(\boldsymbol{x}) + \overline{a}\overline{u_C(\boldsymbol{x})} = au_C(\boldsymbol{x})+b\overline{u_C(\boldsymbol{x})}, $$ implying that $a u_C(\boldsymbol{x})+b \overline{u_C(\boldsymbol{x})}$ is real-valued. Since all the conjugate monomials of a conjugate complex polynomial can be partitioned by conjugate pairs and self-conjugate monomials, the result follows immediately.
To proceed to the `only if' part of the theorem, let us first consider an easier case of univariate conjugate polynomials. \begin{lemma}\label{thm:uni-zero} A univariate conjugate complex polynomial $\sum_{\ell=0}^d\sum_{k=0}^{\ell} b_{k,\ell-k}\overline{x}^kx^{\ell-k}=0$ for all $x\in\mathbb{C}$ if and only if all its coefficients are zeros, i.e., $b_{k,\ell-k}=0$ for all $0\le \ell\le d$ and $0\le k\le \ell$. \end{lemma} \begin{proof} Let $x = \rho e^{\mbox{\bf i} \theta}$ with $\rho\ge0$ and $\theta\in[0,2\pi)$, and the identity can be rewritten as \begin{equation}\label{eq:unipoly} \sum_{\ell=0}^d\left(\sum_{k=0}^{\ell} b_{k,\ell-k} e^{\mbox{\bf i}(\ell-2k)\theta}\right)\rho^\ell = 0. \end{equation} For any fixed $\theta$, the function can be viewed as a polynomial with respect to $\rho$. Therefore the coefficient of the highest degree monomial $\rho^d$ must be zero, i.e., $$ \sum_{k=0}^d b_{k,d-k} e^{\mbox{\bf i} (d-2k)\theta} = 0, \quad \forall\, \theta\in[0,2\pi). $$ Consequently we have for any $\theta\in[0,2\pi)$,
\begin{align} &\sum_{k=0}^d \textnormal{Re}\,(b_{k,d-k})\cos ((d-2k)\theta)-\sum_{k=0}^d \textnormal{Im}\,(b_{k,d-k})\sin ((d-2k)\theta) =0, \label{eq:unipoly1} \\ &\sum_{k=0}^d \textnormal{Im}\,(b_{k,d-k})\cos ((d-2k)\theta) + \sum_{k=0}^d \textnormal{Re}\,( b_{k,d-k}) \sin ((d-2k)\theta)=0. \label{eq:unipoly2} \end{align} The first and second parts of~\eqref{eq:unipoly1} can be respectively simplified as \begin{align*} &~~~~\sum_{k=0}^d \textnormal{Re}\,(b_{k,d-k}) \cos ((d-2k)\theta)\\ &=\left\{ \begin{array}{ll} \sum_{k=0}^{\frac{d-1}{2}}\textnormal{Re}\,(b_{k,d-k}+b_{d-k,k})\cos((d-2k)\theta) & d\mbox{ is odd}\\ \sum_{k=0}^{ \frac{d-2}{2}}\textnormal{Re}\,(b_{k,d-k}+b_{d-k,k}) \cos((d-2k)\theta) + \textnormal{Re}\,(b_{d/2,d/2}) & d\mbox{ is even}
\end{array}
\right. \end{align*} and $$ \sum_{k=0}^d \textnormal{Im}\,(b_{k,d-k}) \sin ((d-2k)\theta)= \sum_{k=0}^{\lfloor \frac{d-1}{2}\rfloor} \textnormal{Im}\,(b_{k,d-k}-b_{d-k,k}) \sin((d-2k)\theta). $$ By the orthogonality of the trigonometric functions, the above further leads to $$ \textnormal{Re}\,(b_{k,d-k}+b_{d-k,k})=\textnormal{Im}\,(b_{k,d-k}-b_{d-k,k})=0, \quad\forall\,k=0,1,\dots,d. $$ Similarly,~\eqref{eq:unipoly2} implies $$ \textnormal{Re}\,(b_{k,d-k}-b_{d-k,k})=\textnormal{Im}\,(b_{k,d-k}+b_{d-k,k})=0, \quad\forall\,k=0,1,\dots,d. $$ Combining the above two sets of identities yields $$ b_{k,d-k}=0, \quad\forall\, k=0,1,\dots,d. $$ The degree of the function in~\eqref{eq:unipoly} (in terms of $\rho$) is then reduced by 1. The desired result follows obviously. \end{proof}
Let us now extend Lemma~\ref{thm:uni-zero} to general multivariate conjugate polynomials. \begin{lemma}\label{thm:multi-zero} An $n$-dimensional $d$-th degree conjugate complex polynomial
$$
f_C(\boldsymbol{x})=\sum_{\ell=0}^d \sum_{k=0}^\ell\, \sum_{1\le i_1\le \dots \le i_k \le n}\, \sum_{1\le j_1 \le \dots \le j_{\ell-k} \le n} b_{i_1\dots i_k,j_1\dots j_{\ell-k}}\overline{x_{i_1}\dots x_{i_k}}x_{j_1} \dots x_{j_{\ell-k}} = 0 $$ for all $\boldsymbol{x}\in\mathbb{C}^n$ if and only if all its coefficients are zeros, i.e.,
$b_{i_1\dots i_k,j_1\dots j_{\ell-k}}=0$ for all $0\le\ell\le d$, $0\le k\le \ell$, $1\le i_1 \le\dots \le i_k \le n$ and $1\le j_1 \le\dots \le j_{\ell-k} \le n$. \end{lemma} \begin{proof} We shall prove the result by induction on the dimension $n$. The case $n=1$ is already shown in Lemma~\ref{thm:uni-zero}. Suppose the claim holds for all positive integers no more than $n-1$. Then for the dimension $n$, the conjugate polynomial $f_C(\boldsymbol{x})$ can be rewritten according to the degrees of $\overline{x_1}$ and $x_1$ as $$ f_C(\boldsymbol{x}) = \sum_{\ell=0}^d\sum_{k=0}^{\ell}\overline{x_1}^k {x_1}^{\ell-k} h_C^{\ell k} (x_2,\dots,x_n). $$ For any given $x_2,\dots,x_n\in\mathbb{C}$, taking $f_C$ as a univariate conjugate polynomial of $x_1$, by Lemma~\ref{thm:uni-zero} we have $$ h_C^{\ell k}(x_2,\dots,x_n)=0, \quad\forall\, 0\le \ell\le d,\, 0\le k\le \ell. $$ For any given $(\ell,k)$, as $h_C^{\ell k}(x_2,\dots,x_n)$ is a conjugate polynomial of dimension at most $n-1$, by the induction hypothesis all the coefficients of $h_C^{\ell k}$ are zeros. Observing that all the coefficients of $f_C$ are distributed in the coefficients of $h_C^{\ell k}$ for all $(\ell,k)$, the result is proven for dimension $n$.
\end{proof}
With Lemma~\ref{thm:multi-zero} at hand, we can finally complete the `only if' part of Theorem~\ref{thm:realvalue}. Suppose a conjugate polynomial $f_C(\boldsymbol{x})$ is real-valued for all $\boldsymbol{x}\in\mathbb{C}^n$. Clearly we have $f_C(\boldsymbol{x})-\overline{f_C(\boldsymbol{x})}=0$ for all $\boldsymbol{x}\in\mathbb{C}^n$, i.e., $$
\sum_{\ell=0}^d \sum_{k=0}^\ell \,
\sum_{1\le i_1\le \dots \le i_k \le n} \,\sum_{1\le j_1 \le \dots \le j_{\ell-k} \le n}
\left(b_{i_1\dots i_k,j_1\dots j_{\ell-k}}-\overline{b_{j_1\dots j_{\ell-k},i_1\dots i_k}}\right)
\overline{x_{i_1}\dots x_{i_k}}x_{j_1} \dots x_{j_{\ell-k}}=0. $$ By Lemma~\ref{thm:multi-zero} it follows that $b_{i_1\dots i_k,j_1\dots j_{\ell-k}} -\overline{b_{j_1\dots j_{\ell-k},i_1\dots i_k}}=0$ for all $0\le\ell\le d$, $0\le k\le \ell$, $1\le i_1 \le\dots \le i_k \le n$ and $1\le j_1 \le\dots \le j_{\ell-k} \le n$, proving the `only if' part of Theorem~\ref{thm:realvalue}.
With Theorem~\ref{thm:realvalue}, in particular Corollary~\ref{thm:condition}, we are in a position to characterize the tensor representations for real-valued conjugate forms. Before concluding this subsection, let us present an alternative representation of real-valued symmetric conjugate forms, as a consequence of Corollary~\ref{thm:condition}.
\begin{proposition} \label{thm:polyrealvalued}
A symmetric conjugate form $f_S(\boldsymbol{x})$ is real-valued if and only if $$f_S(\boldsymbol{x})=\sum_{k=1}^m\alpha_k |h_k(\boldsymbol{x})|^2,$$ where $h_k(\boldsymbol{x})$ is a complex form and $\alpha_k\in\mathbb{R}$ for all $1\le k\le m$. \end{proposition} \begin{proof} The `if' part is trivial. Next we prove the `only if' part of the proposition. If $f_S(\boldsymbol{x})$ is real-valued, by Corollary~\ref{thm:condition} we have~\eqref{eq:sform-condition}. Then for any $1\le i_1\le \dots \le i_d \le n$ and $1\le j_1 \le \dots \le j_d \le n$, the sum of the conjugate pair satisfies \begin{align*} &~~~~a_{i_1\dots i_d,j_1\dots j_d}\overline{ x_{i_1}\dots x_{i_d}}x_{j_1} \dots x_{j_d}+ a_{j_1\dots j_d,i_1\dots i_d}\overline{ x_{j_1} \dots x_{j_d}}x_{i_1}\dots x_{i_d}\\ &=a_{i_1\dots i_d,j_1\dots j_d}\overline{ x_{i_1}\dots x_{i_d}}x_{j_1} \dots x_{j_d}+ \overline{a_{i_1\dots i_d,j_1\dots j_d}}\overline{ x_{j_1} \dots x_{j_d}}x_{i_1}\dots x_{i_d}\\
&=|x_{i_1}\dots x_{i_d} + a_{i_1\dots i_d,j_1\dots j_d}x_{j_1}
\dots x_{j_d} |^2-|x_{i_1}\dots x_{i_d}|^2-|a_{i_1\dots i_d,j_1\dots j_d}x_{j_1} \dots x_{j_d} |^2. \end{align*} Summing up all such pairs (taking half if it is a self-conjugate pair), the conclusion follows. \end{proof}
Similarly we have the following result for general conjugate forms. \begin{proposition} A general conjugate form $f_G(\boldsymbol{x})$ is real-valued if and only if
$$f_G(\boldsymbol{x})=\sum_{k=1}^m \alpha_k |h_k(\boldsymbol{x})|^2,$$ where $h_k(\boldsymbol{x})$ is a complex polynomial and $\alpha_k\in\mathbb{R}$ for all $1\le k\le m$. \end{proposition}
\subsection{Conjugate partial-symmetric tensors} \label{sec:ctensor}
As any symmetric conjugate form uniquely defines a partial-symmetric tensor (Lemma~\ref{thm:tensorS}), it is interesting to see more structured tensor representations for real-valued symmetric conjugate forms.
\begin{definition}\label{thm:cps}
An even order tensor $\mathcal{F}\in\mathbb{C}^{n^{2d}}$ is called conjugate partial-symmetric if\\
(i) $\mathcal{F}_{i_1\dots i_d i_{d+1}\dots i_{2d}} = \mathcal{F}_{j_1\dots j_d j_{d+1}\dots j_{2d}}$ for all $(j_1\dots j_d) \in \Pi (i_1\dots i_d)$ and $(j_{d+1}\dots j_{2d})\in\Pi(i_{d+1}\dots i_{2d})$, and \\
(ii) $\mathcal{F}_{i_1\dots i_d i_{d+1}\dots i_{2d}} = \overline{\mathcal{F}_{i_{d+1}\dots i_{2d} i_1\dots i_d}}$\\
hold for all $1\le i_1\le\dots\le i_{d}\le n$ and $1\le i_{d+1}\le\dots\le i_{2d}\le n$. \end{definition}
We remark that when $d=1$, a conjugate partial-symmetric tensor is simply a Hermitian matrix. For a general even order, the square matrix flattening of a conjugate partial-symmetric tensor, i.e., flattening a tensor in $\mathbb{C}^{n^{2d}}$ to a matrix in $\mathbb{C}^{(n^d)^2}$ by grouping the tensor's first $d$ modes into the rows of the matrix and its last $d$ modes into the columns of the matrix, is actually a Hermitian matrix.
The conjugate partial-symmetric tensors and the real-valued symmetric conjugate forms are connected as follows.
\begin{proposition} \label{thm:conju-mapping} Any $n$-dimensional $2d$-th order conjugate partial-symmetric tensor $\mathcal{F}\in\mathbb{C}^{n^{2d}}$ uniquely defines (under $\mathbf{S}$) an $n$-dimensional $2d$-th degree real-valued symmetric conjugate form, and vice versa (under $\mathbf{S}^{-1}$). \end{proposition} \begin{proof} For any conjugate partial-symmetric tensor $\mathcal{F}$, $f_S=\mathbf{S}(\mathcal{F})$ satisfies \begin{align*} \overline{f_S(\boldsymbol{x})}=\overline{\mathcal{F}(\underbrace{\overline{\boldsymbol{x}},\dots,\overline{\boldsymbol{x}}}_d,\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_d)} &= \sum_{i_1=1}^n\dots\sum_{i_{2d}=1}^n \overline{\mathcal{F}_{i_1\dots i_d i_{d+1}\dots i_{2d}}\overline{x_{i_1}\dots x_{i_d}}x_{i_{d+1}} \dots x_{i_{2d}}}\\ &= \sum_{i_1=1}^n\dots\sum_{i_{2d}=1}^n \overline{\mathcal{F}_{i_1\dots i_d i_{d+1}\dots i_{2d}}}x_{i_1}\dots x_{i_d}\overline{x_{i_{d+1}} \dots x_{i_{2d}}}\\ &= \sum_{i_1=1}^n\dots\sum_{i_{2d}=1}^n \mathcal{F}_{i_{d+1}\dots i_{2d}i_1\dots i_d}\overline{x_{i_{d+1}} \dots x_{i_{2d}}}x_{i_1}\dots x_{i_d}\\ &= f_S(\boldsymbol{x}), \end{align*} implying that $f_S$ is real-valued.
On the other hand, for any real-valued symmetric conjugate form $f_S(\boldsymbol{x})$ in~\eqref{eq:sform}, it follows from Corollary~\ref{thm:condition} that $a_{i_1\dots i_d,j_1\dots j_d}=\overline{a_{j_1\dots j_d,i_1\dots i_d}}$ holds for all the possible $(i_1,\dots,i_d,j_1,\dots,j_d)$. By~\eqref{eq:sinverse}, its tensor representation $\mathcal{F}=\mathbf{S}^{-1}(f_S)$ with $$
\mathcal{F}_{i_1\dots i_d i_{d+1}\dots i_{2d}}=\frac{a_{i_1\dots i_d ,i_{d+1}\dots i_{2d}}} {|{\Pi}(i_1\dots i_d)| \cdot |{\Pi}(i_{d+1}\dots i_{2d})| } $$ satisfies the 2nd condition in Definition~\ref{thm:cps}, proving the conjugate partial-symmetricity of $\mathcal{F}$. \end{proof}
Below is a useful property for conjugate partial-symmetric tensors, in the same vein as Proposition~\ref{thm:polyrealvalued} for the real-valued symmetric conjugate forms. \begin{proposition} \label{thm:conju-decomp}
An even order tensor $\mathcal{F}\in\mathbb{C}^{n^{2d}}$ is conjugate partial-symmetric if and only if
$$
\mathcal{F} = \sum_{k=1}^m\alpha_k\overline{\mathcal{H}_k}\otimes \mathcal{H}_k,
$$
where $\mathcal{H}_k\in\mathbb{C}^{n^d}$ is symmetric and $\alpha_k\in\mathbb{R}$ for all $1\le k\le m$. \end{proposition} \begin{proof} According to Definition~\ref{thm:cps}, it is straightforward to verify that $\sum_{k=1}^m\alpha_k\overline{\mathcal{H}_k}\otimes \mathcal{H}_k$ is conjugate partial-symmetric, proving the `if' part of the proposition. Let us now prove the `only if' part.
By Proposition~\ref{thm:conju-mapping}, $\mathbf{S}(\mathcal{F})$ is a real-valued symmetric conjugate form. Further by Proposition~\ref{thm:polyrealvalued}, $\mathbf{S}(\mathcal{F})$ can be written as
$$\mathcal{F}(\underbrace{\overline{\boldsymbol{x}},\dots,\overline{\boldsymbol{x}}}_d, \underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_d)=\sum_{k=1}^m\alpha_k |h_k(\boldsymbol{x})|^2,$$ where $h_k(\boldsymbol{x})$ is a complex form and $\alpha_k\in\mathbb{R}$ for $k=1,\dots,m$. Let $\mathcal{H}_k\in\mathbb{C}^{n^d}$ be the symmetric complex tensor associated with the complex form $h_k(\boldsymbol{x})$ for $k=1,\dots,m$; i.e., $$\mathcal{H}_k(\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_d)=h_k(\boldsymbol{x}).$$ We have \begin{equation}\label{eq:HHexpand}
|h_k(\boldsymbol{x})|^2 =\overline{h_k(\boldsymbol{x})} h_k(\boldsymbol{x})
=\overline{\mathcal{H}_k}(\underbrace{\overline{\boldsymbol{x}},\dots,\overline{\boldsymbol{x}}}_d) \mathcal{H}_k(\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_d) =(\overline{\mathcal{H}_k}\otimes \mathcal{H}_k)(\underbrace{\overline{\boldsymbol{x}},\dots,\overline{\boldsymbol{x}}}_d,\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_d). \end{equation} Thus, the symmetric conjugate form $\mathbf{S}\left(\sum_{k=1}^m\alpha_k\overline{\mathcal{H}_k}\otimes \mathcal{H}_k\right)$ satisfies \begin{align*}
\left(\sum_{k=1}^m\alpha_k\overline{\mathcal{H}_k}\otimes \mathcal{H}_k\right) (\underbrace{\overline{\boldsymbol{x}},\dots,\overline{\boldsymbol{x}}}_d,\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_d)
&= \sum_{k=1}^m\alpha_k(\overline{\mathcal{H}_k}\otimes \mathcal{H}_k)(\underbrace{\overline{\boldsymbol{x}},\dots,\overline{\boldsymbol{x}}}_d,\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_d) \\
&= \sum_{k=1}^m\alpha_k |h_k(\boldsymbol{x})|^2 \\
&= \mathcal{F}(\underbrace{\overline{\boldsymbol{x}},\dots,\overline{\boldsymbol{x}}}_d, \underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_d), \end{align*} i.e., $\mathbf{S}\left(\sum_{k=1}^m\alpha_k\overline{\mathcal{H}_k}\otimes \mathcal{H}_k\right)=\mathbf{S}(\mathcal{F})$. As $\mathbf{S}$ is bijective, we have $\mathcal{F}=\sum_{k=1}^m\alpha_k\overline{\mathcal{H}_k}\otimes \mathcal{H}_k$. \end{proof}
\subsection{Conjugate super-symmetric tensors} \label{sec:gtensor}
Similarly to real-valued symmetric conjugate forms, we have the following tensor representations for real-valued general conjugate forms. \begin{definition}\label{thm:css} An even-dimensional tensor $\mathcal{F}\in\mathbb{C}^{(2n)^d}$ is called conjugate super-symmetric if \\ {(i) $\mathcal{F}$ is symmetric, and \\
(ii) $\mathcal{F}_{i_1\dots i_d} = \overline{\mathcal{F}_{j_1\dots j_d }}$ holds for all $1\le i_1, \dots, i_d, j_1,\dots, j_d\le 2n$ with $|i_k -j_k| = n$ for $k=1,\dots,d$.} \end{definition}
We remark that the conjugate super-symmetricity is actually {\em stronger} than the ordinary symmetricity for complex tensors since a second condition in Definition~\ref{thm:css} is required. Actually, this condition is to ensure that the general conjugate form $$\mathbf{G}(\mathcal{F})(\boldsymbol{x})=\mathcal{F}\bigg(\underbrace{\binom{\overline\boldsymbol{x}}{\boldsymbol{x}},\dots,\binom{\overline\boldsymbol{x}}{\boldsymbol{x}}}_d\bigg)$$
is real-valued. This is because if $|i_k -j_k| = n$ holds for $k=1,\dots,d$, then the monomial with coefficient $\mathcal{F}_{i_1\dots i_d}$ and the monomial with coefficient $\mathcal{F}_{j_1\dots j_d}$ in the above form are actually a conjugate pair by noticing that the position of a conjugate variable $\overline{x_i}$ and that of a usual variable $x_i$ in the vector $\binom{\overline{\boldsymbol{x}}}{\boldsymbol{x}}$ differ exactly by $n$ for every $i$. Under the mapping $\mathbf{G}$ defined in Section~\ref{sec:gform}, it is straightforward to verify the following tensor representations for real-valued general conjugate forms.
\begin{proposition}\label{thm:gform-rv} Any $2n$-dimensional $d$-th order conjugate super-symmetric tensor $\mathcal{F}\in\mathbb{C}^{(2n)^d}$ uniquely defines (under $\mathbf{G}$) an $n$-dimensional $d$-th degree real-valued general conjugate form, and vice versa (under $\mathbf{G}^{-1}$). \end{proposition}
\section{Eigenvalues and eigenvectors of complex tensors}\label{sec:eigenvalue}
As mentioned earlier, Lim~\cite{L05} and Qi~\cite{Q05} independently proposed to systematically study the eigenvalues and eigenvectors for real tensors. Subsequently, the topic has attracted much attention due to the potential applications in magnetic resonance imaging, polynomial optimization theory, quantum physics, statistical data analysis, higher order Markov chains, and so on. After that, this study was also extended to complex tensors~\cite{Q07,NQWW07,CS13} without considering the conjugate variables.
Zhang and Qi in~\cite{ZQ12} proposed the so-called Q-eigenvalues of complex tensors. \begin{definition}[Zhang and Qi~\cite{ZQ12}]\label{thm:Qeigen} A scalar $\lambda$ is called a Q-eigenvalue of a symmetric complex tensor $\mathcal{H}$, if there exists a vector $\boldsymbol{x}$ called Q-eigenvector, such that \begin{equation}\label{eq:Qeigen} \left\{ \begin{array}{l} \mathcal{H}(\bullet,\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_{d-1})=\lambda\overline\boldsymbol{x}\\ \boldsymbol{x}^{\textnormal{H}}\boldsymbol{x}=1\\ \lambda \in \mathbb{R}. \end{array} \right. \end{equation} \end{definition}
Throughout this paper, the notation `$\bullet$' stands for a position left for a vector entry. In Definition~\ref{thm:Qeigen}, as the corresponding complex tensor does not have conjugate-type symmetricity, the Q-eigenvalue does not specialize to the classical eigenvalues of Hermitian matrices. In particular, $\lambda\in\mathbb{R}$ is required in the system~\eqref{eq:Qeigen}. Later on, Ni et al.~\cite{NQB14} defined the notion of unitary symmetric eigenvalue (US-eigenvalue) {and demonstrated a relation with the geometric measure of quantum entanglement}. \begin{definition}[Ni et al.~\cite{NQB14}]\label{thm:USeigen} A scalar $\lambda$ is called a US-eigenvalue of a symmetric complex tensor $\mathcal{H}$, if there exists a vector $\boldsymbol{x}$ called US-eigenvector, such that \begin{equation}\label{eq:USeigen} \left\{ \begin{array}{l} \overline\mathcal{H}(\bullet,\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_{d-1})=\lambda\overline\boldsymbol{x}\\ \mathcal{H}(\bullet,\underbrace{\overline\boldsymbol{x},\dots,\overline\boldsymbol{x}}_{d-1})=\lambda\boldsymbol{x}\\ \boldsymbol{x}^{\textnormal{H}}\boldsymbol{x}=1. \end{array} \right. \end{equation} \end{definition}
{ In fact, the Q-eigenvalue and the US-eigenvalue are essentially the same. \begin{proposition}
$(\lambda, \boldsymbol{x})$ is a pair of Q-eigenvalue and Q-eigenvector if and only if $(\lambda, \overline{\boldsymbol{x}})$ is a pair of US-eigenvalue and US-eigenvector. \end{proposition} \begin{proof}
First, Definition~\ref{thm:USeigen} implies that a US-eigenvalue is always real. To see this, pre-multiplying $\boldsymbol{x}^{\textnormal{T}}$ to the first equation of~\eqref{eq:USeigen} gives
$$\overline\mathcal{H}(\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_{d})=\lambda\boldsymbol{x}^{\textnormal{T}}\overline\boldsymbol{x}=\lambda,$$
and pre-multiplying $\overline\boldsymbol{x}^{\textnormal{T}}$ to the second equation of~\eqref{eq:USeigen} yields
$$\mathcal{H}(\underbrace{\overline\boldsymbol{x},\dots,\overline\boldsymbol{x}}_{d})=\lambda\overline\boldsymbol{x}^{\textnormal{T}}\boldsymbol{x}=\lambda.$$
Therefore $\overline\lambda=\lambda$ and so $\lambda\in\mathbb{R}$. This actually implies that the first and second equations of~\eqref{eq:USeigen} are the same by applying the conjugation to the second one. Thus,~\eqref{eq:USeigen} is equivalent to
\begin{equation*}
\left\{
\begin{array}{l}
\mathcal{H}(\bullet,\underbrace{\overline\boldsymbol{x},\dots,\overline\boldsymbol{x}}_{d-1})=\lambda\boldsymbol{x}\\
\boldsymbol{x}^{\textnormal{H}}\boldsymbol{x}=1\\
\lambda\in\mathbb{R}.
\end{array}
\right.
\end{equation*}
The claimed equivalence is obvious by comparing the above system with~\eqref{eq:Qeigen}. \end{proof}
In terms of eigenvalues, Definitions~\ref{thm:Qeigen} and~\ref{thm:USeigen} are the same.} Now with all the new notions introduced in the previous sections---in particular the bijection between conjugate partial-symmetric tensors and real-valued symmetric conjugate forms, and the bijection between conjugate super-symmetric tensors and real-valued general conjugate forms---we are able to present new definitions and properties of eigenvalues for complex tensors, which are naturally related to those of Hermitian matrices.
\subsection{Definitions and properties of eigenvalues} \label{sec:eigendef}
Let us first introduce two types of eigenvalues for conjugate partial-symmetric tensors and conjugate super-symmetric tensors. \begin{definition}\label{thm:Ceigen} $\lambda\in\mathbb{C}$ is called a C-eigenvalue of a conjugate partial-symmetric tensor $\mathcal{F}$, if there exists a vector $\boldsymbol{x} \in \mathbb{C}^n$ called C-eigenvector, such that \begin{equation}\label{eq:Ceigen} \left\{ \begin{array}{l} \mathcal{F}(\bullet,\underbrace{\overline{\boldsymbol{x}},\dots,\overline{\boldsymbol{x}}}_{d-1}, \underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_d)=\lambda \boldsymbol{x}\\ \boldsymbol{x}^{\textnormal{H}}\boldsymbol{x}=1.
\end{array}\right. \end{equation} \end{definition}
\begin{definition}\label{thm:Geigen} $\lambda\in\mathbb{C}$ is called a G-eigenvalue of a conjugate super-symmetric tensor $\mathcal{F}$, if there exists a vector $\boldsymbol{x} \in \mathbb{C}^n$ called G-eigenvector, such that \begin{equation}\label{eq:Geigen} \left\{ \begin{array}{l} \mathcal{F}\bigg(\dbinom{\bullet}{\bullet}, \underbrace{\dbinom{\overline\boldsymbol{x}}{\boldsymbol{x}}, \dots, \dbinom{\overline\boldsymbol{x}}{\boldsymbol{x}}}_{d-1} \bigg)=\lambda \dbinom{\boldsymbol{x}}{\overline\boldsymbol{x}}\\ \boldsymbol{x}^{\textnormal{H}}\boldsymbol{x}=1.
\end{array}\right. \end{equation} \end{definition}
In fact, the two types of eigenvalues defined above are always real, although they are defined in the complex domain. This property generalizes the well-known property of Hermitian matrices. In particular, Definition~\ref{thm:Ceigen} includes eigenvalues of Hermitian matrices as a special case when $d=1$. \begin{proposition}\label{thm:eigen-real} {Every} $C$-eigenvalue of a conjugate partial-symmetric tensor is always real; so is {every} $G$-eigenvalue of a conjugate super-symmetric tensor. \end{proposition} \begin{proof} Suppose $(\lambda,\boldsymbol{x})$ is a C-eigenvalue and C-eigenvector pair of a conjugate partial-symmetric tensor $\mathcal{F}$. Multiplying ${\overline\boldsymbol{x}}^{\textnormal{T}}$ on both sides of the first equation in~\eqref{eq:Ceigen}, we get $$ \mathcal{F}(\underbrace{\overline{\boldsymbol{x}},\dots,\overline{\boldsymbol{x}}}_d,\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_d) =\lambda \overline\boldsymbol{x}^{\textnormal{T}}\boldsymbol{x}=\lambda. $$ As $\mathcal{F}$ is conjugate partial-symmetric, the left hand side of the above equation is real-valued, and so is $\lambda$.
Next, suppose $(\lambda,\boldsymbol{x})$ is a G-eigenvalue and G-eigenvector pair of a conjugate super-symmetric tensor $\mathcal{F}$. Multiplying $\binom{\overline\boldsymbol{x}}{\boldsymbol{x}}^{\textnormal{T}}$ on both sides of the first equation in~\eqref{eq:Geigen} yields $$ \mathcal{F}\bigg(\underbrace{\dbinom{\overline\boldsymbol{x}}{\boldsymbol{x}}, \dots, \dbinom{\overline\boldsymbol{x}}{\boldsymbol{x}}}_d \bigg)=\lambda {\binom{\overline\boldsymbol{x}}{\boldsymbol{x}}}^{\textnormal{T}} \dbinom{\boldsymbol{x}}{\overline\boldsymbol{x}} = 2 \lambda\boldsymbol{x}^{\textnormal{H}}\boldsymbol{x} = 2\lambda. $$ As $\mathcal{F}$ is conjugate super-symmetric, the left hand side of the above equation is real-valued, and so is $\lambda$.
\end{proof}
As a consequence of Proposition~\ref{thm:eigen-real},
one can similarly define the C-eigenvalue $\lambda\in\mathbb{R}$ and its corresponding C-eigenvector $\boldsymbol{x}\in\mathbb{C}^n$ for a conjugate partial-symmetric tensor $\mathcal{F}$ equivalently as follows. \begin{proposition}\label{thm:Ceigen2} $\lambda\in\mathbb{C}$ is a C-eigenvalue of a conjugate partial-symmetric tensor $\mathcal{F}$, if and only if there exists a vector $\boldsymbol{x}\in\mathbb{C}^n$, such that \begin{equation}\label{eq:Ceigen2} \left\{ \begin{array}{l} \mathcal{F}(\underbrace{\overline{\boldsymbol{x}},\dots,\overline{\boldsymbol{x}}}_{d}, \underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_{d-1},\bullet) =\lambda \overline\boldsymbol{x}\\ \boldsymbol{x}^{\textnormal{H}}\boldsymbol{x}=1.
\end{array}\right. \end{equation} \end{proposition}
One important property of the Z-eigenvalues for real symmetric tensors is that they can be fully characterized by the KKT solutions of a certain optimization problem~\cite{L05,Q05}. At first glance, this property may not hold for C-eigenvalues and G-eigenvalues since the real-valued complex functions are not analytic. Therefore, direct extension of the KKT condition of an optimization problem with such an objective function may not be valid. However, this class of functions is indeed analytic if we treat the complex variables and their conjugates as a whole due to the so-called Wirtinger calculus~\cite{R91} developed in the early 20th century. In the optimization context, without noticing the Wirtinger calculus, Brandwood~\cite{B83} first proposed the notion of complex gradient. In particular, the gradient of a real-valued complex function can be taken as $\left(\frac{\partial}{\partial \boldsymbol{x}}, \frac{\partial}{\partial \overline\boldsymbol{x}}\right)$. Interested readers are referred to~\cite{SBD12} for more discussions on the Wirtinger calculus in optimization with complex variables.
With the help of Wirtinger calculus, we are able to characterize C-eigenvalues and C-eigenvectors in terms of the KKT solutions. Therefore many optimization techniques can be applied to find the C-eigenvalues/eigenvectors for a conjugate partial-symmetric tensor. \begin{proposition}\label{prop:C-eigen} $\boldsymbol{x}\in\mathbb{C}^n$ is a C-eigenvector associated with a C-eigenvalue $\lambda\in\mathbb{R}$ for a conjugate partial-symmetric tensor $\mathcal{F}$ if and only if $\boldsymbol{x}$ is a KKT point of the optimization problem $$ \max_{\boldsymbol{x}^{\textnormal{H}}\boldsymbol{x}=1}\mathcal{F}(\underbrace{\overline\boldsymbol{x},\dots,\overline\boldsymbol{x}}_d, \underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_d) $$ with Lagrange multiplier being $d\lambda$ and the corresponding objective value being $\lambda$. \end{proposition} \begin{proof} {By the multilinearity of $\mathcal{F}$, the gradient on $\boldsymbol{x}$ of the real-valued symmetric conjugate form associated with $\mathcal{F}$ is given by $$ \mathcal{F}(\underbrace{\overline{\boldsymbol{x}},\dots,\overline{\boldsymbol{x}}}_{d},\bullet, \underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_{d-1}) + \cdots + \mathcal{F}(\underbrace{\overline{\boldsymbol{x}},\dots,\overline{\boldsymbol{x}}}_{d},\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_{d-1},\bullet) = d \cdot \mathcal{F}(\underbrace{\overline{\boldsymbol{x}},\dots,\overline{\boldsymbol{x}}}_{d},\bullet, \underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_{d-1}), $$ where the equality is due to the partial-symmetry of $\mathcal{F}$.}
Denote $\mu$ to be the Lagrange multiplier associated with the constraint $\boldsymbol{x}^{\textnormal{H}}\boldsymbol{x}=1$. The KKT condition gives rise to the equations $$ \left\{ \begin{array}{l} d \cdot \mathcal{F}(\bullet, \underbrace{\overline{\boldsymbol{x}},\dots,\overline{\boldsymbol{x}}}_{d-1},\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_d)- \mu \boldsymbol{x}=0\\ d \cdot \mathcal{F}(\underbrace{\overline{\boldsymbol{x}},\dots,\overline{\boldsymbol{x}}}_{d},\bullet, \underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_{d-1})- \mu \overline{\boldsymbol{x}}=0\\ \boldsymbol{x}^{\textnormal{H}}\boldsymbol{x}=1. \end{array} \right. $$ The conclusion follows immediately by comparing the above with~\eqref{eq:Ceigen} and~\eqref{eq:Ceigen2}. \end{proof}
Similarly, we have the following characterization. \begin{proposition}\label{prop:G-eigen} $\boldsymbol{x}\in\mathbb{C}^n$ is a G-eigenvector associated with a G-eigenvalue $\lambda\in\mathbb{R}$ for a conjugate super-symmetric tensor $\mathcal{F}$ if and only if $\boldsymbol{x}$ is a KKT point of the optimization problem $$ \max_{\boldsymbol{x}^{\textnormal{H}}\boldsymbol{x}=1} \mathcal{F}\bigg( \underbrace{\dbinom{\overline\boldsymbol{x}}{\boldsymbol{x}}, \dots, \dbinom{\overline\boldsymbol{x}}{\boldsymbol{x}}}_d \bigg) $$ with Lagrange multiplier being $d\lambda$ and the corresponding objective value being $\lambda$. \end{proposition}
\subsection{Eigenvalues of complex tensors and their relations}
Although the definitions of the C-eigenvalue, the G-eigenvalue, and the previously defined Q-eigenvalue and the US-eigenvalue involve different tensor spaces, they are indeed closely related. Our main result in this section essentially states that the Q-eigenvalue and the US-eigenvalue are special cases of the C-eigenvalue, and the C-eigenvalue is a special case of the G-eigenvalue.
\begin{theorem}\label{thm:q-c-eigen} Denote $\mathcal{H}\in\mathbb{C}^{n^d}$ to be a complex tensor and define $\mathcal{F}=\overline\mathcal{H}\otimes\mathcal{H}\in\mathbb{C}^{n^{2d}}$. It holds that\\ (i) $\mathcal{H} $ is symmetric if and only if $\mathcal{F}$ is conjugate partial-symmetric;\\ (ii) If $\mathcal{H}$ is symmetric, then all the C-eigenvalues of $\mathcal{F}$ are nonnegative;\\ (iii) If $\mathcal{H}$ is symmetric, then $\lambda^2$ is a C-eigenvalue of $\mathcal{F}$ if and only if $\lambda$ is a Q-eigenvalue (or a US-eigenvalue) of $\mathcal{H}$. \end{theorem} \begin{proof} (i) This equivalence can be easily verified by the definition of conjugate partial-symmetricity (Definition~\ref{thm:cps}).
(ii) Let $\boldsymbol{x}\in\mathbb{C}^n$ be a C-eigenvector associated with a C-eigenvalue $\lambda\in\mathbb{R}$ of $\mathcal{F}$. By multiplying $\overline{\boldsymbol{x}}^{\textnormal{T}}$ on both sides of the first equation in~\eqref{eq:Ceigen}, we obtain \begin{eqnarray*} \lambda &=& \mathcal{F}(\underbrace{\overline\boldsymbol{x},\dots,\overline\boldsymbol{x}}_d,\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_d) = (\overline\mathcal{H}\otimes\mathcal{H})(\underbrace{\overline\boldsymbol{x},\dots,\overline\boldsymbol{x}}_d,\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_d) = \overline\mathcal{H}(\underbrace{\overline\boldsymbol{x},\dots,\overline\boldsymbol{x}}_d)\cdot\mathcal{H}(\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_d) \\
&=& |\mathcal{H}(\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_d)|^2 \ge 0. \end{eqnarray*}
(iii) Since the Q-eigenvalue is the same as the US-eigenvalue, we only prove the former case. Suppose $\boldsymbol{x}\in\mathbb{C}^n$ is a Q-eigenvector associated with a Q-eigenvalue $\lambda\in\mathbb{R}$ of $\mathcal{H}$. By~\eqref{eq:Qeigen} we have $\boldsymbol{x}^{\textnormal{H}}\boldsymbol{x}=1$ and $\mathcal{H}(\bullet,\boldsymbol{x},\dots,\boldsymbol{x})=\lambda\overline\boldsymbol{x}$, and so $\mathcal{H}(\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_d)=\lambda\boldsymbol{x}^{\textnormal{T}}\overline\boldsymbol{x}=\lambda.$ By the similar derivation in the proof of (ii), we get \begin{align*} \mathcal{F}(\bullet,\underbrace{\overline\boldsymbol{x},\dots,\overline\boldsymbol{x}}_{d-1},\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_d) &= \overline\mathcal{H}(\bullet,\underbrace{\overline\boldsymbol{x},\dots,\overline\boldsymbol{x}}_{d-1})\cdot\mathcal{H}(\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_d) \\ &=\overline{\mathcal{H}(\bullet,\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_{d-1})}\cdot\lambda = \overline{\lambda\overline\boldsymbol{x}}\cdot\lambda=\lambda^2\boldsymbol{x}, \end{align*} implying that $\lambda^2$ is a C-eigenvalue of $\mathcal{F}$.
On the other hand, suppose $\boldsymbol{x}\in\mathbb{C}^n$ is a C-eigenvector associated with a nonnegative C-eigenvalue $\lambda^2$ of $\mathcal{F}$. Then by~\eqref{eq:Ceigen2} we have $\boldsymbol{x}^{\textnormal{H}}\boldsymbol{x}=1$ and \begin{align} \overline{\mathcal{H}(\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_d)} \cdot \mathcal{H}(\bullet,\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_{d-1}) &= \overline\mathcal{H}(\underbrace{\overline\boldsymbol{x},\dots,\overline\boldsymbol{x}}_d) \cdot \mathcal{H}(\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_{d-1},\bullet) \nonumber \\ &= \mathcal{F}(\underbrace{\overline\boldsymbol{x},\dots,\overline\boldsymbol{x}}_d,\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_{d-1},\bullet) = \lambda^2\overline\boldsymbol{x}, \label{eq:Qeigen-Ceigen} \end{align}
where the first equality is due to the symmetricity of $\mathcal{H}$. This leads to $|\mathcal{H}(\boldsymbol{x},\dots,\boldsymbol{x})|^2 =\lambda^2$. Let $\mathcal{H}(\boldsymbol{x},\dots,\boldsymbol{x})=\lambda e^{\mbox{\bf i}\theta}$ with {some fixed} $\theta\in[0,2\pi)$, and further define $\boldsymbol{y}=\boldsymbol{x} e^{-\mbox{\bf i}\theta/d}$. We then get $$ \mathcal{H}(\underbrace{\boldsymbol{y},\dots,\boldsymbol{y}}_d)=\mathcal{H}(\underbrace{\boldsymbol{x} e^{-\mbox{\bf i}\theta/d},\dots,\boldsymbol{x} e^{-\mbox{\bf i}\theta/d}}_d) = (e^{-\mbox{\bf i}\theta/d})^d\mathcal{H}(\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_d) =e^{-\mbox{\bf i}\theta}\lambda e^{\mbox{\bf i}\theta}=\lambda. $$ Now we are able to verify that $\boldsymbol{y}$ is a Q-eigenvector associated with Q-eigenvalue $\lambda$ of $\mathcal{H}$. Observing $\boldsymbol{y}^{\textnormal{H}}\boldsymbol{y}=(\boldsymbol{x} e^{-\mbox{\bf i}\theta/d})^{\textnormal{H}}\boldsymbol{x} e^{-\mbox{\bf i}\theta/d}=1$, and by~\eqref{eq:Qeigen-Ceigen}, \begin{align*} \lambda^2\overline\boldsymbol{x}&=\overline{\mathcal{H}(\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_d)} \cdot \mathcal{H}(\bullet,\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_{d-1})\\ &=\overline{\lambda e^{\mbox{\bf i}\theta}}\mathcal{H}(\bullet, \underbrace{\boldsymbol{y} e^{\mbox{\bf i}\theta/d},\dots,\boldsymbol{y} e^{\mbox{\bf i}\theta/d}}_{d-1})\\ &=\lambda e^{-\mbox{\bf i}\theta}(e^{\mbox{\bf i}\theta/d})^{d-1}\mathcal{H}(\bullet,\underbrace{\boldsymbol{y},\dots,\boldsymbol{y}}_{d-1}), \end{align*} we finally get $$\mathcal{H}(\bullet,\underbrace{\boldsymbol{y},\dots,\boldsymbol{y}}_{d-1})=\lambda \overline{\boldsymbol{x}}e^{\mbox{\bf i}\theta/d}=\lambda\overline{\boldsymbol{y} e^{\mbox{\bf i}\theta/d}} e^{\mbox{\bf i}\theta/d}=\lambda\overline\boldsymbol{y}.$$ \end{proof}
{As we saw} in Section~\ref{sec:preparation}, by definition, a symmetric conjugate form is a special general conjugate form. Hence in terms of their tensor representations, a conjugate partial-symmetric tensor is a special case of conjugate super-symmetric tensor, although they live in different tensor spaces. To study the relationship between the C-eigenvalue and the G-eigenvalue, let us { embed a conjugate partial-symmetric tensor} $\mathcal{F}\in\mathbb{C}^{n^{2d}}$ into the space $\mathbb{C}^{(2n)^{2d}}$. The conjugate super-symmetric tensor $\mathcal{G}\in\mathbb{C}^{(2n)^{2d}}$ corresponding to $\mathcal{F}$ is then defined by \begin{equation}\label{eq:cps-css} \mathcal{G}_{j_1\dots j_{2d}}=\left\{ \begin{array}{ll} \mathcal{F}_{i_1\dots i_{2d}}/\binom{2d}{d}, & (j_1\dots j_{2d}) \in \Pi(i_1,\dots, i_d,i_{d+1}+n,\dots,i_{2d}+n); \\ 0, & \mbox{otherwise}. \end{array} \right. \end{equation} For example when $d=1$, a conjugate partial-symmetric tensor is simply a Hermitian matrix $A \in \mathbb{C}^{n^2}$. Then its embedded conjugate super-symmetric tensor is $\left( \begin{smallmatrix} O & A/2 \\ A^{\textnormal{T}}/2 & O \end{smallmatrix} \right)\in\mathbb{C}^{(2n)^2}$, and clearly we have
$$
\overline{\boldsymbol{x}}^{\textnormal{T}}A\boldsymbol{x}=\dbinom{\overline{\boldsymbol{x}}}{\boldsymbol{x}}^{\textnormal{T}}\left(
\begin{array}{cc}
O & A/2 \\
A^{\textnormal{T}}/2 & O \\
\end{array}
\right)\dbinom{\overline{\boldsymbol{x}}}{\boldsymbol{x}}.
$$ In general it is straightforward to verify that \begin{equation}\label{eq:CG-link} \mathcal{F}(\underbrace{\overline{\boldsymbol{x}},\dots,\overline{\boldsymbol{x}}}_d,\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_d) =\mathcal{G}\bigg( \underbrace{\dbinom{\overline\boldsymbol{x}}{\boldsymbol{x}}, \dots, \dbinom{\overline\boldsymbol{x}}{\boldsymbol{x}}}_{2d} \bigg). \end{equation} Based on this, we are led to the following relationship between the C-eigenvalue and the G-eigenvalue. \begin{theorem}\label{thm:c-g-eigen} If $\mathcal{G}\in \mathbb{C}^{(2n)^{2d}}$ is a conjugate super-symmetric tensor induced by a conjugate partial-symmetric tensor $\mathcal{F} \in \mathbb{C}^{n^{2d}}$ according to~\eqref{eq:cps-css}, then $\lambda$ is a C-eigenvalue of $\mathcal{F}$ if and only if $\lambda/2$ is a G-eigenvalue of $\mathcal{G}$. \end{theorem} \begin{proof}
First, by taking the gradient $\left(\frac{\partial}{\partial \overline\boldsymbol{x}},\frac{\partial}{\partial \boldsymbol{x}} \right)$ on both sides of~\eqref{eq:CG-link}, we have that
$$
\dbinom{d \cdot \mathcal{F}(\bullet,\overbrace{\overline{\boldsymbol{x}},\dots,\overline{\boldsymbol{x}}}^{d-1},\overbrace{\boldsymbol{x},\dots,\boldsymbol{x}}^d)}
{d \cdot \mathcal{F}(\underbrace{\overline{\boldsymbol{x}},\dots,\overline{\boldsymbol{x}}}_d,\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_{d-1},\bullet) } = 2d \cdot \mathcal{G}\bigg(\dbinom{\bullet}{\bullet}, \underbrace{\dbinom{\overline\boldsymbol{x}}{\boldsymbol{x}}, \dots,
\dbinom{\overline\boldsymbol{x}}{\boldsymbol{x}}}_{2d-1} \bigg).
$$ Next, according to Definition~\ref{thm:Ceigen} and Proposition~\ref{thm:Ceigen2}, $\lambda$ is a C-eigenvalue of $\mathcal{F}$ if and only if there exists a vector $\boldsymbol{x}\in\mathbb{C}^n$ such that
$$ \left\{ \begin{array}{l} \mathcal{F}(\bullet, \underbrace{\overline{\boldsymbol{x}},\dots,\overline{\boldsymbol{x}}}_{d-1},\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_d) = \lambda \boldsymbol{x}\\ \mathcal{F}(\underbrace{\overline{\boldsymbol{x}},\dots,\overline{\boldsymbol{x}}}_d, \underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_{d-1}, \bullet) = \lambda \overline{\boldsymbol{x}}\\ \boldsymbol{x}^{\textnormal{H}}\boldsymbol{x}=1. \end{array} \right.
$$ Finally, according to Definition~\ref{thm:Geigen}, $\lambda/2$ is a G-eigenvalue of $\mathcal{G}$ if and only if there exists a vector $\boldsymbol{x}\in\mathbb{C}^n$ such that $$ \left\{ \begin{array}{l} \mathcal{G}\bigg(\dbinom{\bullet}{\bullet}, \underbrace{\dbinom{\overline\boldsymbol{x}}{\boldsymbol{x}}, \dots, \dbinom{\overline\boldsymbol{x}}{\boldsymbol{x}}}_{2d-1} \bigg)=\frac{\lambda}{2} \dbinom{\boldsymbol{x}}{\overline\boldsymbol{x}}\\ \boldsymbol{x}^{\textnormal{H}}\boldsymbol{x}=1.
\end{array}\right. $$ The conclusion follows immediately by combining the above three facts. \end{proof}
\section{Extending Banach's theorem to the real-valued conjugate forms}\label{sec:Banach}
A classical result originally due to Banach~\cite{B38} states that if $\mathcal{L}(\boldsymbol{x}^1,\dots,\boldsymbol{x}^d)$ is a continuous symmetric $d$-linear form, then \begin{equation}\label{banach}
\sup\{ |\mathcal{L}(\boldsymbol{x}^1,\dots,\boldsymbol{x}^d)| \mid \|\boldsymbol{x}^1\|\le1, \dots, \|\boldsymbol{x}^d\|\le1\} = \sup \{ |\mathcal{L}(\underbrace{\boldsymbol{x}, \dots, \boldsymbol{x}}_d)| \mid \|\boldsymbol{x}\|\le1\}. \end{equation} In the space of real tensors where $\boldsymbol{x}\in\mathbb{R}^n$ and $\mathcal{L}$ is a multilinear form defined by a real symmetric tensor $\mathcal{L}\in\mathbb{R}^{n^d}$,~\eqref{banach} states that the largest singular value~\cite{L05} of $\mathcal{L}$ is equal to the {largest eigenvalue~\cite{Q05} (in the absolute value sense) of $\mathcal{L}$ }, i.e., \begin{equation}\label{banach_real}
\max_{(\boldsymbol{x}^k)^{\textnormal{T}}\boldsymbol{x}^k= 1,\,\boldsymbol{x}^k \in \mathbb{R}^n,\,k=1,\dots,d} \mathcal{L}( \boldsymbol{x}^1, \dots, \boldsymbol{x}^d) = \max_{\boldsymbol{x}^{\textnormal{T}}\boldsymbol{x}=1,\,\boldsymbol{x} \in \mathbb{R}^n} | \mathcal{L}( \underbrace{\boldsymbol{x}, \dots, \boldsymbol{x}}_d ) |. \end{equation} Alternatively,~\eqref{banach_real} is essentially equivalent to the fact that the best rank-one approximation of a real symmetric tensor can be obtained at a symmetric rank-one tensor~\cite{CHLZ12,ZLQ12}. A recent development on this topic for special classes of real symmetric tensors can be found in~\cite{CHLZ14}. In this section, we shall extend Banach's theorem to { symmetric conjugate forms (the conjugate partial-symmetric tensors) and general conjugate} forms (the conjugate super-symmetric tensors).
\subsection{Equivalence for conjugate super-symmetric tensors} \label{sec:gbanach}
Let us start with conjugate super-symmetric tensors, which are a generalization of conjugate partial-symmetric tensors. A key observation leading to the equivalence (Theorem~\ref{thm:equal-con}) is the following result. \begin{lemma}\label{lemma:super-sym} For a given real tensor $\mathcal{F}\in \mathbb{R}^{n^d}$, if $\mathcal{F}(\boldsymbol{x}^1, \dots, \boldsymbol{x}^d) = \mathcal{F}( \boldsymbol{x}^{\pi(1)}, \dots, \boldsymbol{x}^{\pi(d)})$ for every $\boldsymbol{x}^1,\dots,\boldsymbol{x}^d \in \mathbb{R}^n$ and every permutation $\pi$ of $\{1,\dots,d \}$, then $\mathcal{F}$ is symmetric. \end{lemma}
Our first result in this section extends~\eqref{banach_real} to any conjugate super-symmetric tensors in the complex domain. \begin{theorem}\label{thm:equal-con} For any conjugate super-symmetric tensor $\mathcal{G}\in\mathbb{C}^{(2n)^d}$, we have \begin{equation}\label{eq:equal-conjugate}
\max_{\boldsymbol{x}^{\textnormal{H}}\boldsymbol{x}=1} \bigg{|} \mathcal{G}\bigg( \underbrace{\dbinom{\overline\boldsymbol{x}}{\boldsymbol{x}}, \dots, \dbinom{\overline\boldsymbol{x}}{\boldsymbol{x}}}_d \bigg) \bigg{|} = \max_{(\boldsymbol{x}^k)^{\textnormal{H}}\boldsymbol{x}^k=1,\,k=1,\dots,d}\textnormal{Re}\, \mathcal{G}\bigg( \dbinom{\,\overline{\boldsymbol{x}^1}\,}{\boldsymbol{x}^1}, \dots, \dbinom{\,\overline{\boldsymbol{x}^d}\,}{\boldsymbol{x}^d} \bigg). \end{equation} \end{theorem} \begin{proof} Let $\boldsymbol{y}^k = \binom{\textnormal{Re}\, \boldsymbol{x}^k}{\textnormal{Im}\, \boldsymbol{x}^k}\in\mathbb{R}^{2n}$ for $k=1,\dots, d$. We observe that $\textnormal{Re}\, \mathcal{G}\big( \binom{\,\overline{\boldsymbol{x}^1}\,}{\boldsymbol{x}^1}, \dots, \binom{\,\overline{\boldsymbol{x}^d\,}}{\boldsymbol{x}^d} \big)$ is also a multilinear form with respect to $\boldsymbol{y}^1,\dots,\boldsymbol{y}^d$. As a result, we are able to find a real tensor $\mathcal{F}\in \mathbb{R}^{(2n)^d}$ such that \begin{equation}\label{eq:real-conjugate} \mathcal{F}\big( \boldsymbol{y}^1, \dots, \boldsymbol{y}^d\big) = \textnormal{Re}\, \mathcal{G}\bigg(\,\dbinom{\,\overline{\boldsymbol{x}^1}\,}{\boldsymbol{x}^1}, \dots, \dbinom{\,\overline{\boldsymbol{x}^d}\,}{\boldsymbol{x}^d} \bigg). 
\end{equation} As $\mathcal{G}$ is conjugate super-symmetric, for any $\boldsymbol{y}^1,\dots,\boldsymbol{y}^d \in \mathbb{R}^{2n}$ and any permutation $\pi$ of $\{1,\dots,d\}$, one has \begin{align*} \mathcal{F}\big( \boldsymbol{y}^1, \dots, \boldsymbol{y}^d\big) &= \textnormal{Re}\, \mathcal{G}\bigg( \dbinom{\,\overline{\boldsymbol{x}^1}\,}{\boldsymbol{x}^1}, \dots, \dbinom{\,\overline{\boldsymbol{x}^d}\,}{\boldsymbol{x}^d} \bigg) \\ &= \textnormal{Re}\, \mathcal{G}\bigg( \dbinom{\,\overline{\boldsymbol{x}^{\pi(1)}}\,}{\boldsymbol{x}^{\pi(1)}}, \dots, \dbinom{\,\overline{\boldsymbol{x}^{\pi(d)}}\,}{\boldsymbol{x}^{\pi(d)}} \bigg)\\ &= \mathcal{F}\big( \boldsymbol{y}^{\pi(1)}, \dots, \boldsymbol{y}^{\pi(d)}\big). \end{align*} By Lemma~\ref{lemma:super-sym} we have that the real tensor $\mathcal{F}$ is symmetric. Finally, noticing that $(\boldsymbol{y}^k)^{\textnormal{T}}\boldsymbol{y}^k=(\boldsymbol{x}^k)^{\textnormal{H}}\boldsymbol{x}^k$ for $k=1,\dots,d$, the conclusion follows immediately by applying~\eqref{banach_real} to $\mathcal{F}$ and then using the equality~\eqref{eq:real-conjugate}. \end{proof}
\subsection{Equivalence for conjugate partial-symmetric tensors} \label{sec:cbanach}
For {extending Banach's theorem to a conjugate partial-symmetric tensor $\mathcal{F}\in\mathbb{C}^{n^{2d}}$, one could hope to proceed as follows. Since it is} a special case of the conjugate super-symmetric tensor, one can embed $\mathcal{F}$ into a conjugate super-symmetric tensor
$\mathcal{G}\in\mathbb{C}^{(2n)^{2d}}$ using~\eqref{eq:cps-css}. Then, by applying Theorem~\ref{thm:equal-con} to $\mathcal{G}$ and rewriting the real part of its associated multilinear form $\textnormal{Re}\,\mathcal{G}\big( \binom{\,\overline{\boldsymbol{x}^1}\,}{\boldsymbol{x}^1}, \dots, \binom{\,\overline{\boldsymbol{x}^{2d}}\,}{\boldsymbol{x}^{2d}} \big)$ in terms of $\mathcal{F}$, we may have an equivalent expression as~\eqref{eq:equal-conjugate}. However, this expression is not succinct.
Taking the case $d=2$ (degree 4) for example, it is straightforward to verify from~\eqref{eq:cps-css} that \begin{align*} &~~~~\textnormal{Re}\,\mathcal{G}\bigg(\binom{\,\overline{\boldsymbol{x}^1}\,}{\boldsymbol{x}^1},\binom{\,\overline{\boldsymbol{x}^2}\,}{\boldsymbol{x}^2}, \binom{\,\overline{\boldsymbol{x}^3}\,}{\boldsymbol{x}^3},\binom{\,\overline{\boldsymbol{x}^4}\,}{\boldsymbol{x}^4}\bigg)\\ &=\frac{1}{6} \left(\mathcal{F}(\overline{\boldsymbol{x}^1},\overline{\boldsymbol{x}^2},\boldsymbol{x}^3,\boldsymbol{x}^4) +\mathcal{F}(\overline{\boldsymbol{x}^1},\overline{\boldsymbol{x}^3},\boldsymbol{x}^2,\boldsymbol{x}^4) +\mathcal{F}(\overline{\boldsymbol{x}^1},\overline{\boldsymbol{x}^4},\boldsymbol{x}^2,\boldsymbol{x}^3)\right.\\ &~~~\left.+\mathcal{F}(\overline{\boldsymbol{x}^2},\overline{\boldsymbol{x}^3},\boldsymbol{x}^1,\boldsymbol{x}^4) +\mathcal{F}(\overline{\boldsymbol{x}^2},\overline{\boldsymbol{x}^4},\boldsymbol{x}^1,\boldsymbol{x}^3) +\mathcal{F}(\overline{\boldsymbol{x}^3},\overline{\boldsymbol{x}^4},\boldsymbol{x}^1,\boldsymbol{x}^2)\right)\\ &:=f_S'(\boldsymbol{x}^1,\boldsymbol{x}^2,\boldsymbol{x}^3,\boldsymbol{x}^4), \end{align*} and this would lead to $$
\max_{\boldsymbol{x}^{\textnormal{H}}\boldsymbol{x}=1} | \mathcal{F}(\overline{\boldsymbol{x}},\overline{\boldsymbol{x}},\boldsymbol{x},\boldsymbol{x}) | = \max_{(\boldsymbol{x}^k)^{\textnormal{H}}\boldsymbol{x}^k=1,\,k=1,2,3,4}f_S'(\boldsymbol{x}^1,\boldsymbol{x}^2,\boldsymbol{x}^3,\boldsymbol{x}^4). $$
Instead, one would hope to get \begin{equation}\label{eq:equivalence-conjugate}
\max_{\boldsymbol{x}^{\textnormal{H}}\boldsymbol{x}=1} | \mathcal{F}(\underbrace{\overline{\boldsymbol{x}},\dots,\overline{\boldsymbol{x}}}_d,\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_d) | = \max_{(\boldsymbol{x}^k)^{\textnormal{H}}\boldsymbol{x}^k=1,\,k=1,\dots,2d} \textnormal{Re}\,\mathcal{F}(\overline{\boldsymbol{x}^1},\dots,\overline{\boldsymbol{x}^d},\boldsymbol{x}^{d+1},\dots,\boldsymbol{x}^{2d}). \end{equation} However, this does not hold in general. The main reason is that $$\mathcal{G}\bigg( \dbinom{\,\overline{\boldsymbol{x}^1}\,}{\boldsymbol{x}^1}, \dots, \dbinom{\,\overline{\boldsymbol{x}^{2d}}\,}{\boldsymbol{x}^{2d}} \bigg)\neq \mathcal{F}(\overline{\boldsymbol{x}^1},\dots,\overline{\boldsymbol{x}^d},\boldsymbol{x}^{d+1},\dots,\boldsymbol{x}^{2d}),$$ which is easily observed since its left hand side is invariant under the permutation of $(\boldsymbol{x}^1,\dots,\boldsymbol{x}^{2d})$ while its right hand side is not. In particular,~\eqref{eq:equivalence-conjugate} only holds for $d=1$, viz.\ Hermitian matrices; {see the following proposition} and Example~\ref{ex:quarticfail}.
\begin{proposition}\label{degree2}
For any Hermitian matrix $Q\in\mathbb{C}^{n\times n}$, it holds that
$$
(L)\quad {\max_{\boldsymbol{z}^{\textnormal{H}}\boldsymbol{z}=1} \boldsymbol{z}^{\textnormal{H}} Q \boldsymbol{z} } = \max_{\boldsymbol{x}^{\textnormal{H}}\boldsymbol{x}=\boldsymbol{y}^{\textnormal{H}}\boldsymbol{y}=1} \textnormal{Re}\, \boldsymbol{x}^{\textnormal{T}} Q \boldsymbol{y}. \quad (R)
$$
Furthermore, for any optimal solution $(\boldsymbol{x}^*,\boldsymbol{y}^*)$ of $(R)$ with $\overline{\boldsymbol{x}^*}+\boldsymbol{y}^*\neq 0$, $(\overline{\boldsymbol{x}^*} + \boldsymbol{y}^*)/\|\overline{\boldsymbol{x}^*} + \boldsymbol{y}^*\|$ is an optimal solution of $(L)$ as well. \end{proposition} \begin{proof} Denote $v(L)$ and $v(R)$ to be the optimal values of $(L)$ and $(R)$, respectively. Noticing that $\textnormal{Re}\, \boldsymbol{x}^{\textnormal{T}} Q \boldsymbol{y} = \frac{1}{2}(\boldsymbol{x}^{\textnormal{T}} Q \boldsymbol{y} + \overline \boldsymbol{x}^{\textnormal{T}} \overline Q \overline \boldsymbol{y})$, by the optimality condition of $(R)$ we have that \begin{equation}\label{eq:2opt-condition-CQP} \left\{ \begin{array}{l} Q \boldsymbol{y}^* - 2\lambda \overline {\boldsymbol{x}^*} =0\\ \overline Q \overline {\boldsymbol{y}^*} - 2\lambda \boldsymbol{x}^* =0\\ \overline Q \boldsymbol{x}^* - 2\mu \overline {\boldsymbol{y}^*} =0\\ Q \overline {\boldsymbol{x}^*} - 2\mu \boldsymbol{y}^* =0\\ (\boldsymbol{x}^*)^{\textnormal{H}}\boldsymbol{x}^*=1\\ (\boldsymbol{y}^*)^{\textnormal{H}}\boldsymbol{y}^*=1, \end{array} \right. \end{equation} where $\lambda$ and $\mu$ are the Lagrangian multipliers of the constraints $\boldsymbol{x}^{\textnormal{H}}\boldsymbol{x}=1$ and $\boldsymbol{y}^{\textnormal{H}}\boldsymbol{y}=1$, respectively.
{Pre-multiplying the first two equations in~\eqref{eq:2opt-condition-CQP} with $(\boldsymbol{x}^*)^{\textnormal{T}}$ and $(\overline{\boldsymbol{x}^*})^{\textnormal{T}}$ respectively, and summing them up,} lead to $$2\textnormal{Re}\, (\boldsymbol{x}^*)^{\textnormal{T}} Q \boldsymbol{y}^* = (\boldsymbol{x}^*)^{\textnormal{T}} Q \boldsymbol{y}^* + (\overline{\boldsymbol{x}^*})^{\textnormal{T}} \overline Q \overline{\boldsymbol{y}^*} = 2 \lambda (\boldsymbol{x}^*)^{\textnormal{T}}\overline{\boldsymbol{x}^*} + 2 \lambda (\overline{\boldsymbol{x}^*})^{\textnormal{T}}\boldsymbol{x}^* = 4\lambda(\boldsymbol{x}^*)^{\textnormal{H}}\boldsymbol{x}^* = 4\lambda.$$ Similarly, the summation of the third and fourth equations in~\eqref{eq:2opt-condition-CQP} leads to $$2\textnormal{Re}\, (\boldsymbol{x}^*)^{\textnormal{T}} Q \boldsymbol{y}^* = 4\mu,$$ which further leads to \begin{equation}\label{eq:CQP} v(R)= \textnormal{Re}\, (\boldsymbol{x}^*)^{\textnormal{T}} Q \boldsymbol{y}^* = 2\lambda =2 \mu. \end{equation}
Moreover, the summation of the first and {fourth} equations in~\eqref{eq:2opt-condition-CQP} yields $$Q (\boldsymbol{y}^* + \overline{\boldsymbol{x}^*}) - 2\lambda (\overline {\boldsymbol{x}^*} + \boldsymbol{y}^*)=0,$$ which further leads to \begin{equation}\label{eq:CQP2}
(\boldsymbol{y}^* + \overline{\boldsymbol{x}^*})^{\textnormal{H}}Q (\boldsymbol{y}^* + \overline{\boldsymbol{x}^*}) =2\lambda (\boldsymbol{y}^* + \overline{\boldsymbol{x}^*})^{\textnormal{H}}(\overline {\boldsymbol{x}^*} + \boldsymbol{y}^*)= 2\lambda\|\overline {\boldsymbol{x}^*} + \boldsymbol{y}^*\|^2. \end{equation}
Let $\boldsymbol{z}^*=(\overline {\boldsymbol{x}^*} + \boldsymbol{y}^*)/\|\overline {\boldsymbol{x}^*} + \boldsymbol{y}^*\|$. Clearly $\boldsymbol{z}^*$ is a feasible solution of $(L)$. {By~\eqref{eq:CQP2} and~\eqref{eq:CQP} we have} $$(\boldsymbol{z}^*)^{\textnormal{H}}Q\boldsymbol{z}^*=2\lambda=\textnormal{Re}\, (\boldsymbol{x}^*)^{\textnormal{T}} Q \boldsymbol{y}^*=v(R).$$ This implies that $v(L)\ge v(R)$. Notice that $(R)$ is a relaxation of $(L)$ and hence $v(L)\le v(R)$. Therefore we conclude that $v(R)=v(L)$, and an optimal solution $\boldsymbol{z}^*$ of $(L)$ is constructed from an optimal solution $(\boldsymbol{x}^*,\boldsymbol{y}^*)$ of $(R)$. \end{proof}
\begin{example}\label{ex:quarticfail}
Let $\mathcal{F}\in\mathbb{C}^{2^4}$ with $\mathcal{F}_{1122}=\mathcal{F}_{2211}=1$ and other entries being zeros. Clearly $\mathcal{F}$ is conjugate partial-symmetric. In this case~\eqref{eq:equivalence-conjugate} fails to hold because:\\
(i) $|\mathcal{F}(\overline{\boldsymbol{x}},\overline{\boldsymbol{x}},\boldsymbol{x},\boldsymbol{x})|=|{\overline{x_1}}^2{x_2}^2+{\overline{x_2}}^2{x_1}^2| \le 2|x_1|^2|x_2|^2\le\frac{1}{2}(|x_1|^2+|x_2|^2)^2=\frac{1}{2}$ for any $\boldsymbol{x}\in\mathbb{C}^2$ with $\boldsymbol{x}^{\textnormal{H}}\boldsymbol{x}=1$.\\
(ii) $\mathcal{F}(\overline{\boldsymbol{x}},\overline{\boldsymbol{y}},\boldsymbol{z},\boldsymbol{w})=\overline{x_1}\overline{y_1}z_2w_2+\overline{x_2}\overline{y_2}z_1w_1=1$ for $\boldsymbol{x}=\boldsymbol{y}=(1,0)^{\textnormal{T}}$ and $\boldsymbol{z}=\boldsymbol{w}=(0,1)^{\textnormal{T}}$. \end{example}
Thus, Banach's theorem~\eqref{eq:equivalence-conjugate} does not hold in general for conjugate partial-symmetric tensors. A natural question arises: Is there any reasonable condition to ensure the identity to hold? Recall from Proposition~\ref{thm:conju-decomp} that every conjugate partial-symmetric tensor can be written as $\sum_{k=1}^m\alpha_k\overline{\mathcal{H}_k}\otimes \mathcal{H}_k$ where $\mathcal{H}_k\in\mathbb{C}^{n^d}$ is symmetric and $\alpha_k\in\mathbb{R}$ for all $1\le k\le m$. If further we have all $\alpha_k$'s being nonnegative, then~\eqref{eq:equivalence-conjugate} is true. Before presenting this result, we first need the following type of Banach's theorem for symmetric complex tensors, whose proof can be constructed almost identically to that of Theorem~\ref{thm:equal-con}.
\begin{proposition} \label{thm:complexBanach}
If $\mathcal{F}\in\mathbb{C}^{n^d}$ is symmetric, then \begin{equation} \max_{\boldsymbol{x}^{\textnormal{H}}\boldsymbol{x}=1} \textnormal{Re}\,\mathcal{F}(\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_d) = \max_{(\boldsymbol{x}^k)^{\textnormal{H}}\boldsymbol{x}^k=1,\,k=1,\dots,d} \textnormal{Re}\,\mathcal{F}(\boldsymbol{x}^1,\dots,\boldsymbol{x}^d). \end{equation} \end{proposition} \begin{theorem}\label{thm:Banach2} If a conjugate partial-symmetric tensor $\mathcal{F}\in\mathbb{C}^{n^{2d}}$ written as $\sum_{k=1}^m\alpha_k \overline{\mathcal{H}_k}\otimes \mathcal{H}_k$ satisfies that $\alpha_k\ge0$ for all $1\le k\le m$, then $$ (L')\quad\max_{\boldsymbol{x}^{\textnormal{H}}\boldsymbol{x}=1} \mathcal{F}(\underbrace{\overline{\boldsymbol{x}},\dots,\overline{\boldsymbol{x}}}_d, \underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_d) = \max_{(\boldsymbol{x}^k)^{\textnormal{H}}\boldsymbol{x}^k=1,\,k=1,\dots,2d} \textnormal{Re}\,\mathcal{F}(\boldsymbol{x}^1,\dots,\boldsymbol{x}^{2d}) \quad (R') $$ \end{theorem} \begin{proof} Let us first introduce a sandwiched optimization model: $$ (M')\quad \max_{\boldsymbol{y}^{\textnormal{H}}\boldsymbol{y}=\boldsymbol{z}^{\textnormal{H}}\boldsymbol{z}=1} \textnormal{Re}\,\mathcal{F}(\underbrace{\boldsymbol{y},\dots,\boldsymbol{y}}_d, \underbrace{\boldsymbol{z},\dots,\boldsymbol{z}}_d). $$ Denote $v(L')$, $v(M')$ and $v(R')$ to be the optimal values of $(L')$, $(M')$ and $(R')$, respectively. Clearly, $(R')$ is a relaxation of $(M')$, and $(M')$ is a relaxation of $(L')$, implying that $v(L')\le v(M') \le v(R')$.
Next, let $(\boldsymbol{x}^1_*,\dots,\boldsymbol{x}^{2d}_*)$ be an optimal solution of $(R')$. Consider the following problem: $$ \max_{\boldsymbol{y}^{\textnormal{H}}\boldsymbol{y}=1} \textnormal{Re}\,\mathcal{F}(\underbrace{\boldsymbol{y},\dots,\boldsymbol{y}}_d, \boldsymbol{x}^{d+1}_*,\dots,\boldsymbol{x}^{2d}_*), $$ whose optimal solution is denoted by $\boldsymbol{y}^*$. Noticing that $\mathcal{F}(\bullet,\dots,\bullet, \boldsymbol{x}^{d+1}_*,\dots,\boldsymbol{x}^{2d}_*)\in\mathbb{C}^{n^d}$ is symmetric, by Proposition~\ref{thm:complexBanach}, we have \begin{align*} \textnormal{Re}\,\mathcal{F}(\underbrace{\boldsymbol{y}^*,\dots,\boldsymbol{y}^*}_d, \boldsymbol{x}^{d+1}_*,\dots,\boldsymbol{x}^{2d}_*) &=\max_{\boldsymbol{y}^{\textnormal{H}}\boldsymbol{y}=1} \textnormal{Re}\,\mathcal{F}(\underbrace{\boldsymbol{y},\dots,\boldsymbol{y}}_d, \boldsymbol{x}^{d+1}_*,\dots,\boldsymbol{x}^{2d}_*) \\ &= \max_{(\boldsymbol{x}^k)^{\textnormal{H}}\boldsymbol{x}^k=1,\,k=1,\dots,d} \textnormal{Re}\, \mathcal{F}(\boldsymbol{x}^1,\dots,\boldsymbol{x}^d, \boldsymbol{x}^{d+1}_*,\dots,\boldsymbol{x}^{2d}_*)\\ &\ge \textnormal{Re}\, \mathcal{F}(\boldsymbol{x}^1_*,\dots,\boldsymbol{x}^{2d}_*)=v(R'). \end{align*} For the same reason, we have \begin{align*} \max_{\boldsymbol{z}^{\textnormal{H}}\boldsymbol{z}=1} \textnormal{Re}\,\mathcal{F}(\underbrace{\boldsymbol{y}^*,\dots,\boldsymbol{y}^*}_d, \underbrace{\boldsymbol{z},\dots,\boldsymbol{z}}_d) & = \max_{(\boldsymbol{x}^k)^{\textnormal{H}}\boldsymbol{x}^k=1,\,k=d+1,\dots,2d} \textnormal{Re}\,\mathcal{F}(\underbrace{\boldsymbol{y}^*,\dots,\boldsymbol{y}^*}_d, \boldsymbol{x}^{d+1},\dots,\boldsymbol{x}^{2d}) \\ &\ge \textnormal{Re}\,\mathcal{F}(\underbrace{\boldsymbol{y}^*,\dots,\boldsymbol{y}^*}_d, \boldsymbol{x}^{d+1}_*,\dots,\boldsymbol{x}^{2d}_*) \ge v(R'), \end{align*} implying that $v(M')\ge v(R')$.
Finally, let $(\boldsymbol{y}^*,\boldsymbol{z}^*)$ be an optimal solution of $(M')$. Since $\alpha_k\ge0$ for all $1\le k\le m$, we have \begin{align*}
&~~~~\textnormal{Re}\,\mathcal{F}(\underbrace{\boldsymbol{y}^*,\dots,\boldsymbol{y}^*}_d, \underbrace{\boldsymbol{z}^*,\dots,\boldsymbol{z}^*}_d)\\
&= \textnormal{Re}\, \left(\sum_{k=1}^m\alpha_k \overline{\mathcal{H}_k}\otimes \mathcal{H}_k\right) (\underbrace{\boldsymbol{y}^*,\dots,\boldsymbol{y}^*}_d, \underbrace{\boldsymbol{z}^*,\dots,\boldsymbol{z}^*}_d)\\
&= \sum_{k=1}^m \alpha_k \textnormal{Re}\, (\overline{\mathcal{H}_k}(\underbrace{\boldsymbol{y}^*,\dots,\boldsymbol{y}^*}_d)\cdot \mathcal{H}_k(\underbrace{\boldsymbol{z}^*,\dots,\boldsymbol{z}^*}_d)) \\
&\le \sum_{k=1}^m \alpha_k |\overline{\mathcal{H}_k}(\underbrace{\boldsymbol{y}^*,\dots,\boldsymbol{y}^*}_d) |\cdot| \mathcal{H}_k(\underbrace{\boldsymbol{z}^*,\dots,\boldsymbol{z}^*}_d)| \\
&\le \sum_{k=1}^m \frac{\alpha_k}{2}\bigg(|\overline{\mathcal{H}_k}(\underbrace{\boldsymbol{y}^*,\dots,\boldsymbol{y}^*}_d)|^2 + |\mathcal{H}_k(\underbrace{\boldsymbol{z}^*,\dots,\boldsymbol{z}^*}_d)|^2\bigg) \\
&= \frac{1}{2}\sum_{k=1}^m \alpha_k\bigg((\overline{\mathcal{H}_k}\otimes \mathcal{H}_k)(\underbrace{\boldsymbol{y}^*,\dots,\boldsymbol{y}^*}_d,\underbrace{\overline{\boldsymbol{y}^*},\dots,\overline{\boldsymbol{y}^*}}_d) + (\overline{\mathcal{H}_k}\otimes \mathcal{H}_k)(\underbrace{\boldsymbol{z}^*,\dots,\boldsymbol{z}^*}_d, \underbrace{\overline{\boldsymbol{z}^*},\dots,\overline{\boldsymbol{z}^*}}_d)\bigg) \\
&= \frac{1}{2}\bigg(\mathcal{F}(\underbrace{\boldsymbol{y}^*,\dots,\boldsymbol{y}^*}_d, \underbrace{\overline{\boldsymbol{y}^*},\dots,\overline{\boldsymbol{y}^*}}_d) + \mathcal{F}( \underbrace{\boldsymbol{z}^*,\dots,\boldsymbol{z}^*}_d, \underbrace{\overline{\boldsymbol{z}^*},\dots,\overline{\boldsymbol{z}^*}}_d)\bigg) \\
&\le \max\bigg\{ \mathcal{F}(\underbrace{\boldsymbol{y}^*,\dots,\boldsymbol{y}^*}_d, \underbrace{\overline{\boldsymbol{y}^*},\dots,\overline{\boldsymbol{y}^*}}_d), \mathcal{F}(\underbrace{\boldsymbol{z}^*,\dots,\boldsymbol{z}^*}_d, \underbrace{\overline{\boldsymbol{z}^*},\dots,\overline{\boldsymbol{z}^*}}_d)\bigg\}. \end{align*} {Remark that the positivity of $\alpha_k$'s is exploited when invoking the triangle inequality in the first inequality above.} This implies that either $\mathcal{F}(\underbrace{\boldsymbol{y}^*,\dots,\boldsymbol{y}^*}_d, \underbrace{\overline{\boldsymbol{y}^*},\dots,\overline{\boldsymbol{y}^*}}_d)$ or $\mathcal{F}(\underbrace{\boldsymbol{z}^*,\dots,\boldsymbol{z}^*}_d, \underbrace{\overline{\boldsymbol{z}^*},\dots,\overline{\boldsymbol{z}^*}}_d)$ attains $v(M')$, proving that $v(L')\ge v(M')$. Therefore we have $v(L')= v(M') = v(R')$. \end{proof}
We remark that the condition for $\alpha_k$'s being nonnegative in $\mathcal{F}$ in Theorem~\ref{thm:Banach2} is actually the condition for the real-valued symmetric conjugate form $\mathbf{S}(\mathcal{F})$ being a sum of squares (SOS) of complex polynomials; see the relation between {Propositions~\ref{thm:polyrealvalued} and~\ref{thm:conju-decomp}}. In the field of polynomial optimization, checking whether a polynomial is SOS can be done by the feasibility of a semidefinite program. In fact, there is an easy sufficient condition for the condition {on} $\mathcal{F}$ in Theorem~\ref{thm:Banach2} to hold: the square matrix flattening of $\mathcal{F}$ is Hermitian positive semidefinite.
Interested readers are referred to~\cite{JLZ15} for details.
\section{Applications} \label{sec:application}
The theoretical results developed {in the previous sections are also useful in practice. In this section, we shall discuss some applications that can be formulated as real-valued complex polynomial optimization models. In particular, these problems can be cast as finding the largest C-eigenvalue of a conjugate partial-symmetric tensor or the largest G-eigenvalue of a conjugate super-symmetric tensor.}
One challenge of these eigenvalue optimization problems is that the variables are coupled in the { complex polynomial objective function}. However, the extended Banach's theorem in Section~\ref{sec:Banach}, specifically Theorems~\ref{thm:equal-con} and~\ref{thm:Banach2}, guarantees that we can separate the variables without losing the optimality. This enables us to focus on the multilinear (block) optimization model $$ \max_{(\boldsymbol{x}^k)^{\textnormal{H}}\boldsymbol{x}^k=1,\,k=1,\dots,d}\textnormal{Re}\, \mathcal{G}\bigg( \binom{\,\overline{\boldsymbol{x}^1}\,}{\boldsymbol{x}^1}, \dots, \binom{\,\overline{\boldsymbol{x}^d}\,}{\boldsymbol{x}^d} \bigg) $$ for a conjugate super-symmetric tensor $\mathcal{G}$, or $$ \max_{(\boldsymbol{x}^k)^{\textnormal{H}}\boldsymbol{x}^k=1,\,k=1,\dots,2d} \textnormal{Re}\,\mathcal{F}(\boldsymbol{x}^1,\dots,\boldsymbol{x}^{2d}) $$ for a certain conjugate partial-symmetric tensor $\mathcal{F}$. One great advantage of the above models is that the optimization over one block variable is easy when other blocks are fixed. Therefore, some efficient solution methods tailored for these models can be applied, such as the block coordinate descent method~\cite{LT92} and the maximum block improvement method~\cite{CHLZ12}. {Conversely, the extended Banach's theorem in Section~\ref{sec:Banach} provides an alternative way to solve the symmetric multilinear optimization model by resorting to some approaches tailored for symmetric tensor problems such as the power method~\cite{KoldaMayo11} and the semidefinite programming method~\cite{NieWang14,JMZ15}. In particular, as the search space can be restricted to symmetric solutions, the latter equivalent model significantly reduces the number of decision variables, which is beneficial to many practical algorithms such as semidefinite programs.}
\subsection{Ambiguity function shaping for radar waveform}
The ambiguity function of the waveform is often used to probe the environment in radar system. By controlling both the Doppler and the range resolutions of the system, it can regulate the interference power produced by unwanted returns~\cite{ADJZ12}. To be specific, suppose $v_0$ is the normalized target Doppler frequency and $\boldsymbol{s} = (s_1,\dots,s_n)^{\textnormal{T}} \in \mathbb{C}^n$ is the radar code to be optimized. There are $n_0$ interfering scatterers and the matrix $J^r\in\mathbb{R}^{n^2}$ for $r\in\{0,1,\dots,n-1\}$ is defined as \[ {(J^r)}_{ij}=\left\{ \begin{array}{cc} 1 & i-j=r \\ 0 & i-j\ne r \end{array}\right., \quad\forall\, 1\le i, j\le n. \] The ambiguity function of $\boldsymbol{s}$ for the time-lag $r\in\{0,1,\dots,n-1\}$ and the normalized Doppler frequency $v\in \left[-\frac{1}{2}, \frac{1}{2}\right]$ is given by $$
g_{\boldsymbol{s}}(r,v)=\frac{1}{\|\boldsymbol{s}\|^2}\left|\boldsymbol{s}^{\textnormal{H}}J^r(\boldsymbol{s}\odot \boldsymbol{p}(v))\right|^2, $$ where $\boldsymbol{p}(v) = (1, e^{\ii2\pi v} ,\dots , e^{\ii2(n-1)\pi v})^{\textnormal{T}}$ and $\odot$ denotes the Hadamard product; interested readers are referred to~\cite{ADJZ12} for more details of the ambiguity function and radar waveform design.
{Denote by $r_k$ the time-lag of the $k$-th scatterer, and let $v_k$ be} the normalized Doppler frequency of the $k$-th scatterer. The latter is usually modeled as a random variable uniformly distributed around {a mean frequency $\hat{v}_k$ with some tolerance $\frac{\epsilon_k}{2}$}, i.e., $v_k$ is a uniform distribution in $\left[\hat{v}_k - \frac{\epsilon_k}{2}, \hat{v}_k + \frac{\epsilon_k}{2}\right]$. Consequently, the disturbance power at the output of the matched filter is given by \begin{equation}\label{continous-distur-power}
\sum_{k=1}^{n_0}{\sigma_k}^2\|\boldsymbol{s}\|^2 \textnormal{E}\, [g_{\boldsymbol{s}} (r_k, v_{k} -v_{0} )] + \sigma^2\|\boldsymbol{s}\|^2, \end{equation} where $\sigma^2$ is the variance of the circular white noise, and ${\sigma_k}^2$ is the echo mean power produced by the $k$-th scatterer. To simplify the notation, all the following normalized Doppler frequencies are expressed in terms of the difference with respect to $v_0$. We discretize the normalized Doppler interval $[-\frac{1}{2}, \frac{1}{2})$ into $m$ bins, denoted by discrete frequencies $x_j = -\frac{1}{2} + \frac{j}{m}$ for $j\in\{0,1,\dots,m\}$. Let $$ \Delta_k = \left\{ j:\left[x_j - \frac{1}{2m},x_j + \frac{1}{2m}\right) \bigcap \left[\hat{v}_k - \frac{\epsilon_k}{2}, \hat{v}_k + \frac{\epsilon_k}{2}\right] \neq \varnothing \right\}. $$ Then the above statistical expectations can be approximated by the sample means over $\Delta_k$, i.e., $$
\textnormal{E}\, [g_{\boldsymbol{s}} (r_k, v_{k} )] \approx \frac{1}{|\Delta_k|}\sum_{j \in \Delta_k} g_{\boldsymbol{s}} (r_k, x_{j} ). $$ Plugging the above expression into~\eqref{continous-distur-power}, the total disturbance power at the output of the matched filter can be rewritten as $$
\phi(\boldsymbol{s})=\sum_{r=0}^{n-1} \sum_{j=1}^{m} \rho(r,j) |\boldsymbol{s}^{\textnormal{H}}J^r (\boldsymbol{s}\odot\boldsymbol{p}(x_j))|^2, $$
where $\rho(r,j) = \sum_{k=1}^{n_0}{\delta_{r,r_k}}\mathbf{1}_{\Delta_k}(j)\frac{\sigma_k^2}{|\Delta_k|}$ with {$\delta_{r,r_k}$ being the Kronecker delta and} $\mathbf{1}_{\Delta_k}(j)$ being an indicator function.
To obtain phase-only modulated waveforms, an optimization model to minimize $\phi(\boldsymbol{s})$ subject to constant modulus constraints was proposed in~\cite{ADJZ12}:
$\min_{|s_i| = 1,\, i=1,\dots,n}\phi(\boldsymbol{s})$.
{Another} modeling strategy is to account for the finite energy transmitted by the radar and assume that $\|\boldsymbol{s}\|^2 = 1$. However, this single constraint does not provide any kind of control on the shape of the resulting coded waveform. To circumvent this drawback, one practical approach is to enforce a similarity constraint (see~\cite{ADFW13} for more details): \begin{equation}\label{similarity-constraint}
\| \boldsymbol{s} - \boldsymbol{s}^0 \|^2 \le \gamma, \end{equation}
where $\boldsymbol{s}^0$ is a known code which shares some nice properties {such as a constant modulus and a reasonable range resolution}. Moreover, any feasible $\boldsymbol{s}$ satisfies $\|\boldsymbol{s}\|=1$ and $$
\| \boldsymbol{s} - \boldsymbol{s}^0 \|^2 = \| \boldsymbol{s} \|^2 + \| \boldsymbol{s}^0 \|^2 - (\boldsymbol{s}^{\textnormal{H}}\boldsymbol{s}^0 + (\boldsymbol{s}^0)^{\textnormal{H}}\boldsymbol{s}) = 1 + \| \boldsymbol{s}^0 \|^2 - (\boldsymbol{s}^{\textnormal{H}}\boldsymbol{s}^0 + (\boldsymbol{s}^0)^{\textnormal{H}}\boldsymbol{s}). $$
Therefore, $\| \boldsymbol{s} - \boldsymbol{s}^0 \|^2 \le \gamma$ is equivalent to $- (\boldsymbol{s}^{\textnormal{H}}\boldsymbol{s}^0 + (\boldsymbol{s}^0)^{\textnormal{H}}\boldsymbol{s}) \le \gamma - 1 - \| \boldsymbol{s}^0 \|^2$. Typically, the similarity constraint~\eqref{similarity-constraint} is not a hard constraint; it aims to restrict the searching area within some neighborhood of $\boldsymbol{s}^0$ and the size of the neighborhood is controlled by $\gamma$. Motivated by the aforementioned equivalence, {a similar result} can be achieved by penalizing the quantity $- (\boldsymbol{s}^{\textnormal{H}}\boldsymbol{s}^0 + (\boldsymbol{s}^0)^{\textnormal{H}}\boldsymbol{s})$ in the objective and we arrive at the following formulation \begin{equation}\label{eq:radar}
\min_{\|\boldsymbol{s}\| = 1} \left(\phi(\boldsymbol{s}) - \rho (\boldsymbol{s}^{\textnormal{H}}\boldsymbol{s}^0 + (\boldsymbol{s}^0)^{\textnormal{H}}\boldsymbol{s})^2 \| \boldsymbol{s}\|^2\right) \end{equation} with penalty parameter $\rho$. Notice that the objective function in~\eqref{eq:radar} is a real-valued quartic conjugate complex form. If $\boldsymbol{s}^*$ is the optimal solution and so is $-\boldsymbol{s}^*$, then we can choose one of them to make sure that ${(\boldsymbol{s}^*)^{\textnormal{H}}\boldsymbol{s}^0 + (\boldsymbol{s}^0)^{\textnormal{H}}\boldsymbol{s}^*}>0$. The model~\eqref{eq:radar} is obviously finding the smallest C-eigenvalue of a conjugate partial-symmetric tensor, which can also be viewed as finding the smallest G-eigenvalue of a conjugate super-symmetric tensor as mentioned in Theorem~\ref{thm:c-g-eigen}.
\subsection{The best rank-one approximation of a complex tensor}
Many modern engineering problems can be cast as multilinear least squares regression given as \begin{equation}\label{eq:tensor1}
\min_{\boldsymbol{z}^k\in\mathbb{C}^{n_k},\,k=1,\dots,d}\frac{1}{2}\|\boldsymbol{z}^1\otimes\dots \otimes \boldsymbol{z}^d - \mathcal{F} \|^2, \end{equation} where $\mathcal{F} \in \mathbb{C}^{n_1\times \dots \times n_d }$ is a given nonzero complex tensor. For instance, in quantum entanglement the geometric measure of a given $d$-partite pure state $\mathcal{F}$ is defined by~\eqref{eq:tensor1}; see~\cite{WG03,NQB14} for details.
In fact,~\eqref{eq:tensor1} can also be categorized as a G-eigenvalue problem for a conjugate super-symmetric tensor. To see this, first it is easy to see that~\eqref{eq:tensor1} is equivalent to $$
\min_{\lambda\in\mathbb{R},\,\|\boldsymbol{z}^k\|=1,\,k=1,\dots,d}\|\lambda \boldsymbol{z}^1\otimes\dots \otimes \boldsymbol{z}^d - \mathcal{F} \|^2. $$
{When all $\boldsymbol{z}^k$'s with $\|\boldsymbol{z}^k\| =1$ for $k = 1,\dots, d$ are fixed, the optimal $\lambda$ satisfies} \begin{align*}
\min_{\lambda \in \mathbb{R}}\|\lambda \boldsymbol{z}^1\otimes\dots \otimes \boldsymbol{z}^d - \mathcal{F} \|^2 &=\min_{\lambda \in \mathbb{R}}\left( \|\mathcal{F}\|^2 - 2 \lambda \textnormal{Re}\, \mathcal{F}(\boldsymbol{z}^1,\dots,\boldsymbol{z}^d) + \lambda^2 \right) \\
&=\|\mathcal{F}\|^2 - (\textnormal{Re}\,\mathcal{F}(\boldsymbol{z}^1,\dots, \boldsymbol{z}^d))^2. \end{align*} Therefore, by multilinearity,~\eqref{eq:tensor1} is equivalent to \begin{equation}\label{eq:tensor2}
\max_{\|\boldsymbol{z}^k\|=1,\,k=1,\dots,d}|\textnormal{Re}\,\mathcal{F}(\boldsymbol{z}^1,\dots, \boldsymbol{z}^d)|=\max_{\|\boldsymbol{z}^k\|=1,\,k=1,\dots,d}\textnormal{Re}\,\mathcal{F}(\boldsymbol{z}^1,\dots, \boldsymbol{z}^d). \end{equation}
Let us now consider a relaxation of the above model \begin{equation}\label{eq:tensor3}
\max_{\sum_{k=1}^d\|\boldsymbol{z}^k\|^2 = d }\textnormal{Re}\, \mathcal{F}(\boldsymbol{z}^1, \dots , \boldsymbol{z}^d). \end{equation}
A key observation is that this relaxation is actually tight. To see this, suppose $(\boldsymbol{z}_*^1,\dots, \boldsymbol{z}_*^{d})$ is an optimal solution of~\eqref{eq:tensor3}. Trivially we have $\textnormal{Re}\,\mathcal{F}(\boldsymbol{z}_*^1,\boldsymbol{z}_*^2,\dots,\boldsymbol{z}_*^{d})>0$ as $\mathcal{F}$ is nonzero and so $\|\boldsymbol{z}_*^k\|\ne 0$ for $k=1,\dots,d$. By noticing $$
\left(\prod_{k=1}^d\|\boldsymbol{z}_*^k\|^2\right)^{1/d} \le \frac{1}{d}\sum_{k=1}^{d}\|\boldsymbol{z}_*^k\|^2=1, $$
we have that $\prod_{k=1}^d\|\boldsymbol{z}_*^k\|\le1$ and so $$
\textnormal{Re}\,\mathcal{F}\left(\frac{\boldsymbol{z}_*^1}{\|\boldsymbol{z}_*^1 \|},\dots,\frac{\boldsymbol{z}_*^{d}}{\|\boldsymbol{z}_*^{d} \|}\right)=\textnormal{Re}\,\frac{\mathcal{F}(\boldsymbol{z}_*^1,\dots,\boldsymbol{z}_*^{d})}{\prod_{k=1}^{d}\|\boldsymbol{z}_*^k\|} \ge \textnormal{Re}\, \mathcal{F}(\boldsymbol{z}_*^1,\boldsymbol{z}_*^2,\dots,\boldsymbol{z}_*^{d}). $$
Therefore, the feasible solution $\left(\boldsymbol{z}_*^1/\|\boldsymbol{z}_*^1 \|,\dots,\boldsymbol{z}_*^d/\|\boldsymbol{z}_*^d\|\right)$ of~\eqref{eq:tensor2} is already optimal to the relaxation model~\eqref{eq:tensor3}, proving the equivalence between~\eqref{eq:tensor2} and~\eqref{eq:tensor3}.
Finally, to formulate~\eqref{eq:tensor3} as a G-eigenvalue {optimization} problem, let us denote $\boldsymbol{z}=\left((\boldsymbol{z}^1)^{\textnormal{T}},\dots,(\boldsymbol{z}^{d})^{\textnormal{T}}\right)^{\textnormal{T}}\in\mathbb{C}^{nd}$ and construct a symmetric complex tensor $\mathcal{H}\in\mathbb{C}^{(nd)^d}$ such that $$ \mathcal{H}(\underbrace{\boldsymbol{z},\dots,\boldsymbol{z}}_{d}) = \mathcal{F}(\boldsymbol{z}^1,\dots,\boldsymbol{z}^d). $$ Thus,~\eqref{eq:tensor3} can be rewritten as \begin{align*}
\max_{\|\boldsymbol{z}\|=\sqrt{d}} \textnormal{Re}\, \mathcal{H}(\underbrace{\boldsymbol{z},\dots,\boldsymbol{z}}_{d})
&=\max_{\|\boldsymbol{z}\|=\sqrt{d}} \frac{1}{2} \left(\mathcal{H}(\underbrace{\boldsymbol{z},\dots,\boldsymbol{z}}_{d}) + \overline\mathcal{H}(\underbrace{\overline\boldsymbol{z},\dots,\overline\boldsymbol{z}}_{d})\right)\\
&=\max_{\|\boldsymbol{x}\|=1} \frac{\sqrt{d^d}}{2} \left(\mathcal{H}(\underbrace{\boldsymbol{x},\dots,\boldsymbol{x}}_{d}) + \overline\mathcal{H}(\underbrace{\overline\boldsymbol{x},\dots,\overline\boldsymbol{x}}_{d})\right)\\
&=\max_{\|\boldsymbol{x}\|=1} \mathcal{G}\bigg(\underbrace{\dbinom{\overline\boldsymbol{x}}{\boldsymbol{x}}, \dots, \dbinom{\overline\boldsymbol{x}}{\boldsymbol{x}}}_d \bigg), \end{align*} where $\mathcal{G}\in\mathbb{C}^{(2nd)^d}$ is a conjugate super-symmetric tensor. The multilinear least square model~\eqref{eq:tensor1} is shown to be a special case of the G-eigenvalue optimization problem.
\section{Conclusion}\label{sec:conclusion}
This paper focuses on complex polynomial functions that incorporate conjugate variables. We {introduce} two types of conjugate complex forms and their symmetric tensor representations. Necessary and sufficient conditions for these conjugate complex forms to be real-valued are presented, based on which two types of symmetric complex tensors are introduced. We present new definitions of eigenvalues/eigenvectors, namely the $C$-eigenvalue and the $G$-eigenvalue, which generalize the existing concepts of eigenvalues in the literature. Extensions of Banach's theorem~\cite{B38} to these complex tensors are discussed as well. To give the readers a holistic picture, Table~\ref{table} summarizes the main contents.
\begin{table}[h] \begin{center}
\begin{tabular}{ | l p{60mm} l |} \hline Sec. & Subject & Results \\ \hline \ref{sec:cform} & Symmetric conjugate form {and} partial-symmetric tensor & Def.~\ref{def:sform}, Def.~\ref{def:partial-symmetric}, Lemma~\ref{thm:tensorS} \\ \ref{sec:ctensor} & Real-valued symmetric conjugate form {and} conjugate partial-symmetric tensor & Cor.~\ref{thm:condition}, Def.~\ref{thm:cps}, Prop.~\ref{thm:conju-mapping} \\ \ref{sec:eigendef} & $C$-eigenvalue and $C$-eigenvector & Def.~\ref{thm:Ceigen}, Prop.~\ref{thm:Ceigen2}, Prop.~\ref{prop:C-eigen} \\ \ref{sec:cbanach} & Banach type theorem & Prop.~\ref{degree2}, Theorem~\ref{thm:Banach2} \\ \hline \ref{sec:gform} & General conjugate form {and} symmetric tensor & Def.~\ref{def:gform}, Lemma~\ref{thm:tensorG} \\ \ref{sec:gtensor} & Real-valued general conjugate form {and} conjugate super-symmetric tensor & Cor.~\ref{thm:condition}, Def.~\ref{thm:css}, Prop.~\ref{thm:gform-rv} \\ \ref{sec:eigendef} & $G$-eigenvalue and $G$-eigenvector & Def.~\ref{thm:Geigen}, Prop.~\ref{prop:G-eigen}\\ \ref{sec:gbanach} & Banach type theorem & Theorem~\ref{thm:equal-con} \\ \hline \end{tabular} \end{center} \caption{Summary of the symmetric conjugate form and the general conjugate form} \label{table} \end{table}
An important aspect of polynomials is the theory of nonnegativity. Most existing results only apply to polynomials in real variables, for the reason that such polynomials are real-valued. Since we have the full characterization of real-valued conjugate complex polynomials introduced in this paper, the question about their nonnegativity naturally arises, in particular, the relationship between nonnegativity and SOS. In the real domain, this problem was completely solved by Hilbert~\cite{H88} in 1888. However, the relationship between nonnegative complex polynomials and SOS has not been established explicitly in the literature as far as we know. This would be a promising direction for future research using the notion of conjugate polynomials. Moreover, the new notions of symmetric complex tensors and the eigenvalues/eigenvectors would hopefully attract future modelling opportunities, and the newly developed properties, in particular the extension of Banach's result, would be helpful in solution methods for complex polynomial optimization.
\end{document} |
\begin{document}
\maketitle
\section{Introduction}
The restriction problem for the Fourier transform in $\bR^n$ was introduced by E. M. Stein, who proved the first result in any dimension \cite[p. 28]{Fe}, later improved by the sharper Stein--Tomas method \cite{T}. Since then more and more sophisticated techniques have been introduced to attack the still open problems in this area, concerning the maximal range of exponents for which the restriction inequality holds.
In two dimensions, the restriction estimate for the circle had been proved already, in an almost optimal range of exponents, by Fefferman and Stein \cite[p. 33]{Fe}. Shortly afterwards, sharp estimates were obtained by Zygmund \cite{Z} for the circle and by Carleson and Sj\"olin \cite{CS} and Sj\"olin \cite{Sj} for a class of curves including strictly convex $C^2$ curves.
The present paper does not mean to proceed along these lines, but rather to propose a reflection on the measure-theoretic meaning of the restriction phenomenon and possibly suggest some related problems.
A restriction theorem is usually meant as a family of {\it apriori} inequalities \begin{equation}\label{restriction_inequality}
\big\|\widehat f_{|_S}\big\|_{L^q(S,\mu)}\le C\big\|f\big\|_{L^p(\bR^n)}\ ,
\end{equation} where $f\in \cS(\bR^n)$, $S$ is a surface with appropriate curvature properties, and $\mu$ a suitably weighted finite surface measure on $S$. The validity of such an inequality implies the existence of a bounded {\it restriction operator} $\cR:L^p(\bR^n)\longrightarrow L^q(S,\mu)$ such that $\cR f=\widehat f_{|_S}$ when $f$ is a Schwartz function.
In general terms our question is: assuming that \eqref{restriction_inequality} holds, what is the ``intrinsic'' pointwise relation between $\cR f$ and $\widehat f$ for a general $L^p$-function $f$?
A partial answer follows directly from the restriction inequality. Assume that \eqref{restriction_inequality} holds for given $p,q$. This forces the condition $p<2$, so that $\widehat f\in L^{p'}$. Fix an approximate identity $\chi_\eps(x)=\eps^{-n}\chi(x/\eps)$ with $\chi\in\cS(\bR^n)$, $\int\chi=1$. Then, with $\psi=\cF\inv\chi$, $$ \widehat f*\chi_\eps=\widehat{f\psi(\eps\cdot)} $$
is well defined on $S$ and coincides with $\cR\big(f\psi(\eps\cdot)\big)$. Moreover, $f\psi(\eps\cdot)\to f$ in $L^p(\bR^n)$, so that $(\widehat f*\chi_\eps)_{|_S}\to \cR f$ in $L^q(S,\mu)$. Hence, for a subsequence $\eps_k\to0$, the $\chi_{\eps_k}$-averages of $\widehat f$ converge pointwise to $\cR f$ $\mu$-a.e.
It is natural to ask if the limit over all $\eps$ exists $\mu$-a.e. We give positive answers in two dimensions to this and related questions.
We recall that, for a curve $S$ in the plane, necessary conditions on $p,q$ for having \eqref{restriction_inequality} are $p<\frac43$ and $p'\ge 3q$ and that they are also sufficient when $S$ is $C^2$ with nonvanishing curvature and $\mu$ is the arclength measure, or, more generally, when $S$ is just $C^2$ and convex, and $\mu$ is the {\it affine arclength measure}~\cite{Sj}. Notice that the two measures differ by a factor comparable to the $\frac13$ power of the curvature, so that the affine arclength is concentrated on the set of points with nonvanishing curvature and ordinary arclength is damped near these points.
\begin{theorem}\label{lebesgue} Let $S$ be a $C^2$ curve in $\bR^2$ and $f\in L^p(\bR^2)$. \begin{enumerate} \item[\rm(i)] Assume that $1\le p<\frac43$ and let $\chi\in\cS(\bR^2)$ with $\int\chi=1$. Then, with respect to arclength measure, for almost every $x\in S$ at which the curvature does not vanish, $\lim_{\eps\to0}\widehat f*\chi_\eps(x)=\cR f(x)$. \item[\rm(ii)] Assume that $1\le p<\frac87$. Then, with respect to arclength measure, almost every $x\in S$ at which the curvature does not vanish is a Lebesgue point for $\widehat f$ and the regularized value of $\widehat f$ at $x$ coincides with $\cR f(x)$. \end{enumerate} \end{theorem}
Several questions remain open, regarding extensions to less regular curves, to other values of $p$ in the range $\frac87\le p<\frac43$, or to higher dimensions. We just mention here that, in dimension $d\ge3$, our method gives results for a class of curves including $\Gamma(t)=(t,t^2,\dots,t^d)$.
Theorem \ref{lebesgue} is a direct consequence of certain ``maximal restriction theorems'' concerning restrictions to $S$ of truncated maximal functions of the Fourier transform. Since maximal restriction inequalities may also have an intrinsic interest, we go beyond what is strictly needed to deduce Theorem \ref{lebesgue} and consider (truncated) two-parameter maximal functions, such as the strong maximal function, relative to any coordinate system in $\bR^2$.
In Theorem \ref{vertical} we prove that, for a convex $C^2$ curve, the two-parameter maximal operator defined in \eqref{Mv}, is $L^p-L^q$ bounded for $p,q$ in the full range of validity of the restriction theorem, with the $L^q$-norm on $S$ relative to affine arc-length measure.
In Corollary \ref{tildeM} we deduce the same $L^p-L^q$ estimates, but in the smaller range $p<\frac87$, for the truncated strong maximal function, which does not only control averages of $\hat f$, but also those of~$|\hat f|$.
The proof is based on the Kolmogorov-Seliverstov-Plessner linearization method \cite[Ch. XIII]{Zb}. This leads to proving uniform estimates for a family of linear operators to which a modification of the basic approach of \cite{CS,Z} for curves in $\bR^2$ can be applied. For this reason our method is limited to the two-dimensional context. Unfortunately, the usual $TT^*$ method of Stein-Tomas does not seem to be applicable, even for the Hardy-Littlewood maximal function.
\section{The strong maximal function of $\widehat f$ along a curve }\label{sec-vertical}
Let $S=\{\Gamma(t):t\in I\}$, where $\Gamma$ is a $C^2$ curve in $\bR^2$ with nonnegative signed curvature, i.e., with $\kappa(t)=\det(\Gamma',\Gamma'')(t)\ge0$. Denote by $d\mu(t)=\kappa^{\frac13}(t)\,dt$ the pull-back to~$I$ of the affine arclength measure on~$S$.
We assume for simplicity that $\Gamma(x)=\big(x,\ph(x)\big)$ is the graph of a convex $C^2$ function $\ph$ on a bounded interval $I$. Notice that the measure $\mu$ is concentrated on the set where $\kappa=\ph''>0$.
We consider the two-parameter maximal function\footnote{Theorem \ref{vertical} also holds if $\chi\otimes\chi$ is replaced by a general $\chi\in\cS(\bR^2)$, because this can be expanded into a rapidly decreasing series $\sum_j\chi'_j\otimes\chi''_j$.} \begin{equation}\label{Mv}
\cM f(x)=\sup_{0<\eps',\eps''<1}\Big|\int \widehat f\big(x+s,\ph(x)+t\big) \chi_{\eps'}(s) \chi_{\eps''}(t)\,ds\,dt\Big|\ , \end{equation} where $\chi_\eps(\cdot)=\eps\inv\chi(\cdot/\eps)$, with $\chi\in\cS(\bR)$, even, with $\int\chi=1$.
\begin{theorem}\label{vertical} The inequality \begin{equation}
\|\cM f\|_{L^q(I,\mu)}\le C_p\|f\|_{L^p(\bR^2)}\ , \end{equation} holds for $1\le p<\frac43$ and $p'\ge 3q$. \end{theorem}
\begin{proof} We may and shall assume $f\in\cS(\bR^2)$ and, since $\mu$ is finite, $p'= 3q$ by H\"older's inequality. We linearize $\cM $ by defining, for fixed measurable functions $\eps'(x),\eps''(x)$ on $I$ with values in $(0,1)$, \begin{equation}\begin{aligned}\label{Reps} \cR_{\eps',\eps''} f(x)&=\int \widehat f\big(x+s,\ph(x)+t\big) \chi_{\eps'}(s) \chi_{\eps''}(t)\,ds\,dt\\ &=\int f(\xi,\eta)\int e^{-i(\xi (x+s)+\eta(\ph(x)+t))}\chi_{\eps'}(s) \chi_{\eps''}(t)\,ds\,dt\,d\xi\,d\eta\\ &=\int \widehat\chi\big(\eps'(x)\xi\big)\widehat\chi\big(\eps''(x)\eta\big) e^{-i(\xi x+\eta\ph(x))}f(\xi,\eta)\,d\xi\,d\eta\ . \end{aligned}\end{equation}
The formal adjoint of $\cR_{\eps',\eps''}$ is \begin{equation}\begin{aligned}\label{Eeps} \cE_{\eps',\eps''} g(\xi,\eta)&=\cR^*_{\eps',\eps''} g(\xi,\eta)\\ &=\int_I\widehat\chi\big(\eps'(x)\xi\big)\widehat\chi\big(\eps''(x)\eta\big)e^{i(\xi x+\eta\ph(x))}g(x)\kappa^\frac13(x)\,dx\ . \end{aligned}\end{equation}
It suffices to prove the inequality \begin{equation}\label{Eeps-estimate}
\|\cE_{\eps',\eps''} g\|_{L^{p'}(\bR^2)}\le C_p \|g\|_{L^{q'}(I,\mu)}\ ,\qquad g\in C^\infty_c(I)\ , \end{equation} uniformly in the functions $\eps'(x),\eps''(x)$. We introduce a truncation in $\xi$ and $\eta$, in order to gain
decay at infinity for $\cE_{\eps',\eps''} g$. Fixing another function $\chi_0$ smooth on $\bR$, supported in $[-2,2]$ and equal to 1 on $[-1,1]$, we define, for $\la\gg1$, \begin{equation}\label{Eepslambda} \cE_{\eps',\eps''}^\la g(\xi,\eta)=\chi_0\Big(\frac\xi\la\Big)\chi_0\Big(\frac\eta\la\Big)\int_I\widehat\chi\big(\eps'(x)\xi\big)\widehat\chi\big(\eps''(x)\eta\big)e^{i(\xi x+\eta\ph(x))}g(x)\kappa^\frac13(x)\,dx\ . \end{equation}
It will then suffice to prove \eqref{Eeps-estimate} with $\cE_{\eps',\eps''}$ replaced by $\cE^\la_{\eps',\eps''}$, uniformly in $\eps'(x),\eps''(x)$ and $\la$.
We start from the identity \begin{equation}\label{square}
\|\cE_{\eps',\eps''}^\la g\|_{p'}=\big\|(\cE_{\eps',\eps''}^\la g)^2\big\|^\half_{p'/2}\ . \end{equation}
If $U$ is the open subset of $I$ where $\kappa(x)>0$, the measure $\mu$ is concentrated on $U$, so we have \begin{equation*}\begin{aligned} (\cE_{\eps',\eps''}^\la g)^2(\xi,\eta)&=\chi_0^2\Big(\frac\xi\la\Big)\chi_0^2\Big(\frac\eta\la\Big)\int_{U^2}\widehat\chi\big(\eps'(x)\xi\big)\widehat\chi\big(\eps''(x)\eta\big)\widehat\chi\big(\eps'(y)\xi\big)\widehat\chi\big(\eps''(y)\eta\big)\\ &\qquad\qquad\qquad\qquad\qquad\qquad\qquad e^{i(\xi (x+y)+\eta(\ph(x)+\ph(y)))}g(x)\kappa^\frac13(x)g(y)\kappa^\frac13(y)\,dx\,dy\\ &=\chi_0^2\Big(\frac\xi\la\Big)\chi_0^2\Big(\frac\eta\la\Big)\int_{U^2}\widehat\chi\big(\eps'(x)\xi\big)\widehat\chi\big(\eps''(x)\eta\big)\widehat\chi\big(\eps'(y)\xi\big)\widehat\chi\big(\eps''(y)\eta\big)\\ &\qquad\qquad\qquad\qquad\qquad\qquad\qquad e^{i(\xi (x+y)+\eta(\ph(x)+\ph(y)))}G_0(x,y)\,dx\,dy\ , \end{aligned}\end{equation*} with $G_0=(g\kappa^\frac13)\otimes(g\kappa^\frac13)$.
We want to make the change of variables $z_1=x+y$, $z_2=\ph(x)+\ph(y)$. It follows from the convexity of $\ph$ that the map $\Phi(x,y)=\big(x+y,\ph(x)+\ph(y)\big)$ is injective on each of the subsets $U^2_\pm=\{(x,y)\in U^2: x\lessgtr y\}$ and that $\det\Phi'(x,y)=\ph'(y)-\ph'(x)\ne0$ on $U^2$.
With $A=\Phi(U_+)=\Phi(U_-)$, we set, for $z=(z_1,z_2)\in A$, \begin{equation}\begin{aligned}\label{change}
&\big(x_\pm(z),y_\pm(z)\big)=(\Phi_{|_{U^2_\pm}})\inv(z)\\ &{\eps'_1}^\pm(z)=\eps'\big(x_\pm(z)\big)\ ,\qquad {\eps'_2}^\pm(z)=\eps'\big(y_\pm(z)\big)\\ &{\eps''_1}^\pm(z)=\eps''\big(x_\pm(z)\big)\ ,\qquad {\eps''_2}^\pm(z)=\eps''\big(y_\pm(z)\big)\\
&G_\pm(z)=\frac{G_0\big(x_\pm(z),y_\pm(z)\big)}{\big|\ph'\big(x_\pm(z)\big)-\ph'\big(y_\pm(z)\big)\big|}\ . \end{aligned}\end{equation}
Then \begin{equation}\begin{aligned}\label{E2} \cE_{\eps',\eps''}^\la g(\xi,\eta)^2&=\chi_0^2\Big(\frac\xi\la\Big)\chi_0^2\Big(\frac\eta\la\Big)\\ &\qquad \sum_\pm\int_A\widehat\chi\big({\eps'_1}^\pm(z)\xi\big)\widehat\chi\big({\eps''_1}^\pm(z)\eta\big)\widehat\chi\big({\eps'_2}^\pm(z)\xi\big)\widehat\chi\big({\eps''_2}^\pm(z)\eta\big)e^{i(\xi z_1+\eta z_2)}G_\pm(z)\,dz\ . \end{aligned}\end{equation}
We are so led to consider the operator $$ T^\la_{\overline\eps}G(\xi,\eta)=\chi_0^2\Big(\frac\xi\la\Big)\chi_0^2\Big(\frac\eta\la\Big)\int_A\widehat\chi\big(\eps'_1(z)\xi\big)\widehat\chi\big(\eps''_1(z)\eta\big)\widehat\chi\big(\eps'_2(z)\xi\big)\widehat\chi\big(\eps''_2(z)\eta\big)e^{i(\xi z_1+\eta z_2)}G(z)\,dz\ , $$ for arbitrary measurable functions $\overline\eps=(\eps'_1,\eps''_1,\eps'_2,\eps''_2)$ on $A$ with values in $(0,1)^4$ and arbitrary continuous functions~$G$ on $A$.
\begin{lemma}\label{Tpp'} For $1\le p\le 2$, $T^\la_{\overline\eps}$ is bounded from $L^p(A)$ to $L^{p'}(\bR^2)$, uniformly in $\overline\eps$ and $\la$. \end{lemma}
\begin{proof} The statement is trivial for $p=1$.
For $p=2$ we prove the equivalent statement that $(T^\la_{\overline\eps})^*T^\la_{\overline\eps}:L^2(A)\longrightarrow L^2(A)$. We have $$ (T^\la_{\overline\eps})^*T^\la_{\overline\eps}G(z)=\int_A K^\la_{\overline\eps}(z,w)G(w)\,dw\ , $$ where, for $(z,w)\in A^2$, \begin{equation}\begin{aligned}\label{Keps} K^\la_{\overline\eps}(z,w)&=\int_{\bR^2} e^{-i(\xi,\eta)\cdot(z-w)}\\ &\qquad \chi_0^4\Big(\frac\xi\la\Big)\chi_0^4\Big(\frac\eta\la\Big)\widehat\chi\big(\eps'_1(z)\xi\big)\widehat\chi\big(\eps'_2(z)\xi\big)\widehat\chi\big(\eps''_1(z)\eta\big)\widehat\chi\big(\eps''_2(z)\eta\big)\\ &\qquad\qquad\qquad\qquad \widehat\chi\big(\eps'_1(w)\xi\big)\widehat\chi\big(\eps'_2(w)\xi\big)\widehat\chi\big(\eps''_1(w)\eta\big)\widehat\chi\big(\eps''_2(w)\eta\big)\,d\xi\,d\eta
\ . \end{aligned}\end{equation}
Let \begin{equation}\begin{aligned}\label{eps(zwla)} \eps'(z,w,\la)&=\max\big\{\eps'_1(z),\eps'_2(z),\eps'_1(w),\eps'_2(w),\la\inv\big\}\\ \eps''(z,w,\la)&=\max\big\{\eps''_1(z),\eps''_2(z),\eps''_1(w),\eps''_2(w),\la\inv\big\}\ . \end{aligned}\end{equation}
Using iteratively the property that, given two Schwartz functions $f,g$ on $\bR$, the product $f(at)g(bt)$ can be expressed as $h\big((a\vee b)t\big)$ with each Schwartz norm $\|h\|_{(N)}$ controlled by the same norm of $f$ and $g$, we can write \begin{equation*}\begin{aligned} \chi_0^4\Big(\frac\xi\la\Big)\widehat\chi\big(\eps'_1(z)\xi\big)\widehat\chi\big(\eps'_2(z)\xi\big)\widehat\chi\big(\eps'_1(w)\xi\big)\widehat\chi\big(\eps'_2(w)\xi\big)&=\psi'_{z,w,\la}\big(\eps'(z,w,\la)\xi\big)\\ \chi_0^4\Big(\frac\eta\la\Big)\widehat\chi\big(\eps''_1(z)\eta\big)\widehat\chi\big(\eps''_2(z)\eta\big)\widehat\chi\big(\eps''_1(w)\eta\big)\widehat\chi\big(\eps''_2(w)\eta\big)&=\psi''_{z,w,\la}\big(\eps''(z,w,\la)\eta\big)\ , \end{aligned}\end{equation*} with $\psi'_{z,w,\la},\psi''_{z,w,\la}\in\cS(\bR)$ uniformly bounded in each Schwartz norm.
Then \begin{equation}\label{Ksimpler} K^\la_{\overline\eps}(z,w)=\frac 1{\eps'(z,w,\la)\eps''(z,w,\la)}\widehat{\psi'}_{z,w,\la}\Big(\frac{z_1-w_1}{\eps'(z,w,\la)}\Big)\widehat{\psi''}_{z,w,\la}\Big(\frac{z_2-w_2}{\eps''(z,w,\la)}\Big)\ , \end{equation} so that, for every $N$, we have the uniform bound $$
\big|K^\la_{\overline\eps}(z,w)\big|\le C_N \frac1{\eps'(z,w,\la)\eps''(z,w,\la)}\Big(1+\frac{|z_1-w_1|}{\eps'(z,w,\la)}\Big)^{-N}\Big(1+\frac{|z_2-w_2|}{\eps''(z,w,\la)}\Big)^{-N}\ . $$
We now make a double partition of $A^2$, depending on which of the three parameters $z,w,\la$ determines the value of $\eps'$ and $\eps''$ respectively: $$ A^2=E'_1\cup E'_2\ ,\qquad A^2=E''_1\cup E''_2\ , $$ such that $$ \eps'(z,w,\la)=\begin{cases} \eps'_1(z)\text{ or }\eps'_2(z)\text{ or }\la\inv&\text{ on }E'_1\\ \eps'_1(w)\text{ or }\eps'_2(w)&\text{ on }E'_2\ , \end{cases}\qquad \eps''(z,w,\la)=\begin{cases} \eps''_1(z)\text{ or }\eps''_2(z)\text{ or }\la\inv&\text{ on }E''_1\\ \eps''_1(w)\text{ or }\eps''_2(w)&\text{ on }E''_2 \ . \end{cases} $$
On any intersection $E'_j\cap E''_k=E_{jk}$, each of $\eps'$ and $\eps''$ depends on only one of the variables $z,w$. We decompose \begin{equation*}\begin{aligned}
\big|(T^\la_{\overline\eps})^*T^\la_{\overline\eps}G(z)\big|&\le\sum_{j,k=1}^2\int_A {\mathbf 1}_{E_{jk}}(z,w)\big|K^\la_{\overline\eps}(z,w)\big|\,\big|G(w)\big|\,dw\\
&=\sum_{j,k=1}^2U_{jk}|G|(z)\ , \end{aligned}\end{equation*}
In the case $j=k=1$ we have
\begin{equation*}\begin{aligned} U_{11}|G|(z)&\le C \int_A \frac1{\tilde\eps'(z)\tilde\eps''(z)}\Big(1+\frac{|z_1-w_1|}{\tilde\eps'(z)}\Big)^{-2}\Big(1+\frac{|z_2-w_2|}{\tilde\eps''(z)}\Big)^{-2}\big|G(w)\big|\,dw\\ &\le CM_s G(z)\ , \end{aligned}\end{equation*} where $M_s$ denotes the strong maximal function in $\bR^2$. Hence $U_{11}$ is bounded on $L^2$.
In the case $j=k=2$, it is sufficient to observe that $U_{22}^*$ has the same form as $U_{11}$ to obtain the same conclusion.
Suppose now that $j\ne k$, say $j=1,k=2$, i.e., with $\eps'$ depending on $z$ and $\eps''$ on $w$. Then, extending $G$ to be 0 on $\bR^2\setminus A$,
\begin{equation*}\begin{aligned} U_{12}|G|(z)&\le C \int_A \frac1{\tilde\eps'(z)\tilde\eps''(w)}\Big(1+\frac{|z_1-w_1|}{\tilde\eps'(z)}\Big)^{-2}\Big(1+\frac{|z_2-w_2|}{\tilde\eps''(w)}\Big)^{-2}\big|G(w)\big|\,dw\\
&= C \int_\bR\frac1{\tilde\eps'(z)}\Big(1+\frac{|z_1-w_1|}{\tilde\eps'(z)}\Big)^{-2}\bigg(\int_\bR \frac1{\tilde\eps''(w)}\Big(1+\frac{|z_2-w_2|}{\tilde\eps''(w)}\Big)^{-2}\big|G(w_1,w_2)\big|\,dw_2\bigg)\,dw_1\\
&= C\int_\bR\frac1{\tilde\eps'(z)}\Big(1+\frac{|z_1-w_1|}{\tilde\eps'(z)}\Big)^{-2} (T|G|)(w_1,z_2)\,dw_1\\
&\le C\,M_1(T|G|)(z_1,z_2)\ , \end{aligned}\end{equation*} where $M_1f(z_1,z_2)$ denotes the one-dimensional Hardy-Littlewood maximal function of $f(\cdot,z_2)$ evaluated at $z_1$ and $$
Tf(w_1,z_2)=\int_\bR \frac1{\tilde\eps''(w)}\Big(1+\frac{|z_2-w_2|}{\tilde\eps''(w)}\Big)^{-2}f(w_1,w_2)\,dw_2\ . $$
In analogy with the previous case, the operator $T^*$, $$
T^*h(w_1,w_2)=\int_\bR \frac1{\tilde\eps''(w)}\Big(1+\frac{|z_2-w_2|}{\tilde\eps''(w)}\Big)^{-2}h(w_1,z_2)\,dz_2\ , $$
is dominated by
$$
\sup_{0<\eps<1}\int_\bR \frac1{\eps}\Big(1+\frac{|z_2-w_2|}{\eps}\Big)^{-2}\big|h(w_1,z_2)\big|\,dz_2= M_2h(w_1,w_2)\ , $$ $M_2$ being now the Hardy-Littlewood maximal operator in the second variable. It follows that $T$, and hence $U_{12}$, is bounded on $L^2$ and this proves the statement for $p=2$.
The conclusion for $1<p<2$ follows by Riesz-Thorin interpolation. \end{proof}
We go back to the proof of Theorem \ref{vertical}, recalling that we are assuming $p'=3q$. Observing that $p'/2>2$ and combining together \eqref{square}, \eqref{E2} and Lemma \ref{Tpp'}, we have $$
\|\cE^\la_{\eps',\eps''} g\|_{L^{p'}(\bR^2)}\le C\big(\|G_+\|_{L^r(A)}+\|G_-\|_{L^r(A)}\big)^\half\ , $$ with $G_\pm$ as in \eqref{change} and $r=(p'/2)'=\frac p{2-p}$. To express the right-hand side in terms of the original function $g$, we find that \begin{equation*}\begin{aligned}
\|G_+\|_{L^r(A)}^r&=\int_A \Big|\frac{G_0\big(x_+(z),y_+(z)\big)}{\ph'\big(x_+(z)\big)-\ph'\big(y_+(z)\big)}\Big|^r\,dz\\
&=\int_{U_+} \frac{|G_0(x,y)|^r}{|\ph'(x)-\ph'(y)|^{r-1}}\,dx\,dy\\
&=\int_{U_+} \frac{|g(x)|^r|g(y)|^r}{|\ph'(x)-\ph'(y)|^{r-1}}\kappa(x)^\frac r3\kappa(y)^\frac r3\,dx\,dy\ . \end{aligned}\end{equation*}
Making the change of variables $$ u=\ph'(x)\ ,\qquad v=\ph'(y)\ , $$ and setting $x(u)=(\ph')\inv(u)$, $y(v)=(\ph')\inv(v)$, we obtain that \begin{equation*}\begin{aligned}
\|G_+\|_{L^r(A)}^r&=\int_{\ph'(U_+)} \frac{|g\big(x(u)\big)|^r|g\big(y(v)\big)|^r}{|u-v|^{r-1}}\kappa\big(x(u)\big)^{\frac r3-1}\kappa\big(y(v)\big)^{\frac r3-1}\,du\,dv\ . \end{aligned}\end{equation*}
Notice that $1\le r<2$, so that we can interpret, up to a constant factor, the integral as the pairing $\lan I^{2-r}f,f\ran$, where $I^\alpha$ denotes fractional integration of order $\alpha$ and $f(u)=|g\big(x(u)\big)|^r\kappa\big(x(u)\big)^{\frac r3-1}$. By the Hardy-Littlewood-Sobolev inequality, $$
\|G_+\|_{L^r(A)}^r\le C_r\|f\|_{L^s(\ph'(U_+))}^2\ , $$ with $s=\frac 2{3-r}$. The same estimate holds for $G_-$, so that, for this value of $s$, \begin{equation*}\begin{aligned}
\|\cE^\la_{\eps',\eps''} g\|_{L^{p'}(\bR^2)}&\le C_p\|f\|_{L^s(\ph'(U))}^\frac1r\\
&=C_p\Big(\int_{\ph'(U)}|g\big(x(u)\big)|^\frac{2r}{3-r}\kappa\big(x(u)\big)^{-\frac 23}\,du\Big)^\frac{3-r}{2r}\\
&=C_p\Big(\int_U|g(x)|^\frac{2r}{3-r}\kappa(x)^{\frac 13}\,dx\Big)^\frac{3-r}{2r}\\
&=C_p\|g\|_{L^\frac{2r}{3-r}(I,\mu)}\ . \end{aligned}\end{equation*}
But $\frac{2r}{3-r}=(p'/3)'=q'$ with $q$ as in the statement of the theorem. \end{proof}
Consider now the truncated strong maximal function of $\widehat f$, \begin{equation}\label{eq:Mplus}
\cM^+f(x)=\sup_{0<\eps',\eps''<1/4}\frac1{4\eps'\eps''}\int_{|s|<\eps',|t|<\eps''}\big|\widehat f\big(x+s,\ph(x)+t\big) \big|\,ds\,dt\ ,\qquad x\in I. \end{equation}
From Theorem \ref{vertical} we obtain the following inequality for $ \cM^+$ for a more restricted range of $p$.
\begin{corollary}\label{tildeM} The inequality \begin{equation}\begin{aligned}
\| \cM^+f\|_{L^q(I,\mu)}\le C_p\|f\|_{L^p(\bR^2)}\ ,\qquad f\in\cS(\bR^2), \end{aligned}\end{equation} holds for $1\le p<\frac87$ and $p'\ge 3q$. \end{corollary}
\begin{proof}
As before, we assume $p'=3q$. Let $h=f*f^*$, where $f^*(x,y)=\overline{f(-x,-y)}$. Then $\widehat h=|\widehat f|^2$, so that $\|h\|_r\le\|f\|_p^2$, with $r=\frac p{2-p}<\frac43$. Then, for $s$ such that $r'=3s$, $\|\cM h\|_s\le C_r\|f\|_p^2$. But, for $\eps',\eps''<\frac14$ and $\chi$ as in \eqref{Mv}, \begin{equation*}\begin{aligned}
\frac1{4\eps'\eps''}\int_{|s|<\eps',|t|<\eps''}\big|\widehat f\big(x+s,\ph(x)+t\big) \big|\,ds\,dt&\le \Big(\frac1{4\eps'\eps''}\int_{|s|<\eps',|t|<\eps''}\big|\widehat f\big(x+s,\ph(x)+t\big) \big|^2\,ds\,dt\Big)^\half\\
&=\Big(\frac1{4\eps'\eps''}\int_{|s|<\eps',|t|<\eps''}\widehat h\big(x+s,\ph(x)+t\big) \,ds\,dt\Big)^\half\\ &\le \Big( \int \widehat h\big(x+s,\ph(x)+t\big) \chi_{4\eps'}(s)\chi_{4\eps''}(t)\,ds\,dt\Big)^\half\\ &\le \big(\cM h(x)\big)^\half\ . \end{aligned}\end{equation*}
Hence $\| \cM^+f\|_{L^q(I,\mu)}\le \|\cM h\|_{q/2}^\half$ and it can be easily checked that $q/2=s$. \end{proof}
\section{Lebesgue points of $\widehat f$ along a curve}\label{sec-differentiation}
Adapting standard arguments, cf.~\cite{S}, we obtain the following reformulation of Theorem \ref{lebesgue} (ii), where $B_\eps$ denotes the disk of radius $\eps$ centered at $0$.
\begin{corollary} Let $1\le p<\frac87$ and $S$ be a $C^2$ curve in the plane. Given $f\in L^p(\bR^2)$, for almost every $x\in I$ relative to affine arclength, $$
\lim_{\eps\to0}\frac1{|B_\eps|}\int_{B_\eps}\big|\widehat f\big(x+x',\ph(x)+y'\big)-\cR f\big(x,\ph(x)\big)\big|\,dx'\,dy'=0\ . $$ \end{corollary}
\begin{proof} We may restrict ourselves to a subset of $S$ which is the graph of a $C^2$ function $\ph$ on an interval $I$ with $\ph''\ne0$. Let $\mu$ be as in Section \ref{sec-vertical}.
Given $\tau>0$, let $g\in\cS(\bR^2)$ such that $\|f-g\|_p<\tau$. Since $\cR g=\widehat g_{|_S}$,
\begin{equation*}\begin{aligned} F(x)&=\limsup_{\eps\to0}\frac1{|B_\eps|}\int_{B_\eps}\big|\widehat f\big(x+x',\ph(x)+y'\big)-\cR f\big(x,\ph(x)\big)\big|\,dx'\,dy'\\
&\le \limsup_{\eps\to0}\frac1{|B_\eps|}\int_{B_\eps}\big|\widehat {(f-g)}\big(x+x',\ph(x)+y'\big)\big|\,dx'\,dy'+\big|\cR (f-g)\big(x,\ph(x)\big)\big|\\
&\le \cM^+(f-g)(x)+\big|\cR (f-g)\big(x,\ph(x)\big)\big|\ . \end{aligned}\end{equation*}
Hence, if $q=p'/3$, $\|F\|_{L^q(I,\mu)}\le C\tau$ for every $\tau>0$, i.e., $F=0$ $\mu$-a.e. \end{proof}
\vskip1cm
\end{document} |
\begin{document}
\title{On scattered posets with finite dimension}\dedicatory{ To the memory of Eric C.Milner (1928-1997)}
\author{Maurice Pouzet}\address{Math\'ematiques, ICJ, Universit\'e Claude-Bernard Lyon1, 43 Bd 11 Novembre 1918, 69622 Villeurbanne Cedex, France} \email{pouzet@univ-lyon1.fr} \author{Hamza Si Kaddour} \address{Math\'ematiques, ICJ, Universit\'e Claude-Bernard Lyon1, 43 Bd 11 Novembre 1918, 69622 Villeurbanne Cedex, France} \email{sikad@univ-lyon1.fr} \author{Nejib Zaguia} \address{SITE, Universit\'e d'Ottawa, 800 King Edward Ave, Ottawa, Ontario, K1N6N5,Canada} \email{zaguia@site.uottawa.ca}
\thanks{Research done under the auspices of the CMCU Franco-Tunisien "Outils math\'ematiques pour l'informatique". This research was completed while the authors visited each others. Support provided by the university of Ottawa is gratefully acknowledged}
\keywords{Ordered sets, Scattered posets, Scattered topological spaces. Dushnik-Miller dimension. }
\subjclass[2000]{06 A06, 06 A15, 54G12}
\date{\today }
\begin{abstract} We discuss a possible characterization, by means of forbidden configurations, of posets which are embeddable in a product of finitely many scattered chains. \end{abstract}
\maketitle
\section*{Introduction and presentation of the results}
A fundamental result, due to Szpilrajn \cite{szpilrajn30}, states that every order on a set is the intersection of a family of linear orders on this set. The \emph{dimension} of the order, also called the dimension of the ordered set, is then defined as the minimum cardinality of such a family (Dushnik, Miller \cite{dushnik-miller}). Specialization of Szpilrajn's result to several types of orders have been studied \cite{{bonn-pouz2}}. An ordered set (in short poset), or its order, is \emph {scattered} if it does not contain a subset which is ordered as the chain $\eta$ of rational numbers. Bonnet and Pouzet \cite{bonn-pouz1} proved that \emph{a poset is scattered if and only if the order is the intersection of scattered linear orders}.
It turns out that there are scattered posets whose order is the intersection of finitely many linear orders but which cannot be the intersection of finitely many scattered linear orders. We give ten examples in Theorem \ref{thm:main}. This naturally leads to the following question: \begin{question} \label{que:scatt} If an order is the intersection of finitely many scattered linear orders, is this order the intersection of $n$ scattered linear orders, where $n$ is the dimension of this order?
\end{question}
We do not have the answer even for dimension two orders. We cannot even answer this: \begin{question} If an order of dimension two is the intersection of three scattered linear orders, is this order the intersection of two scattered linear orders?
\end{question}
Question \ref{que:scatt} is a special instance of the following general question:
\emph{Given a positive integer $n$, which orders are intersection of at most $n$ scattered linear orders?}
We propose an approach based on the notion of obstruction.
Let $n$ be a nonnegative integer; let $\mathcal {L}(n)$, resp. $\mathcal {L}_{\mathcal S}(n)$ be the class of posets $P$ whose order is the intersection of at most $n$ linear orders, resp. at most $n$ scattered linear orders. Set $\mathcal {L}(<\omega):=\bigcup_{n<\omega}\mathcal {L}(n)$ and $ \mathcal {L}_{\mathcal S}(<\omega):=\bigcup_{n<\omega}\mathcal {L}_{\mathcal S}(n)$.
These four classes are \emph{closed under embeddability}, that is if $\mathcal C$ is one of these classes, then for every poset $P\in \mathcal C$, a poset $Q$ belongs to $\mathcal C$ whenever it is embeddable in $P$ (that is $Q$ is isomorphic to an induced subposet of $P$). Say that an \emph {obstruction} to a class $\mathcal C$ as above is any poset not belonging to $\mathcal C$. Then such a class $\mathcal C$ can be characterized by obstructions, e.g.\ as the class of posets in which no obstruction to $\mathcal C$ is embeddable. But, it can also be characterized by means of smaller collections of obstructions. If $\mathcal B$ is a class of posets, denote by $Forb(\mathcal B)$ the class of posets in which no member of $\mathcal B$ is embeddable.
With this terminology, we may ask:
\emph{Find $\mathcal B$ as simple as possible such that $\mathcal {L}_{\mathcal S}(n)=Forb(\mathcal B)$.}
The following question emerges immediately: \begin{question} Is there a cardinal $\lambda$ such that every obstruction to $\mathcal {L}_{\mathcal S}(n)$ contains an obstruction of size at most $\lambda$? \end{question}
As it can be easily seen, the existence of such a cardinal for an arbitrary class closed under embeddability follows readily from the \emph{Vop\v{e}nka principle}, a strong set theoretical principle which could be inconsistent with usual set theoretical axioms. It implies the existence of large cardinal numbers (e.g.\ supercompact cardinals) and its consistency is implied by the existence of huge cardinals (see \cite{jechst78} pp. 413--415).
In the case of $\mathcal {L}_{\mathcal S}(n)$ we do not know if $\lambda$ exists. In fact, \emph{we conjecture that it exists and is countable.}
The same general question for $\mathcal {L}(n)$ has a simpler answer: each obstruction contains a finite one. Indeed, as it is well known, \emph{ a poset $P$ belongs to $\mathcal {L}(n)$ whenever for every \emph {finite} subset $A$ of $P$ the poset induced by $P$ on $A$ is also in $\mathcal {L}(n)$} (this striking fact is a consequence of the compactness theorem of first order logic --- for a proof, see the survey \cite{kelly}). Furthermore, if $\mathcal Crit(\mathcal {L}(n))$ denotes the collection of minimal obstructions (that is the collection of finite posets $Q$ whose dimension is larger than $n$, whereas every proper subposet has dimension at most $n$), then $\mathcal {L}(n)=Forb(\mathcal Crit(\mathcal {L}(n)))$. Members of $\mathcal Crit(\mathcal {L}(n))$ have dimension $n+1$; these posets are the so-called \emph{$n+1$-irreducible posets} \cite{trotter}. For $n=1$, there is just one: the two element antichain. For $n=2$, a complete description has been given by D.~Kelly in 1972 (see \cite{kelly}). For $n>2$ a description seems to be hopeless; in fact, the problem to decide whether or not a finite poset belongs to $\mathcal {L}(n)$ is NP-complete. If $\mathcal C=\mathcal {L}(<\omega)$, every obstruction contains a countable one (this easily follows from the finitary result mentioned above), hence $\mathcal {L}(<\omega)=Forb(\mathcal B)$ where $\mathcal B$ is a set of countable posets, each with a countable dimension. In terms of obstructions, Question \ref{que:scatt} amounts to: \begin{question} Does $\mathcal Crit(\mathcal {L}(n))$ determine $\mathcal {L}_{\mathcal S}(n)$ within $\mathcal {L}_{\mathcal S}(<\omega)$?\end{question}
We rather consider the following: \begin{question} Can $\mathcal {L}_{\mathcal S}(<\omega)$ be determined within $\mathcal {L}(<\omega)$ by a finite set $\mathcal B_{\mathcal S}$ of obstructions?
\end{question}
We provide ten examples of obstructions. All are countable and have dimension at most $3$.
In order to present these examples,
we denote by $P^*$ the dual of a poset $P$, we denote by $\check P$ the set $P$ equipped with the strict order $<$. We denote by
$B(\check P)$ the poset defined as follows: the underlying set is $P\times \{0,1\}$, the ordering defined by $(x,i) < (y,j)$ if $i < j$ and $x< y$. This poset is the \emph{open split} of $P$. It is clearly bipartite, moreover $B(\check P^*)$ is order-isomorphic to $B(\check P)^*$. Let $T_2$ be the infinite binary tree and let $\Omega (\eta )$ be the infinite binary tree in which each level is totally ordered in an increasing way from left to right (see Figure \ref{Omega} for an equivalent representation).
\begin{figure}
\caption{$ \Omega(\eta)$}
\label{Omega}
\end{figure} We prove:
\begin{theorem} \label {thm:main} A poset whose order is the intersection of finitely many scattered linear orders contains no isomorphic copy of $\eta$, $T_2$, $\Omega (\eta)$, $B(\check \eta)$, $B(\check T_2)$, $B(\check \Omega (\eta))$ and their duals. \end{theorem}
Since $\eta$ and $B(\check \eta)$ are self dual, this list contains only ten members. In fact, these members do not embed in each other (Lemma \ref{lem:incomp}).
\begin{problem} Does this list determine the class $\mathcal {L}_{\mathcal S}(<\omega)$ of orders which are intersections of finitely many scattered linear orders within the class $\mathcal {L}(<\omega)$ of orders which are intersections of finitely many linear orders? \end{problem}
The reader will notice that each of our obstructions distinct from $\eta$ contains an infinite antichain. This is general. Indeed, if a poset $P$ is scattered with no infinite antichain, each linear extension of the order on $P$ is scattered (\cite {bonn-pouz1}, see also \cite {bonn-pouz2}), hence if $dim (P)=n$, the order is the intersection of $n$ scattered linear orders.
The occurrence of open splits in Theorem \ref{thm:main} asks for an explanation. We present one, despite the fact that it is not fully satisfactory. It is based on the notion of split rather than open split. If $P$ is a poset, the \emph{split} of $P$ is the poset $B(P)$ whose underlying set is $P\times\{0, 1\}$ ordered by: $$(x,i)<(y,j)\; \text{if } \; x\leq y\; \text{ and} \; i<j.$$
We prove: \begin{theorem}\label{bip2} Let $P$ be a poset. Then $P\in \mathcal L_{\mathcal S}(<\omega)$ if and only if $B(P)\in \mathcal L_{\mathcal S}(<\omega)$. \end{theorem}
The analogous equivalence with $B(\check P)$ instead of $B(P)$ is in general false. But, if $P$ is $\eta$, $T_2$, $\Omega (\eta)$ or their dual, $B(P)$ and $B(\check P)$ can be embedded in each other (Lemma \ref{lem:comp}). Hence, in order to prove Theorem \ref{thm:main} it suffices to prove that $\eta$, $T_2$, $\Omega (\eta)$ and their dual are obstructions to $\mathcal {L}_S(<\omega)$ and to apply Theorem \ref{bip2}. In order to do that, we introduce a peculiar object: the topological closure $\overline {N(P)}$ in the powerset $\mathfrak P(P)$ of the MacNeille completion $N(P)$ of a poset $P$. As a poset, $\overline {N(P)}$ is an algebraic lattice.
We prove:
\begin{theorem}\label{thm:second}
Let $P$ be a poset and $n$ be a positive integer. Then the following properties are equivalent:
\begin{enumerate}[{(i)}]
\item The order on $P$ is the intersection of $n$ scattered linear orders;
\item $\overline{N(P)}$ is embeddable into a product of $n$ scattered linear orders.
\end{enumerate}
Moreover, if one of these conditions holds, $\overline {N(P)}$ is topologically scattered.
\end{theorem}
With this result at hand, in order to show that if $P$ is $\eta$, $T_2$ or $\Omega (\eta)$, $P$ is an obstruction, it suffices to observe that $\overline{N (P )}$ is not topologically scattered. We give the proof of this fact in Section \ref{section:proofs}.
Note that while $N(P)$ and $N(P^*)$ are dually isomorphic, $\overline{N(P)}$ and $\overline {N(P^*)}$ are not. Hence, one can be topologically scattered, whereas the other is not. For example, $\overline{N(T_{2}^*)}$ is topologically scattered and $\overline{N(T_{2})}$ is not. \begin{question} If $dim (P)\leq n$ and both $\overline{N(P)}$ and $\overline {N(P^*)}$ are topologically scattered, is the order on $P$ the intersection of $n$ scattered linear orders?\end{question}
After such an unsuccessful attempt at a description of $\mathcal L_{\mathcal S}(<\omega)$ by means of obstructions, we looked at subclasses $\mathcal C$ of $\mathcal L_{\mathcal S}(<\omega)$ such that every member of $\mathcal L_{\mathcal S}(<\omega)$ can be embedded in a member of $\mathcal C$. It turns out that the class of scattered distributive lattices of finite dimension has this property. In fact: \begin{theorem}\label{thm:distributivelattice}Let $T$ be a distributive lattice. The following properties are equivalent: \begin{enumerate}[{(i)}] \item The order on $T$ is the intersection of $n$ scattered linear orders.
\item $T$ is isomorphic to a sublattice of a product of $n$ scattered chains.
\item $dim(T)\leq n$ and $T$ is order-scattered.
\end{enumerate}
\end{theorem}
We also consider extensions of our initial question.
Instead of linear orders, we consider interval orders and instead of scattered linear orders, interval orders which can be represented as intervals of a scattered chain. Instead of ordered sets we consider incidence structures, we replace linear orders by Ferrers relations, we replace MacNeille completion by Galois lattices and scattered linear orders by Ferrers relations whose Galois lattice is scattered. We obtain an extension of Theorem \ref{thm:second} (see Theorem \ref {thm:extension}). From our study, it follows that a positive answer to our initial question implies a positive answer to the extensions we consider. The basic objects of our study are incidence structures and Galois lattices. One of our key results is a property of the topological closure of Galois lattices (Theorem \ref {thm:topologicalbouchet}) which refines Bouchet's Coding theorem (\cite {bouchetetat}, see also \cite {bouchet}, see Theorem \ref{bouchet}).
To conclude, we mention a specialization of our question which comes from the following observation. All finite ordered sets of dimension $2$ are obtained as follows.
Let $\underline n:= \{0,1,\dots,n-1\}, n \geq 2$ and $C$ be the linear ordering $0<1<\dots <n-1$ on $\underline n$. Let $\sigma$ be a permutation on $\underline n$, distinct from the identity map. Define the order $\leq _{\sigma}$ on $\underline n$ by $x\leq _{\sigma} y$ if and only if $x \leq_C y$ and $\sigma (x) \leq _C \sigma (y)$. Let $P_{\sigma}:= (\underline n ,\leq _{\sigma})$, and $C_{\sigma}:= \{(x,y): \sigma (x) \leq _C \sigma (y)\}$, then $\leq _{\sigma}$ is the intersection of $C$ and $C_{\sigma}$. Thus $P_{\sigma}$ has dimension $2$. For infinite posets, even countable, the situation is quite different. Orders which are the intersection of two orders of type $\omega$ are close to finite orders. A characterization in terms of obstructions is included in the following:
\begin{theorem}
An order on an infinite set is the intersection of $n$ linear orders of type $\omega$ if and only if: \begin{enumerate}[{(i)}] \item The order has dimension at most $n$. \item The poset does not contain an infinite antichain, an infinite decreasing chain, the chain $\omega +1$ and the direct sum $\omega\oplus 1$ of the chain $\omega$ with the one-element chain. \end{enumerate} \end{theorem}
See Proposition 4.1 and Corollary 4.2 of \cite{pouzet-sauer}.
A more general question is the following:
\begin{question} \label{que:sametype}
Given a positive integer $n$ and an order type $\alpha$. Which orders are the intersection of $n$ linear orders of the same type $\alpha$?
\end{question}
More specifically
\begin{question}
Characterize by means of obstructions the posets which are embeddable into posets whose order is the intersection of $n$ linear orders of the same type $\alpha$?
\end{question}
This paper is composed as follows. Section \ref{section:ingredients} contains the definitions of the main notions with a development on incidence structures and Galois lattices and coding. It includes our refinement of Bouchet's Coding theorem, and also some basic facts on Ferrers relations, interval orders and dimension. Section \ref {section:scattered} contains a discussion on the notions of scattered dimension, including Theorem \ref {thm:extension}. Sections \ref{section:proofs} and \ref {section:proof4} contain the proofs of the results presented above. Section \ref{section:two-dimensional} contains a characterization of orders which are intersection of two scattered linear orders (Theorem \ref{equiv}).
\section{Ingredients} \label{section:ingredients}
Our terminology follows \cite{daveypriestley} and \cite{gratzer98}. Among set theoretical notations, we point out that if $f$ is a map from a set $E$ to a set $F$, and $A$ is a subset of $E$, the set $\{f(x): x\in A\}$, the \emph{image} of $A$ by $f$, is denoted by $f[A]$ rather than $f(A)$. \subsection{Order, lattices and topology} As usual, a {\it poset} is the pair $P$ formed of a set $E$ and an order $\varepsilon$ on $E$. If the order is \emph {linear}(or total), the poset is a \emph{chain}. The {\it dual} of $P:= (E, \varepsilon)$ is $P^*:=(E, \varepsilon ^{-1})$. If this causes no confusion, we will denote an order on $E$ by the symbol $\leq$ and its complement by $\not \leq$; we will denote the equality relation by $=$ (and, when needed, by $\Delta _E:=\{ (x,x): x\in E\}$), we identify $P$ with $E$, writing $x\in P$ instead of $x\in E$. We will denote by $x \parallel _P y$ the fact that two elements $x$ and $y$ of $P$ are incomparable. Given a poset $P:=(E, \leq)$, a subset $I$ of $E$ is an {\it initial segment} (or is {\it closed downward}) if $x\leq y$ and $y\in I$ imply $x\in I$. Let $X$ be a subset of $E$, we set: \begin{align}\label{downarrow} \downarrow\hskip -2pt X:= \{y\in E: y\leq x \text{ for some } x\in X\}. \end{align} This set is an initial segment, in fact the least initial segment containing $X$. We say that $\downarrow\hskip-2pt X$ is generated by $X$. If $X$ contains only one element $x$, we write $\downarrow\hskip -2pt x$ instead of $\downarrow\hskip -2pt \{x\}$. An initial segment of this form is \emph{principal}. We set $down(P):=\{\downarrow x: x\in P\}$.
We denote by ${\bf I}(P)$ the set of initial segments of $P$ ordered by inclusion. For example, ${\bf I}((E, \Delta_E))={\mathfrak P}(E)$ the power set of $E$ ordered by inclusion, whereas ${\bf I}((\mathbb{Q},\leq))$ is the {\it Cantor chain}. We also denote by ${\bf I}_{<\omega}(P)$ the set of finitely generated initial segments of $P$ ordered by inclusion.
An {\it ideal} of $P$ is a non empty initial segment $I$ which is up-directed, that is every pair $x, y\in I$ has an upper bound $z\in I$. We denote by ${\mathcal J}(P)$ the set of ideals of $P$ and by $\mathcal J^{\neg \downarrow\hskip -2pt }(P)$ the subset of non-principal ideals
of $P$.
Let $N(P)$ be the set made of intersections of principal initial segments of $P$. Ordered by inclusion, $N(P)$ is a complete lattice, called the \emph{MacNeille completion of $P$}.
A \emph{join-semilattice} is a poset $P$ such that every two elements $x,y$ have a least upper-bound, or join, denoted by $x\vee y$. If $P$ has a least element, that we denote $0$, this amounts to say that every finite subset of $P$ has a join. An element $a$ in a lattice $L$ is {\it compact}\index{compact} if for every $A\subset L$, $a\leq \bigvee A$ implies $a\leq \bigvee A'$ for some finite subset $A'$ of $A$. The lattice $L$ is {\it compactly generated}\index{compactly generated} if every element is a supremum of compact elements. A lattice is {\it algebraic}\index{algebraic} if it is complete and compactly generated. Algebraic lattices and join-semilattices with a least element are sides of the same coin. Indeed, the set $K(L)$ of compact elements of an algebraic lattice $L$ is a join-semilattice with a least element and $L$ is isomorphic to the set $\mathcal J(K(L))$ of ideals of $K(L)$, ordered by inclusion. Conversely, the set $\mathcal J(P)$ of ideals of a join-semilattice $P$ having a least element, once ordered by inclusion, is an algebraic lattice, and the subset $K(\mathcal J(P))$ of its compact elements is isomorphic to $P$. We note that if $P$ is an arbitrary poset, ${\bf I}(P)$ is an algebraic lattice and $K({\bf I}(P))={\bf I}_{ <\omega}(P)$. Hence, $\mathcal J({\bf I}_{ <\omega}(P))$ is order isomorphic to ${\bf I}(P)$. We also note that $\mathcal J(P)$ is the set of join-irreducible elements of $I(P)$; moreover, ${\bf I}_{<\omega}(\mathcal J(P))$ is order-isomorphic to ${\bf I}(P)$ whenever $P$ has no infinite antichain.
Identifying the power set
$\mathfrak{P}(E)$ of a set $E$ with $2^{E}$, we may view it as a topological space. A basis of open sets consists of subsets of the form $O(F,G):=\{X\in \mathfrak{P}(E): F\subseteq X $ and $ G\cap X=\emptyset \}$, where $F, G$ are finite subsets of $E$. As it is customary, we denote by $\overline {\mathcal F}$ the topological closure of a subset $\mathcal F$ of $\mathfrak P(E)$.
Recall that a compact totally disconnected space is called a {\it Stone space}, whereas a {\it Priestley space} is a set $X$ together with a topology and an ordering which is compact and \emph {totally order disconnected} in the sense that for every $x,y\in X$ such that $x\not \leq y$ there is some clopen initial segment containing $y$ and not $x$. Closed subspaces of $\mathfrak{P}(E)$, with the inclusion order added, are Priestley spaces \cite{prie}. For an example, we recall that if $L$ is an algebraic lattice then, with the topology induced by the product topology on $\mathcal J(K(L))$, it becomes a Priestley space. Priestley spaces are associated to bounded distributive lattices as Stone spaces are associated to Boolean algebras. We will recall in Section \ref{section:proof4} the properties we need about the relationship between Priestley spaces and distributive lattices. We refer to \cite{prie} and to \cite {daveypriestley} for an introduction to Stone-Priestley duality and to \cite{Gierz and all} for more on topologically ordered structures. \subsection{Basic facts} We will need the following basic result due to O.Ore and T.Hiraguchi (see \cite{schroder}): \begin{lemma}\label{lem:dimensionproduct} Let $P$ be a poset and $\kappa$ be a cardinal. The order on $P$ is the intersection of $\kappa$ linear orders if and only if $P$ is embeddable in a product of $\kappa$ chains. \end{lemma}
\begin{lemma} \label{lem:product} \begin{enumerate} \item Let $P$ and $Q$ be two posets. If $P$ is embeddable in $Q$ then $\mathcal J(P)$ is embeddable in $\mathcal J (Q)$. \item Let $(P_i)_{i\in I}$ be a family of posets, then $\mathcal J(\Pi_{i\in I} P_i)$ is order-isomorphic to $\Pi_{i\in I} \mathcal J(P_i)$ provided that $I$ is finite. \end{enumerate} \end{lemma} \begin{proof} \noindent The proof of Item 1 is immediate. For Item 2, let $A$ be a subset of $Q:=\Pi_{i\in I} P_i$. Given $i\in I$, let $p_i: Q\rightarrow P_i$ be the $i$-th projection and $p_i[A]$ be the image of $A$. Finally, set $\overline p(A):= (p_i[A])_{i\in I}$. We prove that $\overline p$ induces an order-isomorphism from $\mathcal J(Q)$ onto $\Pi_{i\in I} \mathcal J(P_i)$. From this, Item 2 follows. Let $A\in \mathcal J(Q)$. First, we claim that $\overline p(A)\in \Pi_{i\in I} \mathcal J(P_i)$. Indeed, let $i\in I$. Since $p_i$ is order-preserving and $A$ is up-directed, $p_i[A]$ is up-directed. Furthermore, $p_i[A]\in \mathbf {I}( P_i)$. Indeed, let $x\in P_i$ and $y\in p_i[A]$ such that $x\leq y$. Let $\overline y\in A$ such that $p_i(\overline y)=y$. Let $\overline x\in \Pi_{i\in I} P_i$ be defined by $\overline x_i:=x$ and $\overline x_j:=\overline y_j$ for $j\not= i$. Then $\overline x\leq \overline y$. Since $A\in \mathbf{I}(\Pi_{i\in I} P_i)$, $\overline x\in A$, and thus $x\in p_i[A]$, proving that $p_i[A]\in \mathbf{I}(P_i)$. Since $p_i[A]$ is up-directed, $p_i[A]\in \mathcal J(P_i)$. Thus $\overline p(A)\in \Pi_{i\in I} \mathcal J(P_i)$ as claimed. Next, let $\overline A:=(A_i)_{i\in I}\in \Pi_{i\in I} \mathcal J(P_i)$. Then, trivially, $\Pi_{i\in I}A_i\in\mathcal J(Q)$. Since all $A_i$'s are non-empty, $\overline p(\Pi_{i\in I}A_i)=\overline A$, proving that $\overline p$ is surjective. To conclude that $\overline p$ is an isomorphism, we note that $A=\Pi_{i\in I}p_i[A]$ for every $A\in \mathcal J(Q)$. Indeed, we have trivially $A\subseteq \Pi_{i\in I}p_i[A]$. 
For the reverse inclusion, let $\overline x\in \Pi_{i\in I}p_i[A]$. For each $i\in I$, select $\overline y(i)\in A$ such that $\overline y(i)_i=\overline x_i$. Since $I$ is finite and $A$ is up-directed, there is $\overline z\in A$ which majorizes each $\overline y(i)$. Due to our choices, $\overline z$ majorizes $\overline x$. Thus $\overline x\in A$, as required. \end{proof}
Let $E$ be a set and $\mathcal F$ be a subset of $\mathfrak P(E)$. We say that $\mathcal F$ is \emph{closed under intersections} if $\cap \mathcal F'\in \mathcal F$ for every
subset $\mathcal F'$ of $\mathcal F$ (with the convention that $\cap \mathcal F'=E$ if $\mathcal F'=\emptyset$). We denote by $\mathcal F^{\wedge}$ the set of intersections of members of $\mathcal F$ (in particular $E\in \mathcal F^{\wedge}$). Hence $\mathcal F$ is closed under intersections if and only if $\mathcal F=\mathcal F^{\wedge}$. Sets closed under intersections are usually called \emph{Moore families}. As it is well known, a Moore family $\mathcal F$ is topologically closed in $\mathfrak P(E)$ if and only if it is closed under unions of up-directed subfamilies. Moore families correspond to closure systems, those which are topologically closed to \emph{algebraic closure systems} \cite{gratzer98}, \cite {Gierz and all}. We will need the following fact.
\begin{proposition}\label{prop:topoclo}
Let $E$ be a set and $\mathcal F$ be a subset of $\mathfrak P(E)$.
Then $\overline{\mathcal F}^{\wedge}=\overline{\mathcal F^{\wedge}}$.
\end{proposition} \begin{proof} It relies on the following claims.
\begin{claim}\label {claim:wedge1}
$\overline{\mathcal F^{\wedge}}$ is closed under intersections.
\end{claim}
\noindent{\bf Proof of Claim \ref{claim:wedge1}.} Set $\mathcal G:=\overline{\mathcal F^{\wedge}}$. Let $\mathcal G'\subseteq \mathcal G$ and $X:= \bigcap \mathcal G'$. We prove that $X\in \mathcal G$. For that we prove that $O(F,G)\cap {\mathcal F^{\wedge}}\not = \emptyset$ for each finite $F\subseteq X $ and finite $G\subseteq E\setminus X$. We may suppose $X\not =E$ (otherwise, since $E\in {\mathcal F^{\wedge}}$, $X\in \mathcal G$ as required). Let $ a\in G$. Since $X= \bigcap \mathcal G'$ there is some $X_a\in \mathcal G'$ such that $X\subseteq X_a \subseteq E\setminus \{a\}$. Since $X_{a}\in \mathcal G$, there is some $Y_{F, a}\in O(F,\{a\})\cap \mathcal F^{\wedge}$. Let $X_{F}:= \cap_{a\in G} Y_{F, a}$. Clearly $X_F\in O(F,G)\cap\mathcal F^{\wedge}$. This proves our claim.
\endproof
\begin{claim}\label {claim:wedge2}
$\overline{\mathcal F}^{\wedge}$ is topologically closed. \end{claim}
\noindent{\bf Proof of Claim \ref{claim:wedge2}.} Set $\mathcal G:=\overline{\mathcal F}^{\wedge}$. Let $X\in \overline {\mathcal G}$. Then $O(F,G)\cap \mathcal G\not = \emptyset$ for each finite $F\subseteq X $ and finite $ G\subseteq E\setminus X$. This implies that for every finite subset $F\subseteq X$, $a\in E\setminus X$, $O(F,\{a\})\cap \overline{\mathcal F}\not = \emptyset$. Let $ a\in E\setminus X$. Since $\overline{\mathcal F}$ is compact, the intersection $\bigcap \{ O(F,\{a\}): F\subseteq X, \; F\; \text{finite}\} \cap \overline{\mathcal F}$ is non empty. Pick $X_a$ in this intersection. Let $X':=\bigcap_{a\in E\setminus X} X_{a}$. Then $X'\in \overline{\mathcal F}^{\wedge}= \mathcal G$. But, since each $X_a$ contains $X$, $X=X'$, hence $X\in \mathcal G$. It follows that $\overline {\mathcal G}= \mathcal G$. This proves our claim. \endproof
From Claim \ref{claim:wedge1} we deduce that $\overline{\mathcal F}^{\wedge}$ is included into $\overline{\mathcal F^{\wedge}}$ and from Claim \ref{claim:wedge2} the reverse inclusion.
\end{proof}
If $P$ is a poset, we have $N(P)= {down(P)}^{\wedge}$. Hence, Proposition \ref{prop:topoclo} yields immediately:
\begin{corollary}
$\overline {N(P)}=\overline {down(P)}^{\wedge}$.
\end{corollary}
We recall the following fact (\cite {bekk-pouz-zhan} Corollary 2.4). \begin{lemma}\label{lem:ideals1} \begin{equation}\label{eq:ideals1} down(P)\subseteq \mathcal {J}(P)\subseteq \overline {down (P)}\setminus\{\emptyset\}. \end{equation}
In particular, the topological closures in $\mathfrak{P}(P)$ of $down(P)$ and $\mathcal {J}(P)$ are the same. \end{lemma}
\begin{lemma} \label{lem:ideals2}Let $P$ be a join-semilattice with a least element. Then: \begin{equation} \overline {down(P)}=\overline {N(P)}= \mathcal J(P). \end{equation} \end{lemma} \begin{proof} We start with the following: \begin{claim}\label{claim:ideal2}\begin{equation}\label {eq:ideal2} down(P)\subseteq N(P)\subseteq \mathcal J(P). \end{equation} \end{claim} \noindent{\bf Proof of Claim \ref{claim:ideal2}.} Trivially, $down (P)\subseteq \mathcal J(P)$. Since $P$ is a join-semilattice with a least element, $\mathcal J(P)$ is closed under intersection. Hence, $N(P)$ which is made of the
intersections of members of $down(P)$ is included into $\mathcal J(P)$. \endproof
With Lemma \ref{lem:ideals1} this yields:\begin{equation}\label {eq:ideal3} \overline {down(P)}\subseteq \overline {N(P)}\subseteq \overline{\mathcal J(P)}\subseteq \overline {down (P)}. \end{equation} To conclude, we note that $\mathcal J(P)$ is topologically closed. Indeed, $\mathcal J(P)$ is closed under union of up-directed sets and as, observed above, it is closed under intersection. \end{proof} \subsection{Incidence structures and coding} Let $E,F$ be two sets. A {\it binary relation from $E$ to $F$} is any subset $\rho$ of the cartesian product $E\times F$. As usual, we denote by $x\rho y$ the fact that $(x,y)\in \rho$ and by $x \neg \rho y$ the negation. The triple $R:=(E,\rho ,F)$ is an {\it incidence structure}; its
{\it complement} is $\neg R :=(E, \neg \rho,F)$, where $\neg \rho:= E\times F\setminus \rho$, whereas its {\it dual } is $R^{-1} :=(F,\rho ^{-1},E)$, where $\rho ^{-1} := \{ (y,x) : (x,y) \in \rho \}$. We set $L_R(Y):= \{ x\in E : \{ x\} \rho Y \}$, resp. $U_R(X):= \{ y\in F : X\rho \{ y\} \}$, for each $Y\subseteq F$, resp. $X\subseteq E$. And we use $L_R(y)$ and $U_R(x)$ for $L_R(\{y\})$ and $U_R(\{x\})$. With these notations, we have $U_R(X)=L_{R^{-1}}(X)$. The sets $Gal(R):=\{ L_R(Y): Y\subseteq F\}$ and $Gal(R^{-1}):=\{ U_R(X): X\subseteq E\}$ are closed under intersection; hence, once ordered by inclusion, they are complete lattices. Ordered by inclusion, $Gal(R)$ is the
{\it Galois lattice} of $R$. A fundamental result is that $Gal(R^{-1})$ is isomorphic to $Gal(R)^*$, the {\it dual} of $Gal(R)$. If $P:= (E, \leq)$ is a poset, $Gal((E, \not\geq ,E))= {\bf I}(P)$, whereas $Gal((E, \leq ,E))=N(P)$.
Let $R:=(E,\rho ,F)$, $R':=(E',\rho ',F')$ be two incidence structures, a {\it coding from} $R$ {\it into} $R'$ is a pair of maps $f: E\rightarrow E' , \ \ g: F\rightarrow F'$ such that
$$x\rho y \Longleftrightarrow f(x)\rho ' g(y)$$
for all $x\in E$ and $y\in F$. When such a pair exists, we say that $R$ has a {\it coding into} $R'$.
\begin{example} If $R:= (E, \rho, F)$ is an incidence structure, the pair $(f,g)$, where $f(x):= L_R\circ U_R(x)$ for $x\in E$ and $g(y):= L_R(y)$ for $y\in F$, is a coding from $R$ into $(Gal(R), \subseteq, Gal(R))$. \end{example}
If $E=F$ and $E'=F'$, the pairs $(E,\rho)$, $(E',\rho')$ are {\it binary relational structures} (or simply, {\it directed graphs}) and a map $f:E\rightarrow E'$ is an {\it embedding} if it is one-to-one and $$x\rho y \Longleftrightarrow f(x)\rho' f(y)$$ for all $x, y\in E$. When such a map exists, we say that $(E,\rho)$ {\it is embeddable into} $(E', \rho')$.
\begin{example}If $\rho$ and $\rho'$ are two orders and $(E', \rho')$ is a complete lattice, $R$ has a coding into $R'$ if and only if $(E, \rho)$ is embeddable in $(E', \rho')$.
\end{example}
Bouchet's Coding theorem (\cite {bouchetetat}, see also \cite {bouchet}) is a striking illustration of the links between coding and embedding.
\begin {theorem}\label {bouchet} Let $T$ be a complete lattice and $R$ be an incidence structure, then $R$ has a coding into $(T,\leq ,T)$ if and only if $Gal(R)$ is embeddable in $T$. \end {theorem}
\begin{corollary}\label{lem:coding-embedding} Let $R:= (E, \rho, F)$ and $R':= (E', \rho', F')$ be two incidence structures. Then $Gal(R)$ is embeddable in $Gal(R')$ whenever $R$ has a coding into $R'$. \end{corollary}
We will need the following strengthening of Corollary \ref{lem:coding-embedding}. \begin{theorem}\label{thm:topologicalbouchet} Let $R:= (E, \rho, F)$ and $R':= (E', \rho', F')$ be two incidence structures. If $R$ has a coding into $R'$ then there is an embedding $\phi$ from $\overline {Gal(R)}$ into $\overline {Gal(R')}$ and a continuous and order preserving map $\psi$ from a closed subspace $\mathcal H$ of $\overline {Gal(R')}$ onto $\overline{Gal(R)}$ such that $\psi\circ \phi =1_{\overline {Gal(R)}}$. \end{theorem} \begin{proof} Let $(f,g)$ be a coding from $R$ into $R'$. Let $f^{d}: \mathfrak P(E')\rightarrow \mathfrak P(E)$ be defined by $f^d(X'):= f^{-1}(X')$ for $X'\subseteq E'$. The map $f^d$ is continuous. With the fact that $\mathfrak P(E')$ is compact, it follows that: \begin{equation}\label{eq:close1} f^{d}[\overline {\mathcal F'}]= \overline {f^d[\mathcal F']} \end{equation} for every $\mathcal F'\subseteq \mathfrak P(E')$.
Let $\mathcal F:= \{L_{R'}(g[Y]): Y\subseteq F\}$. Clearly, $\mathcal F$ is closed under intersection and included into $Gal(R')$. Furthermore, since $(f,g)$ is a coding:
\begin{equation}\label{eq:close2} f^{d}(L_{R'}(g(y)))=L_R(y) \end{equation} for every $y\in F$.
Hence, \begin{equation}\label{eq:close3} f^{d}(L_{R'} (g[Y]))=L_R(Y) \end{equation} for every $Y\subseteq F$.
This implies: \begin{equation}\label{eq:close4} f^{d}[ {\mathcal F}]= Gal(R). \end{equation} With equation (\ref{eq:close1}), this yields: \begin{equation}\label{eq:close5} f^{d}[ \overline {\mathcal F}]= \overline{Gal(R)}. \end{equation} Set $\mathcal H:= \overline {\mathcal F}$ and $\psi:= f^{d}_{\restriction \mathcal H}$. Clearly $\mathcal H$ is a closed subset of $Gal(R')$ and $\psi$ is a continuous and order preserving map from $\mathcal H$ into $\overline {Gal(R)}$. Let $X\in \overline{ Gal(R)}$. According to equation (\ref{eq:close5}), $X$ belongs to the range of $\psi$. Set $\phi (X):= \cap \psi^{-1}(X)$. Since $\mathcal F$ is closed under intersections, $\overline {\mathcal F}$ is closed under intersections too (Claim \ref{claim:wedge1}). By definition, $\psi$ preserves intersections. It follows that $\psi(\phi(X))=X$. From this fact, $\phi(X)$ is the least member $X'$ of $\mathcal H$ such that $\psi (X')=X$. This and the fact that $\psi$ preserves intersections imply that $\phi$ is order preserving. \end{proof} \begin{remark} The map $\phi$ in the proof of Theorem \ref{thm:topologicalbouchet} above does not need to be continuous. For an example, take $R:=(P, \not \geq , P)$, $R':= (P', \not \geq, P')$ where $P$ and $P'$ are two posets type $1+\omega^*$ and $(1\oplus 1)+\omega^*$ respectively (here, $1\oplus 1$ denotes a $2$-element antichain) and, as a coding from $R$ to $R'$, the pair $(f,f)$ where $f$ is an embedding from $P$ into $P'$.\end{remark}
From Theorem \ref{thm:topologicalbouchet}, Lemma \ref{lem:ideals2} and Lemma \ref{lem:product}, we derive the following result. \begin{proposition} \label {prop:embedproduct} If an incidence structure $R:=(E, \rho, F)$ has a coding in $(Q, \leq, Q)$, where $Q:=\Pi_{i\in I} C_i$ is a finite product of chains, then $\overline {Gal(R)}$ is embeddable in the product $\Pi_{i\in I} \mathbf {I}(C_i)$. \end{proposition} \begin{proof} Set $C'_i:= 1+C_i$ for each $i\in I$ and $Q':=\Pi_{i\in I} C'_i$. A coding from $R$ in $(Q, \leq ,Q)$ induces a coding from $R$ in $(Q', \leq ,Q')$. According to Theorem \ref{thm:topologicalbouchet}, such a coding yields an embedding from $\overline {Gal(R)}$ into $\overline {N(Q')}=\overline {Gal((Q',\leq, Q'))}$. The poset $Q'$ is a join-semilattice with a least element, hence according to Lemma \ref{lem:ideals2}, $\overline {N(Q')}= \mathcal J(Q')$. According to Lemma \ref{lem:product}, $\mathcal J(Q')$ is isomorphic to $\Pi_{i\in I} \mathcal J(C'_i)$. To conclude, observe that $\mathcal J(C'_i)=\mathbf {I}(C_i)$. \end{proof}
We need also the following properties:
\begin{lemma}\label{lem:trivia}Let $(f,g)$ be a coding from $R:= (E, \rho, F)$ into $R':= (E', \rho', F')$ and $(\rho'_i)_{\in I}$ such that $\rho':=\bigcap_{i\in I} \rho'_{i}$ then $\rho= \bigcap_{i\in I} \rho_{i}$ where $\rho_i:=\{(x,y)\in E\times F: f(x)\rho'_i g(y)\}$. \end{lemma} The proof is immediate. \begin{lemma} \label{lem:dimensional} Let $R:= (E, \rho, F)$ be an incidence structure. \begin{enumerate} \item If $\rho= \bigcap_{i\in I} \rho_{i}$ where each $\rho_i$ is an incidence relation from $E$ to $F$, then $Gal(R)$ is embeddable in $T:= \Pi_{i\in I}Gal(R_i)$ where $R_i:= (E, \rho_i, F)$. \item If $Gal(R)$ is embeddable in a product $C:= \Pi_{i\in I}C_i$ of posets, then $\rho= \bigcap_{i\in I} \rho_{i}$ where each $\rho_i$ is an incidence relation from $E$ to $F$ such that $Gal((E, \rho_i,F))$ is embeddable in $N(C_i)$. \end{enumerate} \end{lemma} \begin{proof} $(1)$. Let $A\subseteq E$. Set $\varphi(A):= (L_{R_{i}}\circ U_{R_{i}}(A))_{i\in I}$. Clearly: \begin{equation}\label{eq:implication} A\subseteq B\; \text{implies}\; \varphi(A)\subseteq \varphi(B). \end{equation} Hence $\varphi$ is an order-preserving map from $\mathfrak P(E)$ into $T$. In particular, its restriction to $Gal(R)$ is order-preserving. The fact that this is an embedding is an immediate consequence of the following: \begin {claim}\label{claim:equality} \begin{equation} A= \bigcap_{i\in I} L_{R_{i}}\circ U_{R_{i}}(A) \;\text{provided that}\; A=L_{R}\circ U_{R}(A). \end{equation} \end{claim} \noindent{\bf Proof of Claim \ref{claim:equality}.} From $\rho \subseteq \rho_i$ for all $i\in I$, we have $A\subseteq \bigcap_{i\in I} L_{R_{i}}\circ U_{R_{i}}(A)\subseteq \bigcap_{i\in I} L_{R_{i}}\circ U_{R}(A)$. From $\rho= \bigcap_{i\in I} \rho_{i}$ we get $\bigcap_{i\in I} L_{R_{i}}(B)=L_R(B)$ for every $B\subseteq F$. Applying this to $B:=U_R(A)$, we get $A\subseteq \bigcap_{i\in I} L_{R_{i}}\circ U_{R_{i}}(A)\subseteq \bigcap_{i\in I} L_{R_{i}}\circ U_{R}(A)=L_R\circ U_R(A)$. 
The claim follows immediately. \endproof
\noindent $(2)$. Let $c: Gal(R)\rightarrow C$ be an embedding and $p_i: C\rightarrow C_i$ be the $i$-th projection. Set $f(x):= L_R\circ U_R(x)$ for $x\in E$ and $g(y):= L_R(y)$ for $y\in F$. Set $f_i := p_i\circ c\circ f$, $g_i := p_i\circ c\circ g$ and $\rho_{i}:=\{(x,y)\in E\times F: f_i(x)\leq_i g_i(y)\}$. Then $(f_i, g_i)$ is a coding from $R_i:= (E, \rho_i, F)$ into $(C_i, \leq_i, C_i)$. Thus, from Lemma \ref{lem:coding-embedding}, $Gal(R_i)$ is embeddable in $Gal((C_i, \leq_i, C_i))=N(C_i)$. To conclude observe that $(f,g)$ is a coding from $R$ into $(Gal(R), \subseteq, Gal(R))$, hence $\rho= \bigcap_{i\in I} \rho_{i}$. \end{proof}
\subsection{Ferrers relations, interval orders and dimensions}
Let $R:= (E, \rho, F)$ be an incidence structure. The binary relation $\rho$ from $E$ to $F$ is a \emph{Ferrers relation} if for every $x,x'\in E$, $y,y'\in F$, $x\rho y$ and $x'\rho y'$ imply $x\rho y'$ or $x'\rho y$. As it is well known, $\rho$ is Ferrers if and only if $Gal(R)$ is a chain. It follows from Bouchet's theorem that \emph{$Gal(R)$ is a chain if and only if $R$ has a coding into $(C, \leq, C)$ where $C$ is a chain}.
Let $C$ be a chain, an \emph{interval} of $C$ is any subset $I$ of $C$ such that $x,y\in I, z\in C$ and $x<z<y$ imply $z\in I$. One may order the set $Int (C)$ of non empty intervals of $C$ by setting $I<J$ if $ x < y$ for all $x\in I$ and $y\in J$. Let $P$ be a poset; the order on $P$ is an \emph{interval order}, and by extension $P$ too, if $P$ is isomorphic to a subset of $Int (C)$ for some chain $C$. We recall that:
\begin{lemma}\label{lem:intervalorder} A poset $P$ is an interval order if and only if $(P,<, P)$ is a Ferrers relation, or equivalently $(P, <, P)$ has a coding into a chain. \end{lemma}
Let $\mathcal F$, resp. $\mathcal J$, be the class of Ferrers relations, resp. interval orders. We recall that the \emph {Ferrers dimension} of an incidence structure $R:= (E, \rho, F)$ is the least cardinal $\kappa$ such that $\rho$ is the intersection of $\kappa$ \emph{Ferrers relations} from $E$ to $F$. We denote it by $\mathcal F-dim(R)$. The \emph{interval dimension} of $P$ is the smallest cardinal $\kappa$ such that the order on $P$ is the intersection of $\kappa$ interval orders. We denote it by $\mathcal I-dim (P)$.
We recall two basic results relating these notions, due to Bouchet \cite{bouchet} and Cogis \cite{cogis}, namely:
\begin{equation}\label{ferrerlarge} \mathcal F-dim((P, \leq, P))=dim (P) \end{equation} and \begin{equation}\label{ferrerstrict} \mathcal F-dim ((P, <,P))=\mathcal I-dim(P) \end{equation} for every poset $P$.
These three notions of dimension: order dimension, Ferrers dimension and interval dimension are based on three classes of structures: chains, Ferrers relations and interval orders and are expressible in terms of Galois lattices. Replacing these classes by others yields other notions of dimension that we discuss at the end of this section. \subsection{Bipartite posets} A poset is \emph{bipartite} if it is the union of two antichains. We recall the following result: \begin{lemma} Let $Q$ be a bipartite poset. Then \begin{equation} \label{bipartite} \mathcal{I}-dim(Q)\leq dim(Q)\leq \mathcal{I}-dim(Q)+1. \end{equation} \end{lemma}
Let $R:= (E, \rho, F)$ be an incidence structure. The \emph{bipartite poset associated to} $R$, denoted by $B(R)$, is the poset whose base set is $E':= E\times \{0\}\cup F\times\{1\}$ ordered by: $$(x,i)<(y,j)\; \text{if } \; (x,y)\in \rho \; \text{ and} \; i<j.$$
If $P:=(E, \leq)$ we set $B(P):= B(E, \leq, E)$ and $B(\check P):= B(E,<, E)$. The posets $B(P)$ and $B(\check P)$ are respectively called the \emph{split} and the \emph{open split} of $P$.
We note that if $R$ is an incidence structure then $R$ has a coding into $(B(R), \leq , B(R))$ as well as in $(B(R), < , B(R))$. In particular: \begin{equation}\label{eq:edding} Gal(R) \; \text{is embeddable into}\; N(B(R)). \end{equation}
As a corollary of (\ref{eq:edding}) it turns out that for every poset $P$:\begin{equation}\label{claim:embedding} N(P) \; \text{is embeddable into}\; N(B(P)). \end{equation}
Note also that:
\begin{lemma}\label{claim:embedding2} If $P$ is a poset, $B(P)$ is embeddable in a product $P\times C$ where $C$ is a chain of the form $D+D$, the order type of $D$ being given by any linear extension of $P^*$. \end{lemma} We will use the following easy fact:\begin{lemma}\label{lem:easyfact} Let $R:=(E, \rho, F)$ and $R':= (E', \rho, F')$ be two incidence structures. Every coding $(f,g)$ from $R$ to $R'$ such that $f$ and $g$ are one to one induces an embedding of $B(R)$ in $B(R')$. The converse holds if for every $x\in E$ there is some $y\in F$ such that $(x,y)\in \rho$. \end{lemma}
We also recall the following result of Bouchet and Cogis: \begin{equation}\label{bouchet-cogis} \mathcal{F}-dim (R)=\mathcal{I}-dim(B(R))=dim(Gal(R)). \end{equation} The first equality in (\ref{bouchet-cogis}) added to equality (\ref{ferrerlarge}) yields: \begin{equation}\label{eq:bip} dim (P)=\mathcal{I}-dim(B(P)). \end{equation}
Similarly, the first equality in (\ref{bouchet-cogis}) added to equality (\ref{ferrerstrict}) yields: \begin{equation}\label{eq:bipstrict} \mathcal{I}-dim (P)=\mathcal{I}-dim(B(\check P)). \end{equation}
Inequalities (\ref{bipartite}) with equality (\ref{eq:bip}) yield
\begin{equation} \label{eq:bip1} dim(P)\leq dim(B(P))\leq dim (P)+1.
\end{equation}
Similarly, inequalities (\ref{bipartite}) with equality (\ref{eq:bipstrict}) yield
\begin{equation} \label{eq:split1} \mathcal{I}-dim(P)\leq dim(B(\check P))\leq \mathcal{I}-dim (P)+1.
\end{equation}
Inequalities (\ref{eq:bip1}) are due to Kimble (cf. \cite{trotter-moore}).
Let $\underline {2}.P$ be the ordinal product of the two-element chain $\underline 2$ by a poset $P$. This is the set of pairs $(x,i)$, with $x\in P$, $i\in 2$, lexicographically ordered (that is $(x,i)\leq (x',i')$ if either $x<x'$ or $x=x'$ and $i\leq i'$). \begin{lemma}\label{bisplit} Let $P$ be a poset and $Q:= \underline 2.P$ then: \begin{enumerate} \item$B(P)$ is embeddable in $B(\check Q)$. \item $B(\check P)$ is embeddable in $B(Q)$. \end{enumerate}\end{lemma} \begin{proof} Item (1). Let $f$ and $g$ be the maps from $P$ to $Q$ defined by $f(x):=(x,0)$ and $g(x):=(x,1)$. Then $(f,g)$ is a one-to-one coding of $(P, \leq , P)$ in $(Q,<,Q)$. This coding induces an embedding from $B(P)$ in $B(\check Q)$.
Item (2). Let $f'$ and $g'$ be the maps from $P$ to $Q$ defined by $f'(x):=g(x)$ and $g'(x):=f(x)$. Then $(f',g')$ is a one-to-one coding of $(P, < , P)$ in $(Q,\leq,Q)$. This coding induces an embedding from $B(\check P)$ in $B(Q)$. \end{proof} \begin{proposition} \label{open-nonopensplit}$B(P)$ and $B(\check P)$ are embeddable in each other whenever $\underline {2}.P$ is embeddable in $P$. \end{proposition} \begin{proof} Let $Q:=\underline 2.P$. Suppose that $Q$ is embeddable in $P$. Then $B(Q)$ is embeddable in $B(P)$. According to item (2) of Lemma \ref{bisplit}, $B(\check P)$ is embeddable in $B(Q)$. Hence $B(\check{P})$ is embeddable in $B(P)$. Similarly, $B(\check Q)$ is embeddable in $B(\check P)$. According to item (1) of Lemma \ref{bisplit}, $B( P)$ is embeddable in $B(\check Q)$. Hence $B(P)$ is embeddable in $B(\check P)$.\end{proof}
\subsection{A relativisation of the notions of dimension} Let $\mathcal R$ be a class of incidence structures and let $R: = (E, \rho, F)$ be an incidence structure. If $\rho$ is the intersection of incidence relations $\rho_i$ such that $(E, \rho_i, F)\in \mathcal R$, we define the $\mathcal R$-\emph{dimension} of $R$, that we denote by $\mathcal R-dim(R)$, as the least cardinal $\kappa$ such that $\rho$ is the intersection of $\kappa$ such relations. Let $\mathcal D$ be a class of posets and let $P$ be a poset. If the order $\leq$ is the intersection of orders $\leq_i$ such that $(E, \leq_i)\in \mathcal D$, the $\mathcal D$-\emph{dimension} of $P$, that we denote by $\mathcal D-dim(P)$, is the least cardinal $\kappa$ such that $\leq$ is the intersection of $\kappa$ such orders. If the poset $P$ is embeddable in a product of members of $\mathcal D$ we denote by $\mathcal D-\pi dim(P)$ the least cardinal $\kappa$ such that $P$ is embeddable in a product of $\kappa$ members of $\mathcal D$. For example, if $\mathcal R$ is the class $\mathcal F$ of Ferrers relations, $\mathcal R-dim(R)$ is the Ferrers dimension of $R$. If $\mathcal D$ is the class $\mathcal L$ of chains, $\mathcal D-dim(P)$ is the order dimension of $P$ and if $\mathcal D$ is the class of interval orders, $\mathcal D-dim(P)$ is the interval dimension of $P$.
\begin{definition} A class $\mathcal C$ of posets is \emph{dimensional} if:
\begin{enumerate}
\item $\underline 2 \in \mathcal C$.
\item If $C\in \mathcal C$ and $C'$ is embeddable in $C$ then $C'\in \mathcal C$.
\item If $C\in \mathcal C$ then $N(C)\in \mathcal C$.
\end{enumerate} \end{definition}
Let $(C_i)_{i\in I}$ be a family of posets such that $I$ is equipped with a well-ordering. The \emph{lexicographical product} of this family is the poset denoted $\bigodot _{i\in I}C_i$ whose underlying set is the cartesian product $\Pi_{i\in I} C_i$, the ordering being defined by: $$(x_i)_{i\in I}\leq (y_i)_{i\in I}$$ if either $(x_i)_{i\in I}= (y_i)_{i\in I}$ or $x_{i_0}<y_{i_0}$ where $i_0$ is the least $ i \in I$ such that $x_{i_0}\not =y_{i_0}$.
\begin{proposition} \label{prop:dimension}Let $\mathcal C$ be a dimensional class of posets and $Gal^{-1}(\mathcal C)$ be the class of incidence structures $S$ such that $Gal(S)\in \mathcal C$.
Then: \begin{enumerate}[{(i)}] \item \label{itemi} $Gal^{-1}(\mathcal C)-dim(R) = {\mathcal C}-\pi dim(Gal(R))$ for every incidence structure $R:= (E, \rho, F)$. \item \label{itemii}$Gal^{-1}(\mathcal C)-dim((P, \leq ,P))={\mathcal C}-\pi dim(P)$ for every poset $P$. \item \label{itemiii} If $\mathcal I ({\mathcal C})$ is the class of posets $(L,\leq)$ such that $Gal((L, <, L))\in \mathcal C$ then ${\mathcal C}-\pi dim(Gal((P, <, P)))\leq {\mathcal I({\mathcal C})}-dim(P)\leq {\mathcal C}-dim(Gal((P, <, P)))$. \end{enumerate}
Let $\kappa$ be a cardinal. If $\bigodot _{i\in I}C_i\in \mathcal C$ whenever $(C_i)_{i\in I}$ is a family of members of $\mathcal C$ such that $\vert I\vert < \kappa$ then:
\begin{enumerate}[{(i')}]
\item \label{itemi'}${\mathcal C}-dim(P)={\mathcal C}-\pi dim(P)$ for every poset $P$ such that ${\mathcal C}-\pi dim(P)<\kappa$.
\item \label{itemii'} ${\mathcal I({\mathcal C})}-dim(P)={\mathcal C}-dim(Gal((P, <, P)))$ for every poset $P$ such that ${\mathcal C}-\pi dim(Gal((P, <,P)))<\kappa$.
\end{enumerate} \end{proposition}
\begin{proof} Observe that since a poset $P$ is embeddable in the power set $\mathfrak P(P)$ ordered by inclusion, and since this poset is isomorphic to the power $\underline 2^{P}$, $P$ is embeddable in a power of $\underline 2$. Since $\underline 2\in \mathcal C$, ${\mathcal C}-\pi dim (P)$ is well-defined.
\noindent Item $(i)$. Let $\kappa:={\mathcal C}-\pi dim(Gal(R))$. According to the observation above, this quantity is well-defined. Let $C:= \Pi_{i\in I}C_i$ be a product of $\kappa$ members of $\mathcal C$ such that $Gal(R)$ is embeddable in $C$. According to Item $(2)$ of Lemma \ref{lem:dimensional}, $\rho= \bigcap_{i\in I} \rho_{i}$ where each $\rho_i$ is an incidence relation from $E$ to $F$ such that $Gal((E, \rho_i,F))$ is embeddable in $N(C_i)$. Since $\mathcal C$ is dimensional, $Gal((E, \rho_i,F))\in \mathcal C$, hence $Gal^{-1}(\mathcal C)-dim(R)$ is well-defined and $Gal^{-1}(\mathcal C)-dim(R) \leq {\mathcal C}-\pi dim(Gal(R))$. The converse inequality follows immediately from Item $(1)$ of Lemma \ref{lem:dimensional}.
\noindent Item $(ii)$. We have $N(P)=Gal((P, \leq, P))$. Hence, from Item $(i)$, we have $Gal^{-1}(\mathcal C)-dim((P, \leq ,P))={\mathcal C}-\pi dim(N(P))$. Since $P$ is embeddable in $N(P)$, ${\mathcal C}-\pi dim(P)\leq {\mathcal C}-\pi dim(N(P))$. To get the converse inequality, note that if $P$ is embeddable in a product $C:=\Pi_{i\in I}C_i$ then, since each $C_i$ is embeddable in $N(C_i)$, $C$ is embeddable in $C':= \Pi_{i\in I}N(C_i)$, hence $P$ is embeddable in $C'$. Since $C'$ is a complete lattice, $N(P)$ is embeddable in $C'$. From the fact that $\mathcal C$ is dimensional, $C'\in \mathcal C$. The result follows.
\noindent Item $(iii)$. Set $R:= (P, <,P)$. We prove first the second inequality. \begin{claim} \label {claim:interval}$\mathcal I({\mathcal C})-dim(P)\leq {\mathcal C}-dim(Gal(R))$. \end{claim} \noindent{\bf Proof of Claim \ref {claim:interval}.} Let $(f,g)$ be the coding from $R$ into $(Gal(R), \subseteq, Gal(R))$ defined by $f(x):= L_R\circ U_R(x)$ for $x\in P$ and $g(y):= L_R(y)$ for $y\in P$. We have: \begin{equation} \label{eq:intorder} g(x)\subset f(x) \end{equation}
for all $x\in P$. Indeed, since $x\not <x$, we have $f(x)\not \subseteq g(x)$; on the other hand we have $g(x)=L_R(x)\subseteq L_R\circ U_R(x)=f(x)$. Now, let $\mathcal L'$ be an order extending the inclusion order on $Gal(R)$. Set $\mathcal L:= \{(x,y)\in P\times P: (f(x),g(y))\in \mathcal L'\}$. Then $\mathcal L$ is irreflexive and transitive. Indeed, according to (\ref{eq:intorder}) we have $g(x)\subset f(x)$, thus $(g(x), f(x))\in \mathcal L'$. This implies that $(f(x),g(x))\not \in \mathcal L'$, hence $(x,x)\not \in \mathcal L$, proving that $\mathcal L$ is irreflexive. Let $(x,y), (y,z)\in \mathcal L$. Since $g(y)\subset f(y)$, $(g(y), f(y))\in \mathcal L'$. This easily yields that $(x, z)\in \mathcal L$, thus $\mathcal L$ is transitive. If, moreover $(Gal(R), \mathcal L')\in \mathcal C$, $Gal((P, \mathcal L, P))\in \mathcal C$. Indeed, $(f,g)$ is a coding from $(P, \mathcal L, P)$ into $(Gal(R), \mathcal L', Gal(R))$. Hence, from Bouchet's theorem (cf. Corollary \ref{lem:coding-embedding}), $Gal((P, \mathcal L, P))$ is embeddable into $Gal((Gal(R), \mathcal L', Gal(R)))= N((Gal(R), \mathcal L'))$. Since $\mathcal C$ is dimensional, if $(Gal(R), \mathcal L')\in \mathcal C$, $N((Gal(R), \mathcal L'))\in \mathcal C$ too. Thus $Gal((P, \mathcal L, P))\in\mathcal C$. With that, our claim follows from Lemma \ref{lem:trivia}. \endproof
\begin{claim}\label{claim:idim} $Gal^{-1}(\mathcal C)-dim(R)\leq \mathcal I({\mathcal C})-dim(P)$. \end{claim} \noindent{\bf Proof of Claim \ref{claim:idim}.} Trivial. \endproof
From $(i)$ we have $Gal^{-1}(\mathcal C)-dim(R) = {\mathcal C}-\pi dim(Gal(R))$. Thus, with Claim \ref{claim:idim}, ${\mathcal C}-\pi dim(Gal(R))\leq \mathcal I({\mathcal C})-dim(P)$. This is the first inequality. With that, the proof of Item $(iii)$ is complete.
\noindent Item $(i')$. We have ${\mathcal C}-\pi dim(P)\leq {\mathcal C}-dim(P)$ without any condition on $\mathcal C$. Indeed, if the order $\leq$ on $P$ is the intersection of a family $(\leq_i)_{i\in I}$ of orders on $P$, the map $\delta: P\rightarrow P^I$ defined by $\delta(x)(i):=x$ is an embedding of $P$ in the direct product $\Pi_{i\in I} P_i$ where $P_i:=(P, \leq_i)$. Conversely, suppose that there is an embedding from $P$ in a direct product $Q:= \Pi_{i\in I} P_i$, with $P_i\in \mathcal C$. Let $P'$ be the image of $P$. \begin{claim}\label{claim:lexico}The order on $Q$ is the intersection of $ \vert I\vert $ orders $\leq_i$ such that $(Q, \leq_i)\in \mathcal C$. \end{claim} \noindent{\bf Proof of Claim \ref{claim:lexico}.} For each $i\in I$, choose a well-ordering $\mathcal L_i$ on $I$ for which $i$ is the first element and let $Q_i$ be the lexicographical product of the $P_i$'s indexed by $L_i:= (I, \mathcal L_i)$. The order on $Q$ is the intersection of the orders of the $Q_i$'s. If each $P_i$ belongs to $\mathcal C$, then with our hypothesis on $\mathcal C$, the $Q_i$'s belong to $\mathcal C$. \endproof
Now, the order on $P'$ is the intersection of the orders induced on $P'$ by the $Q_i$'s. Since $\mathcal C$ is dimensional, these orders belong to $\mathcal C$, hence ${\mathcal C}-dim(P)\leq \vert I\vert$. Thus Item $(i')$ holds.
Item $(ii')$. Apply Item $(i')$ to $Gal((P, <, P))$ and use Item $(iii)$.
With this, the proof of Proposition \ref{prop:dimension} is complete. \end{proof}
Since the class of chains is preserved under lexicographical products, Proposition \ref{prop:dimension} applied to $\mathcal C:= \mathcal L$ yields formulas (\ref{ferrerlarge}) and (\ref{ferrerstrict}).
\section{Scattered posets and scattered topological spaces}\label{section:scattered}
A poset $P$, or its order as well, is \emph{scattered} if it does not contain a subset ordered as the chain $\eta$ of rational numbers; in other words, the chain $\eta$ is not embeddable in $P$. A topological space is \emph{scattered} if every non-empty subset has at least an isolated point (w.r.t. the induced topology). Sometimes, to avoid confusion, we use the terms \emph{ordered scattered} and \emph{topologically scattered}. These two notions are quite related. This is particularly the case when the order and the topology are defined on the same universe. For example, if the ordering is linear and the topology is the interval-topology, the chain is complete if and only if the space is compact (Hausdorff). Moreover, \emph{if $C$ is a complete chain, the conditions that $C$ is order-scattered, $C$ is topologically scattered, $C$ is order isomorphic to ${\bf I } (D)$, where $D$ is a scattered chain, are equivalent}. From this fact follows that \emph{a chain $D$ is order scattered if and only if its MacNeille completion $N(D)$ is order scattered}.
The class $\mathcal S$ of scattered posets is closed downward, that is if $P\in \mathcal S$ and $Q$ is embeddable in $P$ then $Q\in \mathcal S$. Furthermore, it is closed under finite direct product and under finite lexicographical product. In particular, the class $\mathcal L_{\mathcal S}$ of scattered chains is preserved under finite lexicographical product. This property, and $(i')$ of Proposition \ref{prop:dimension}, yield an important known fact:
\begin{proposition}\label{cor:product} Let $n$ be a positive integer. An ordered set $P$ is embeddable in a product of $n$ scattered chains if and only if the order on $P$ is the intersection of $n$ scattered linear orders. \end{proposition}
\subsection{Scattered dimensions} Let $\mathcal F_{\mathcal S}$, resp. $\mathcal I_{\mathcal S}$, be the class of incidence structure $R$, resp. posets $P$, such that the Galois lattice $Gal(R)$, resp. $Gal((P,<,P))$ belongs to $\mathcal L_{\mathcal S}$.
The following lemma completes the analogy between $\mathcal I_{\mathcal S}$ and $\mathcal I$: \begin{lemma}\label{lem:intervalorder} A poset $P$ belongs to $\mathcal I_{\mathcal S}$ if and only if $P$ is isomorphic to a subset of $Int (C)$ for some scattered chain $C$. \end{lemma}
Let $n$ be an integer, we denote by $\mathcal F(n)$, resp. $\mathcal I(n)$, resp. $\mathcal L(n)$ the class of incidence structures $R$, resp. of posets $P$ such that $\mathcal F-dim (R)\leq n$, resp. $\mathcal {I}-dim (P)\leq n$, resp. $dim(P)\leq n$. We define $\mathcal F_{\mathcal S}(n)$, resp. $\mathcal I_{\mathcal S} (n)$, resp. $\mathcal L_{\mathcal S}(n)$, accordingly.
\begin{theorem} \label{thm:extension}Let $n$ be an integer and let $R$ be an incidence structure, resp. a poset $P$. Then $R\in \mathcal F_{\mathcal S}(n)$, resp. $P\in \mathcal I_{\mathcal S}(n)$, resp. $P\in \mathcal L_{\mathcal S}(n)$, if and only if $Gal(R)$, resp. $Gal((P, <, P))$, resp. $N(P)$, belongs to $\mathcal L_{\mathcal S}(n)$. \end{theorem} \begin{proof} We apply Proposition \ref{prop:dimension} with $\mathcal C:=\mathcal L_{\mathcal S}$. Since $\mathcal F_{\mathcal S}=Gal^{-1}(\mathcal L_{\mathcal S})$, Item (\ref{itemi}) yields $\mathcal F_{\mathcal S}-dim(R) = \mathcal L_{\mathcal S}-\pi dim(Gal(R))$ for every incidence structure $R:= (E, \rho, F)$. From Proposition \ref{cor:product}, ${\mathcal L_{\mathcal S}}-dim(Gal(R))={\mathcal L_{\mathcal S}}-\pi dim(Gal(R))$ provided that ${\mathcal L_{\mathcal S}}-\pi dim(Gal(R))<\omega$. Thus if $R\in \mathcal F_{\mathcal S}(n)$, ${\mathcal L_{\mathcal S}}-dim(Gal(R))\leq n$, that is $Gal(R)\in\mathcal L_{\mathcal S}(n)$. The converse follows from the fact that ${\mathcal L_{\mathcal S}}-\pi dim(Gal(R))\leq {\mathcal L_{\mathcal S}}-dim(Gal(R))$. Set $R:= (P, <,P)$. Since $\mathcal I_{\mathcal S}=\mathcal I(\mathcal C)$, Item (\ref{itemiii}) yields ${\mathcal L_{\mathcal S}}-\pi dim(Gal(R))\leq \mathcal I_{\mathcal S}-dim(P)\leq {\mathcal L_{\mathcal S}}-dim(Gal(R))$. Thus, if $P\in \mathcal I_{\mathcal S}(n)$, ${\mathcal L_{\mathcal S}}-\pi dim(Gal(R))\leq n$. Since ${\mathcal L_{\mathcal S}}-\pi dim(Gal(R))={\mathcal L_{\mathcal S}}-dim(Gal(R))$, $Gal(R)\in\mathcal L_{\mathcal S}(n)$. Again, the converse follows from the fact that ${\mathcal L_{\mathcal S}}-\pi dim(Gal(R))\leq {\mathcal L_{\mathcal S}}-dim(Gal(R))$. Now, set $R:=(P,\leq, P)$. Combining Item $(i)$ and Item $(ii)$, we get ${\mathcal C}-\pi dim(Gal(R))=Gal^{-1}(\mathcal C)-dim(R)={\mathcal C}-\pi dim(P)$, hence ${\mathcal C}-\pi dim(N(P))={\mathcal C}-\pi dim(P)$.
Since ${\mathcal C}-\pi dim(P)\leq {\mathcal C}-dim(P)$, if $P\in \mathcal L_{\mathcal S}(n)$, ${\mathcal C}-\pi dim(N(P))\leq n$. With Item $(i')$ we get $N(P)\in \mathcal L_{\mathcal S}(n)$. The converse is similar. \end{proof}
With these notations, one may ask:
\begin{questions} Let $P$ be a poset and $R$ be an incidence structure.
\noindent \begin{enumerate}[{(i)}]
\item If $\mathcal L_{\mathcal S}-dim(P)$ is finite does ${\mathcal L_{\mathcal S}}-dim(P)= dim(P)$?
\item If $\mathcal I_{\mathcal S}-dim(P)$ is finite does $\mathcal I_{\mathcal S}-dim(P)= \mathcal{I}-dim(P)$?
\item If $\mathcal F_{\mathcal S}-dim(R)$ is finite does $\mathcal F_{\mathcal S}-dim(R)= \mathcal{F}-dim(R)$? \end{enumerate} \end{questions}
Question $(i)$ is just a reformulation of Question \ref{que:scatt}.
With the help of Theorem \ref{thm:extension}, one can show that a positive answer to $(i)$ is equivalent to a positive answer to $(iii)$ and implies a positive answer to $(ii)$.
\subsection{Topologically scattered spaces and Galois lattices}
\begin{lemma}\label{toposcatt}
\begin{enumerate}
\item \label{rudin}The continuous image of a compact scattered space is scattered.
\item A finite product of scattered topological spaces is scattered.
\item A Priestley space which is topologically scattered is order scattered.
\end{enumerate}
\end{lemma}
The first fact is non-trivial; it is due to W.~Rudin. The second and third facts are easy and well-known.
\noindent{\bf Remark} If $L$ is a topologically scattered algebraic lattice, the algebraic lattice $\overline {N(L)}$ is not necessarily topologically scattered. A topologically scattered algebraic lattice $L$ containing an infinite independent set $X$ will do. Indeed, recall that a subset $X$ of a lattice $L$ is \emph{independent} if $x\not \leq \vee F$ for every $x\in X$ and every $F\in [X\setminus \{x\} ]^{<\omega}$. Furthermore, if $X$ is independent, $\mathfrak P(X)$ is embeddable in $\mathcal J(L)$. Thus, if $X$ is infinite, $\mathcal J(L)$ is not order scattered. Since $\overline {N(L)}=\mathcal J(L)$, this set is not topologically scattered. For that, let $P$ be a countable well-founded poset with no infinite antichain. Set $L= {\bf I} (P)$. Then $L$ is countable, thus topologically scattered. Since $L$ is distributive, antichains of join-irreducible members of $L$ are independent subsets of $L$. To get $L$ containing an infinite antichain of join-irreducibles, take for $P$ the poset made of $\{ (m,n) \in \mathbb{N}^2: m<n\}$ ordered by setting \begin{equation}\label{eqrado}(m,n)\leq_P (m',n')\; \mbox {if either} \; m=m'\; \mbox {and}\; n \leq n' \; \mbox {or}\; n<m'. \end{equation} This poset was discovered by R. Rado~\cite{rado}.
\begin{lemma}\label{lem:cor1bouch} Let $R:= (E, \rho, F)$ and $R':= (E', \rho', F')$ be two incidence structures. If $R$ has a coding into $R'$ and $\overline{ Gal(R')}$ is topologically scattered then $\overline{ Gal(R)}$ is topologically scattered. \end{lemma} \begin{proof} According to Theorem \ref{thm:topologicalbouchet}, $\overline {Gal(R)}$ is the continuous image of a closed subspace of $\overline {Gal(R')}$. From Rudin's result ((\ref{rudin}) of Lemma \ref{toposcatt}) it is topologically scattered. \end{proof}
\begin{lemma}\label{lem:pdscatteredchains} Let $n$ be an integer and $R:= (E, \rho, F)$ be an incidence structure. If $Gal(R)$ is embeddable into a product of $n$ scattered chains, then $\overline {Gal(R)}$ too. Moreover, $\overline {Gal(R)}$ is topologically scattered. \end{lemma} \begin{proof} Suppose that $Gal(R)$ is embeddable in $Q:=\Pi_{i\in I} C_{i}$ with $\vert I \vert =n$. According to Bouchet's theorem (Theorem \ref{bouchet}), $R$ has a coding into $Q$. According to Theorem \ref{thm:topologicalbouchet}, $\overline{Gal(R)}$ is embeddable in $\overline {Gal((Q, \leq , Q))}=\overline {N(Q)}$. Since $Gal(R)$ has a least element, we may suppose w.l.o.g that $Q$ has a least element, that is each $C_i$ has a least element. Then $Q$ is a join-semilattice with a least element, hence from Lemma \ref{lem:ideals2}, $\overline {N(Q)}=\mathcal J(Q)$. Since $I$ is finite, Lemma \ref{lem:product} ensures that $\mathcal J(Q)$ is order isomorphic to $\Pi_{i\in I} \mathcal J(C_{i})$, that is to $\Pi_{i\in I} \mathbf {I}(C'_{i})$, where each $C'_i$ is such that $C_i=1+C'_{i}$. Since the $C'_i$'s are order scattered, the $\mathbf {I}(C'_i)$'s are order scattered. Their product, being finite, is scattered too. This proves the first assertion. The $\mathbf {I}(C'_i)$'s are in fact topologically scattered. Hence, as a finite product of scattered spaces, $\mathcal J(Q)$ is topologically scattered. The second part of the assertion follows from Lemma \ref{lem:cor1bouch}. \end{proof}
\section{Proofs of Theorems 1, 2 and 3}\label{section:proofs}
\subsection{Proof of Theorem \ref{thm:second}} $(i)\Rightarrow (ii)$. Suppose that $(i)$ holds. Then, according to Proposition \ref{cor:product}, $P$ is embeddable in a product $Q:=\Pi_{i\in I}C_i$ of $n$ scattered chains. In particular $(P, \leq, P)$ has a coding into $(Q, \leq, Q)$. According to Proposition \ref{prop:embedproduct}, $\overline {N(P)}$ is embeddable in $\Pi_{i\in I} {\bf I}(C_{i})$. Thus $(ii)$ holds. Moreover, from Lemma \ref{lem:pdscatteredchains}, $\overline {N(P)}$ is topologically scattered. $(ii)\Rightarrow (i)$. Suppose that $(ii)$ holds. Since $P$ is embeddable in $\overline {N(P)}$, it is embeddable in a product of $n$ scattered chains. According to Proposition \ref{cor:product}, $(i)$ holds.
\subsection{Proof of Theorem \ref{bip2}} Suppose that $B(P)\in \mathcal L_{\mathcal S}(n)$. From Theorem \ref{thm:second}, $N(B(P))\in \mathcal L_{\mathcal S}(n)$. Since from (\ref{claim:embedding}), $N(P)$ is embeddable into $N(B(P))$, $N(P)\in \mathcal L_{\mathcal S}(n)$. Hence, $P\in \mathcal L_{\mathcal S}(n)$. Conversely, suppose that $P\in \mathcal L_{\mathcal S}(n)$. In this case, we apply Lemma \ref{claim:embedding2} with $D\in \mathcal L_{\mathcal S}$. It turns out that $B(P)\in \mathcal L_{\mathcal S}(n+1)$. \endproof
\subsection{Proof of Theorem \ref{thm:main}}We prove that if $P$ is one of the ten posets listed in Theorem \ref{thm:main}, either $\overline {N(P)}$ or $\overline {N(P^*)}$ is not topologically scattered. According to Theorem \ref{thm:second} the order on $P$ cannot be the intersection of finitely many scattered linear orders and thus any poset containing a copy of $P$ has the same property.
Since for each member $P$ of our list, $P^*$ belongs to our list, it suffices to check that $\overline {N(P)}$ is not topologically scattered in the following cases.
\noindent{\bf Case 1.} $P\in \{\eta, T_2,\Omega (\eta)\}$. If $P=\eta$, $\overline {N(P)}={\bf I}(\eta)$. Topologically, this space is the Cantor set; it is not topologically scattered. If $P=T_2$, then $\overline {N(P)}$ is made of the binary tree plus the maximal branches of the binary tree and a top element added. These maximal branches form a Cantor space, hence $\overline {N(P)}$ is not topologically scattered (a strengthening of this fact will be given in Proposition \ref{lem:separdyadic}). If $P= \Omega (\eta)$, the pictorial representation of $\Omega (\eta )$ given in Figure \ref{Omega} shows that $\Omega (\eta )$ is a $2$-dimensional poset, in fact the intersection of a linear order of type $\omega$ and of a linear order of type $\omega.\eta$. Moreover, as it is easy to see, ${\bf I}(\eta)$ is embeddable in $\mathcal J(\Omega (\eta ))$. Since $\mathcal J(\Omega (\eta ))\subseteq \overline {down (\Omega (\eta ))}\subseteq \overline {N(\mathcal J(\Omega (\eta )))}$, it follows that $\overline {N(\mathcal J(\Omega (\eta )))}$ is not order scattered, hence not topologically scattered.
{\bf Case 2. } $P:= B(\check Q)$ where $Q\in \{\eta, T_2,\Omega (\eta)\}$. We deal with the three cases at once. Since $ {N(Q)}\setminus \overline {Gal((Q, <,Q))}$ is made of isolated points, it follows from Case 1 that $\overline {Gal((Q, <,Q))}$ is not topologically scattered. Since $(Q, <, Q)$ has a coding into $B((Q, <, Q))=B(\check Q)$, Theorem \ref{thm:topologicalbouchet} yields that $\overline {Gal( (Q, <, Q))}$ is the continuous image of $\overline {N(B(\check{Q}))}$. From Rudin's result ((\ref{rudin}) of Lemma \ref{toposcatt}) this latter set cannot be topologically scattered. \endproof
\begin{lemma} \label{lem:comp}If $P\in\{\eta, T_2,\Omega (\eta)\}$, $B(P)$ and $B(\check P)$ are embeddable in each other. \end{lemma} \begin{proof} Observe that $\underline {2}.P$ is embeddable in $P$ and apply Proposition \ref{open-nonopensplit}. \end{proof} \begin{lemma}\label{lem:dimten} The ten posets listed in Theorem \ref{thm:main} have dimension at most $3$. \end{lemma} \begin{proof} Trivially $\eta$ has dimension $1$. As a tree, $T_2$ has dimension $2$. Figure \ref{Omega} shows that $\Omega(\eta)$ has dimension $2$. The poset $B(\check \eta)$ is defined as the strict product of the chain of rational numbers and the $2$-element chain on $\{0,1\}$ with $0<1$. Hence, this is a $2$-dimensional poset. Let $P\in\{T_2,\Omega (\eta)\}$. Since $P$ has dimension $2$, it follows from equation (\ref{eq:bip1}) that $B(P)$ has dimension at most $3$. According to Lemma \ref{lem:comp}, $B(\check P)$ is embeddable in $B(P)$, thus $B(\check P)$ has dimension at most $3$. Let $A:= \{0\}\cup 3\times 2$ ordered so that $0$ is the least element and $(i,j)<(i',j')$ if $i=i' $ and $j<j'$. This poset is a tree obtained by taking the direct sum of three copies of a $2$-element chain and adding a least element. This tree is obviously embeddable in $T_2$. Every $2$-dimensional poset is embeddable in $\Omega (\eta)$ thus $A$ is also embeddable in $\Omega (\eta)$. Let $X:=\{(0,0)\}\cup \{((i,j),j): i<3, j<2\}$ and $B:=B(\check A)_{\restriction X}$. The poset $B$ is $3$-dimensional poset (in fact a $3$-irreducible poset). Since $A$ is embeddable in $P$, $B(\check A)$ is embeddable in $B(\check P)$. Thus, $B(\check P)$ has dimension $3$. With the fact that a poset and its dual have the same dimension, our proof is complete. \end{proof} \begin{lemma} \label{lem:incomp}The ten posets listed in Theorem \ref{thm:main} are pairwise incomparable with respect to embeddability. 
\end{lemma} \begin{proof} Let $X_0:=\eta$, $X_1:=T_2$, $X_2:=\Omega (\eta)$, $X_3:=B(\check \eta)$, $X_4:=B(\check T_2)$, $X_5:=B(\check \Omega (\eta))$, $X_6:=(T_2)^*$, $X_7:=(\Omega (\eta))^*$, $X_8:=(B(\check T_2))^*$, $X_9:=(B(\check \Omega (\eta)))^*$. We need to prove that $X_i$ is not embeddable in $X_j$ for all pairs $(i,j)$ of distinct elements. Clearly, it suffices to consider the pairs for which $i\leq 5$ and $j\leq 9$. We consider only pairs $(i, j)$ for which a significant argument is needed. For the pair $(1, 2)$ note that $\mathcal J^{\neg \downarrow\hskip -2pt }(X_1)$ is an antichain whereas $\mathcal J^{\neg \downarrow\hskip -2pt }(X_2)$ is a chain. For pairs $(3, j)$, with $j\not \in \{0, 6,8, 9 \}$, note that $\mathbf I( X_3)$ contains principal initial segments which are infinite whereas $\mathbf I(X_j)$ contains none. For pairs $(i,j)$ such that $i\in \{4,5\}$ and $j\not \in \{4,5, 8,9\}$ note that $dim (X_i)=3$ and $dim (X_j)\leq 2$ (Lemma \ref{lem:dimten}). For pairs $(i, j)$ such that $i\in \{4, 5\}$, $j\in \{3, 4, 5, 8\}$, then we may write $X_i=B(\check{Y_i})$ and $X_j=B(\check{Y_j})$. If $X_i$ is embeddable in $X_j$, it follows from Lemma \ref{lem:comp} that $B({Y_i})$ is embeddable in $B(Y_j)$. From Lemma \ref {lem:easyfact} there is a coding from $(Y_i, \leq, Y_i)$ in $(Y_j, \leq, Y_j)$, from which follows that $\overline{N(Y_i)}$ is embeddable in $\overline{N(Y_j)}$. Since $Y_i=X_{i'}$ for some $i'\leq 2$, this yields that $\overline{N(X_{i'})}$ is embeddable in $\overline{N(Y_j)}$. Except for the pair $(5,3)$ (which has been previously ruled out) this is clearly impossible. With this last argument, the proof is complete.\end{proof}
\section{Scattered distributive lattices}\label {section:proof4} In this section, we consider \emph{bounded} distributive lattices, that is distributive lattices with a least and a largest element denoted respectively by $0$ and $1$. If $T$ is a such a lattice, an ideal $I$ is \emph{prime} if its complement $T\setminus I$ is a filter. The \emph{spectrum} of $T$, that we denote $Spec (T)$, is the subset of $\mathfrak P(T)$ made of prime ideals of $T$. W.r.t. the topology on $\mathfrak P(T)$, this a closed subspace of $\mathfrak P(T)$, and with the inclusion order added, this is a Priestley space. The set of order preserving and continuous maps from $Spec(T)$ onto the two element chain $\underline 2$ is a distributive lattice isomorphic to $T$. This fact is the essence of Priestley duality. We give below the facts we need in order to prove Theorem \ref {thm:distributivelattice}. We only give the proofs or an hint when needed. The first one is obvious: \begin{lemma}\label{lem:examplepriestley} If $D$ is a chain with least and largest elements $0$ and $1$, then as a Priestley space, $Spec(D)$ is isomorphic to ${\bf I}(C)$ where $C:=D\setminus\{0, 1\}$. \end{lemma}
\begin{lemma}\label{lem:maximalpriestley} Let $T$ be a distributive lattice and $C$ be a maximal chain of $Spec(T)$. Then, as a Priestley space, $C$ is isomorphic to $Spec(D)$ where $D$ is a chain, quotient of $T$. \end{lemma} For a proof, note that the spectrum of $T$, $Spec(T)$, is closed under unions and intersections of non-empty chains. Hence $C$ is a complete chain.
We recall that the \emph{width} of a poset $P$, denoted by $width(P)$, is the supremum of the cardinalities of the antichain of $P$. The following result is due to Dilworth \cite{dilworth}. \begin{theorem}\label {lem:spectdim}Let $T$ be a distributive lattice and $n$ be an integer. Then $dim(T)\leq n$ if and only if $width (Spec (T))\leq n$.\end{theorem}
\begin{lemma}\label{lem:spec} Let $T$ be a distributive lattice, two elements $x, y$ of $T$ such that $x<y$ and $T':= [x, y]$. Then $Spec(T')$ is isomorphic as a Priestley space to $A:=\{J\in Spec(T): x\in J\; \text{and}\; y\not \in J\}$. \end{lemma} \begin{proof} The maps $\phi:A \rightarrow Spec(T')$ and $\theta: Spec(T')\rightarrow A$ defined by setting $\phi(I):=I\cap T'$ and $\theta(I'):=\downarrow I'$ are order preserving, continuous and inverse of each other. \end{proof}
\begin{lemma}\label{lem:keyscatered}Let $T$ be a distributive lattice. If $Spec(T)$ is not topologically scattered, there is some element $x\in T\setminus\{0,1\}$ such that the spectra of $T'':=\downarrow x$ and $T':=\uparrow x$ are not topologically scattered. \end{lemma} \begin{proof} Since $Spec(T)$ is not topologically scattered, it contains a perfect subspace. Let $P$ be such a subspace. Since $\vert P\vert \geq 2$, we may pick $J',J''\in P$ such that $J'\not\subseteq J''$. Let $x\in J'\setminus J''$. Then $Spec(T'')$ and $Spec( T')$ are not topologically scattered. Indeed, note first that according to Lemma \ref{lem:spec}, $Spec(T'')= \{J\in Spec (T): x\not \in J\}$ and $Spec( T')= \{J\in Spec (T): x \in J\}$. Next, observe that the sets $F':=\{J\in P: x\in J\}$ and $F'':=\{J\in P: x\not \in J\}$ are perfect. Since they are respectively contained in $Spec(T')$ and $Spec(T'')$ the conclusion follows. \end{proof}
\begin{theorem}\label {lem:spectscattered} Let $T$ be a distributive lattice. Then $T$ is order-scattered if and only if $Spec(T)$ is topologically scattered. \end{theorem} \begin{proof} Suppose that $Spec(T)$ is not topologically scattered. For each pair of elements $x,y$ in $T$ such that $Spec([x,y])$ is not topologically scattered, Lemma \ref{lem:keyscatered} yields some $z\in ]x, y[$ such that neither $Spec([x,z])$ nor $Spec([z,y])$ is topologically scattered. This fact allows one to define an embedding $\phi$ of the set $D$ of dyadic numbers of the $[0,1]$ interval of the real line into $T$. Since $D:= \{\frac{m}{2^n}: m, n\in \mathbb{N},\, m \leq 2^n\}$ is dense, $T$ is not order scattered. Conversely, if $T$ is not order scattered, select a non scattered chain and extend it to a maximal chain, say $D$. The natural embedding from $D$ into $T$ yields a continuous surjective map from $Spec(T)$ onto $Spec (D)$. As a Priestley space, $Spec (D)$ is isomorphic to ${\bf I}(C)$ where $C:=D\setminus \{0, 1\}$ (Lemma \ref{lem:examplepriestley}). Since $C$ is not order scattered, $Spec(D)$ is not topologically scattered. According to Rudin's result ((\ref{rudin}) of Lemma \ref{toposcatt}), $Spec(T)$ is not topologically scattered. \end{proof}
\subsection{Proof of Theorem \ref{thm:distributivelattice}.} We prove the result for bounded lattices. If $T$ is not bounded, we add a least and a largest element, and apply the result to the resulting lattice. $(ii)\Rightarrow (i)$ Apply Proposition \ref{cor:product}.\\ $(i)\Rightarrow (iii)$ Trivial. \\ For the proof of $(iii) \Rightarrow (ii)$, we introduce the following property:\\ \emph{\noindent $(iv)$ $Spec(T)$ is order scattered and $width(Spec(T))\leq n$}.
We prove successively $(iii)\Rightarrow (iv)$ and $(iv)\Rightarrow (ii)$.\\ $(iii)\Rightarrow (iv)$. Suppose that $(iii)$ holds. Since $T$ is order scattered, Theorem \ref{lem:spectscattered} ensures that $Spec(T)$ is topologically scattered. With the inclusion order and the topology, $Spec(T)$ is a Priestley space, hence it is order scattered. Since $dim(T)\leq n$, Theorem \ref {lem:spectdim} ensures that $width (Spec (T))\leq n$. Thus, $(iv)$ holds. \\ $(iv)\Rightarrow (ii)$. Suppose that $(iv)$ holds. Cover $Spec(T)$ with $m$ chains, where $m:=width (Spec (T))$. Extend each of these chains to a maximal chain of $Spec(T)$. According to Lemma~\ref{lem:maximalpriestley}, each maximal chain $C_i$ is of the form $Spec (D_i)$ where $D_i$ is a chain. Since $Spec(T)$ is order scattered, $C_i$ and hence $D_i$ is order scattered. Let $C:= \oplus_{i<m} Spec (D_i)$ and $f: C\rightarrow Spec(T)$ be defined by setting $f(x,i):=x$. The duality between distributive lattices and their Priestley spaces yields a lattice embedding from $T$ into $\Pi_{i<m} D_i$. Hence $(ii)$ holds. \endproof
\section{ Two-dimensional scattered posets}\label{section:two-dimensional} A linear extension $L$ of an ordered set $P$ is called \emph{separating} if there are elements $x,y,z\in P$ with $x<_P z$, $y$ incomparable with both $x$ and $z$ but $x<_L y<_L z$. Let $P$ be an ordered set. If the order of $P$ is the intersection of two non-separating linear extensions $C$ and $C'$ of $P$, $C'$ is called a \emph{complement} of $C$.
Dushnik and Miller~\cite{dushnik-miller} gave the following characterization of ordered sets of dimension at most $2$. \begin{theorem} \label{thm:dim2}Let $P$ be an ordered set, the following properties are equivalent: \begin{enumerate}[{(i)}] \item $dim(P) \leq 2$. \item There is a linear extension of $P$ which is non-separating. \item $P$ is embeddable in the family of intervals of some chain, these intervals being ordered by inclusion. \item The incomparability graph of $P$ is a comparability graph. \end{enumerate} \end{theorem} We mention the following property. \begin{lemma} \label{basictool} Let $P$ be a poset of dimension $2$, $L$ be a non-separating extension of $P$ and $I$ be an initial segment of $L$. For $z \in P$, we define $D(z):=(\downarrow z) \cap I$. Then for every $x,y \in P \setminus I$, with $x \parallel _P y$ we have: \begin{enumerate} \item[ 1)] $D(x) \subseteq D(y)$ or $D(y) \subseteq D(x)$, \item[ 2)] If $D(x) \subset D(y)$ then $y<_L x$. \end{enumerate} \end{lemma} \begin{proof} 1) Suppose by contradiction that there are $u \in D(x) \setminus D(y)$ and $v \in D(y) \setminus D(x)$. Since $L$ is a linear extension of $P$, $I$ is an initial segment of $P$. Since $u, v\in I$, if $v\leq _P u$ then $v \in D(x)$ and if $u\leq _P v$ then $u \in D(y)$, a contradiction; so $u \parallel _P v$. With no loss of generality, we may suppose $u<_{L} v$. Since $u\leq _P x$, if $v<_Lx$ then, since $L$ is non-separating, we have $v\leq_P x$, a contradiction. Hence $x<_L v$. Since $I$ is an initial segment of $L$ we have $x\in I$, contradicting the hypothesis that $x\not \in I$.
(2) Let $v\in D(y)\setminus D(x)$. Necessarily, $v <_P y$ and, since $I$ an initial segment of $L$, $v \parallel _P x$. If $x\parallel_{P}y$ and $x <_L y$ then, since $L$ is non-separating, we have $x <_L v$, which contradicts the fact that $I$ is an initial segment of $L$. Hence, either $ x \not\parallel_{P} y$, in which case $x<_{P}y$ or $y< _{L} x$. \end{proof}
\subsection{The dyadic tree}In the two-dimensional case we have:
\begin{proposition} If $T_2$ is embeddable in a product of two chains then both chains are non-scattered. \end{proposition} We deduce this from the following proposition. \begin{proposition}\label{lem:separdyadic} Every non-separating extension of the dichotomic tree $T_2$ has order type $\omega (1+ \eta)$. \end{proposition} \begin{proof} We use the \emph{condensation method} (see \cite{rosenstein} p.~71). Let $\mathcal L$ be a non-separating linear extension of the order on $T_2$ and $L$ be the corresponding chain. Two elements $x,y\in T_2$ are \emph{equivalent} if the interval they determine in $L$ is finite. This is an equivalence relation, each class being an interval of $L$. The set of these equivalence classes is naturally ordered and the chain $L$ is the lexicographical sum of these equivalence classes.
\begin{claim}\label{claim:equiv1}Each equivalence class is a subchain of $T_2$ and has order type $\omega$. \end{claim} \noindent{\bf Proof of Claim \ref{claim:equiv1}.} We observe that for every $x\in T_2$, one of the two covers of $x$ in $T_2$, namely $x0$ and $x1$, is a cover of $x$ in $L$. Indeed, suppose for example $x0<_Lx1$. If $x0$ is not a cover of $x$ in $L$ there is some $y$ with $x<_L y<_Lx0$. With respect to $T_2$ this element $y$ is incomparable to $x$ and $x0$ (if $y$ was comparable to $x$ we would have $x1\leq_{T_2} y$, whereas if $y$ was incomparable to $x0$, then since $T_2$ is a tree, $y$ would be comparable to $x$). Since $x<_{T_2} x0$, $\mathcal L$ is a separating linear extension, contradicting our hypothesis. From this observation and the fact that $\downarrow x$ is finite for every $x\in T_2$, the claim follows. \endproof \noindent\begin{claim} \label{claim:equiv2}The set $D$ of equivalence classes has order type $1+\eta$. \end{claim} \noindent{\bf Proof of Claim \ref{claim:equiv2}.} Since $T_2$ has a least element, so does $D$. Also $D$ has no largest element. Otherwise, let $X$ be the largest class. Pick $x\in X$. Since $X$ is a subchain of $T_2$, one of the two covers of $x$ is not in $X$; its equivalence class is larger than $X$, a contradiction. Finally, no class $X$ has a cover in $D$. Otherwise, if $Y$ is a cover of $X$, let $y$ be the least element of $Y$. Since $\downarrow y$ is finite, there is $x\in X$ which is incomparable to $y$ (w.r.t. $T_2$). Let $x'$ be the cover of $x$ in $T_2$ which does not belong to $X$. We have $x<_{T_2}x'$, $x<_{L}y<_{L}x'$, $y$ incomparable to $x$ and $x'$ (w.r.t. $T_2$). This contradicts the fact that $\mathcal L$ is a non-separating extension. \endproof
With these claims, the proof of Proposition \ref{lem:separdyadic} is complete. \end{proof}
\subsection{Non-separating scattered extensions }
The ``bracket relation'' \begin{equation}\label{fred} \eta \rightarrow [\eta ]_{2}^{2} \end{equation} a famous unpublished result of F.~Galvin, asserts that if the pairs of rational numbers are divided into finitely many classes then there is a subset $X$ of the rationals which is order-isomorphic to the rationals and such that all pairs belong to the union of two classes (for a proof, see \cite{todorcevic95} Theorem 6.3 p.44 or \cite {fraissetr} A.5.4 p.412 and, for a far reaching generalization, see \cite {devlin}). This result expresses in a very economical way what the partitions of pairs look like. Indeed, what it really says is this: \begin{theorem}\label{G}Let $[\mathbb{Q}]^2$ be the set of pairs of rational numbers and $A_{1},
\dots, A_{n}$ be a partition of $[\mathbb{Q}]^2$. For every order $\leq_{\omega}$ on $\mathbb{Q}$ with order type $\omega$ there is a subset $X$ of $\mathbb{Q}$ of order type $\eta$ and indices $i$ and $j$ (with possibly $i=j$) such that all pairs of $X$ on which the natural order on $\mathbb{Q}$ and the order $\leq_{\omega}$ coincide belong to $A_{i}$ and all pairs of $X$ on which these two orders disagree belong to $A_{j}$. \end{theorem} The proof of Theorem \ref{G} from (\ref{fred}) is immediate: intersect the partition $A_{1}, \dots ,A_{n}$ with the partition $U$,$V$ associated with the two orders ($U$ being made of pairs on which the two orders coincide, and $V$ being made of the other pairs) and apply iteratively the bracket relation to the resulting partition in order to find $X$ whose pairs belong to the unions of two classes.
Partitions, or orders, associated to two linear orderings on the same set, like the natural order on the rational numbers and an order of type $\omega$, are called \emph{sierpinskizations}. Clearly, $\Omega(\eta)$ is a sierpinskization of $\omega \eta$ with $\omega$, whereas $\Omega(\eta)^*$ is a sierpinskization of $\omega \eta$ and $\omega^*$. These two posets are the basic sierpinskizations of a non scattered chain with $\omega$ and $\omega^*$. Indeed, if $\alpha$ and $\alpha'$ are two non scattered countable chains then their sierpinskizations with $\omega$ are equimorphic (see \cite{pouzet-zaguia} Corollary 3.4.2).
From Theorem \ref{G}, we have easily: \begin{proposition}\label{nonsepar,omega(eta)} Let $P:=(E, \leq)$ be a poset. If neither $\eta$, $\Omega(\eta)$ nor $\Omega(\eta)^*$ is embeddable in $P$ then for every non scattered linear extension $\mathcal L$ of the order on $P$, and every subset $A\subseteq E$ such that $(A, \mathcal L_{\restriction A})$ has type $\eta$ there is an antichain $A'$ of $P$ which is included in $ A$ and such that $(A', \mathcal L_{\restriction A'})$ has type $\eta$. \end{proposition} \begin{proof} Let $A\subseteq E$ such that $(A, \mathcal L_{\restriction A})$ has type $\eta$. Let $A_1$, resp. $A_2$, be the set of pairs $\{x, y\}$ of $[A]^2$ such that $x$ and $y$ are comparable, resp. incomparable (w.r.t. the order on $P$). Fix an order $\leq_{\omega}$ of type $\omega$ on $A$. Theorem \ref{G} yields a subset $A'$ of $A$ and $i,j\in \{1,2\}$ such that all pairs of $A'$ on which the order $\mathcal L$ and the order $\leq_{\omega}$ coincide belong to $A_{i}$ and all pairs of $A'$ on which these two orders disagree belong to $A_{j}$. As it is easy to check, the three cases $i=j=1$, $i=1, j=2$ and $i=2, j=1$ yield respectively that $P_{\restriction A'}$ is a chain of type $\eta$, contains a copy of $\Omega(\eta)$ and contains a copy of $\Omega(\eta)^*$. Thus these cases are impossible. The only remaining case $i=j=2$ yields the desired conclusion. \end{proof}
\begin{theorem} \label{equiv} The following properties are equivalent:
\begin{enumerate}[{(i)}]
\item $P$ is the intersection of two scattered chains. \item \begin{enumerate}
\item $P$ has a non-separating scattered extension
and
\item Neither $\Omega( \eta )$ nor $\Omega^*( \eta)$ is embeddable in $P$. \end{enumerate} \end{enumerate} \end{theorem} \begin{proof} $(i)\Rightarrow (ii)$. Item (ii) (a) follows from Theorem \ref{thm:dim2}. Item (ii)(b) follows from the fact that $\Omega(\eta)$ is an obstruction.
$(ii)\Rightarrow (i)$. Let $\mathcal C$ be a non-separating scattered extension of $P$. Let $\mathcal C'$ be the complement of $\mathcal C$. To conclude, it suffices to prove that $\mathcal C'$ is scattered. Suppose that it is not. Apply Proposition \ref{nonsepar,omega(eta)} to $P$ and $\mathcal L:= \mathcal C'$. Clearly, neither $\eta$,
$\Omega( \eta )$ nor $\Omega^*( \eta )$ is embeddable in $P$. Thus, there is an antichain $A'$ of $P$ such that $C'_{\restriction A'}$ has type $\eta$. But, since $A'$ is an antichain of $P$ and the order on $P$ is the intersection of $\mathcal C$ and $\mathcal C'$, it turns out that $\mathcal C_{\restriction A'}$ is the dual of $\mathcal C'_{\restriction A'}$, thus $(A', \mathcal C_{\restriction A'})$ has type $\eta$. This contradicts the fact that $\mathcal C$ is scattered.\end{proof}
\noindent{\bf Bibliographical comments.} The posets $T_2$, $\Omega (\eta)$, $B(\check \eta)$ have been considered previously. Pouzet and Zaguia \cite{pouzet-zaguia} proved that \emph{the set $\mathcal J(P)$ of ideals of a poset $P$ contains no chain isomorphic to $\eta$ if and only if $P$ contains no chain isomorphic to $\eta$ and
no subset isomorphic to $\Omega (\eta )$}. In \cite{duffus-pouzet-rival} it is shown that \emph{if a poset $P$ contains $B(\check \eta )$, then $N(P)$ contains a chain isomorphic to $\eta$}. In fact, $N(B(\check \eta))$ is isomorphic to the disjoint union $\mathbb{Q}\times 2 \cup I(\mathbb{Q})$ equipped with the following ordering: \begin{enumerate} \item $\emptyset \leq (x,0)\leq I\leq (y,1) \leq \mathbb{Q}$ for $x\in I\subseteq (\leftarrow y[$, with $I\in I(\mathbb{Q})$, $y\in\mathbb{Q}$. \item $I\leq J$ if $I\subseteq J$ and $I,J\in I(\mathbb{Q})$. \end{enumerate} In \cite{pousikadzag} it is shown that \emph{the class of posets whose MacNeille completion is scattered is characterized by eleven obstructions}. One can check that obstructions distinct from $\eta$ and $B(\check \eta)$ do not yield interesting obstructions to $\mathcal L_{\mathcal S}(<n)$. In an unpublished paper with E.~C.~Milner \cite{milner} it is shown that if {the set $\mathcal J(P)$ of ideals of a poset $P$ is topologically closed in $\mathfrak P(P)$, it is topologically scattered if and only if it is order scattered and the binary tree $T_2$ is not embeddable in $P$}. From this follows that \emph{an algebraic lattice $T$ is topologically scattered if and only if it is order scattered and neither $T_2$ nor $\Omega (\eta )$ are embeddable in the join-semilattice of compact elements of $T$}. In contrast, we may note that \emph{an algebraic distributive lattice is topologically scattered if and only if it is order scattered}, an important result due to Mislove \cite{mislove}.
\end{document}
\begin{document}
\begin{center}{\Large\bf Plurisubharmonic geodesics and interpolating sets} \end{center}
\begin{center}{\large Dario Cordero-Erausquin and Alexander Rashkovskii} \end{center}
\begin{abstract} We apply a notion of geodesics of plurisubharmonic functions to interpolation of compact subsets of ${\mathbb C}^n$. Namely, two non-pluripolar, polynomially closed, compact subsets of ${\mathbb C}^n$ are interpolated as level sets $L_t=\{z: u_t(z)=-1\}$ for the geodesic $u_t$ between their relative extremal functions with respect to any ambient bounded domain. The sets $L_t$ are described in terms of certain holomorphic hulls. In the toric case, it is shown that the relative Monge-Amp\`ere capacities of $L_t$ satisfy a dual Brunn-Minkowski inequality. \end{abstract}
\section{Introduction}
In the classical complex interpolation theory of Banach spaces, originated by Calder\'{o}n \cite{Ca} (see \cite{BL} and, for more recent developments, \cite{CEK} and references therein), a given family of Banach spaces $X_\xi$ parameterized by boundary points $\xi$ of a domain $C\subset\mathbb C^N$ gives rise to a family of Banach spaces $X_\zeta$ for all $\zeta\in C$. A basic setting is interpolation of two spaces, $X_0$ and $X_1$, for a partition $\{C_0, C_1\}$ of $\partial C$. More specifically, one can take $C$ to be the strip $0<{\operatorname{Re}}\, \zeta <1$ in the complex plane and $C_0, C_1$ the corresponding boundary lines, then the interpolated norms depend only on $t=\operatorname{Re}\zeta$. In the finite dimensional case $X_j=({\mathbb C}^n,\|\cdot\|_j)$, $j=0,1$, they are defined in terms of the family of mappings $C\to{\mathbb C}^n$, bounded and analytic in the strip, continuous up to the boundary and tending to zero as ${\operatorname{Im}}\, \zeta\to\infty$, see details in \cite{BL}. In this setting, the volume of the unit ball $B_t$ of $({\mathbb C}^n,\|\cdot\|_t)$, $0<t<1$, was proved in \cite{CE} to be a logarithmically concave function of $t$.
When the given norms $\|\cdot\|_j$ on ${\mathbb C}^n$ are toric, i.e., satisfy $\|(z_1,\ldots,z_n)\|_j=\|(|z_1|,\ldots,|z_n|)\|_j$, the interpolated norms are toric as well and the balls $B_t$ are Reinhardt domains of ${\mathbb C}^n$ obtained as the multiplicative combinations (geometric means) of the balls $B_0$ and $B_1$. The logarithmic concavity implies that volumes of the multiplicative combinations \begin{equation}\label{geommean} K_t^\times = K_0^{1-t}\,K_1^t\subset{ \mathbb R}^n \end{equation} of any two convex bounded neighbourhoods $K_0$ and $K_1$ of the origin in ${ \mathbb R}^n$ satisfy the Brunn-Minkowski inequality \begin{equation}\label{volBM} {\operatorname{Vol}}(K_t^\times)\ge {\operatorname{Vol}}(K_0)^{1-t} {\operatorname{Vol}}(K_1)^t,\quad 0<t<1.\end{equation} Note also that in \cite{S2}--\cite{S4}, the interpolated spaces were related to convex hulls and complex geodesics with convex fibers. In particular, this puts the interpolation in the context of analytic multifunctions.
In this note, we develop a slightly different -- albeit close -- approach to the interpolation of compact, polynomially convex subsets of ${\mathbb C}^n$ by sets arising from a notion of plurisubharmonic geodesics. The technique originates from results on geodesics in the spaces of metrics on compact K\"{a}hler manifolds due to Mabuchi, Semmes, Donaldson, Berndtsson and others (see \cite{G12} and the bibliography therein). Its local counterpart for plurisubharmonic functions from Cegrell classes on domains of ${\mathbb C}^n$ was introduced in \cite{BB} and \cite{R17a}. We will need here a special case when the geodesics can be described as follows.
Let
$$A=\{\zeta\in{\mathbb C}:\:0< \log|\zeta| < 1\}$$ be the annulus bounded by the circles
$$A_j=\{\zeta:\: \log|\zeta|=j\},\quad j=0,1.$$ Given two plurisubharmonic functions $u_0$ and $u_1$ in a bounded hyperconvex domain $\Omega\subset{\mathbb C}^n$, equal to zero on $\partial \Omega$, we consider the class $W(u_0,u_1)$ of all plurisubharmonic functions $u(z,\zeta)$ in the product domain $\Omega\times A$, such that $\limsup u(z,\zeta)\le u_j(z)$ for all $z\in\Omega$ as $\zeta\to A_j$. The function \begin{equation}\label{upenv} \widehat u(z,\zeta)=\sup\{u(z,\zeta):\: u\in W(u_0,u_1)\} \end{equation}
belongs to the class and satisfies $\widehat u(z,\zeta)=\widehat u(z,|\zeta|)$, which gives rise to the functions $u_t(z):=\widehat u(z,e^t)$, $0<t<1$, the {\it geodesic} between $u_0$ and $u_1$. When the functions $u_j$ are bounded, the geodesics $u_t$ tend to $u_j$ as $t\to j$, uniformly on $\Omega$. One of the main properties of the geodesics is that they linearize the energy functional \begin{equation}\label{enfunc} {\mathcal E}(u)=\int_\Omega u(dd^c u)^n, \end{equation} see \cite{BB}, \cite{R17a} (where actually more general classes of plurisubharmonic functions are considered).
Given two non-pluripolar compact sets $K_0,K_1\subset{\mathbb C}^n$, let $u_j$ denote the relative extremal functions of $K_j$, $j=0,1$, with respect to a bounded hyperconvex neighbourhood $\Omega$ of $K_0\cup K_1$, i.e., \begin{equation}\label{initial}
u_j(z)=\omega_{K_j,\Omega}(z)=\limsup_{y\to z}\, \sup\{u(y):\: u\in {\operatorname{PSH}}_-(\Omega),\ u|_{K_j}\le -1\}, \end{equation} where ${\operatorname{PSH}}_-(\Omega)$ is the collection of all nonpositive plurisubharmonic functions in $\Omega$. The functions $u_j $ belong to ${\operatorname{PSH}}_-(\Omega)$ and satisfy $(dd^cu_j)^n=0$ on $\Omega\setminus K_j$, see \cite{Kl}.
Assume, in addition, each $K_j$ to be polynomially convex (in the sense that it coincides with its polynomial hull). This implies $\omega_{K_j,\Omega'}=-1$ on $K_j$ for some (and thus any) bounded hyperconvex neighborhood $\Omega'$ of $K_j$ and that $\omega_{K_j,\Omega'}\in C(\overline{\Omega'})$. In particular, the functions $u_j=-1$ on $K_j$ and are continuous on $\overline\Omega$. The geodesics $u_t$ converge to $u_j$ uniformly as $t\to j$ \cite{R17a} and so, by the Walsh theorem, the upper envelope $ \widehat u(z,\zeta)$ (\ref{upenv}) is continuous on $\Omega\times A$, which, in turn, implies $u_t\in C(\overline\Omega\times [0,1])$.
As was shown in \cite{R17b}, functions $u_t$ in general are different from the relative extremal functions of any subsets of $\Omega$. Consider nevertheless the sets where they attain their minimal value, $-1$: \begin{equation}\label{L_t} L_t=\{z\in \Omega:\: u_t(z)=-1\},\quad 0< t< 1.\end{equation} By the continuity of the geodesic at the endpoints, the sets $L_t$ converge (say, in the Hausdorff metric) to $ K_j$ when $t\to j\in\{0,1\}$ and so, they can be viewed as interpolations between $K_0$ and $K_1$.
The curve $t\mapsto L_t$ can be in a natural way identified with the multifunction $\zeta\mapsto K_\zeta:=L_{\log|\zeta|}$. Note however that it is not an {\it analytic multifunction} (for the definition, see, e.g., \cite{O}, \cite{S1}, \cite{Po}) because its graph $\{(z,\zeta)\in \Omega\times A:\: \widehat u(z,\zeta)=-1\}$ is not pseudoconcave.
In Section~2, we show that the interpolating sets $L_t$ can be represented as sections $K_t=\{z:\: (z,e^t)\in \widehat K\} $ of the holomorphic hull $\widehat K$ of the set \begin{equation}\label{hull}K^A:=(K_0\times A_0) \cup (K_1\times A_1)\subset {\mathbb C}^{n+1}\end{equation} with respect to functions holomorphic in ${\mathbb C}^n\times ({\mathbb C}\setminus\{0\})$.
In Section~3, we study the relative Monge-Amp\`ere capacities ${\operatorname{Cap}\,}(L_t,\Omega)$ of the sets $L_t$; recall that for $K\Subset\Omega$,
$${\operatorname{Cap}\,}(K,\Omega)= \sup\{(dd^c u)^n(K):\: u\in{\operatorname{PSH}}_-(\Omega),\ u|_K\le -1\}=(dd^c\omega_{K,\Omega})^n(\Omega), $$ see \cite{Kl}.
It was shown in \cite{R17a} that the function $t\mapsto{\operatorname{Cap}\,}(L_t,\Omega)$ is convex,
which was achieved by using linearity of the energy functional (\ref{enfunc})
along the geodesics. In the case when $\Omega$ is the unit polydisk ${\mathbb D}^n$ and $K_j$ are Reinhardt sets, the convexity of the Monge-Amp\`ere capacities was rewritten in \cite{R17b} as convexity of covolumes of certain unbounded convex subsets $P_t$ of the positive orthant ${\mathbb R}_+^n$ (that is, volumes of their complements to ${\mathbb R}_+^n$). Here, we use a convex geometry technique to prove Theorem~\ref{BMcovol} stating that actually the covolumes of the sets $P_t$ are {\sl logarithmically convex}. Since in this case the sets $L_t$ are exactly the geometric means $K_t^\times$ of $K_0$ and $K_1$, this implies the dual Brunn-Minkowski inequality for their Monge-Amp\`ere capacities, \begin{equation}\label{logconvcap}{\operatorname{Cap}\,}(K_t^\times,{\mathbb D}^n)\le {\operatorname{Cap}\,}(K_0,{\mathbb D}^n)^{1-t} {\operatorname{Cap}\,}(K_1,{\mathbb D}^n)^t,\quad 0<t<1.\end{equation} In addition, an equality here occurs for some $t\in (0,1)$ if and only if $K_0=K_1$.
It is quite interesting that the volume of $K_t^\times$ satisfies the opposite Brunn-Minkowski inequality (\ref{volBM}), i.e., it is logarithmically {\sl concave}. Furthermore, so are the standard logarithmic capacity in the complex plane and the Newtonian capacity in ${ \mathbb R}^n$ with respect to the Minkowski addition \cite{B1}, \cite{B2}, \cite{Ran}. The difference here is that the relative Monge-Amp\`ere capacity is, contrary to the logarithmic or Newton capacities, a local notion, which leads to the dual Brunn-Minkowski inequality (\ref{logconvcap}), exactly like for the covolumes of coconvex bodies \cite{KT}.
A natural question that remains open is to know whether the logarithmic convexity of the relative Monge-Amp\`ere capacities is also true in the general, non-toric case. No non-trivial examples of (\ref{logconvcap}) in this setting are known so far.
\section{Level sets as holomorphic hulls}\label{sect:holhulls}
Let $K_0,K_1$ be two non-pluripolar compact subsets of a bounded hyperconvex domain $\Omega\subset{\mathbb C}^n$, and let $L_t=L_{t,\Omega}$ be the interpolating sets defined by (\ref{L_t}) for the geodesic $u_t=u_{t,\Omega}$ with the endpoints $u_j=\omega_{K_j,\Omega}$. We start with an observation that if the sets $K_j$ are polynomially convex, then the sets $L_t$ are actually independent of the choice of the domain $\Omega$ containing $K_0\cup K_1$.
\begin{lemma}\label{indep} If $\Omega'$ and $\Omega''$ are bounded hyperconvex neighborhoods of non-pluripolar, polynomially convex, compact sets $K_0$ and $K_1$, then $L_{t,\Omega'}=L_{t,\Omega''}$. \end{lemma}
\begin{proof} By the monotonicity of $\Omega\mapsto u_{t,\Omega}$, it suffices to show the equality for $\Omega'\Subset\Omega''$. Since $u_{t,\Omega''}\le u_{t,\Omega'}$, the inclusion $L_{t,\Omega'}\subset L_{t,\Omega''}$ is evident. Denote now $$\delta=-\inf\{u_{j,\Omega''}(z):\: z\in\partial\Omega',\ j=0,1\}\in(0,1).$$
Recall that the geodesics $u_{t,\Omega}$ come from the maximal plurisubharmonic functions $\widehat u_{\Omega}$ in $\Omega\times A$ for the annulus $A$ bounded by the circles $A_j$ where $\log|\zeta|=j$. Then the function $$\hat v:=\frac1{1-\delta}(\widehat u_{\Omega''}+\delta)\in{\operatorname{PSH}}(\Omega'\times A)\cap C(\overline{\Omega'\times A})$$ satisfies $(dd^c\hat v)^{n+1}=0$ in $\Omega'\times A$ and \begin{equation}\label{bdv}\lim \hat v(z,\zeta)= -1\ {\rm \ as\ } (z,\zeta)\to K_j\times A_j.\end{equation} Moreover, since $\hat v\ge 0$ on $\partial \Omega'\times A$ and its restriction to each $A_j$ satisfies $(dd^c\hat v)^{n}=0$ on $\Omega'\setminus K_j$, the boundary conditions (\ref{bdv}) imply $$ \lim \hat v(z,\zeta)\ge u_{j,\Omega'}\ {\rm as\ } \zeta\to A_j.$$ Therefore, we have $\hat v\ge \widehat u_{\Omega'}$ in the whole $\Omega'\times A$.
If $z\in L_{t,\Omega''}$, this gives us $-1\ge u_{t,\Omega'}(z)$ and so, $z\in L_{t,\Omega'}$, which completes the proof. \end{proof}
Next step is comparing the sets $L_t$ with other interpolating sets, $K_t$, defined as follows. Set \begin{equation}\label{hatK} \widehat K=\widehat K(\Omega)=\{(z,\zeta)\in \Omega\times A:\: u(z,\zeta)\le M(u)\ \forall u\in {\operatorname{PSH}}_-(\Omega\times A)\},\end{equation} where $M(u)=\max_j M_j(u)$ and $$ M_j(u)=\limsup\,u(z,\zeta)\ {\rm as \ }(z,\zeta)\to K_j\times A_j, \quad j=0,1.$$ Note that the set $\widehat K$ will not change if one replaces ${\operatorname{PSH}}_-(\Omega\times A)$ by the collection of all bounded from above (or just bounded) plurisubharmonic functions on $\Omega\times A$.
Denote by $\widehat K_\zeta$ the section of $\widehat K$ over $\zeta\in A$: $$\widehat K_\zeta=\widehat K_\zeta(\Omega) = \{z\in\Omega:\: (z,\zeta)\in \widehat K\},\quad \zeta\in A.$$
The set $\widehat K$ is invariant under rotation of the $\zeta$-variable, so $\widehat K_\zeta$ depends only on $|\zeta|$. We set then $$K_t=\widehat K_{e^t},\quad 0<t<1.$$
\begin{theorem}\label{prop1}If $K_j$ are non-pluripolar, polynomially convex compact subsets of $\Omega$, then $L_t=K_t$ for all $0<t<1$. \end{theorem}
\begin{proof} First, we prove the inclusion \begin{equation} \label{incl1} L_t \subset K_t, \end{equation} that is, \begin{equation} \label{Mbound} u(z,e^t)\le M(u)\quad \forall z\in L_t\end{equation} for all $u\in{\operatorname{PSH}}_-(\Omega\times A)$. By the scalings $u\mapsto c\, u$, we can assume that $u-\min_j M_j(u)\le 1$ on $\Omega\times A$. Then the function
$$\phi(z,\zeta)=u(z,\zeta)- (1-\log|\zeta|)M_0(u) -(\log|\zeta|) M_1(u)-1$$ belongs to ${\operatorname{PSH}}_-(\Omega\times A)$ and $$\limsup\phi(z,\zeta)\le -1 {\rm\ as \ }(z,\zeta)\to K_j\times A_j.$$ In other words, $\phi_t(z):=\phi(z,e^t)$ is a subgeodesic for $u_0$ and $u_1$, so $\phi_t\le u_t$. Therefore, $\phi_t\le -1$ on $L_t$, which gives us (\ref{Mbound}).
To get the reverse inclusion, assume $z\in K_t$. Then, by definition of $\widehat K$, we get $u_t(z)\le M(u_t)=-1$ and, since $u_t\ge -1$ everywhere, $u_t(z)=-1$. \end{proof}
The set $\widehat K$ can actually be represented as a holomorphic hull of the set $$K^A=(K_0\times A_0) \cup (K_1\times A_1),$$ which is similar to what one gets in the classical interpolation theory. This can be concluded by standard arguments relating plurisubharmonic and holomorphic hulls (see, e.g., \cite{Range}).
\begin{proposition}\label{loc} Let $K_0,K_1$ be two non-pluripolar, polynomially convex compact subsets of a bounded hyperconvex domain $\Omega\subset{\mathbb C}^n$. Then the set $\widehat K$ defined by (\ref{hatK}) is the holomorphic hull of the set $K^A$ with respect to the collection of all functions holomorphic on $\Omega\times{\mathbb C}_*$ (here ${\mathbb C}_*={\mathbb C}\setminus\{0\}$). \end{proposition}
\begin{proof} The domain $\Omega\times{\mathbb C}_*$ is pseudoconvex, so it suffices to show that $\widehat K$ is the ${\mathcal F}$-hull $\widehat K_{\mathcal F}$ of the set (\ref{hull}) with respect to ${\mathcal F}={\operatorname{PSH}}(\Omega\times{\mathbb C}_*)$.
Take any hyperconvex domain $\Omega'$ such that $K_0\cup K_1\subset \Omega'\Subset\Omega$. Since ${\mathcal F}$ forms a subset of the collection of all bounded from above psh functions on $\Omega'\times A$, we have $\widehat K':=\widehat K(\Omega')\subset \widehat K_{\mathcal F}$. Moreover, by Lemma~\ref{indep} and Theorem~\ref{prop1}, we have $\widehat K'=\widehat K$, which implies $\widehat K\subset \widehat K_{\mathcal F}$.
Let $u_t$ be the geodesic of $u_0,u_1$ in $\Omega$. Then its psh image $\hat u(z,\zeta)$ can be extended to $\Omega\times {\mathbb C}_*$ as $\hat U(z,\zeta)=u_0(z)-\log|\zeta|$ for $-\infty<\log|\zeta|\le 0$ and $\hat U(z,\zeta)=u_1(z)+\log|\zeta|-1$ for $1\le\log|\zeta|< \infty$. Indeed, the function
$$\hat v(z,\zeta) =\max\{u_0(z)-\log|\zeta|, u_1(z)+\log|\zeta|-1\}$$ is psh on $\Omega\times A$, continuous on $\Omega\times \overline A$, and equal to $u_j$ on $\Omega\times A_j$. Therefore, it coincides with $\hat U$ on $\Omega\times({\mathbb C}_*\setminus A)$. Since $\hat v\le \hat u$ on $\Omega\times A$ and $\hat v=\hat u$ on $\Omega\times \partial A$, the claim is proved.
Let $(z^*,\zeta^*)\in \widehat K_{\mathcal F}$. By the definition of $\widehat K_{\mathcal F}$, since $\hat U\in{\operatorname{PSH}}(\Omega\times{\mathbb C}_*)$, $$\hat u(z^*,\zeta^*)=\hat U(z^*,\zeta^*)\le \sup \{\hat U(z,\zeta):\: (z,\zeta)\in K^A\}=-1,$$
so $z^*\in \widehat K_t$ with $t=\log|\zeta^*|$. \end{proof}
Finally, since the sets $L_t$ are independent of the choice of $\Omega$, we get the following description of the interpolated sets $K_t$.
\begin{corollary}\label{cor} Let $K_0,K_1$ be two non-pluripolar, polynomially convex compact subsets of $ {\mathbb C}^n$ and let $\Omega$ be a bounded hyperconvex domain containing $K_0\cup K_1$. Denote by $u_t$ the geodesic of the functions $u_j=\omega_{K_j,\Omega}$, $j=0,1$. Then for any $\zeta\in A$,
$$K_t=\{z\in\Omega:\: u_{t}(z)=-1\}=\{z\in{\mathbb C}^n: |f(z,\zeta)|\le\|f\|_{K^A}\ \forall f\in{\mathcal O}({\mathbb C}^n\times {\mathbb C}_*)\}$$
with $t=\log|\zeta|$. \end{corollary}
{\it Remark.} Note that the considered hulls are taken with respect to functions holomorphic in ${\mathbb C}^n\times {\mathbb C}_*$ and not in ${\mathbb C}^{n+1}$ (that is, not the polynomial hulls). This reflects the fact that in the definition of $K^A$, the circles $A_0$ and $A_1$ may be interchanged. Since for any polynomial $P(z,\zeta)$ and any $\zeta$ inside the disc $|\zeta|<e$, we have $|P(z,\zeta)|\le \max
\{|P(z,\xi)|: \: |\xi|=e\}$, each section of the {\sl polynomial} hull of $K^A$ must contain $K_1$, so such a hull does not provide any interpolation between $K_0$ and $K_1$.
\section{Log-convexity of Monge-Amp\`ere capacities}
Let, as before, $K_0$ and $K_1$ be non-pluripolar, polynomially convex compact subsets of a bounded hyperconvex domain $ \Omega\Subset{\mathbb C}^n$, $u_t$ be the geodesic between $u_j= \omega_{K_j,\Omega}$, and let $K_t$ be the corresponding interpolating sets as described in Section~\ref{sect:holhulls}. As was mentioned, their relative Monge-Amp\`ere capacities satisfy the inequality $${\operatorname{Cap}\,}(K_t,\Omega)\le(1-t)\,{\operatorname{Cap}\,}(K_0,\Omega) +t\,{\operatorname{Cap}\,}(K_1,\Omega).$$
Let now $\Omega={\mathbb D}^n$ and assume the sets $K_j$ to be Reinhardt. The polynomial convexity of $K_j$ means then that their logarithmic images $$Q_j={\operatorname{Log}\,} K_j=\{s\in{\mathbb R}_-^n:\: (e^{s_1},\ldots,e^{s_n})\in K_j\}$$ are complete convex subsets of ${\mathbb R}_-^n$, i.e., $Q_j+{\mathbb R}_-^n\subset{\mathbb R}_-^n$; when this is the case, we will also say that $K_j$ is complete logarithmically convex. In this situation, the sets $K_t$ are, as in the classical interpolation theory, the geometric means of $K_j$. Note however that our approach extends the classical -- convex -- setting to a wider one.
\begin{proposition} The interpolating sets $K_t$ of two non-pluripolar, complete logarithmically convex, compact Reinhardt sets $K_0, K_1\subset {\mathbb D}^n$ coincide with
$$K_t^\times:=K_0^{1-t}K_1^t=\{z:\: |z_l|=|\eta_l|^{1-t} |\xi_l|^{t}, \ 1\le l\le n,\ \eta\in K_0,\ \xi\in K_1\}.$$ \end{proposition}
\begin{proof} We prove this by using the representation of the sets $K_t$ as $L_t=\{z:\: u_t(z)=-1\}$ and a formula for the geodesics in terms of the Legendre transform \cite{Gu}, \cite{R17a}. By and large, this is Calder\'{o}n's method.
As was noted in \cite[Thm. 4.3]{R17a}, the inclusion $K_t^\times\subset L_t$ follows from convexity of the function $\check u_t(s)=u_t (e^{s_1},\ldots,e^{s_n})$ in $(s,t)\in {\mathbb R}_-^n\times (0,1)$ since $s\in \log K_t^\times$ implies $\check u_t(s)\le -1$. To prove the reverse inclusion, we use a representation for $\check u_t$ given by \cite[Thm. 6.1]{R17b}: $$ \check u_t={\mathcal L}[(1-t)\max\{h_{Q_0}+1,0\} + t \max\{h_{Q_1}+1,0\}],\quad 0<t<1,$$ where $$ {\mathcal L}[f](y)=\sup_{x\in{ \mathbb R}^n}\{\langle x,y\rangle -f(x)\}$$ is the {\it Legendre transform} of $f$, $$h_Q(a)=\sup_{s\in Q} \langle a,s\rangle,\quad a\in{\mathbb R}_+^n $$ is the support function of a convex set $Q\subset{\mathbb R}_-^n$, and $Q_j={\operatorname{Log}\,} K_j$.
Let $z\notin K_t^\times$, then one can assume that none of its coordinates equals zero, so the corresponding point $\xi=(\log|z_1|,\ldots,\log|z_n|)\in{\mathbb R}_-^n$ does not belong to $Q_t=(1-t)Q_0+tQ_1$. Therefore there exists $b\in{\mathbb R}_+^n$ such that $$ \langle b,\xi\rangle > h_{Q_t}(b)=(1-t)h_{Q_0}(b)+ t\, h_{Q_1}(b);$$ by the homogeneity, one can assume $h_{Q_0}(b),h_{Q_1}(b)>-1$ as well. Then \begin{eqnarray*} \check u_t(\xi) &=& \sup_{a\in{\mathbb R}_+^n}[\langle a,\xi\rangle - (1-t)\max\{h_{Q_0}(a)+1,0\} - t \max\{h_{Q_1}(a)+1,0\}]\\ &>& (1-t)[h_{Q_0}(b)-(h_{Q_0}(b)+1)] + t [h_{Q_1}(b)-(h_{Q_1}(b)+1)]=-1, \end{eqnarray*} so $\xi$ does not belong to ${\operatorname{Log}\,} L_t$ and consequently $z\notin L_t$. \end{proof}
The crucial point for the Reinhardt case is a formula from \cite[Thm. 7]{ARZ} (see also \cite{R17b}) for the Monge-Amp\`ere capacity of complete logarithmically convex compact sets $K\subset{\mathbb D}^n$: $${\operatorname{Cap}\,} (K,{\mathbb D}^n)=n!\,{\operatorname{Covol}}(Q^\circ):=n!\, {\operatorname{Vol}}({{\mathbb R}_+^n\setminus Q^\circ}),$$ where $$Q^\circ=\{x\in{\mathbb R}_+^n: \langle x,y\rangle \le -1 \ \forall y\in Q\}$$ is the {\it copolar} to the set $Q={\operatorname{Log}\,} K$. In particular, \begin{equation}\label{capakt}{\operatorname{Cap}\,}(K_t)=n!\,{\operatorname{Covol}}(Q_t^\circ)\end{equation} for the copolar $Q_t^\circ$ of the set $Q_t=(1-t)Q_0+t\,Q_1$.
\begin{proposition}\label{BMcovol} We have \begin{equation}\label{BMQ}{\operatorname{Covol}}(Q_t^\circ)\le {\operatorname{Covol}}(Q_0^\circ)^{1-t}\,{\operatorname{Covol}}(Q_1^\circ)^t,\quad 0<t<1.\end{equation} If an equality here occurs for some $t\in (0,1)$, then $Q_0=Q_1$. \end{proposition}
\begin{proof} Let, as before, $h_Q$ be the restriction of the support function of a convex set $Q\subset{\mathbb R}_-^n$ to ${\mathbb R}_+^n$: $$ h_Q(x)=\sup\{\langle x,y\rangle:\: y\in Q\},\ x\in{\mathbb R}_+^n.$$ We have then \begin{eqnarray*} \int_{{\mathbb R}_+^n} e^{h_{Q_t}(x)}\,dx &=& \int_{{\mathbb R}_+^n} dx \int_{-h_{Q_t}(x)}^\infty e^{-s}\,ds = \int_0^\infty e^{-s}\,ds \int_{h_{Q_t}(x)\ge -s} dx \\ &=& \int_0^\infty e^{-s} {\operatorname{Vol}}(\{h_{Q_t}(x)\ge -s\})\,ds \\ &=& {\operatorname{Vol}}(\{h_{Q_t}(x)\ge -1\}) \int_0^\infty e^{-s}s^n \,ds\\ &=& n!\, {\operatorname{Covol}}({Q_t^\circ}). \end{eqnarray*} Note that $h_{Q_t}=(1-t) h_{Q_0}+ t \, h_{Q_1}$. Therefore, by H\"{o}lder's inequality with $p=(1-t)^{-1}$ and $q=t^{-1}$, we have \begin{equation}\label{Hold} \int_{{\mathbb R}_+^n} e^{h_{Q_t}(x)}\,dx\le\left(\int_{{\mathbb R}_+^n} e^{h_{Q_0}(x)}dx\right)^{1-t}\left(\int_{{\mathbb R}_+^n} e^{h_{Q_1}(x)}dx\right)^{t}, \end{equation} which proves (\ref{BMQ}).
An equality in (\ref{BMQ}) implies the equality case in H\"{o}lder's inequality (\ref{Hold}), which means the functions $e^{h_{Q_0}}$ and $e^{h_{Q_1}}$ are proportional, so $h_{Q_0}(x)=h_{Q_1}(x)+C$ for all $x\in{\mathbb R}_+^n$. Since both $h_{Q_0}(x)$ and $h_{Q_1}(x)$ converge to $0$ as $x\to 0$ along ${\mathbb R}_+^n$, we get $C=0$, which completes the proof. \end{proof}
Finally, by (\ref{capakt}), we get
\begin{theorem}\label{CapBM} For polynomially convex, non-pluripolar compact Reinhardt sets $K_j\Subset{\mathbb D}^n$, the Monge-Amp\`ere capacity ${\operatorname{Cap}\,}(K_t,{\mathbb D}^n)$ is a logarithmically convex function of $\,t$; in other words, the Brunn-Minkowski inequality (\ref{logconvcap}) holds. An equality in (\ref{logconvcap}) occurs for some $t\in (0,1)$ if and only if $K_0=K_1$. \end{theorem}
{\it Remark.} The general situation of compact, polynomially convex Reinhardt sets reduces to the case $K_0,K_1\subset{\mathbb D}^n$ because for $K$ in the polydisk ${\mathbb D}_R^n$ of radius $R$, we have ${\operatorname{Cap}\,}(K,{\mathbb D}_R^n)={\operatorname{Cap}\,}(\frac1R K,{\mathbb D}^n)$ and $(\frac1R K)_t = \frac1R K_t$.
{\bf Acknowledgement.} Part of the work was done while the second named author was visiting Universit\'e Pierre et Marie Curie in March 2017; he thanks Institut de Math\'ematiques de Jussieu for the hospitality. The authors are grateful to the anonymous referee for careful reading of the text.
\noindent {\sc Dario Cordero-Erausquin}
\noindent Institut de Math\'ematiques de Jussieu, Sorbonne Universit\'e, 4 place Jussieu, 75252 Paris Cedex 05, France
\noindent \emph{e-mail:} dario.cordero@imj-prg.fr
\noindent {\sc Alexander Rashkovskii}
\noindent Tek/Nat, University of Stavanger, 4036 Stavanger, Norway
\noindent \emph{e-mail:} alexander.rashkovskii@uis.no
\end{document} |
\begin{document}
\title{A Fourier type transform on translation invariant valuations on convex sets.} \begin{abstract} Let $V$ be a finite dimensional real vector space. Let $Val^{sm}(V)$ be the space of translation invariant smooth valuations on convex compact subsets of $V$. Let $Dens(V)$ be the space of Lebesgue measures on $V$. The goal of the article is to construct and study an isomorphism $\mathbb{F}_V\colon Val^{sm}(V)\tilde\rightarrow Val^{sm}(V^*)\otimes Dens(V)$ such that $\mathbb{F}_V$ commutes with the natural action of the full linear group on both spaces, sends the product on the source (introduced in \cite{alesker-gafa-04}) to the convolution on the target (introduced in \cite{bernig-fu}), and satisfies a Plancherel type formula. As an application, a version of the hard Lefschetz theorem for valuations is proved. \end{abstract} \tableofcontents \setcounter{section}{-1} \section{Introduction.}\label{S:introduction} \subsection{An overview of the main results.}\label{Ss:overview} Let $V$ be a finite dimensional real vector space, $\dim V=n$. The goal of the article is to construct an isomorphism between the space of translation invariant valuations on convex compact subsets of $V$ and the space of translation invariant valuations (twisted by the line of Lebesgue measures) on the dual space $V^*$. This isomorphism is analogous to the classical Fourier transform. It has various nice properties studied in detail in this article. As an application we prove a version of the hard Lefschetz theorem for translation invariant valuations. To state the main results more precisely let us fix some notation and remind definitions.
Let $\ck(V)$ denote the class of all convex compact subsets of $V$. Equipped with the Hausdorff metric, the space $\ck(V)$ is a locally compact space.
\begin{definition} a) A function $\phi :{\cal K}(V) \rightarrow \mathbb{C}$ is called a valuation if for any $K_1, \, K_2 \in {\cal K}(V)$ such that their union is also convex one has $$\phi(K_1 \cup K_2)= \phi(K_1) +\phi(K_2) -\phi(K_1 \cap K_2).$$
b) A valuation $\phi$ is called continuous if it is continuous with respect to the Hausdorff metric on ${\cal K}(V)$. \end{definition} The notion of valuation is very classical in convexity. For the classical theory of valuations we refer to the surveys by McMullen-Schneider \cite{mcmullen-schneider} and McMullen \cite{mcmullen-survey}. For the general background from convexity we refer to the book by Schneider \cite{schneider-book}. Approximately during the last decade there was a considerable progress in the valuation theory. New classification results of special classes of valuations have been obtained \cite{klain}, \cite{schneider-simple}, \cite{alesker-annals-99}-\cite{alesker-jdg-03}, \cite{alesker-su2}, \cite{ludwig-reitzner}. Also new structures on valuations have been discovered \cite{alesker-jdg-03}, \cite{alesker-gafa-04}, \cite{bernig-fu}. Moreover some parts of the classical theory of valuations on affine spaces have been generalized to more general context of arbitrary manifolds \cite{part1}-\cite{part4}, \cite{part3} (see also a survey \cite{alesker-survey} of these results).
Let us denote by $Val(V)$ the space of translation invariant continuous valuations. Equipped with the topology of uniform convergence on compact subsets of $\ck(V)$, $Val(V)$ is known to be a Banach space. In \cite{alesker-gafa-04} there was introduced a dense subspace of smooth valuations $Val^{sm}(V)\subset Val(V)$. The definition is recalled in Section \ref{Ss:product-convolution} below. Note that $Val^{sm}(V)$ is equipped with the natural Fr\'echet topology which is stronger than the topology induced from $Val(V)$. \begin{example} 1) A Lebesgue measure $vol$ on $V$ belongs to $Val^{sm}(V)$.
2) The Euler characteristic $\chi$ belongs to $Val^{sm}(V)$. (Recall that $\chi(K)=1$ for any $K\in \ck(V)$.)
3) Let us fix a compact strictly convex set $A\subset V$ with infinitely smooth boundary. The functional $K\mapsto vol(K+A)$ is a smooth translation invariant valuation. Here $K+A$ is the Minkowski sum $\{k+a|\,k\in K,a\in A\}$.
4) Let $0\leq i<n=\dim V$. Fix $A_1,\dots,A_i$ compact strictly convex subsets of $V$ with infinitely smooth boundary. Then the mixed volume $K\mapsto V(K[n-i],A_1,\dots,A_i)$ belongs to $Val^{sm}(V)$ (here $K[n-i]$ means that the set $K$ is taken $n-i$ times). For the notion of mixed volume see e.g. \cite{schneider-book}, especially Ch. 5,6. \end{example} The space $Val^{sm}(V)$ carries a canonical structure of commutative associative topological algebra with unit (the unit is the Euler characteristic). This structure was constructed by the author in \cite{alesker-gafa-04}; the main properties of it are recalled in Section \ref{Ss:product-convolution}.
Let us denote by $Dens(V)$ the complex one dimensional space of complex valued Lebesgue measures on $V$. The space $Val^{sm}(V^*)\otimes Dens(V)$ carries a canonical structure of commutative associative topological algebra with unit. This structure was recently constructed by Bernig and Fu \cite{bernig-fu}; the main properties of it are recalled in Section \ref{Ss:product-convolution}.
Next observe that the group $GL(V)$ of all invertible linear transformations of $V$ acts naturally on $Val(V)$ (and $Val^{sm}(V)$) as follows: $(g\phi)(K)=\phi(g^{-1}K)$ for any $g\in GL(V),K\in \ck(V),$ and $\phi\in Val(V)$ or $Val^{sm}(V)$.
Our first main result says that these two topological algebras with actions of $GL(V)$ are isomorphic. More precisely we prove the following result. \begin{theorem}\label{T:main1} There exists an isomorphism of linear topological spaces $$\mathbb{F}_V\colon Val^{sm}(V)\rightarrow Val^{sm}(V^*)\otimes Dens(V)$$ which satisfies the following properties:
1) $\mathbb{F}_V$ commutes with the natural action of the group $GL(V)$ on both spaces;
2) $\mathbb{F}_V$ is an isomorphism of algebras when the source is equipped with the product and the target with the convolution.
3)(Plancherel type formula) Consider the composition $\ce_V$ $$Val^{sm}(V)\overset{\mathbb{F}_V}{\rightarrow} Val^{sm}(V^*)\otimes Dens(V)\overset{\mathbb{F}_{V^*}\otimes Id_{Dens(V)}}{\rightarrow}Val^{sm}(V)\otimes Dens(V^*)\otimes Dens(V)=Val^{sm}(V).$$
This composition $\ce_V$ satisfies $$(\ce_V\phi)(K)=\phi(-K).$$ \end{theorem} \begin{remark}\label{R:intro1} 1) On even valuations, the operator $\mathbb{F}_V$ was first introduced by the author in \cite{alesker-jdg-03} under a different name and notation (in \cite{alesker-jdg-03} it was denoted by $\mathbb{D}$).
2) Part (2) of Theorem \ref{T:main1} was first proved for {\itshape even} valuations by Bernig and Fu \cite{bernig-fu}.
3) The isomorphism $\mathbb{F}_V$ from Theorem \ref{T:main1} is not quite canonical. One can show that in certain precise sense there exist exactly four different isomorphisms satisfying the theorem provided $n>1$; for $n=1$ there exist exactly two such isomorphisms (see Remark \ref{R:non-unique} below for a precise statement).
\end{remark}
\begin{example}\label{E:dim1-2} Let us describe the isomorphism $\mathbb{F}_V$ in dimensions 1 and 2. First assume that $\dim V=1$. In this case the space of valuations is two dimensional: $Val(V)=Val^{sm}(V)=\mathbb{C}\cdot \chi\oplus \mathbb{C}\cdot vol_V$ where $vol_V$ is a non-zero Lebesgue measure on $V$. Let $vol_V^{-1}$ be the corresponding Lebesgue measure on $V^*$ (see (\ref{D:inverse-measure}) below). Then $Val^{sm}(V^*)\otimes Dens(V)=\mathbb{C} \cdot (vol^{-1}_V\otimes vol_V)\oplus \mathbb{C}\cdot (\chi\otimes vol_V)$. Then \begin{eqnarray}\label{E:euler-measure} \mathbb{F}_V(\chi)=vol^{-1}_V\otimes vol_V\\ \mathbb{F}_V(vol_V)=\chi\otimes vol_V. \end{eqnarray}
Let us assume now that $\dim V=2$. Let us fix a Euclidean metric on $V$. It induces identifications $V^*\simeq V,\, Dens(V)\simeq \mathbb{C}$. Under these identifications $\mathbb{F}_V\colon Val^{sm}(V)\tilde\rightarrow Val^{sm}(V)$. One has $Val^{sm}(V)=\mathbb{C}\chi\oplus Val_1^{sm}(V)\oplus \mathbb{C} vol_V$ where $Val_1^{sm}(V)$ denotes the subspace of 1-homogeneous valuations. Let us fix also an orientation on $V$. Then \begin{eqnarray*} \mathbb{F}_V(\chi)=vol_V,\\ \mathbb{F}_V(vol_V)=\chi. \end{eqnarray*}
In order to describe the action of $\mathbb{F}_V$ on $Val^{sm}_1(V)$ recall that by Hadwiger's theorem \cite{hadwiger-51} any valuation $\phi\in Val^{sm}_1(V)$ can be written uniquely in the form $$\phi(K)=\int_{S^1}h(\omega) dS_1(K,\omega)$$ where $h\colon S^1\rightarrow \mathbb{C}$ is a smooth function which is orthogonal on $S^1$ to the two dimensional space of linear functionals. Let us decompose $h$ into the even and odd parts: $$h=h_++h_-.$$ Let us decompose further the odd part $h_-$ into ``holomorphic'' and ``anti-holomorphic'' parts $$h_-=h_-^{hol}+h_-^{anti}$$ as follows. Let us decompose $h_-$ into the usual Fourier series on the circle $S^1$: $$h_-(\omega)=\sum_{k} \hat h_-(k)e^{ik\omega}.$$ Then by definition \begin{eqnarray*} h_-^{hol}(\omega):=\sum_{k>0} \hat h_-(k)e^{ik\omega},\\ h_-^{anti}(\omega):=\sum_{k<0} \hat h_-(k)e^{ik\omega}. \end{eqnarray*} Then the Fourier transform of the valuation $\phi$ is equal to $$(\mathbb{F}\phi)(K)=\int_{S^1}(h_+(J\omega)+h_-^{hol}(J\omega))dS_1(K,\omega)-\int_{S^1}h_-^{anti}(J\omega)dS_1(K,\omega)$$ where $J$ is the rotation of $\mathbb{R}^2$ by $\frac{\pi}{2}$ counterclockwise. (Notice the minus sign before the second integral.) Observe that $\mathbb{F}$ preserves the class of real valued even valuations, but for odd real valued valuations this is not true. This phenomenon also holds in higher dimensions.
\end{example}
\begin{remark}\label{R:intro10} In dimension higher than 2 the author does not know such a simple construction of $\mathbb{F}_V$, especially in the odd case studied in detail in the article. Note however that the Fourier transform of a Lebesgue measure and the Euler characteristic can be computed by the equalities (\ref{E:euler-measure}) in any dimension.
The construction in the odd case is quite involved and uses various characterization theorems on valuations, in particular the Klain-Schneider characterization of simple valuations \cite{klain}, \cite{schneider-simple} (see Theorem \ref{T:klain-schneider} below), the Irreducibility Theorem \cite{alesker-gafa-01} (see Theorem \ref{T:IrrThm} below), and also some additional representation theoretical computations based on the Beilinson-Bernstein localization \cite{beilinson-bernstein} (see Section \ref{Ss:beilinson-bernstein} below). \end{remark}
As an application of the Fourier transform (combined with Bernig-Br\"ocker theorem \cite{bernig-brocker}) we prove a version of hard Lefschetz theorem for valuations. In order to state it let us denote by $Val_i^{sm}(V)\subset Val^{sm}(V)$ the subspace of $i$-homogeneous valuations ($\phi\in Val_i^{sm}(V)$ if and only if $\phi(\lambda K)=\lambda^i\phi(K)$ for any $\lambda\geq 0, K\in \ck(V)$). By McMullen's theorem \cite{mcmullen-euler} $$Val^{sm}(V)=\oplus_{i=0}^nVal_i^{sm}(V).$$ Let us fix a Euclidean metric on $V$. Let us denote by $V_1$ the first intrinsic volume (see \cite{schneider-book}, p. 210). Here we just recall that $V_1$ is the only (up to a constant) continuous isometry invariant 1-homogeneous valuation (this characterization is due to Hadwiger \cite{hadwiger-book}). \begin{theorem}[hard Lefschetz theorem]\label{T:hardLef-intro} Let $0\leq i<n/2$. Then the map $$Val^{sm}_i(V)\rightarrow Val_{n-i}^{sm}(V)$$ given by $\phi\mapsto (V_1)^{n-2i}\cdot \phi$ is an isomorphism. \end{theorem} \begin{remark}\label{R:intro2} 1) Theorem \ref{T:hardLef-intro} was proved by the author for {\itshape even} valuations in \cite{alesker-jdg-03}.
2) There is another version of the hard Lefschetz theorem for valuations (see Theorem \ref{T:hlold} below). In the even case it was proved by the author \cite{alesker-jdg-03}, and in the general one by Bernig and Br\"ocker \cite{bernig-brocker}. Our proof of Theorem \ref{T:hardLef-intro} (see Section \ref{lefschetz}) uses in an essential way this result of Bernig-Br\"ocker. Also we use the fact that the Fourier transform $\mathbb{F}_V$ establishes an equivalence of two versions of the hard Lefschetz theorem (Lemma \ref{L:hlopers} below). This fact was recently observed in the even case by Bernig and Fu \cite{bernig-fu}; our Lemma \ref{L:hlopers} is a straightforward generalization of their observation. \end{remark}
Another new construction presented in this article is a construction of pushforward under linear maps of translation invariant continuous valuations (twisted by the line of Lebesgue measures). Namely if $f\colon V\rightarrow W$ is a linear map of vector spaces, we define a linear map $$f_*\colon Val(V)\otimes Dens(V^*)\rightarrow Val(W)\otimes Dens(W^*).$$ We refer to Section \ref{pushforward} for the details. Here we notice that this pushforward map allows one to compute the convolution of valuations in the sense of Bernig and Fu \cite{bernig-fu} in two steps: first one takes the exterior product of valuations in the sense of \cite{alesker-gafa-04}, and then the pushforward under the addition map $a\colon V\oplus V\rightarrow V$.
Another interesting property of the Fourier transform is that it intertwines the pullback of valuations (obviously defined, see Section \ref{pullback}) and the pushforward. With an oversimplification, one can say that the Fourier transform of the pullback of a valuation is equal to the pushforward of the Fourier transform. There are however some technical difficulties of making this statement rigorous due to the fact that the operations of pullback and pushforward do not preserve in general the class of smooth valuations. Nevertheless a rigorous result is possible though it sounds more technical: see Theorems \ref{T:z3}, \ref{T:z1} below.
\subsection{Organization of the article.}\label{Ss:organization} In Section \ref{S:RepTheory} we summarize a necessary background from representation theory. In Section \ref{S:other_background} we describe necessary facts mostly from valuation theory. These two sections do not contain new results.
In Section \ref{S:functorial} we introduce operations of pullback and pushforward on translation invariant valuations. We relate them to operations of product and convolution (Sections \ref{relation}, \ref{Ss:homomor-of-push}), and prove a version of the base change theorem (Section \ref{Ss:base-change}).
In Section \ref{S:isomorphism-val} we prove an isomorphism of $GL(V)$-modules $Val^{-,sm}_{n-p}(V)$ and $Val_p^{-,sm}(V^*)\otimes Dens(V)$ (here $Val_i^{-,sm}(V)$ denotes the space of smooth odd translation invariant $i$-homogeneous valuations on $V$). The existence of such isomorphism and some related representation theoretical calculations will be used in the construction of the Fourier transform in Section \ref{S:fourier-high-dim}.
In Section \ref{2-dim} we study separately the Fourier transform on valuations on a two dimensional plane. The two dimensional case will be used for higher dimensions in Section \ref{S:fourier-high-dim}.
Section \ref{S:fourier-high-dim} is the main one. Here we construct the Fourier transform in full generality and prove the main properties of it.
In Section \ref{lefschetz} a hard Lefschetz type theorem for valuations is proved.
The appendix contains a slight generalization of the construction of the exterior product of smooth valuations given in \cite{alesker-gafa-04}: here we explain how to multiply a smooth valuation by a continuous one. This generality is necessary in this article for technical reasons.
\subsection{Notation.}\label{Ss:notation} $\bullet$ $\ck(V)$ - the family of convex compact subsets of a vector space $V$.
$\bullet$ $\ck^{sm}(V)$ - the family of strictly convex compact subsets of a vector space $V$ with infinitely smooth boundary.
$\bullet$ $f^\vee$ - a dual map to a linear map $f$.
$\bullet$ $f\boxtimes g$, $f\times g$, $f\oplus g$ - operations with linear maps $f$ and $g$, see Section \ref{Ss:linear algebra}.
$\bullet$ $Gr_i(V)$ - the Grassmannian of $i$-dimensional linear subspaces of a vector space $V$.
$\bullet$ ${\cal F}_{p,p+1}(V)$ - the (real) variety of partial flags in $V$ $\{(E,F)|\, E\subset F, \dim E=p,\dim F=p+1\}$.
$\bullet$ ${\cal F}$ - the (real) variety of complete flags in a real vector space.
$\bullet$ ${}\!^ {\textbf{C}} {\cal F}$ - the (complex) variety of complete flags in a complex vector space.
$\bullet$ $\ct_{k,V}, \ct_{k,V;i},\ct^0_{k,V},\ct^0_{k,V;i}$ - certain vector bundles, see Section \ref{Ss:construction-fourier}.
$\bullet$ $|\omega_X|$ - the complex line bundle of densities over a manifold $X$.
\section{Background from representation theory.}\label{S:RepTheory} \subsection{Some structure theory of reductive groups.}\label{Ss:Stucture} In this subsection we recall a few basic definitions from the structure theory of real reductive groups. For simplicity we will do it only in the case of the group $GL_n(\mathbb{R})$. This will suffice for the purposes of this article.
Let $G_0=GL_n(\mathbb{R})$. It acts naturally on $\mathbb{R}^n$. Let us denote by $H_0\subset G_0$ the subgroup of diagonal invertible matrices ($H_0$ is a Cartan subgroup). \begin{definition}\label{D:parabolic} (i) A subgroup $P_0\subset G_0$ is called {\itshape parabolic} if there exists a partial flag of linear subspaces $0\ne F_1\subsetneqq F_2\subsetneqq\dots\subsetneqq F_k=\mathbb{R}^n$ such that $P_0$ is the stabilizer in $G_0$ of this flag.
(ii) A parabolic subgroup $P_0$ is called {\itshape standard} if $P_0\supset H_0$. \end{definition}
\begin{example}\label{E:blocks} Let us fix a positive integer $k$ and a decomposition $n=n_1+\dots+n_k,\, n_i\in \mathbb{N}$. Let
$$P_0:=\left\{\left[\begin{array}{c|c|c|c}
A_1&*&\dots&*\\\hline
0&A_2&\dots&*\\\hline
\dots&\dots&\ddots&\dots\\\hline
0&0&\dots&A_k
\end{array}\right]\big| A_i\in
GL_{n_i}(\mathbb{R})\right\}$$ be the subgroup of block upper triangular invertible matrices. It is a standard parabolic.
Note that if one takes $k=1,n_1=n$ one gets $P_0=G_0$. If one takes $k=n, n_1=\dots =n_n=1$ then $P_0$ is equal to the subgroup of upper triangular invertible matrices (it is a minimal parabolic). \end{example} \defP_0{P_0} \defU_{P_0}{U_{P_0}}
For a parabolic subgroup $P_0$ let us denote by $U_{P_0}$ its unipotent radical. Thus $U_{P_0}$ is a normal subgroup of $P_0$. More explicitly, if $P_0$ is the stabilizer of a partial flag $0\ne F_1\subsetneqq F_2\subsetneqq\dots\subsetneqq F_k=\mathbb{R}^n$ then $U_{P_0}$ consists of transformations from $P_0$ inducing the identity map on all consecutive quotients $F_i/F_{i-1},\, i=1,\dots,k$ (with the convention $F_0=0$). \begin{definition}\label{D:Levi} Let $P_0$ be a parabolic. An algebraic subgroup $M\subset P_0$ is called a {\itshape Levi subgroup} if the canonical homomorphism $P_0\rightarrow P_0/U_{P_0}$ induces an isomorphism $M\tilde \rightarrow P_0/U_{P_0}$. \end{definition}
A Levi subgroup always exists but is not unique. In Example \ref{E:blocks} the unipotent radical is
$$U_{P_0}=\left\{\left[\begin{array}{c|c|c|c}
Id_{n_1}&*&\dots&*\\\hline
0&Id_{n_2}&\dots&*\\\hline
\dots&\dots&\ddots&\dots\\\hline
0&0&\dots&Id_{n_k}
\end{array}\right]\right\}.$$ A Levi subgroup can be chosen to be equal to \begin{eqnarray}\label{ex01}
M=\left\{\left[\begin{array}{c|c|c|c}
A_1&0&\dots&0\\\hline
0&A_2&\dots&0\\\hline
\dots&\dots&\ddots&\dots\\\hline
0&0&\dots&A_k
\end{array}\right]\big| A_i\in
GL_{n_i}(\mathbb{R})\right\}\simeq GL_{n_1}(\mathbb{R})\times\dots \times GL_{n_k}(\mathbb{R}). \end{eqnarray} \def{}\!^ {0} M{{}\!^ {0} M} Let us return back to a general parabolic $P_0$. If $M$ is its Levi subgroup then $P_0=M\cdot U_{P_0}$.
\begin{definition}\label{D:AssParab} Let $P_0=M_{P_0}\cdot U_{P_0},\, Q_0=M_{Q_0}\cdot U_{Q_0}$ be two parabolics, where $M_{P_0},\, M_{Q_0}$ are their Levi subgroups. Then $P_0$ and $Q_0$ are called {\itshape associated} if there exists $x\in G_0$ such that $$M_{Q_0}=x^{-1}M_{P_0}x.$$ \end{definition}
\subsection{Admissible and tempered growth representations and a theorem of Casselman-Wallach.} \label{Ss:casselman-wallach} \begin{definition}\label{part1-rep-1} Let $\pi$ be a continuous representation of a Lie group $G_0$ in a Fr\'echet space $F$. A vector $\xi \in F$ is called $G_0$-smooth if the map $g\mapsto \pi(g)\xi$ is an infinitely differentiable map from $G_0$ to $F$. \end{definition} It is well known (see e.g. \cite{wallach}, Section 1.6) that the subset $F^{sm}$ of smooth vectors is a $G_0$-invariant linear subspace dense in $F$. Moreover it has a natural topology of a Fr\'echet space (which is stronger than the topology induced from $F$), and the representation of $G_0$ in $F^{sm}$ is continuous. Moreover all vectors in $F^{sm}$ are $G_0$-smooth.
Let $G_0$ be a real reductive group. Assume that $G_0$ can be imbedded into the group $GL_N(\mathbb{R})$ for some $N$ as a closed subgroup invariant under the transposition. Let us fix such an imbedding $p:G_0\hookrightarrow GL_N(\mathbb{R})$. (In our applications
$G_0$ will be either $GL_n(\mathbb{R})$ or a direct product of several copies of $GL_n(\mathbb{R})$.) Let us introduce a norm $|\cdot |$ on $G_0$ as follows:
$$|g|:=\max\{||p(g)||,||p(g^{-1})||\}$$
where $||\cdot||$ denotes the usual operator norm in $\mathbb{R}^N$. \begin{definition} Let $\pi$ be a smooth representation of $G_0$ in a Fr\'echet space $F$ (namely $F^{sm}=F$). One says that this representation has {\itshape moderate growth} if for each continuous semi-norm $\lambda$ on $F$ there exists a continuous semi-norm $\nu_\lambda$ on $F$ and $d_{\lambda}\in \mathbb{R}$ such that
$$\lambda(\pi(g)v)\leq |g|^{d_\lambda}\nu_{\lambda}(v)$$ for all $g\in G_0,\, v\in F$. \end{definition}
The proof of the next lemma can be found in \cite{wallach}, Lemmas 11.5.1 and 11.5.2. \begin{lemma}\label{part1-wallach} (i) If $(\pi,G_0,H)$ is a continuous representation of $G_0$ in a Banach space $H$, then $(\pi,G_0,H^{sm})$ has moderate growth.
(ii) Let $(\pi, G_0,V)$ be a representation of moderate growth. Let $W$ be a closed $G_0$-invariant subspace of $V$. Then $W$ and $V/W$ have moderate growth. \end{lemma}
Recall that a continuous Fr\'echet representation $(\pi,G_0,F)$ is said to have {\itshape finite length} if there exists a finite filtration $$0=F_0\subset F_1\subset \dots\subset F_m=F$$ by $G_0$-invariant closed subspaces such that $F_i/F_{i-1}$ is irreducible, i.e. does not have proper closed $G_0$-invariant subspaces. The sequence $F_1,F_2/F_1,\dots,F_m/F_{m-1}$ is called the Jordan-H\"older series of the representation $\pi$. It is well known (and easy to see) that the Jordan-H\"older series of a finite length representation is unique up to a permutation.
\begin{definition} A Fr\'echet representation $(\rho,G_0,F)$ of a real reductive group $G_0$ is called {\itshape admissible} if its restriction to a maximal compact subgroup $K$ of $G_0$ contains an isomorphism class of any irreducible representation of $K$ with at most finite multiplicity. (Recall that a maximal compact subgroup of $GL_n(\mathbb{R})$ is the orthogonal group $O(n)$.) \end{definition}
\begin{theorem}[Casselman-Wallach, \cite{casselman}]\label{casselman-wallach} Let $G_0$ be a real reductive group. Let $(\rho,G_0,F_1)$ and $(\pi,G_0,F_2)$ be smooth representations of moderate growth in Fr\'echet spaces $F_1, F_2$. Assume in addition that $F_2$ is admissible of finite length. Then any continuous morphism of $G_0$-modules $f:F_1\rightarrow F_2$ has closed image. \end{theorem}
The following proposition is essentially a common knowledge; the proof can be found in \cite{part1}, Proposition 1.1.8. \begin{proposition}\label{part1-epi} Let $G_0$ be a real reductive Lie group. Let $F_1,F_2$ be continuous Fr\'echet $G_0$-modules. Let $\xi:F_1\rightarrow F_2$ be a continuous morphism of $G_0$-modules. Assume that the assumptions of the Casselman-Wallach theorem are satisfied, namely $F_1$ and $F_2$ are smooth and have moderate growth, and $F_2$ is admissible of finite length. Assume moreover that $\xi$ is surjective.
Let $X$ be a smooth manifold. Consider the map $$\hat \xi:C^\infty(X,F_1)\rightarrow C^\infty(X,F_2)$$ defined by $(\hat\xi (f))(x)=\xi(f(x))$ for any $x\in X$.
Then $\hat\xi$ is surjective. \end{proposition}
\subsection{Induced representations.}\label{Ss:InducedRep} Let $H\subset G_0$ be a closed subgroup. Let $\pi$ be a representation of $H$ in a Fr\'echet space $F$. Let us consider the space of continuous functions
$$\Phi:=\{f\colon G_0\rightarrow F|\,\, f(x\cdot h)=\pi(h)^{-1}(f(x))\mbox{ for any } x\in G_0,h\in H\}.$$ The group $G_0$ acts on $\Phi$ by left translation. The representation of $G_0$ in $\Phi$ is called the induced representation and denoted by $Ind_H^{G_0}\pi$. Note that the space $\Phi$ is a space of continuous sections of a $G_0$-equivariant vector bundle over $G_0/H$ with fiber $F$.
Let $P_0\subset G_0$ be a parabolic subgroup. Let us consider the natural representation of $G_0$ in the space of (complex valued) half-densities (see e.g.\ \cite{guillemin-sternberg}, Ch. II \S 6) on $G_0/P_0$. It is easy to see that there exists a character $\rho_{P_0}\colon P_0\rightarrow \mathbb{C}^*$ such that this representation is isomorphic to $Ind_{P_0}^{G_0}\rho_{P_0}$.
\begin{remark}\label{R:character} It is easy to see that $U_{P_0}\subset [P_0,P_0]$. Hence any character of $P_0$ is trivial on $U_{P_0}$, and hence factorizes via $P_0/U_{P_0}$. \end{remark}
Let $\pi$ be a representation of $P_0/U_{P_0}$ considered as a representation of $P_0$. Let us denote by $$\ch(P_0,\pi):=Ind_{P_0}^{G_0}(\pi\otimes \rho_{P_0}).$$ We will need the next result which is standard in representation theory. \begin{theorem}\label{T:finiteness} Let $P_0\subset G_0$ be a parabolic subgroup. Let $\pi$ be a character of $P_0/U_{P_0}$. Then $Ind_{P_0}^{G_0}(\pi)$ is an admissible representation of finite length. \end{theorem}
Let now $P_0=M_{P_0}\cdot U_{P_0},\, Q_0=M_{Q_0}\cdot U_{Q_0}$ be two associated parabolics (see Definition \ref{D:AssParab}). Thus $M_{Q_0}=\alpha^{-1}M_{P_0}\alpha$ for some $\alpha\in G_0$. Let $\pi$ be a character of $M_{P_0}$ considered as a character of $P_0$. Let $\pi'$ be the character of $M_{Q_0}$ defined by $$\pi'(x)=\pi(\alpha\cdot x\cdot \alpha^{-1}).$$ The following result is a special case of a theorem by Harish-Chandra (see e.g. \cite{green-monster}, Proposition 4.1.20). \begin{theorem}[Harish-Chandra]\label{T:AssHCh} Let $P_0,Q_0\subset G_0$ be two associated parabolics as above. Let $\pi$ be a character of $P_0/U_{P_0}$. Let $\pi'$ be the character of $Q_0/U_{Q_0}$ as above. The representations $\ch(P_0,\pi)$ and $\ch(Q_0,\pi')$ have the same Jordan-H\"older series. \end{theorem} We will use below Theorem \ref{T:AssHCh} in two particular situations. To describe the first one, let us fix an integer $k=1,2,\dots,n-1$. Let
$$P_0:=\left\{\left[\begin{array}{c|c|c}
A&*&*\\\hline
0&B&*\\\hline
0&0&c
\end{array}\right]\big| A\in GL_{n-k-1}(\mathbb{R}), B\in
GL_k(\mathbb{R}), c\in \mathbb{R}^*\right\}.$$
Then $G_0/P_0$ is the partial flag space $\{(F,E)|\, F\in Gr_{n-k-1}(\mathbb{R}^n), E\in Gr_{n-1}(\mathbb{R}^n)\}$. Let $p\colon G_0/P_0\rightarrow Gr_{n-k-1}(\mathbb{R}^n)$ be the natural map given by
$p(F,E)=F$. Let ${\cal V}} \def\cw{{\cal W}} \def\cx{{\cal X}\rightarrow G_0/P_0$ be the line bundle whose fiber over a partial flag $(F,E)$ is equal to $Dens(E/F)\otimes or(\mathbb{R}^n/E)$. Let $|\omega_{Gr_{n-k-1}}|\rightarrow Gr_{n-k-1}(\mathbb{R}^n)$
denote the line bundle of densities. Let ${\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}$ denote the space of sections ${\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}:=C(G_0/P_0,{\cal V}} \def\cw{{\cal W}} \def\cx{{\cal X}\otimes p^*(|\omega_{Gr_{n-k-1}}|))$. Clearly ${\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}$ is a $G_0$-module.
\begin{lemma}\label{L:normaliz1} $${\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C} =\ch(P_0,\pi)$$ where $\pi\colon P_0\rightarrow \mathbb{C}^*$ is the character defined by
\begin{eqnarray}\label{def-pi}\pi\left(\left[\begin{array}{c|c|c}
A&*&*\\\hline
0&B&*\\\hline
0&0&c
\end{array}\right]\right)=
|\det A|^{\frac{k+1}{2}}|\det
B|^{-\frac{n-k}{2}-1} |c|^{-\frac{n-1}{2}+k}
sgn (c).\end{eqnarray} \end{lemma} {\bf Proof} is straightforward computation. \qed
Let us consider another parabolic subgroup
$$Q_0:=\left\{\left[\begin{array}{c|c|c}
B&*&*\\\hline
0&c&*\\\hline
0&0&A
\end{array}\right]\big| A\in GL_{n-k-1}(\mathbb{R}), B\in
GL_k(\mathbb{R}), c\in \mathbb{R}^*\right\}.$$ It is easy to see that $P_0$ and $Q_0$ are associated. Let $\xi\colon Q_0\rightarrow \mathbb{C}^*$ be the character defined by
$$\xi\left(\left[\begin{array}{c|c|c}
B&*&*\\\hline
0&c&*\\\hline
0&0&A
\end{array}\right]\right)=|\det B|^{-1}sgn (c).$$
\begin{lemma}\label{L:normaliz2} $$Ind_{Q_0}^{G_0}\xi=\ch(Q_0,\rho)$$
where $\rho\left(\left[\begin{array}{c|c|c}
B&*&*\\\hline
0&c&*\\\hline
0&0&A
\end{array}\right]\right)=
|\det A|^{\frac{k+1}{2}}|\det
B|^{-\frac{n-k}{2}-1} |c|^{-\frac{n-1}{2}+k}
sgn (c).$ \end{lemma} {\bf Proof} is straightforward computation. \qed
\begin{corollary}\label{C:arepr} The $G_0$-modules ${\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}^{sm}$ and $(Ind_{Q_0}^{G_0}\xi)^{sm}$ have the same Jordan-H\"older series. \end{corollary} {\bf Proof.} Obviously $P_0$ and $Q_0$ are associated, indeed $M_{P_0}=\alpha\cdot M_{Q_0}\cdot \alpha^{-1}$ where $\alpha=\left[\begin{array}{cc}
0&Id_{n-k}\\
Id_k&0\\
\end{array}\right]$. Moreover $\xi(x)=\pi(\alpha^{-1}x\alpha)$ where $\pi$ is defined by (\ref{def-pi}). Hence the result follows from Theorem \ref{T:AssHCh}. \qed
Let us describe now the second situation where Theorem \ref{T:AssHCh} will be used. \begin{corollary}\label{C:arepr2} Let us consider a parabolic subgroup \begin{eqnarray*}
P_0'=\left\{\left[\begin{array}{c|c|c}
A&*&*\\\hline
0&c&*\\\hline
0&0&B
\end{array}\right]\big| A\in GL_p(\mathbb{R}),c\in
\mathbb{R}^*,B\in GL_{n-p-1}(\mathbb{R})\right\}. \end{eqnarray*} Let us denote by $\omega\colon P_0'\rightarrow \mathbb{C}^*$ the character such that $Ind_{P_0'}^{G_0}(\omega)$ is isomorphic to the representation of $G_0$ in densities on $G_0/P'_0$. Let $\alpha\colon P_0'\rightarrow \mathbb{C}^*$ be the character defined by
$$\alpha(g)=|\det(A)|\cdot sgn(c)\cdot
\omega(g)\quad \mbox{ for } g=\left[\begin{array}{c|c|c}
A&*&*\\\hline
0&c&*\\\hline
0&0&B
\end{array}\right]\in P_0'.$$ Let us consider another parabolic subgroup \begin{eqnarray*}
Q_0'=\left\{\left[\begin{array}{c|c|c}
B&*&*\\\hline
0&c&*\\\hline
0&0&A
\end{array}\right]\big| A\in GL_p(\mathbb{R}),c\in
\mathbb{R}^*,B\in GL_{n-p-1}(\mathbb{R})\right\}. \end{eqnarray*} Let $\beta\colon Q_0'\rightarrow \mathbb{C}^*$ be the character defined by
$$\beta\left(\left[\begin{array}{c|c|c}
B&*&*\\\hline
0&c&*\\\hline
0&0&A
\end{array}\right]\right)=|\det(A)|\cdot
sgn(c).$$ Then $(Ind_{P_0'}^{G_0}(\alpha))^{sm}$ and $(Ind_{Q_0'}^{G_0}(\beta))^{sm}$ have the same Jordan-H\"older series. \end{corollary} {\bf Proof} is a straightforward computation similar to the proof of Corollary \ref{C:arepr}. \qed
\subsection{The Beilinson-Bernstein localization.}\label{Ss:beilinson-bernstein} \def\cd_\lam{{\cal D}} \def\ce{{\cal E}} \def\cf{{\cal F}_\lambda} We recall the Beilinson-Bernstein theorem on localization of $\textsl{g} $-modules following \cite{beilinson-bernstein} (see also \cite{bien}). Then we recall the version of this result for dominant but not regular characters following \cite{kashiwara}. We denote by capital letters the Lie groups, and by the corresponding small letters their Lie algebras.
Let $G$ be a complex reductive algebraic group. Let $T$ denote a Cartan subgroup of $G$. In our examples $G=GL_n(\mathbb{C})$. Let $B$ be a Borel subgroup of $G$ containing $T$. Let $t$ denote the Lie algebra of $T$. Let $b$ denote the Lie algebra of $B$. Let $n$ denote the nilpotent radical of $b$.
In the case when $G$ is complexification of a real reductive group $G_0$ let us denote by $K$ the complexification of a maximal compact subgroup of $G_0$. Thus if $G=GL_n(\mathbb{C})$ then $K=O(n,\mathbb{C})$ is the group of complex orthogonal matrices.
Let $R(t)\subset t^*$ be the set of roots of $t$ in $\textsl{g}$. The set $R(t)$ is naturally divided into the set of roots whose root spaces are contained in $n$ and its complement. Let $R^+(t)$ be the set of roots of $t$ in $\textsl{g}/b$. If $\alpha$ is a root of $t$ in $\textsl{g}$ then the dimension of the corresponding root subspace $\textsl{g}_{\alpha}$ is called the multiplicity of $\alpha$. Let $\rho _b$ be the half sum of the roots contained in $R^+(t)$ counted with their multiplicities.
\begin{definition}\label{D:regular-dominant} We say that $\lambda \in t^*$ is {\itshape dominant} if for any root $\alpha \in R^+(t)$ we have $\langle\lambda, \alpha ^{\vee}\rangle\ne -1, -2, \dots$. We shall say that $\lambda \in t^*$ is $B$-{\itshape regular} if for any root $\alpha \in R^+(t)$ we have $\langle\lambda, \alpha ^{\vee}\rangle\ne 0$. \end{definition}
For the definitions and basic properties of the sheaves of twisted differential operators we refer to \cite{bien}, \cite{kashiwara}. Here we will present only the explicit description of the sheaf $\cd_\lam$ in order to agree about the normalization.
\defU(\texttt{g}){U(\texttt{g})} \def{\cal O}_{\cf}{{\cal O}_{{\cal F}}} \def{\cal T}_{\cf}{{\cal T}_{{\cal F}}} Let ${}\!^ {\textbf{C}} {\cal F}$ be the complete flag variety of $G$ (then ${}\!^ {\textbf{C}} {\cal F}=G/B$). Let ${\cal O}_{\cf}$ denote the sheaf of regular functions on ${}\!^ {\textbf{C}} {\cal F}$. Let $U(\texttt{g})$ denote the universal enveloping algebra of $\textsf{g}$. Let $U^o$ be the sheaf $U(\texttt{g}) \otimes _{\mathbb{C}} {\cal O}_{\cf}$, and $\textsf{g}^o:=\textsf{g}\otimes _{\mathbb{C}}{\cal O}_{\cf}$. Let ${\cal T}_{\cf}$ be the tangent sheaf of ${}\!^ {\textbf{C}} {\cal F}$. We have a canonical morphism $\alpha\colon \textsf{g}^o\rightarrow {\cal T}_{\cf}$. Let also $b^o:=Ker \,\alpha
=\{\xi\in\textsf{g}^o|\,\xi_x\in b_x \forall x\in {}\!^ {\textbf{C}} {\cal F}\}. $ Let $\lambda\colon b\rightarrow \mathbb{C}$ be a linear functional which is trivial on $n$ (thus $\lambda \in t^*$). Then $\lambda$ defines a morphism $\lambda ^o\colon b^o\rightarrow {\cal O}_{\cf}$. We will denote by ${\cal D}_\lambda$ the sheaf of twisted differential operators corresponding to $\lambda -\rho _b$, i.e. $\cd_\lam$ is isomorphic to $U^o/{\cal I}_{\lambda}$, where ${\cal I}_{\lambda}$ is the two sided ideal generated by the elements of the form $\xi-(\lambda -\rho_b)^o(\xi)$ where $\xi$ is a local section of $b^o$.
Let $D_\lambda:=\Gamma ({}\!^ {\textbf{C}} {\cal F},\cd_\lam)$ denote the ring of global sections of $\cd_\lam$. We have a canonical morphism $U(\textsl{g})\rightarrow D_\lambda$. For the complete flag variety ${}\!^ {\textbf{C}} {\cal F}$ this map is onto (\cite{beilinson-bernstein}). The kernel of this homomorphism was also described in \cite{beilinson-bernstein}. To describe it, remind that we have the Harish-Chandra isomorphism $Z(U(\textsl{g}))\tilde \rightarrow (Sym^\bullet(t))^W$ where $Z(U(\textsl{g}))$ denotes the center of $U(\textsl{g})$, $(Sym^\bullet(t))^W$ is the algebra of elements of the full symmetric algebra of $t$ invariant under the Weyl group $W$. Hence the element $\lambda\in t^*$ defines a homomorphism $Z(U(\textsl{g}))\rightarrow \mathbb{C}$ called the infinitesimal character. Let $I_\lambda$ be the two-sided ideal in $U(\textsl{g})$ generated by the kernel of this homomorphism. Then by \cite{beilinson-bernstein}, $I_\lambda$ is equal to the kernel of the homomorphism $U(\textsl{g})\rightarrow D_\lambda$. \begin{remark}\label{R:infchar} The category of $D_\lambda$-modules coincides with the category of $\textsl{g}$-modules with the given infinitesimal character. \end{remark}
In this notation one has the following result proved in \cite{beilinson-bernstein}.
\begin{theorem}[Beilinson-Bernstein]\label{T:beilinson-bernstein} (1) If $\lambda \in t^*$ is dominant then the functor $\Gamma: {\cal D}_{\lambda}-mod \rightarrow U(\textsl{g})-mod$ is exact.
(2) If $\lambda \in t^*$ is dominant and regular then the functor $\Gamma$ is also faithful. \end{theorem}
Note also that always the functor $\Gamma$ has a left adjoint functor (called the localization functor) $\Delta: D_\lambda -mod\rightarrow \cd_\lam -mod$. It is defined as $\Delta (M)=\cd_\lam \otimes_{D_\lambda} M$. $\Delta(M)$ is called the {\itshape localization} of $M$.
The proof of the next lemma can be found in \cite{bien}, Proposition I.6.6.
\begin{lemma} Suppose $\Gamma: {\cal D}_{\lambda}-mod \rightarrow D_{\lambda}-mod$ is exact. Then the localization functor $\Delta:D_{\lambda}-mod\rightarrow\cd_\lam -mod$ is the right inverse of $\Gamma$: $$\Gamma\circ \Delta =Id.$$
\end{lemma} We have the following immediate corollary (see \cite{bien}, p. 24). \begin{corollary}\label{C:BBequivalence} Let $\lambda \in t^*$ be dominant and regular. Then the functor $$\Gamma\colon {\cal D}} \def\ce{{\cal E}} \def\cf{{\cal F}_\lambda-mod\rightarrow D_\lambda-mod$$ is an equivalence of categories. Moreover this equivalence holds for $K$-equivariant versions of these categories. \end{corollary}
The following sufficient condition for $\lambda$ being regular and dominant will be useful. \begin{proposition}\label{P:reg-dom} Let $G_0=GL_n(\mathbb{R}),\, G=GL_n(\mathbb{C})$. Let $B_0\subset G_0$ be the subgroup of real invertible upper triangular matrices, $B$ be its complexification. Let $\lambda \in t^*$. Let $\chi\colon B_0\rightarrow \mathbb{C}^*$ be a character such that its (complexified) differential $b\rightarrow \mathbb{C}$ is equal to $\lambda -\rho_b$. Assume that the representation $Ind_{B_0}^{G_0}\chi$ has a non-zero finite dimensional $G_0$-submodule. Then $\lambda$ is dominant and regular. \end{proposition} \subsection{Some analysis on manifolds.}\label{Ss:analysis-on-manifolds} Let $X$ be a smooth manifold countable at infinity. Let $\ce\rightarrow X$ be a finite dimensional vector bundle. We denote by $C(X,\ce),\, C^\infty(X,\ce)$ the spaces of continuous, $C^\infty$-smooth sections respectively. The space $C(X,\ce)$ being equipped with the topology of uniform convergence on compact subsets of $X$ is a Fr\'echet space; if $X$ is compact then it is a Banach space. The space $C^\infty(X,\ce)$ being equipped with the topology of uniform convergence on compact subsets of $X$ of all partial derivatives is a Fr\'echet space.
The following result is well known (see e.g. \cite{gelfand-vilenkin}).
\begin{theorem}\label{T:bilinear-forms} Let $X_1$ and $X_2$ be compact smooth manifolds. Let $\ce_1$ and $\ce_2$ be smooth finite dimensional vector bundles over $X_1$ and $X_2$ respectively. Let ${\cal G}} \def\ch{{\cal H}} \def\ci{{\cal I}$ be a Fr\'echet space. Let $$B:C^\infty(X_1,\ce_1)\times C^\infty(X_2,\ce_2)\rightarrow {\cal G}} \def\ch{{\cal H}} \def\ci{{\cal I}$$ be a continuous bilinear map. Then there exists a unique continuous linear operator $$b:C^\infty(X_1\times X_2,\ce_1\boxtimes \ce_2)\rightarrow {\cal G}} \def\ch{{\cal H}} \def\ci{{\cal I}$$ such that $b(f_1\otimes f_2)=B(f_1,f_2)$ for any $f_i\in C^\infty(X_i,\ce_i),\, i=1,2$. \end{theorem}
\section{Background on valuation theory.}\label{S:other_background} We collect in this section some necessary notation and results from the valuation theory and linear algebra. \subsection{Linear algebra.}\label{Ss:linear algebra} Let $V$ be a finite dimensional real vector space of dimension $n$. We denote \begin{eqnarray}\label{D:det} \det V:=\wedge^nV. \end{eqnarray} Also we denote by $Dens(V)$ the space of complex valued Lebesgue measures on $V$; thus $Dens(V)$ is a complex line. ($Dens$ stands for {\itshape densities}.)
Let us define $or(V)$ the {\itshape orientation line} of $V$ as follows. Let us denote by $Bas(V)$ the set of all bases of $V$. The group $GL(V)$ of linear invertible transformations acts naturally on $Bas(V)$ by $g((x_1,\dots,x_n))=(g(x_1),\dots,g(x_n))$. Then set \begin{eqnarray*}\label{D:or}
or(V)=\{f\colon Bas(V)\rightarrow \mathbb{C}|\, \, f(g(x))=\\sgn(\det(g))\cdot f(x)\mbox{ for any } g\in GL(V),\, x\in Bas(V)\}. \end{eqnarray*} Clearly $or(V)$ is a one dimensional complex vector space. The operation of passing to the biorthogonal basis gives an identification $Bas(V)\simeq Bas(V^*)$. It induces an isomorphism of vector spaces \begin{eqnarray}\label{or-dual} or(V)\simeq or(V^*) \end{eqnarray} which will be used throughout the article. Also one has another natural isomorphism \begin{eqnarray}\label{det-dens} Dens(V)\simeq \det(V^*)\otimes_\mathbb{C} or(V). \end{eqnarray}
Next let \begin{eqnarray}\label{Exact:Seq} 0\rightarrow U\rightarrow V\rightarrow W\rightarrow 0 \end{eqnarray} be a short exact sequence of finite dimensional vector spaces. Then one has canonical isomorphisms \begin{eqnarray}\label{Can:1} \det U\otimes \det W\tilde\rightarrow\det V,\\\label{Can:2} or(U)\otimes or(W)\tilde \rightarrow or(V),\\\label{Can:3} Dens(U)\otimes Dens(W)\tilde\rightarrow Dens(V). \end{eqnarray} The isomorphism (\ref{Can:1}) is given by $x\otimes y\mapsto x\wedge \tilde y$ where $\tilde y$ is an arbitrary lift of $y\in \det W$ to $\wedge^{\dim W}V$. To describe the isomorphisms (\ref{Can:2}) and (\ref{Can:3}) let us fix an arbitrary linear splitting of the exact sequence (\ref{Exact:Seq}), $s:W\rightarrow V$. Thus $V\simeq U\oplus W$. Then $Bas(U)\times Bas(W)\subset Bas(V)$. Thus the restriction from functions on $Bas(V)$ to functions on $Bas(U)\times Bas(W)$ defines the isomorphism (\ref{Can:2}) which is in fact independent of the choice of a splitting $s$. Next the usual product measure construction defines a map $Dens(U)\otimes Dens(W)\rightarrow Dens(V)$ which is an isomorphism independent of a choice of a splitting $s$.
Also we will use an isomorphism \begin{eqnarray}\label{Can:10} Dens(V^*)\tilde\rightarrow Dens(V)^* \end{eqnarray} which can be described as follows. Let us fix a basis of $V$, and let $(x_1,\dots,x_n)$ denote coordinates of a vector in $V$ in this basis. Let $(y_1,\dots,y_n)$ denote coordinates of a vector in $V^*$ in the biorthogonal basis. Let us choose the isomorphism
(\ref{Can:10}) such that the Lebesgue measure $|dy_1\wedge\dots
\wedge dy_n|$ on $V^*$ goes to the linear functional on $Dens(V)$
whose value on the Lebesgue measure $|dx_1\wedge\dots \wedge dx_n|$ on $V$ is equal to 1. It is easy to see that this isomorphism does not depend on a choice of a basis. In this situation we also write \begin{eqnarray}\label{D:inverse-measure}
|dy_1\wedge\dots \wedge dy_n|=|dx_1\wedge\dots \wedge dx_n|^{-1}. \end{eqnarray}
For a linear map of vector spaces $f\colon V\rightarrow W$ we denote by $f^\vee\colon W^*\rightarrow V^*$ the dual map. (This notation is different from the probably more standard notation $f^*$, since we keep the symbol $f^*$ to denote the pullback of valuations, see Section \ref{pullback}.)
Let $f_i\colon V_i\rightarrow W_i,\, i=1,2,$ be two linear maps of vector spaces. We denote by \begin{eqnarray}\label{D:boxtimes-lin} f_1\boxtimes f_2\colon V_1\oplus V_2\rightarrow W_1\oplus W_2 \end{eqnarray} defined, as usual, by $(f_1\boxtimes f_2)(x_1,x_2)=(f_1(x_1),f_2(x_2))$.
Let $g_i\colon V\rightarrow W_i,\, i=1,2,$ be two linear maps. We denote by \begin{eqnarray}\label{D:times-lin} g_1\times g_2\colon V\rightarrow W_1\oplus W_2 \end{eqnarray} the map defined by $(g_1\times g_2)(x)=(g_1(x),g_2(x))$.
Let $h_i\colon V_i\rightarrow W,\, i=1,2,$ be two linear maps. We denote by \begin{eqnarray}\label{D:oplus-lin} h_1\oplus h_2\colon V_1\oplus V_2\rightarrow W \end{eqnarray} the map defined by $(h_1\oplus h_2)(x_1,x_2)=h_1(x_1)+h_2(x_2)$.
\subsection{McMullen's decomposition of valuations.}\label{Ss:mcmullen-decomp} Let $V$ be an $n$-dimensional real vector space. Let $Val(V)$ denote the space of translation invariant continuous valuations on $V$. Let $\alpha$ be a complex number. We say that a valuation $\phi\in Val(V)$ is $\alpha$-homogeneous if $$\phi(\lambda K)=\lambda^\alpha\phi(K) \mbox{ for any } \lambda>0,\, K\in \ck(V).$$ Let us denote by $Val_\alpha(V)$ the subspace of $Val(V)$ of $\alpha$-homogeneous convex valuations. The following result is due to P. McMullen \cite{mcmullen-euler}. \begin{theorem}[\cite{mcmullen-euler}]\label{T:mcmullen-decomp} Let $n=\dim V$. Then $$Val(V)=\bigoplus_{i=0}^n Val_i(V).$$ \end{theorem}
\subsection{Characterization theorems on valuations.}\label{Ss:charact-homogeneous} In this section we describe several theorems on translation invariant continuous valuations which will be used in the article. \begin{proposition}\label{P:zero-n} (1) $Val_0(V)$ is spanned by the Euler characteristic $\chi$.
(2)(Hadwiger, \cite{hadwiger-book}) $Val_n(V)$ is spanned by a Lebesgue measure $vol$. \end{proposition} Note that part (1) of the proposition is obvious. Let us recall now a description of $(n-1)$-homogeneous valuations due to P. McMullen \cite{mcmullen-80}.
\def\PP^\vee_+(V){\mathbb{P}^\vee_+(V)} Let $\PP^\vee_+(V)$ denote the manifold of cooriented linear hyperplanes in $V$ (recall that coorientation of $E\subset V$ is just an orientation of $V/E$). Let $\cl\rightarrow \PP^\vee_+(V)$ denote the complex line bundle whose fiber over $E\in \PP^\vee_+(V)$ is equal to $Dens(E)$. Let us construct a continuous linear map \begin{eqnarray}\label{map-mcmullen} \Psi\colon C(\PP^\vee_+(V), \cl)\rightarrow Val_{n-1}(V). \end{eqnarray} To construct it, let us fix $\xi\in C(\PP^\vee_+(V),\cl)$. Let us define first a valuation $\Psi(\xi)$ on convex compact {\itshape polytopes} in $V$. Let $P\subset V$ be a convex compact polytope. Any $(n-1)$-dimensional face of $P$ carries a coorientation such that the exterior normal of it has a positive direction. Set \begin{eqnarray}\label{phixi} \Psi(\xi)(P):=\sum_{F\in\{(n-1)-\mbox{faces of }P\}}\xi(F). \end{eqnarray} \begin{lemma}\label{L:phixi} $\Psi(\xi)$ extends (uniquely) to a continuous valuation on $\ck(V)$. This valuation (also denoted by $\Psi(\xi)$) is continuous, translation invariant, and $(n-1)$-homogeneous. \end{lemma} {\bf Proof.} In fact this claim is due to Schneider \cite{schneider-75} in a slightly different language. We are going to explain this. Let us fix a Euclidean metric on $V$. It induces an identification of $\PP^\vee_+(V)$ with the unit sphere $S^{n-1}$, and an isomorphism of $\cl$ with the trivial line bundle. Then if $\xi\in C(\PP^\vee_+(V),\cl)\simeq C(S^{n-1})$ then for a polytope $P$ $$\Psi(\xi)(P)=\int_{S^{n-1}}\xi(\omega)dS_{n-1}(P,\omega)$$ where $dS_{n-1}(P,\bullet)$ denotes the $(n-1)$-th area measure (see e.g. \cite{schneider-book}, p.203). But the functional on $\ck(V)$ $$K\mapsto \int_{S^{n-1}}\xi(\omega)dS_{n-1}(K,\omega)$$ is a continuous translation invariant $(n-1)$-homogeneous valuation by \cite{schneider-75}. \qed
The next theorem is due to McMullen. \begin{theorem}[\cite{mcmullen-80}]\label{T:mcmullen-80} The map $\Psi$ is onto. The kernel is $n$-dimensional. \end{theorem} \begin{remark}\label{R:invariance}
The line bundle $\cl$ is obviously $GL(V)$-equivariant. The map $\Psi$ is $GL(V)$-equivariant. \end{remark} Let us describe explicitly the kernel of $\Psi$. First let us construct a linear map $$\wedge^{n-1}V^*\otimes or(V)\rightarrow C(\PP^\vee_+(V),\cl).$$ Fix an arbitrary cooriented hyperplane $E\in \PP^\vee_+(V)$. By (\ref{det-dens}), (\ref{or-dual}), (\ref{Can:2}) one has $$Dens(E)=\det E^*\otimes or(E)=\det E^*\otimes or(V)\otimes or(V/E).$$ Since $E$ is cooriented $or(V/E)=\mathbb{C}$. Hence $$Dens(E)=\det E^*\otimes or(V).$$ But one has a canonical map $\wedge ^{n-1}V^*\otimes or(V)\rightarrow \det E^*\otimes or(V)$ induced by the map $V^*\rightarrow E^*$ dual to the identity imbedding $E\rightarrow V$. When $E$ varies this defines the desired map $\wedge^{n-1}V^*\otimes or(V)\rightarrow C(\PP^\vee_+(V),\cl)$. Clearly it is $GL(V)$-equivariant and injective.
The claim is that the image of the above map is equal precisely to the kernel of $\Psi$. This is a $GL(V)$-equivariant interpretation of the well known fact from convexity that the closed (in the weak topology on measures) linear span of all area measures $dS_{n-1}(K,\bullet)$ on $S^{n-1}$, when $K$ runs through $\ck(V)$, is equal to all measures $\mu$ on $S^{n-1}$ satisfying $$\int_{S^{n-1}}\omega\cdot d\mu(\omega)=0.$$ (This fact is an easy consequence of the Minkowski existence theorem, see e.g. \cite{schneider-book}, Theorem 7.1.2.)
Let $Val^+(V)$ denote the subspace of even valuations, i.e. such that $\phi(-K)=\phi(K)$ for any $K\in \ck(V)$, and let $Val^-(V)$ denote the subspace of odd valuations, i.e. such that $\phi(-K)=-\phi(K)$ for any $K\in \ck(V)$. Similarly $Val^\pm_i(V)$ denote the analogous subspaces in $i$-homogeneous valuations.
Let $C^+(\PP^\vee_+(V),\cl)$ denote the subspace of $C(\PP^\vee_+(V),\cl)$ of even sections, i.e. sections which do not change when one reverses a coorientation of a hyperplane. Let $C^-(\PP^\vee_+(V),\cl)$ denote the subspace of $C(\PP^\vee_+(V),\cl)$ of odd sections, i.e. sections which change the sign when one reverses a coorientation of a hyperplane. Let us denote by $\Psi^+$ (resp. $\Psi^-$) the restriction of $\Psi$ to $C^+(\PP^\vee_+(V),\cl)$ (resp. $C^-(\PP^\vee_+(V),\cl)$).
Let $\mathbb{P}^\vee(V)$ be the manifold of linear hyperplanes in $V$ (without coorientation). Let $\cl^+\rightarrow \mathbb{P}^\vee(V)$ be the line bundle whose fiber over $E$ is equal to $Dens(E)$. Let $\cl^-\rightarrow \mathbb{P}^\vee(V)$ be the line bundle whose fiber over $E$ is equal to $Dens(E)\otimes or(V/E)$. It is easy to see that one has canonical isomorphisms \begin{eqnarray*} C^+(\PP^\vee_+(V),\cl)\simeq C(\mathbb{P}^\vee(V),\cl^+),\\ C^-(\PP^\vee_+(V),\cl)\simeq C(\mathbb{P}^\vee(V),\cl^-). \end{eqnarray*}
Then obviously one has \begin{eqnarray}\label{Psi-plus} \Psi^+\colon C(\mathbb{P}^\vee(V),\cl^+)\rightarrow Val^+_{n-1}(V),\\\label{Psi-minus} \Psi^-\colon C(\mathbb{P}^\vee(V),\cl^-)\rightarrow Val^-_{n-1}(V). \end{eqnarray} The following claim is obvious from the previous discussion. \begin{claim}\label{Cl:psi-plus-minus} $\Psi^+$ is an isomorphism of vector spaces. The kernel of $\Psi^-$ is equal to $\wedge^{n-1}V^*\otimes or(V)$. \end{claim}
The next theorem is very useful. In the even case it was proved by Klain \cite{klain}, and in the odd case by Schneider \cite{schneider-simple}. First recall that a valuation is called {\itshape simple} if it vanishes on all convex compact sets of dimension less than $n$. \begin{theorem}[\cite{klain},\cite{schneider-simple}]\label{T:klain-schneider} A translation invariant continuous valuation is simple if and only if it is representable as a sum of a Lebesgue measure and an odd $(n-1)$-homogeneous translation invariant continuous valuation. \end{theorem}
The group $GL(V)$ acts continuously and linearly in the space $Val(V)$ as follows: $(g\phi)(K)=\phi(g^{-1}K)$ for any $g\in GL(V), \phi\in Val(V),K\in \ck(V)$. Obviously this action preserves degree of homogeneity and parity of valuations. The next result was proved by the author \cite{alesker-gafa-01}, it will be used in the article many times. \begin{theorem}[Irreducibility Theorem, \cite{alesker-gafa-01}]\label{T:IrrThm} The natural representation of $GL(V)$ in $Val^\pm_i(V),\, i=0,1,\dots,n,$ is irreducible, i.e. there is no $GL(V)$-invariant proper closed subspace. \end{theorem}
\subsection{Klain-Schneider realizations of valuations.}\label{Ss:klain-schneider} In this section we describe $GL(V)$-equivariant realizations of $Val_i^+(V)$ as a subspace of the space of sections of certain line bundle over the Grassmannian $Gr_i(V)$, and of $Val_i^-(V)$ as a subspace of a quotient of the space of sections of certain line bundle over the partial flag space ${\cal F}_{i,i+1}(V)$. The exposition follows \cite{alesker-adv-00}, \cite{alesker-gafa-01}. The even case was also considered in \cite{klain-00} using a slightly different language. These realizations of even and odd valuations we call respectively Klain and Schneider realizations. The reason for such terminology is that behind these constructions stands a deep Klain-Schneider theorem \ref{T:klain-schneider}.
Let us start with the even case. Let us denote by ${\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}\rightarrow Gr_i(V)$ the complex line bundle whose fiber over $E\in Gr_i(V)$ is equal to
$Dens(E)$. Let $\phi\in Val_i^+(V)$. For any $E\in Gr_i(V)$ let us consider the restriction $\phi|_E$. Clearly $\phi|_E\in Val_i(E)$. But by Proposition \ref{P:zero-n}(2) (due to Hadwiger) $Val_i(E)=Dens(E)$. Then $\phi$ defines a section in $C(Gr_i(V),{\cal M}} \def\cn{{\cal N}} \def\co{{\cal O})$. We get a continuous $GL(V)$-equivariant map \begin{eqnarray}\label{D:klain-map} Val_i^+(V)\rightarrow C(Gr_i(V),{\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}). \end{eqnarray} The key fact is that this map is injective. This can be easily deduced by induction on the dimension from the even case (due to Klain) of the Klain-Schneider theorem \ref{T:klain-schneider} (see \cite{alesker-adv-00}, Proposition 3.1, or \cite{klain-00}). We call this imbedding the Klain imbedding.
Let us consider the odd case. Let ${\cal F}_{i,i+1}(V)$ denote the partial flag variety
$${\cal F}_{i,i+1}(V):=\{(E,F)|\, E\subset F, \dim E=i,\dim F=i+1\}.$$
Let us denote by $\cx\rightarrow Gr_{i+1}(V)$ the (infinite dimensional) vector bundle whose fiber over $F\in Gr_{i+1}(V)$ is equal to $Val^-_i(F)$. Let $\phi\in Val^-_i(V)$. For any $F\in Gr_{i+1}(V)$
let us consider the restriction $\phi|_F\in Val_i^-(F)$. Thus we get a $GL(V)$-equivariant continuous map $$Val_i^-(V)\rightarrow C(Gr_{i+1}(V),\cx).$$ The key point is that this map is injective. This easily follows by the induction on dimension from the odd case (due to Schneider) of the Klain-Schneider theorem \ref{T:klain-schneider} (see \cite{alesker-gafa-01}, Proposition 2.6 for the details).
Let ${\cal N}\rightarrow {\cal F}_{i,i+1}(V)$ denote the line bundle whose fiber over $(E,F)\in {\cal F}_{i,i+1}(V)$ is equal to $Dens(E)\otimes or(F/E)$. Applying the map $\Psi^-$ (see (\ref{Psi-minus})) to every subspace $F\in Gr_{i+1}(V)$ (instead of $V$) we get a continuous map \begin{eqnarray*} C({\cal F}_{i,i+1}(V),{\cal N})\rightarrow C(Gr_{i+1}(V),\cx). \end{eqnarray*} This map is onto. This follows from the fact that $\Psi^-$ is onto and has finite dimensional kernel.
Thus $Val_i^-(V)$ is realized as a subspace of a quotient of $C({\cal F}_{i,i+1}(V),{\cal N})$. We call this realization the Schneider realization.
\subsection{Product and convolution of smooth valuations.}\label{Ss:product-convolution} Let us denote by $Val^{sm}(V)$ the space of smooth valuations (in the sense of Definition \ref{part1-rep-1}) under the natural action of the group $GL(V)$ on the space $Val(V)$.
Let $\ck^{sm}(V)$ denote the space of strictly convex compact subsets of $V$ with $C^\infty$-smooth boundary. A typical example of a smooth valuation is a functional $K\mapsto vol(K+A)$ where $A\in \ck^{sm}(V)$ is fixed.
Product on smooth translation invariant valuations was defined by the author in \cite{alesker-gafa-04}. Let us summarize the main properties of the product in the following theorem. Let us fix on $V$ a positive Lebesgue measure $vol_V$. Below we denote by $vol_{V\times V}$ the product measure on $V\times V$ \begin{theorem}[\cite{alesker-gafa-04}]\label{T:product-val} There exists a bilinear map $$Val^{sm}(V)\times Val^{sm}(V)\rightarrow Val^{sm}(V)$$ which is uniquely characterized by the following two properties:
1) continuity;
2) if $\phi(\bullet)=vol_V(\bullet+A),\, \psi=vol_V(\bullet+B)$ then $$(\phi,\psi)\mapsto vol_{V\times V}(\Delta(\bullet)+(A\times B))$$ where $\Delta\colon V\rightarrow V\times V$ is the diagonal imbedding.
This bilinear map defines a product making $Val^{sm}(V)$ a commutative associative algebra with unit (which is the Euler characteristic). \end{theorem} \begin{example}[\cite{alesker-gafa-04}, Proposition 2.2]\label{E:product-2dim} Assume that $\dim V=2$. Let $\phi(K)=V(K,A)$, $\psi(K)=V(K,B)$. Then $$(\phi\cdot\psi)(K)=\frac{1}{2} V(A,-B)vol(K).$$ \end{example}
Convolution on $Val^{sm}(V)\otimes Dens(V^*)$ was defined by Bernig and Fu in \cite{bernig-fu}. Let us summarize their result in the following theorem. \begin{theorem}[\cite{bernig-fu}]\label{T2} There exists a bilinear map $$Val^{sm}(V)\otimes Dens(V)^*\times Val^{sm}(V)\otimes Dens(V)^*\rightarrow Val^{sm}(V)\otimes Dens(V)^*$$ which is uniquely characterized by the following two properties:
1) continuity;
2) if $\phi(\bullet)=vol_V(\bullet+A)\otimes vol_V^{-1},\, \psi=vol_V(\bullet+B)\otimes vol_V^{-1}$ then $$(\phi,\psi)\mapsto vol_{V}(\bullet+A+ B)\otimes vol_V^{-1}.$$
This bilinear map defines a product making $Val^{sm}(V)\otimes Dens(V)^*$ a commutative associative algebra with unit (which is equal to $vol_V\otimes vol_V^{-1}$). \end{theorem}
\subsection{A technical lemma.}\label{Ss:aLemma} For future reference we state a simple and well-known lemma (see \cite{schneider-book}, p. 294, particularly equality (5.3.23)). \begin{lemma}\label{L:aLemma} Let $f\colon V\twoheadrightarrow W$ be a linear epimorphism of finite dimensional real vector spaces. Let $k:=\dim Ker (f)$. Let $vol_{Ker}, vol_W$ be Lebesgue measures on $Ker(f), W$ respectively. Let $vol_V:=vol_{Ker}\otimes vol_W$ be the corresponding Lebesgue measure on $V$. Let $A\in \ck(V), B\in \ck(Ker(f))$. Then
$$\frac{1}{k!}\frac{d^k}{d\varepsilon^k}\big|_{\varepsilon=0} vol_V(A+\varepsilon B)=vol_{Ker}(B)\cdot vol_W(f(A)).$$ \end{lemma} {\bf Proof.} It is an application of the Fubini theorem. \qed
\section{Functorial properties of translation invariant valuations.}\label{S:functorial} \subsection{Pullback of valuations.}\label{pullback} \begin{definition}\label{D:pullback} Let $f\colon V\rightarrow W$ be a linear map of vector spaces. Let us define a map, called pullback, $$f^*\colon Val(W)\rightarrow Val(V)$$ by $(f^*\phi)(K)=\phi(f(K))$ for any $K\in \ck(V)$. \end{definition} \begin{proposition}\label{P:pullback1} (i) $f^*$ is a continuous map of Banach spaces.
(ii) $f^*$ preserves degree of homogeneity and parity of valuations.
(iii) $(f_1\circ f_2)^*=f_2^*\circ f_1^*$. \end{proposition} {\bf Proof} is obvious. \qed
\subsection{Pushforward of valuations twisted by densities.}\label{pushforward} For a linear map $f\colon V\rightarrow W$ we are going to define in this section a canonical map, called pushforward, $$f_*\colon Val(V)\otimes Dens(V^*)\rightarrow Val(W)\otimes Dens(W^*).$$ The main result of this section is the following proposition which is also a definition. \begin{proposition}\label{P:pushforward} (i) Let $f\colon V\rightarrow W$ be a linear map of vector spaces. Then there exists a continuous linear map, called pushforward, $$f_*\colon Val(V)\otimes Dens(V^*)\rightarrow Val(W)\otimes Dens(W^*)$$ which is uniquely characterized by the following property. Let us fix Lebesgue measures $vol_V$ on $V$, $vol_{Ker}$ on $Ker f$, $vol_{CoKer}$ on $CoKer f=W/Im (f)$, and let $vol_{Im}$ be the induced Lebesgue measure on $Im (f)$ (it is obtained as the image of the measure $\frac{vol_V}{vol_{Ker}}$ on $V/ Ker(f)$ under the isomorphism $f\colon V/Ker(f)\tilde\rightarrow Im(f)$). Then for any $A\in \ck(V)$ \begin{eqnarray*} f_*(vol_V(\bullet +A) \otimes vol_V^{-1})=\\\left(\int_{z\in CoKer(f)}vol_{Im}((\bullet +f(A))\cap z) dvol_{CoKer}(z)\right) \otimes (vol_{Im}\otimes vol_{CoKer})^{-1} \end{eqnarray*} where $vol_{Im}\otimes vol_{CoKer}$ is considered as a Lebesgue measure on $W$ under the isomorphism $Dens(W)\simeq Dens(Im(f))\otimes Dens(CoKer(f))$.
(ii) $f_*$ preserves the parity, and $f_*(Val_\bullet(V))\subset Val_{\bullet+\dim W-\dim V}(W)$.
(iii) $(f_1\circ f_2)_*=f_{1*}\circ f_{2*}$. \end{proposition} \begin{remark}\label{R:pushforward} Before we prove this proposition let us discuss two special cases of the pushforward. Let us fix Lebesgue measures $vol_V$ on $V$ and $vol_W$ on $W$. This choice induces isomorphisms $Dens(V)\tilde\rightarrow \mathbb{C}$, $Dens(W)\tilde\rightarrow \mathbb{C}$. Under these identifications, $f_*\colon Val(V)\rightarrow Val(W)$.
(1) Let us assume that $V$ is a subspace of $W$, and $f\colon V\hookrightarrow W$ is the imbedding map. Consider the Lebesgue measure $vol_{W/V}:=\frac{vol_{W}}{vol_V}$ on $W/V$. For any $\phi\in Val(V)$ $$(f_*\phi)(K)=\int_{z\in W/V}\phi(K\cap z)d vol_{W/V}(z).$$
(2) Let us assume that $W$ is a quotient space of $V$, and $f\colon V \twoheadrightarrow W$ is the quotient map. Let $\phi\in Val(V)$ have the form $\phi(K)=vol_V(K+A)$ where $A\in \ck(V)$ is fixed. Then for any $K\in \ck(W)$ $$(f_*\phi)(K)=vol_W(K+f(A)).$$ \end{remark}
{\bf Proof} of Proposition \ref{P:pushforward}. The uniqueness follows immediately from McMullen's conjecture. Let us prove the existence. Let us decompose $f$ as a composition of a linear surjection $p\colon V\twoheadrightarrow X$ followed by a linear injection $j\colon X\hookrightarrow W$; thus $f=j\circ p$. Such a decomposition is unique up to an isomorphism (in the obvious sense).
Let us define $j_*\colon Val(X)\otimes Dens(X^*)\rightarrow Val(W)\otimes Dens(W^*)$ as in Remark \ref{R:pushforward}(1). Let us define now $p_*\colon Val(V)\otimes Dens(V^*)\rightarrow Val(X)\otimes Dens(X^*)$. Let us fix Lebesgue measures $vol_{Ker}$ on $Ker(p)$ and $vol_X$ on $X$. This induces a Lebesgue measure $vol_V:=vol_{Ker}\otimes vol_X$ on $V$. Let $\phi\in Val(V)$. Fix a set $L\in \ck(V)$. Consider a valuation $\tau$ on $Ker(p)$ defined by
$$\tau(S)=\frac{1}{k!}\frac{d^k}{d\varepsilon^k}\big|_{\varepsilon=0}\phi(L+\varepsilon S)$$ where $S\in \ck(Ker(p))$, $k=\dim (Ker(p))$. Recall that by a result of McMullen \cite{mcmullen-euler}, $\phi(L+\varepsilon S)$ is a polynomial in $\varepsilon\geq 0$ of degree at most $k$. It is easy to see that $\tau$ is a $k$-homogeneous translation invariant continuous valuation on $Ker(p)$. By a result of Hadwiger \cite{hadwiger-book} $\tau$ must be proportional to $vol_{Ker}$ with a constant depending on $L$ and $\phi$: $$\tau=C(L,\phi)vol_{Ker}.$$ It is easy to see that $C(L,\phi)$ depends continuously on $L\in \ck(V)$ and $\phi\in Val(V)$, and linearly on $\phi$. Let $K\in \ck(X)$. Let $\tilde K\in \ck(V)$ be an arbitrary convex compact set such that $p(\tilde K)=K$. Define $$\tilde \phi(K):=C(\tilde K,\phi).$$ \begin{claim}\label{claim} $\tilde\phi (K)$ does not depend on a choice of $\tilde K$ such that $p(\tilde K)=K$. \end{claim} {\bf Proof.} Since $C(\tilde K,\phi)$ is continuous in $\phi\in Val(V)$ when $\tilde K$ is fixed, by McMullen's conjecture, it is enough to prove the claim for $\phi(\bullet)=vol_V(\bullet +A)$ for $A\in \ck(V)$. Let us fix $S\in \ck(Ker(p))$ with
$vol_{Ker}(S)=1$. Then $C(\tilde K,\phi)=\frac{1}{k!}\frac{d^k}{d\varepsilon^k}\big|_{\varepsilon=0}vol_V(\tilde K+A+\varepsilon S)$. But the last expression is equal to $vol_X(p(\tilde K+A))=vol_X(K+p(A))$ by Lemma \ref{L:aLemma}. \qed
Next $\tilde\phi$ is a continuous translation invariant valuation on $X$. Translation invariance is obvious. In order to prove continuity and valuation property let us fix a linear right inverse of $p$, $s\colon X\rightarrow V$. For any $K\in\ck(X)$ let us choose $\tilde K:=s(K)$. Thus $\tilde \phi(K)=C(s(K),\phi)$. Clearly the last expression is a continuous valuation in $K\in \ck(X)$. Let us define \begin{eqnarray}\label{p-star} p_*(\phi\otimes vol_V^{-1}):=\tilde\phi\otimes vol_X^{-1}. \end{eqnarray} It is easy to see that the definition of $p_*$ does not depend on a choice of Lebesgue measures $vol_V, vol_X$. Finally define $$f_*:=j_*\circ p_*.$$ It follows from the construction that $f_*$ satisfies the assumptions of the proposition.
(ii) is obvious.
(iii) Let $U\overset{f_2}{\rightarrow}V\overset{f_1}{\rightarrow} W$. We have to show \begin{eqnarray}\label{func} (f_1\circ f_2)_*=f_{1*}\circ f_{2*}.\end{eqnarray} First we will show this in a number of special cases. If $f_1$ is an injection and $f_2$ is a surjection the equality (\ref{func}) is clear by the construction of pushforward from the proof of part (i).
\begin{lemma}\label{inj_inj} The equality (\ref{func}) holds if $f_1,f_2$ are injections. \end{lemma} {\bf Proof.} We may and will assume for simplicity that $f_1,f_2$ are imbeddings of linear subspaces, thus $U\subset V\subset W$. Let us fix Lebesgue measures $vol_U,vol_{V/U},vol_{W/V}$ on $U,V/U,W/V$ respectively. Let $vol_V:=vol_U\otimes vol_{V/U},\, vol_W:=vol_V\otimes vol_{W/V}$ be Lebesgue measures on $V$, $W$ respectively. Let us fix $\phi\in Val(U)$, $K\in \ck(W)$. We have \begin{eqnarray*} (f_{2*}(\phi\otimes vol_U^{-1}))(K)=\left(\int_{z\in V/U}\phi(K\cap z)d vol_{V/U}(z)\right)\otimes vol_V^{-1},\end{eqnarray*} \begin{eqnarray*} (f_{1*}(f_{2*}(\phi\otimes vol_U^{-1})))(K)=\left(\int_{w\in W/V}\left(f_{2*}(\phi\otimes vol_U^{-1})(K\cap w)\otimes vol_V\right)\cdot d vol_{W/V}(w)\right)\otimes vol_W^{-1}=\\ \left(\int_{x\in W/U}\phi(K\cap x)d\left( \frac{vol_W}{vol_U}\right)(x)\right)\otimes vol_W^{-1}=(f_1\circ f_2)_*(\phi\otimes vol_U^{-1}). \end{eqnarray*} Lemma is proved. Q.E.D. \begin{lemma}\label{surj_surj} The equality (\ref{func}) holds if $f_1,f_2$ are surjections. \end{lemma} {\bf Proof.} Let us fix Lebesgue measures $vol_1,vol_2,vol_W$ on $Ker(f_1),Ker(f_2),W$ respectively. Set $vol_U:=vol_1\otimes vol_2\otimes vol_W\in Dens(U)$, $vol_V=vol_1\otimes vol_W\in Dens(V)$. Assume that $\phi\in Val(U)$ has the form $$\phi(\bullet)=vol_U(\bullet +A)$$ where $A\in \ck(U)$ is fixed. Then \begin{eqnarray*} (f_{2*}(\phi\otimes vol_U^{-1}))(K)=vol_V(K+f_2(A))\otimes vol_V^{-1},\end{eqnarray*} \begin{eqnarray}\label{s1} \left(f_{1*}(f_{2*}(\phi\otimes vol_U^{-1}))\right)(K)=vol_W(K+f_1(f_2(A)))\otimes vol_W^{-1}. \end{eqnarray} On the other hand \begin{eqnarray}\label{s2} (f_1\circ f_2)_*(\phi\otimes vol_U^{-1})(K)=vol_W(K+(f_1\circ f_2)(A))\otimes vol_W^{-1} \end{eqnarray} Comparing (\ref{s1}) and (\ref{s2}) one concludes the lemma using the McMullen's conjecture. \qed
\begin{lemma}\label{surj_inj} The equality (\ref{func}) holds if $f_1$ is a surjection, $f_2$ is an injection. \end{lemma} {\bf Proof.} We will assume for simplicity and without loss of generality that $U\subset V$ and $f_2\colon U\hookrightarrow V$ is the identity imbedding. Also we may assume that $W$ is a quotient space of $V$, and $f_1\colon V\twoheadrightarrow W$ is the canonical quotient map.
\underline{Step 1.} We consider the case $$Ker(f_1)\subset U.$$ Denote $k:=\dim Ker (f_1)$. Let us fix Lebesgue measures $vol_{Ker}$ on $Ker(f_1)$, $vol_U$ on $U$, $vol_{V/U}$ on $V/U$. Let $vol_V=vol_U\otimes vol_{V/U},$ $vol_W:=\frac{vol_V}{vol_{Ker}}$ be the Lebesgue measure on $W$. Let $\phi\in Val(U)$ has the form $$\phi(\bullet)=vol_U(\bullet +A)$$ for some $A\in \ck(U)$. Then for any $L\in \ck(V)$ we have \begin{eqnarray*} \left(f_{2*}(\phi\otimes vol_U^{-1})\right)(L)=\left(\int_{z\in V/U}vol_U((L\cap z)+A) d vol_{V/U}(z)\right) \otimes vol_V^{-1}. \end{eqnarray*} Now let us fix a linear right inverse of $f_1$ $$s\colon W\rightarrow V.$$ Let $M\in \ck(W)$. Let $\tilde M:= s(M)$. Let us fix a set $S\in \ck(Ker(f_1))$ with $vol_{Ker}(S)=1$. By the construction in the proof of part (i) we get \begin{eqnarray}\label{s3.5} \left(f_{1*}(f_{2*}(\phi\otimes vol_U^{-1}))\right)
(M)=\\\label{s4}\frac{1}{k!}\frac{d^k}{d\varepsilon^k}\big|_{\varepsilon=0}\left(\int_{z\in V/U} vol_U\left(((\tilde M+\varepsilon S)\cap z)+A\right)d vol_{V/U}(z)\right)\otimes vol_W^{-1}=\\\label{s4.5}
\frac{1}{k!}\frac{d^k}{d\varepsilon^k}\big|_{\varepsilon=0}\left(\int_{z\in V/U} vol_U\left((\tilde M\cap z)+\varepsilon S+A\right)d vol_{V/U}(z)\right)\otimes vol_W^{-1}=\\\label{s4.6} \left(\int_{z\in (V/Ker f_1)/(U/Ker f_1)}vol_W\left((M\cap z)+f_1(A)\right) d vol_{V/U}(z)\right)\otimes vol_W^{-1}. \end{eqnarray} where the equality (\ref{s4}) follows from the fact that $(\tilde M+\varepsilon S)\cap z=(\tilde M\cap z)+\varepsilon S$ since $S\subset Ker(f_1)\subset U$, and in (\ref{s4.5}) we used the identification $V/U=(V/Ker(f_1))/(U/Ker(f_1))$.
Let us compute now $(f_1\circ f_2)_*(\phi\otimes vol_U^{-1})$. The map $f_1\circ f_2$ factorizes as $$U\overset{p}{\twoheadrightarrow} U/Ker(f_1) \overset{j}{\hookrightarrow } V/Ker(f_1)=W.$$ By the construction from part (i) $$(f_1\circ f_2)_*=j_*\circ p_*.$$ We have for any $L\in \ck(V)$ $$\left(p_*(\phi\otimes vol_U^{-1})\right)(L)=vol_{U/Ker}(L+p(A))\otimes vol_{U/Ker}^{-1}.$$ Next for any $M\in \ck(W)$ \begin{eqnarray}\label{s5} \left(j_*(p_*(\phi\otimes vol_U^{-1}))\right) (M)=\\\label{s6} \left( \int_{z\in V/U}vol_{W} \left((M\cap z)+p(A)\right) d vol_{V/U}(z)\right)\otimes vol_W^{-1}. \end{eqnarray} Comparing (\ref{s4.5}) and (\ref{s6}) and observing that $p(A)=f_1(A)$, we conclude Step 1.
\underline{Step 2.} At this step we will assume that $f_1\circ f_2$ is injective.
Let us choose $s\colon W\rightarrow V$ a right inverse of $f_1$ so that $s(W)\supset f_2(U)$. Let $H:=Ker (f_1)$. To simplify the notation and without loss of generality we will identify $U$ with $f_2(U)$, and $W$ with $s(W)$. Let us fix Lebesgue measures $vol_U$ on $U$, $vol_{W/U}$ on $W/U$, $vol_H$ on $H$. Let $vol_W:=vol_U\otimes vol_{W/U}$, $vol_V:=vol_W\otimes vol_H$, $vol_{V/U}:=vol_{W/U}\otimes vol_H$ be the corresponding Lebesgue measures on $W,V,V/U$ respectively.
Let $\phi\in Val(U)$. Let us fix $K\in \ck(W)$. Then we have \begin{eqnarray}\label{s7} \left((f_1\circ f_2)_*(\phi\otimes vol_U^{-1})\right)(K)=\int_{z\in W/U}\phi(K\cap z)d vol_{W/U}(z)\otimes vol_W^{-1}. \end{eqnarray} Let us fix a subset $S\subset H$ with $vol_H(S)=1$. Denote $k:=\dim H$. Then for any $L\in \ck(V)$ we have \begin{eqnarray*} \left(f_{2*}(\phi\otimes vol_U^{-1})\right)(L)=\left(\int_{x\in V/U}\phi(L\cap x)d vol_{V/U}(x)\right)\otimes vol_V^{-1}. \end{eqnarray*} Next for any $K\in \ck(W)$ \begin{eqnarray} \left(f_{1*}(f_{2*}(\phi\otimes vol_U^{-1}))\right)(K)=\\\label{s8}
\frac{1}{k!}\frac{d^k}{d\varepsilon^k}\big|_{\varepsilon=0} \left(\int_{x\in V/U}\phi\left((K+\varepsilon S)\cap x\right) d vol_{V/U}(x)\right) \otimes vol_W^{-1}. \end{eqnarray} But since $K\subset W$ and $S\subset H$, $x\in V/U$, $$(K+\varepsilon S)\cap x=(K\cap x)+(\varepsilon S\cap x)$$ and $\varepsilon S\cap x$ is either a point or the empty set. Hence (\ref{s8}) can be continued as \begin{eqnarray*}
\frac{1}{k!}\frac{d^k}{d\varepsilon^k}\big|_{\varepsilon=0} \left(\varepsilon^k\int_{x\in W/U}\phi(K\cap x)d vol_{W/U} (x)\right)\otimes vol_W^{-1}=\\ \left(\int_{x\in W/U}\phi(K\cap x)d vol_{W/U} (x)\right)\otimes vol_W^{-1}\overset{(\ref{s7})}{=}\left((f_1\circ f_2)_*(\phi\otimes vol_U^{-1})\right)(K). \end{eqnarray*} This completes Step 2.
\underline{Step 3.} Let us consider finally the case of general surjection $f_1$ and general injection $f_2$.
We will assume again that $U$ is a subspace of $V$, and $W$ is a quotient space of $V$. Set $A:=U\cap Ker(f_1)$, $X:=V/A$. We can decompose uniquely $f_1\colon V\twoheadrightarrow W$ as a composition of two surjections $$V\overset{q}{\twoheadrightarrow} X\overset{p}{\twoheadrightarrow} W$$ where $q\colon V\twoheadrightarrow V/A=X$ is the canonical surjection. By Lemma \ref{surj_surj} \begin{eqnarray}\label{s8.01} f_{1*}=p_*\circ q_*. \end{eqnarray} Let us denote $Y:=U/A$. Let $t\colon U\twoheadrightarrow Y$ be the canonical surjection, and $j\colon Y\hookrightarrow X$ be the natural imbedding. Note that $Ker(q)=A\subset U$. Then by Step 1 and the construction of the pushforward from part (i) we get \begin{eqnarray}\label{s9} q_*\circ f_{2*}=(q\circ f_2)_*=(j\circ t)_*=j_*\circ t_*. \end{eqnarray} Clearly $p\circ j\colon Y\rightarrow W$ is injective. Hence by Step 2 \begin{eqnarray}\label{s10} p_*\circ j_*=(p\circ j)_*. \end{eqnarray} Using (\ref{s8.01}),(\ref{s9}), and (\ref{s10}) we obtain \begin{eqnarray*} f_{1*}\circ f_{2*}=p_*\circ q_*\circ f_{2*}=p_*\circ j_*\circ t_*=(p\circ j)_*\circ t_*=(f_1\circ f_2)_* \end{eqnarray*} where the last equality follows from the facts that $f_1\circ f_2=(p\circ j)\circ t$, $t$ is surjective, $p\circ j$ is injective, and from the construction of the pushforward from the proof of part (i). Lemma \ref{surj_inj} is proved. \qed
Now let us finish the proof of Proposition \ref{P:pushforward}(iii) for general $f_1,f_2$. Let us decompose $$f_1=j_1\circ p_1,\, f_2=j_2\circ p_2$$ where $j_1,j_2$ are injections, $p_1,p_2$ are surjections. By definition \begin{eqnarray}\label{s3} f_{1*}\circ f_{2*}=j_{1*}\circ p_{1*}\circ j_{2*}\circ p_{2*}. \end{eqnarray} Let us decompose $p_1\circ j_2=j_3\circ p_3$ where $j_3$ is an injection, $p_3$ is a surjection. By Lemma \ref{surj_inj}, $p_{1*}\circ j_{2*}=(p_1\circ j_2)_*=j_{3*}\circ p_{3*}$. Hence using this, (\ref{s3}), and Lemmas \ref{inj_inj}, \ref{surj_surj} we get \begin{eqnarray*} f_{1*}\circ f_{2*}=j_{1*}\circ j_{3*}\circ p_{3*} \circ p_{2*}= (j_1\circ j_3)_*\circ (p_3\circ p_2)_*=(f_1\circ f_2)_* \end{eqnarray*} where the last equality follows from the construction of the pushforward given in the proof of part (i). Proposition \ref{P:pushforward} is proved. \qed
\subsection{Relations to product and convolution.}\label{relation} We will explain the relation of pullback and pushforward to product and convolution. But first we have to recall the notion of the exterior product of smooth translation invariant valuations. Let $V,W$ be finite dimensional real vector spaces. In \cite{alesker-gafa-04} the author has defined a continuous bilinear map \begin{eqnarray}\label{r1} Val^{sm}(V)\times Val^{sm}(W)\rightarrow Val(V\times W) \end{eqnarray} called the {\itshape exterior product}. For $\phi\in Val^{sm}(V), \psi\in Val^{sm}(W)$ their exterior product is denoted by $\phi\boxtimes \psi$. The map (\ref{r1}) is uniquely characterized by the following property. Let $\phi(\bullet)=vol_V(\bullet +A),\, \psi(\bullet)=vol_W(\bullet +B)$ where $vol_V,\, vol_W$ are Lebesgue measures on $V,W$ respectively, $A\in \ck^{sm}(V),\, B\in \ck^{sm}(W)$. Then $$(\phi\boxtimes \psi)(K)=(vol_V\boxtimes vol_W)(K+(A\times B))$$ for any $K\in \ck(V\times W)$, and where $vol_V\boxtimes vol_W$ denotes the usual product measure. Note that the exterior product of two smooth valuations may not be smooth.
In Appendix of this article we show a slightly more precise statement which will be useful later for some technical reasons. Namely it is shown that the exterior product extends (uniquely) to a continuous bilinear map $$Val(V)\times Val^{sm}(W)\rightarrow Val(V\times W),$$ i.e. the first variable may be replaced by continuous valuations instead of smooth.
The following proposition is essentially the definition of the product of smooth valuations from \cite{alesker-gafa-04}. \begin{proposition}\label{P:r1} For any $\phi,\psi\in Val^{sm}(V)$ $$\phi\cdot \psi=\Delta^*(\phi\boxtimes \psi)$$ where $\Delta\colon V\hookrightarrow V\times V$ denotes the diagonal imbedding. \end{proposition}
Let us explain now the relation between the convolution and the pushforward. Clearly $Dens(V\times W)^*=Dens(V^*)\otimes Dens(W^*)$. Hence the exterior product (\ref{r1}) tensored with $Id_{Dens(V\times W)^*}$ gives a continuous bilinear map \begin{eqnarray*}\label{r2} \left(Val^{sm}(V)\otimes Dens(V^*)\right)\times\left(Val^{sm}(W)\otimes Dens(W^*)\right)\rightarrow\\ Val(V\times W)\otimes Dens(V\times W)^* \end{eqnarray*} which will also be called exterior product and denoted by $\boxtimes$. Let us denote by $a\colon V\times V\rightarrow V$ the addition map, i.e. $a(x,y)=x+y$. \begin{proposition}\label{P:r2} For any $\phi,\psi\in Val^{sm}(V)\otimes Dens(V^*)$ one has $$\phi\ast\psi=a_*(\phi\boxtimes \psi).$$ \end{proposition} {\bf Proof.} Let us fix a Lebesgue measure $vol_V$ on $V$. By continuity and the McMullen's conjecture it is enough to prove the proposition for $\phi(\bullet)=vol_V(\bullet+A),\, \psi(\bullet)=vol_V(\bullet+B)$ with $A,B\in \ck^{sm}(V)$. Then \begin{eqnarray*} (\phi\boxtimes \psi)(\bullet)=(vol_V\boxtimes vol_V)(\bullet+(A\times B))\otimes (vol_V\otimes vol_V)^{-1}. \end{eqnarray*} Next \begin{eqnarray*} (a_*(\phi\boxtimes \psi))(\bullet)=vol_V(\bullet+a(A\times B))\otimes vol_V^{-1}=vol_V(\bullet +A+B)\otimes vol_V^{-1}=(\phi\ast\psi)(\bullet). \end{eqnarray*} Q.E.D. \subsection{Homomorphism property of pushforward.}\label{Ss:homomor-of-push} The main result of this section is the following proposition. \begin{proposition}\label{P:homom-of-push} Let $p\colon X\twoheadrightarrow Y$ be a linear epimorphism of vector spaces. Then for any $\phi\in Val^{sm}(X)\otimes Dens(X^*)$ the pushforward $p_*\phi$ is smooth, i.e. $p_*\phi\in Val^{sm}(Y)\otimes Dens(Y^*)$, and $$p_*\colon Val^{sm}(X)\otimes Dens(X^*)\rightarrow Val^{sm}(Y)\otimes Dens(Y^*)$$ is a homomorphism of algebras (when both spaces are equipped with convolution). \end{proposition}
It is easy to see that $p_*\phi$ is smooth if $p$ is surjective and $\phi$ is smooth. In order to prove the second statement of the proposition we will need another proposition. \begin{proposition}\label{L:ext-push} Let \begin{eqnarray*} f_1\colon V_1\rightarrow W_1,\\ f_2\colon V_2\rightarrow W_2 \end{eqnarray*} be linear maps. Let $\phi_i\in Val(W_i)\otimes Dens(W_i^*)$, $i=1,2$. Assume that $f_1$ is surjective and $\phi_1$ is smooth. Then $$(f_1\boxtimes f_2)_*(\phi_1\boxtimes \phi_2)=f_{1*}\phi_1\boxtimes f_{2*}\phi_2.$$ \end{proposition} {\bf Proof.} Let us fix Lebesgue measures $vol_{Ker}$ on $Ker(f_1)$, $vol_{W_1}$ on $W_1$, $vol_{W_2}$ on $W_2$, $vol_{V_2}$ on $V_2$. Let $vol_{V_1}=vol_{Ker}\otimes vol_{W_1}$ be the induced Lebesgue measure on $V_1$. Observe that by the Appendix to this article, both sides of the last equality are continuous with respect to $\phi_1\in Val^{sm}(V_1)\otimes Dens(V_1^*),\phi_2\in Val(V_2)\otimes Dens(V_2^*)$.
Hence, by the McMullen's conjecture, we may assume that $$\phi_i(\bullet)=vol_{V_i}(\bullet +A_i)\otimes vol_{V_i}^{-1},\, i=1,2.$$ Then \begin{eqnarray}\label{hop1} (\phi_1\boxtimes \phi_2)(\bullet)=(vol_{V_1}\boxtimes vol_{V_2})(\bullet +(A_1\times A_2))\otimes (vol_{V_1}\boxtimes vol_{V_2})^{-1}. \end{eqnarray} Then $$ (f_{i*}\phi_i)(\bullet)=vol_{W_i}(\bullet +f_i(A_i))\otimes vol_{W_i}^{-1},i=1,2,$$ \begin{eqnarray*} (f_1\boxtimes f_2)_*(\phi_1\boxtimes\phi_2)(\bullet)= (vol_{W_1}\boxtimes vol_{W_2})(\bullet + (f_1(A_1)\times f_2(A_2)))\otimes (vol_{W_1}\boxtimes vol_{W_2})^{-1}=\\(f_{1*}\phi_1\boxtimes f_{2*}\phi_2)(\bullet). \end{eqnarray*} Proposition \ref{L:ext-push} is proved. \qed
{\bf Proof} of Proposition \ref{P:homom-of-push}. Let \begin{eqnarray*} a_X\colon X\times X\rightarrow X,\\ a_Y\colon Y\times Y\rightarrow Y \end{eqnarray*} be the addition maps. Then \begin{eqnarray*} p_*(\phi\ast\psi)=p_*(a_{X*}(\phi\boxtimes \psi))=(p\circ a_X)_*(\phi\boxtimes \psi)=\\ (a_Y\circ (p\times p))_*(\phi\boxtimes \psi)=a_{Y*}((p\times p)_*(\phi\boxtimes \psi))\overset{\mbox{Proposition } \ref{L:ext-push}}{=}\\ a_{Y*}(p_*\phi\boxtimes p_*\psi)= p_*\phi\ast p_*\psi. \end{eqnarray*} \qed
\subsection{Base change theorem.}\label{Ss:base-change} Recall that a commutative diagram of linear maps of vector spaces $$\square[A`B`C`D; f` g`u`v]$$ is called a {\itshape Cartesian square} if it is isomorphic to a diagram $$\square[Y\times_Z X`X`Y`Z; pr_X` pr_Y`u`v]$$
where $Y\times_Z X:=\{(y,x)\in Y\times X|\, v(y)=u(x)\}$ and $pr_X\colon Y\times_Z X\rightarrow X$ and $pr_Y\colon Y\times_Z X\rightarrow Y$ are the natural maps. \begin{lemma}\label{L:bch} (i) Let $$\square[\tilde X`\tilde Y`X`Y;\tilde f`\tilde g`g`f]$$ be a Cartesian square of vector spaces such that $f\oplus g\colon X\oplus \tilde Y\rightarrow Y$ is onto. Then there exists a canonical isomorphism $$\frac{Dens(\tilde X^*)}{Dens(X^*)}\tilde \rightarrow \frac{Dens(\tilde Y^*)}{Dens(Y^*)}.$$
(ii) The following transitivity property of the isomorphism from part (i) holds. Assume that we have the following commutative diagram: $$\bfig \putsquare<1`1`1`1;500`500>(0,500)[\tilde X`\tilde Y`X`Y;\tilde f_1`\tilde g`g`f_1]\putsquare<1`0`1`1;500`500>(500,500)[\phantom{\tilde Y}`\tilde Z`\phantom{Y}`Z;\tilde f_2``\hat g`f_2]\efig .$$ If the two small squares are Cartesian then the exterior contour is Cartesian. If $f_1\oplus g$ and $f_2\oplus \hat g$ are onto, then $(f_2\circ f_1)\oplus \hat g$ is onto.
Moreover in the last case the isomorphism $\frac{Dens(\tilde X)}{Dens(X)}\tilde\rightarrow \frac{Dens(\tilde Z)}{Dens(Z)}$ corresponding to the exterior contour of the diagram by the part (i) of the lemma, is equal to the composition of the isomorphisms $$\frac{Dens(\tilde X)}{Dens(X)}\tilde\rightarrow \frac{Dens(\tilde Y)}{Dens(Y)}\tilde \rightarrow \frac{Dens(\tilde Z)}{Dens(Z)}$$ corresponding to the small Cartesian squares. \end{lemma} {\bf Proof.} (i) We have the short exact sequence of vector spaces $$0\rightarrow \tilde X\overset{\tilde f\times -\tilde g}{\rightarrow}X\oplus \tilde Y\overset{f\oplus g}{\rightarrow}Y\rightarrow 0.$$ Hence $$Dens(X\oplus \tilde Y)\simeq Dens(\tilde X)\otimes Dens(Y).$$ But on the other hand $Dens(X\oplus \tilde Y)=Dens(X)\otimes Dens(\tilde Y)$. Hence $$Dens(\tilde X)\otimes Dens(Y)\simeq Dens(X)\otimes Dens(\tilde Y).$$ Dualization of this isomorphism implies part (i).
The proof of part (ii) we leave to the reader. \qed
We have the following result we call the base change property. Roughly put, it says that for a Cartesian square as above one has $$g^*\circ f_*=\tilde f_*\circ \tilde g^*.$$ \begin{theorem}[Base change theorem]\label{T:base-change} Let \begin{eqnarray}\label{ob0} \square[\tilde X`\tilde Y`X`Y;\tilde f`\tilde g`g`f] \end{eqnarray} be a Cartesian square of vector spaces such that $f\oplus g\colon X\oplus \tilde Y\rightarrow Y$ is onto. Consider the following two maps $$Val(X)\otimes Dens(\tilde X^*)\rightarrow Val(\tilde Y)\otimes Dens(\tilde Y^*),$$ the first map is given by the composition \begin{eqnarray*} Val(X)\otimes Dens(\tilde X^*)=\left(Val(X)\otimes Dens(X^*)\right)\otimes \frac{Dens(\tilde X^*)}{Dens(X^*)}\overset{f_*\otimes Id}{\rightarrow}\\\left(Val(Y)\otimes Dens(Y^*)\right)\otimes\frac{Dens(\tilde X^*)}{Dens(X^*)}=Val(Y)\otimes Dens(\tilde Y^*)\overset{g^*\otimes Id_{Dens(\tilde Y^*)}}{\rightarrow}\\Val(\tilde Y)\otimes Dens(\tilde Y^*) \end{eqnarray*} where we have used the identification $Dens(Y^*)\otimes \frac{Dens(\tilde X^*)}{Dens(X^*)}\simeq Dens(\tilde Y^*)$ from Lemma \ref{L:bch}(i); and the second map is given by the composition \begin{eqnarray*} Val(X)\otimes Dens(\tilde X^*)\overset{\tilde g^*\otimes Id_{Dens(\tilde X^*)}}{\rightarrow}Val(\tilde X)\otimes Dens(\tilde X^*)\overset{\tilde f_*}{\rightarrow}Val(\tilde Y)\otimes Dens(\tilde Y^*). \end{eqnarray*} Then these two maps coincide. \end{theorem} {\bf Proof.} We will prove this result in several steps. In the first two steps we will show that it is enough to prove the theorem under the assumption that each of $f$ and $g$ is either an injection or a surjection. Then we will prove the theorem in each of these cases.
\underline{Step 1.} Transitivity with respect to $f$.
Assume that we have a commutative diagram \begin{eqnarray*} \bfig \putsquare<1`1`1`1;500`500>(0,500)[\tilde X`\tilde Y`X`Y;\tilde f_1`\tilde g`g`f_1]\putsquare<1`0`1`1;500`500>(500,500)[\phantom{\tilde Y}`\tilde Z`\phantom{Y}`Z;\tilde f_2``\hat g`f_2]\efig \end{eqnarray*} such that $f_1\oplus g$ and $f_2\oplus \hat g$ are onto and two small squares satisfy conclusions of the theorem. Then the diagram of the exterior contour also satisfies these conclusions by Propositions \ref{P:pullback1}(iii), \ref{P:pushforward}(iii), and Lemma \ref{L:bch}(ii).
\underline{Step 2.} Transitivity with respect to $g$.
Assume that we have a commutative diagram \begin{eqnarray*} \bfig \putsquare<1`1`1`1;600`400>(0,750)[\tilde X` \tilde Y `X` Y;\tilde f`\tilde g_1`g_1`f] \putsquare<0`1`1`1;600`400>(0,350)[\phantom{X}`\phantom{Y}`\tilde Z `Z;`\tilde g_2`g_2`\hat f] \efig \end{eqnarray*} such that $f\oplus g_1$ and $\hat f\oplus g_2$ are onto, and the small squares satisfy the conclusions of the theorem. Then, as in Step 1, the diagram of the exterior contour also satisfies these conclusions by Propositions \ref{P:pullback1}(iii), \ref{P:pushforward}(iii), and Lemma \ref{L:bch}(ii).
\underline{Step 3.} Let us assume that $g$ is surjective.
Then we may and will assume that $$\tilde Y=Y\oplus L$$ and $g$ is the projection $pr_Y\colon Y\oplus L\rightarrow Y$. Hence $\tilde X=X\oplus L$, $\tilde g$ is the projection $pr_X\colon X\oplus L\rightarrow X$, and $\tilde f=f\boxtimes Id_L\colon X\oplus L\rightarrow Y\oplus L$. Thus the diagram (\ref{ob0}) becomes equal to the diagram \begin{eqnarray}\label{ob1}\square<1`1`1`1;800`400>[ X\oplus L`Y\oplus L`X`Y;f\boxtimes Id_L`pr_X`pr_Y`f].\end{eqnarray}
Observe that we have canonical isomorphisms $$\frac{D(\tilde X^*)}{D(X^*)}\simeq \frac{D(\tilde Y^*)}{D(Y^*)}\simeq D(L^*).$$
Let us denote $l:=\dim L$. Let us fix Lebesgue measures $vol_L$ on $L$ and $vol_X$ on $X$. Let us fix also $S\in \ck(L)$ such that $vol_L(S)=1$. Abusing the notation we will denote the first map in the statement of the theorem by $"pr_Y^*\circ f_*"$, and the second map by $"(f\times Id_L)_*\circ pr_X^*"$. We have to show that they coincide. We may decompose $f$ into a composition of injection and surjection and, using Step 1, prove the result separately in each case.
\underline{Case a.} Assume that $f$ is surjective.
Then $X$ may and will be assumed to be equal to $Y\oplus M$, and $f\colon Y\oplus M\rightarrow Y$ is the natural projection. Thus $\tilde X=Y\oplus M\oplus L$. Then the diagram (\ref{ob1}) becomes equal to \begin{eqnarray}\label{ob2}\square<1`1`1`1;800`400>[ Y\oplus M\oplus L`Y\oplus L`X=Y\oplus M`Y;f\boxtimes Id_L`pr_X`pr_Y`f].\end{eqnarray} Let us fix a Lebesgue measure $vol_M$ on $M$. Let $vol_Y=\frac{vol_X}{vol_M}\in Dens(Y)$. Let us denote $m:=\dim M$. Let us fix $T\in \ck(M)$ such that $vol_M(T)=1$. Let us fix $\phi\in Val(Y\oplus M)\otimes Dens((Y\oplus M\oplus L)^*)$. Finally let us fix an arbitrary subset $K\in \ck(Y\oplus L)$. Then we have \begin{eqnarray} "(pr_Y^*\circ f_*)"(\phi)(K)=("f_*"\phi)(pr_Y(K))=\\
\frac{1}{m!}\frac{d^m}{d\varepsilon^m}\big|_0\phi(pr_Y(K)\times \varepsilon T)\otimes vol_M
=\\\label{ob3}\frac{1}{m!}\frac{d^m}{d\varepsilon^m}\big|_0\phi\left(pr_X(K\times \varepsilon T)\right)\otimes vol_M.
\end{eqnarray} On the other hand \begin{eqnarray}
"(f\boxtimes Id_L)_*\circ pr_X^*"(\phi)(K)=\\\frac{1}{m!}\frac{d^m}{d\varepsilon^m}\big|_0"pr_X^*"(\phi)(K\times \varepsilon T)\otimes vol_M=\\\label{ob4}
\frac{1}{m!}\frac{d^m}{d\varepsilon^m}\big|_0\phi(pr_X(K\times \varepsilon T))\otimes vol_M. \end{eqnarray} Since (\ref{ob3})=(\ref{ob4}), Case a is proved.
\underline{Case b.} Assume that $f\colon X\rightarrow Y$ is injective.
It suffices to consider the case when $X$ is a subspace of $Y$, and $f$ is the identity imbedding. Let us fix Lebesgue measures $vol_X$ on $X$, $vol_Y$ on $Y$. Set $vol_{Y/X}:=\frac{vol_Y}{vol_X}\in Dens(Y/X)$. Then the diagram (\ref{ob1}) becomes equal to \begin{eqnarray}\label{ob5}\square<1`1`1`1;800`400>[ X\oplus L`Y\oplus L`X`Y;f\boxtimes Id_L`pr_X`pr_Y`f].\end{eqnarray} We have \begin{eqnarray} "pr^*_Y\circ f_*"(\phi)(K)=("f_*"\phi)(pr_Y(K))=\\ \int_{z\in Y/X}\phi(pr_Y(K)\cap z)dvol_{Y/X}(z)\otimes vol_{Y/X}^{-1}=\\\label{ob6} \int_{z\in Y/X}\phi(pr_Y(K\cap (z\times L)))dvol_{Y/X}(z)\otimes vol_{Y/X}^{-1}. \end{eqnarray} On the other hand we get \begin{eqnarray} "(f\boxtimes Id_L)_*\circ pr_X^*"(\phi)(K)=\\ \int_{z\in Y/X}(pr_X^*\phi)(K\cap (z\times L))dvol_{Y/X}\otimes vol_{Y/X}^{-1}=\\\label{ob7} \int_{z\in Y/X}\phi(pr_X(K\cap (z\times L)))dvol_{Y/X}\otimes vol_{Y/X}^{-1} \end{eqnarray} Comparing (\ref{ob6}) and (\ref{ob7}) and making appropriate identifications of subsets of $X$ inside $Y$, we conclude Case b. Thus Step 3 is completed.
\underline{Step 4.} Assume that $g$ is injective.
Then we may and will assume that $\tilde Y\subset Y$ and $g\colon \tilde Y\rightarrow Y$ is the identity imbedding. Let us fix Lebesgue measures $vol_{\tilde Y}$ on $\tilde Y$ and $vol_Y$ on $Y$. Set $$vol_{Y/\tilde Y}:=\frac{vol_Y}{vol_{\tilde Y}}\in Dens(Y/\tilde Y).$$ By Step 1, it suffices to prove the result in two cases: either $f$ is surjective or injective.
\underline{Case a.} Assume that $f$ is surjective.
Then we may assume that $X=Y\oplus M$ and $f=pr_Y\colon Y\oplus M\rightarrow Y$ is the natural projection. Then the diagram (\ref{ob0}) becomes equal to \begin{eqnarray}\label{ob8} \square<1`1`1`1;800`400>[ \tilde Y\oplus M`\tilde Y`Y\oplus M `Y;pr_{\tilde Y}`\tilde g=g\boxtimes Id_M`g`pr_Y]. \end{eqnarray} Let us fix a Lebesgue measure $vol_M$ on $M$, and $T\in \ck(M)$ such that $vol_M(T)=1$. Set $vol_X:=vol_Y\otimes vol_M\in Dens(Y\oplus M)=Dens(X)$. Let $\phi\in Val(Y\oplus M)\otimes Dens(\tilde Y\oplus M)$. Let us fix also $K\in \ck(\tilde Y)$.
We have \begin{eqnarray} ("g^*\circ pr_{Y*}")(\phi)(K)=("pr_{Y*}"(\phi))(K)=\\\label{ob9}
\frac{1}{m!}\frac{d^m}{d\varepsilon^m}\big|_0\phi(K\times\varepsilon T). \end{eqnarray} On the other hand we have \begin{eqnarray}
("pr_{\tilde Y*}\circ \tilde g^*")(\phi)(K)=\frac{1}{m!}\frac{d^m}{d\varepsilon^m}\big|_0("\tilde g^*"\phi)(K\times \varepsilon T)=\\\label{ob10}
\frac{1}{m!}\frac{d^m}{d\varepsilon^m}\big|_0\phi(K\times \varepsilon T). \end{eqnarray} Comparing (\ref{ob9}) and (\ref{ob10}) we conclude Case a.
\underline{Case b.} Assume that $f$ is injective.
We may and will assume that $X\subset Y$ and $f$ is the identity imbedding. Under these assumptions $\tilde X=X\cap \tilde Y$. Let us fix decompositions \begin{eqnarray*} X=\tilde X\oplus L,\\ \tilde Y=\tilde X\oplus M. \end{eqnarray*} Then $Y=\tilde X\oplus L\oplus M$ (since we assume that $f\oplus g\colon X\oplus \tilde Y\rightarrow Y$ is onto). Then the diagram (\ref{ob0}) becomes the following diagram of imbeddings \begin{eqnarray}\label{ob11} \square<1`1`1`1;1100`400>[ \tilde X`\tilde X\oplus M`\tilde X\oplus L `\tilde X\oplus L\oplus M;Id_{\tilde X}\boxtimes 0_M`Id_{\tilde X}\boxtimes 0_L`Id_{\tilde X\oplus M}\boxtimes 0_L`Id_{\tilde X\oplus L}\boxtimes 0_M]. \end{eqnarray} Let us fix Lebesgue measures $vol_{\tilde X}, vol_L,vol_M$ on $\tilde X,L,M$ respectively. Let \begin{eqnarray*} vol_X:=vol_{\tilde X}\otimes vol_L\in Dens(X),\\ vol_{\tilde Y}:=vol_{\tilde X}\otimes vol_M\in Dens(\tilde Y),\\ vol_Y:=vol_{\tilde X}\otimes vol_L\otimes vol_M\in Dens(Y). \end{eqnarray*} Fix $\phi\in Val(X)\otimes Dens(\tilde X^*)$. Fix $K\in \ck(\tilde Y)$. We have \begin{eqnarray} ("g^*\circ f_*")(\phi)(K)=("f_*"\phi)(K)=\\ \int_{x\in M}\phi(K\cap (x+(\tilde X\oplus L)))dvol_M(x)\otimes vol_M^{-1} =\\\label{ob12} \int_{x\in M}\phi((K\cap (x+\tilde X))\times \{0_L\}) \otimes vol_M^{-1}. \end{eqnarray} On the other hand we have \begin{eqnarray} ("\tilde f_*\circ \tilde g^*")(\phi)(K)=\\ \int_{x\in M} ("\tilde g^*"\phi)(K\cap (x+\tilde X))dvol_M(x)\otimes vol_M^{-1}=\\\label{ob13} \int_{x\in M}\phi((K\cap (x+\tilde X))\times \{0_L\})\otimes vol_M^{-1}. \end{eqnarray} Comparing (\ref{ob12}) and (\ref{ob13}) we conclude Case b. Thus Step 4 is proved.
\underline{Step 5.} Let us consider the general case. By Step 2 we may assume that $g$ is either injective or surjective. Now the theorem follows from Steps 3 and 4. \qed
The next result is in fact an equivalent reformulation of the base change theorem. It will be needed later. \begin{theorem}\label{T:base-change2} Let $$\square[\tilde X`\tilde Y`X`Y;\tilde f`\tilde g`g`f]$$ be a Cartesian square of vector spaces such that $f\oplus g\colon X\oplus \tilde Y\rightarrow Y$ is onto. Consider the following two maps $$Val(\tilde Y)\otimes Dens(\tilde Y^*)\rightarrow Val(X)\otimes Dens(Y^*).$$ The first map is the composition \begin{eqnarray*} Val(\tilde Y)\otimes Dens(\tilde Y^*)\overset{g_*}{\rightarrow}Val(Y)\otimes Dens(Y^*)\overset{f^*\otimes Id_{Dens(Y^*)}}{\rightarrow}Val(X)\otimes Dens(Y^*); \end{eqnarray*} and the second map is the composition \begin{eqnarray*} Val(\tilde Y)\otimes Dens(\tilde Y^*)\overset{\tilde f^*\otimes Id_{Dens(\tilde Y^*)}}{\rightarrow}\\Val(\tilde X)\otimes Dens(\tilde Y^*)=\left(Val(\tilde X)\otimes Dens(\tilde X^*)\right)\otimes \frac{Dens(\tilde Y^*)}{Dens(\tilde X^*)}\overset{\tilde g_*\otimes Id}{\rightarrow}\\\left(Val(X)\otimes Dens(X^*)\right)\otimes\frac{Dens(\tilde Y^*)}{Dens(\tilde X^*)}=Val(X)\otimes Dens(Y^*) \end{eqnarray*} where in the last equality we have used the identification from Lemma \ref{L:bch}(i).
Then these two maps coincide. \end{theorem} {\bf Proof.} This result is obtained from Theorem \ref{T:base-change} by flipping the diagram in the latter theorem with respect to the diagonal, twisting all the spaces by $\frac{Dens(\tilde Y^*)}{Dens(\tilde X^*)}$, and using the isomorphism $\frac{Dens(\tilde Y^*)}{Dens(\tilde X^*)}\otimes Dens(X^*)\simeq Dens(Y^*)$ from Lemma \ref{L:bch}(i). \qed
\section{An isomorphism of $GL(V)$-modules $Val^{-,sm}_{n-p}(V)$ and $Val_p^{-,sm}(V^*)\otimes Dens(V)$}\label{S:isomorphism-val} \setcounter{subsection}{1} \setcounter{theorem}{0}\setcounter{equation}{0} The main result of this section is the following proposition. \begin{proposition}\label{isomorphism} Let $1\leq p\leq n-1$. The $GL(V)$-modules $Val^{-,sm}_{n-p}(V)$ and $Val_p^{-,sm}(V^*)\otimes Dens(V)$ are isomorphic. \end{proposition} {\bf Proof.} \underline{Step 1.} By \cite{alesker-gafa-04} the product on valuations $$Val_{n-p}^{-,sm}(V)\times Val_p^{-,sm}(V)\rightarrow Val_n(V)=Dens(V)$$ is a perfect pairing. It follows that the induced map \begin{eqnarray}\label{1} Val_{n-p}^{-,sm}(V)\rightarrow (Val_p^{-,sm}(V))^{*,sm}\otimes Dens(V) \end{eqnarray} is an isomorphism of $GL(V)$-modules. Thus to prove the proposition we have to show that the $GL(V)$-modules $(Val_p^{-,sm}(V))^{*,sm}$ and $Val_p^{-,sm}(V^*)$ are isomorphic.
\underline{Step 2.} For a vector space $W$ let us denote by ${\cal F}_{k,k+1}(W)$ the manifold of partial flags
$${\cal F}_{k,k+1}(W):=\{(E,F)\,\,\,| \,\,\, E\in Gr_k(W),\, F\in Gr_{k+1}(W),\, E\subset F\}.$$ Let ${\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_{k,k+1}(W)\rightarrow{\cal F}_{k,k+1}(W)$ be the vector bundle such that its fiber over a pair $(E\subset F)\in {\cal F}_{k,k+1}(W)$ is equal to $E$. Similarly let $\cn_{k,k+1}(W)\rightarrow{\cal F}_{k,k+1}(W)$ be the vector bundle such that its fiber over $(E\subset F)\in {\cal F}_{k,k+1}(W)$ is equal to $F$. Thus ${\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_{k,k+1}(W)\subset \cn_{k,k+1}(W)$.
By \cite{alesker-adv-00}, $Val_k^{-,sm}(W)$ is isomorphic to an irreducible subquotient of $$C^\infty({\cal F}_{k,k+1}(W),\det {\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_{k,k+1}^*(W)\otimes or (\cn_{k,k+1}(W))).$$ Thus $Val_p^{-,sm}(V^*)$ is isomorphic to an irreducible subquotient of $$C^\infty({\cal F}_{p,p+1}(V^*), \det {\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_{p,p+1}^*(V^*)\otimes or (\cn_{p,p+1}(V^*))).$$
\underline{Step 3.} Now we will show that both $(Val_p^{-,sm}(V))^{*,sm}$ and $Val_p^{-,sm}(V^*)$ appear in the Jordan-H\"older series of the same degenerate principal series representation.
Step 2 implies that $(Val_p^{-,sm}(V))^{*,sm}$ is isomorphic to an irreducible subquotient of \begin{eqnarray}\label{2} C^\infty({\cal F}_{p,p+1}(V),\det {\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_{p,p+1}(V)\otimes or(
\cn_{p,p+1}(V))\otimes |\omega_{{\cal F}_{p,p+1}(V)}|) \end{eqnarray}
where $|\omega_X|$ denotes the line bundle of densities over a manifold $X$.
Recall that by Step 2 $Val_p^{-,sm}(V^*)$ is isomorphic to an irreducible subquotient of $$C^\infty({\cal F}_{p,p+1}(V^*), \det {\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_{p,p+1}^*(V^*)\otimes or (\cn_{p,p+1}(V^*))).$$
By taking the orthogonal complement, ${\cal F}_{p,p+1}(V^*)$ is identified with ${\cal F}_{n-p-1,n-p}(V)$. Then ${\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}^*_{p,p+1}(V^*)$ is identified with $\underline{V}/\cn_{n-p-1,n-p}(V)$, and $\cn_{p,p+1}(V^*)$ is identified with the bundle $(\underline{V}/{\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_{n-p-1,n-p}(V))^*$ where $\underline{V}={\cal F}_{n-p-1,n-p}(V)\times V$. Of course, all the identifications are $GL(V)$-equivariant. Hence \begin{eqnarray}\label{2.1} C^\infty({\cal F}_{p,p+1}(V^*), \det {\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_{p,p+1}^*(V^*)\otimes or (\cn_{p,p+1}(V^*)))=\\ \label{3} C^\infty\left({\cal F}_{n-p-1,n-p}(V),\det(\underline{V}/\cn_{n-p-1,n-p}(V)) \otimes or (\underline{V}/{\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_{n-p-1,n-p}(V))\right). \end{eqnarray}
By Corollary \ref{C:arepr2} the natural representations of $GL(V)$ in the spaces (\ref{2}) and (\ref{3}) have the same Jordan-H\"older series. Hence both $(Val_p^{-,sm}(V))^{*,sm}$ and $Val_p^{-,sm}(V^*)$ appear in the Jordan-H\"older series of (\ref{3}) which is isomorphic to (\ref{2.1}), i.e. to \begin{eqnarray}\label{4} \cx:=C^\infty({\cal F}_{p,p+1}(V^*), \det {\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_{p,p+1}^*(V^*)\otimes or (\cn_{p,p+1}(V^*))). \end{eqnarray}
\underline{Step 4.} In this last and the most technical step we will show that $(Val_p^{-,sm}(V))^{*,sm}$ cannot be isomorphic to any constituent of the Jordan-H\"older series of (\ref{4}) different from $Val_p^{-,sm}(V^*)$. This will finish the proof of the proposition. \def\texttt{g}{\texttt{g}} Recall that to any finitely generated $U(\texttt{g})$-module $M$ one can attach an algebraic subvariety of the variety of nilpotent elements of $\texttt{g}$ which is called {\itshape associated variety} or {\itshape Bernstein variety}. We refer to \cite{borho-brylinski1} for the details on this notion. It turns out that the associated variety of $(Val_p^{-,sm}(V))^{*,sm}$ is equal to the variety of complex symmetric nilpotent matrices of rank at most 1. Indeed by the Poincar\'e duality (\ref{1}) this space is isomorphic to $Val_{n-p}^{-,sm}(V)\otimes Dens(V)^*$. Clearly the associated variety of the last space coincides with that of $Val_{n-p}^{-,sm}(V)$. But by \cite{alesker-gafa-01}, Theorem 3.1, the associated variety of $Val_{n-p}^{-,sm}(V)$ is equal to the variety of complex symmetric nilpotent matrices of rank at most 1.
Let us recall a few facts about the structure of the space (\ref{4}). Let $$q\colon {\cal F}_{p,p+1}(V^*)\rightarrow Gr_{p+1}(V^*)$$ be the canonical projection. Let $\ct_{p+1}(V^*)\rightarrow Gr_{p+1}(V^*)$ be the tautological bundle, i.e. the bundle whose fiber over $F\in Gr_{p+1}(V^*)$ is equal to $F$. It is clear that $\cn_{p,p+1}(V^*)=q^*(\ct_{p+1}(V^*))$. Hence \begin{eqnarray*} \wedge^p\cn_{p,p+1}^*(V^*)=q^*(\wedge^p\ct_{p+1}^*(V^*)),\\ or(\cn_{p,p+1}(V^*))=q^*(or(\ct_{p+1}(V^*))). \end{eqnarray*} Consider the map of vector bundles $\cn_{p,p+1}^*(V^*)\rightarrow {\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_{p,p+1}^*(V^*)$ dual to the natural imbedding ${\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_{p,p+1}(V^*)\hookrightarrow \cn_{p,p+1}(V^*)$. The $p$-th exterior power of this map induces a map \begin{eqnarray*} {\cal Y}} \def\cz{{\cal Z}:=C^\infty(Gr_{p+1}(V^*),\wedge^p\ct_{p+1}^*(V^*)\otimes or(\ct_{p+1}(V^*)))\rightarrow\\ C^\infty({\cal F}_{p,p+1}(V^*),\det{\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_{p,p+1}^*(V^*)\otimes or(\cn_{p,p+1}(V^*)))=\cx. \end{eqnarray*} Clearly this map is $GL(V)$-equivariant (and in fact injective). The Casselman-Wallach theorem implies that the image of this map is a closed subspace. We will identify ${\cal Y}} \def\cz{{\cal Z}$ with its image in $\cx$ under this map. By \cite{alesker-adv-00}, $Val_p^{-,sm}(V^*)$ imbeds $GL(V)$-equivariantly as a subspace in $\cx/{\cal Y}} \def\cz{{\cal Z}$. Moreover, by \cite{alesker-gafa-01}, Section 5, $Val_p^{-,sm}(V^*)$ is the only irreducible subquotient of $\cx/{\cal Y}} \def\cz{{\cal Z}$ whose associated variety consists of complex symmetric nilpotent matrices of rank at most 1.
Hence it remains to show that $(Val_p^{-,sm}(V))^{*,sm}$ cannot be isomorphic to any of the irreducible subquotients of ${\cal Y}} \def\cz{{\cal Z}=C^\infty(Gr_{p+1}(V^*),\wedge^p\ct_{p+1}^*(V^*)\otimes or(\ct_{p+1}(V^*)))$.
Before we will treat the general case, let us observe now that if $p+1=n$ then the last statement is trivial since $(Val_p^{-,sm}(V))^{*,sm}$ is infinite dimensional while ${\cal Y}} \def\cz{{\cal Z}=\wedge^pV^*\otimes or(V^*)$ is finite dimensional (since $Gr_{p+1}(V^*)$ is just a point). Hence for $p=n-1$ Proposition \ref{isomorphism} is proved. By symmetry, replacing $V$ by $V^*$, Proposition \ref{isomorphism} follows also for $p=1$.
Let us assume now that $2\leq p\leq n-2$, hence $n\geq 4$. In this case we are going to use the Beilinson-Bernstein localization theorem. First we will have to introduce more notation and remind some constructions from \cite{alesker-gafa-01}.
Let us fix a Euclidean metric on $V^*$. Let $G_0=GL(V^*)$. Let $\texttt{g}_0=Lie(G_0)$ be the Lie algebra of $G_0$. Let $\texttt{g}:=\texttt{g}_0\otimes _\mathbb{R}\mathbb{C}$ be its complexification. Let $K_0\subset G_0$ be the subgroup of the orthogonal transformations of $V^*$. Let $K$ be the complexification of $K_0$. Let $G$ be the complexification of $G_0$. Thus $G\simeq GL_n(\mathbb{C})$, $Lie(G)=\texttt{g}$.
Let ${}\!^ {\textbf{C}} Gr_k$ denote the Grassmannian of complex $k$-dimensional subspaces of $V^*\otimes_\mathbb{R}\mathbb{C}=:{}\!^ {\textbf{C}} V$. Denote
$${}\!^ {\textbf{C}} {\cal F}_{p,p+1}:=\{(E,F)|\, E\subset F,E\in{}\!^ {\textbf{C}} Gr_p,F\in {}\!^ {\textbf{C}} Gr_{p+1}\}.$$ Let ${}\!^ {\textbf{C}} {\cal F}$ be the variety of complete flags in ${}\!^ {\textbf{C}} V$. We have the canonical projection \begin{eqnarray}\label{obe1} \bar q\colon {}\!^ {\textbf{C}} {\cal F}\rightarrow {}\!^ {\textbf{C}} {\cal F}_{p,p+1}. \end{eqnarray} It is well known that the group $K$ acts on ${}\!^ {\textbf{C}} {\cal F}$ (and hence on ${}\!^ {\textbf{C}} {\cal F}_{p,p+1},{}\!^ {\textbf{C}} Gr_p$) with finitely many orbits.
Let us fix a basis $e_1,e_2,\dots,e_n$ in $V^*$. Let $T_0\subset G_0$ be the subgroup of diagonal transformations with respect to this basis. Let $B_0\subset G_0$ be the subgroup of upper triangular transformations. Let $T$ and $B$ be the complexifications of $T_0$ and $B_0$ respectively. Thus $T\subset G$ is a Cartan subgroup, $B\subset G$ is a Borel subgroup.
Let $P_0\subset G_0$ be the subgroup of transformations preserving the flag $span_\mathbb{R}\{e_1,\dots,e_p\}\subset span_\mathbb{R}\{e_1,\dots,e_p,e_{p+1}\}$. Let $P\subset G$ be its complexification which is a parabolic subgroup of $G$. Then clearly $T_0\subset B_0\subset P_0$, $T\subset B\subset P$. \def\texttt{t}{\texttt{t}} \def\texttt{b}{\texttt{b}} \def\texttt{p}{\texttt{p}} By $\texttt{t},\texttt{b},\texttt{p}$ we will denote the Lie algebras of $T,B,P$ respectively.
Clearly in the basis $e_1,\dots,e_n$ the subgroup $T$ consists of complex diagonal invertible matrices, $B$ consists of complex upper triangular invertible matrices, and \begin{eqnarray*} P=\left\{ \left[\begin{array}{ccc}
A&*&*\\
0&b&*\\
0&0&C
\end{array}\right]\big|\,
A\in GL_p(\mathbb{C}),b\in \mathbb{C}^*,C\in
GL_{n-p-1}(\mathbb{C})\right\}. \end{eqnarray*} Let us consider the character $\chi\colon \texttt{p}/[\texttt{p},\texttt{p}]\rightarrow \mathbb{C}$ given by \begin{eqnarray}\label{obe2} \chi\left(\left[\begin{array}{ccc}
A&*&*\\
0&b&*\\
0&0&C
\end{array}\right]\right)=-Tr(A). \end{eqnarray} Let $\hat\chi\colon \texttt{b}/[\texttt{b},\texttt{b}]\rightarrow \mathbb{C}$ be the composition of $\chi$ with the canonical map $\texttt{b}/[\texttt{b},\texttt{b}]\rightarrow\texttt{p}/[\texttt{p},\texttt{p}]$. It is easy to see that \begin{eqnarray*} \hat\chi\left(\left[\begin{array}{cccccc}
x_1&*&&&&*\\
0&\ddots&*&&&\\
0&\dots&x_p&*&&\\
0&\dots&0&x_{p+1}&&\\
0&\dots&\dots&0&\ddots&*\\
0&\dots&\dots&\dots&0&x_n
\end{array}\right]\right)=-(x_1+\dots+x_p). \end{eqnarray*} \def\hat\chi{\hat\chi}
We will be interested in ${\cal D}} \def\ce{{\cal E}} \def\cf{{\cal F}_\chi$-modules on ${}\!^ {\textbf{C}} {\cal F}_{p,p+1}$ and ${\cal D}} \def\ce{{\cal E}} \def\cf{{\cal F}_{\hat\chi}$-modules on ${}\!^ {\textbf{C}} {\cal F}$. Clearly we have the pullback functor \begin{eqnarray*} \bar q^*\colon {\cal D}} \def\ce{{\cal E}} \def\cf{{\cal F}_\chi({}\!^ {\textbf{C}} {\cal F}_{p,p+1})-mod\rightarrow{\cal D}} \def\ce{{\cal E}} \def\cf{{\cal F}_{\hat\chi}({}\!^ {\textbf{C}} {\cal F})-mod. \end{eqnarray*} \begin{lemma}\label{obe3} The character $\hat\chi\colon \texttt{b}/[\texttt{b},\texttt{b}]\rightarrow\mathbb{C}$ is dominant and regular in the sense of Definition \ref{D:regular-dominant}. \end{lemma} {\bf Proof.} Let $\bar \chi\colon B_0/[B_0,B_0]\rightarrow \mathbb{C}^*$ be the character of the group defined by \begin{eqnarray*} \bar\chi\left(\left[\begin{array}{ccc}
z_1&*&*\\
0&\ddots&*\\
0&\dots&z_n
\end{array}\right]\right)=
(z_1\dots z_p)^{-1}. \end{eqnarray*} Consider the representation $Ind_{B_0}^{G_0}\bar\chi$. By Proposition \ref{P:reg-dom}, in order to prove that $\hat\chi$ is dominant and regular it is enough to show that $Ind_{B_0}^{G_0}\bar\chi$ has a non-zero finite dimensional submodule. But we have a natural non-zero map $\wedge^pV\rightarrow Ind_{B_0}^{G_0}\bar\chi$. Hence the lemma is proved. \qed
\begin{corollary}\label{obe4} The functor of global sections \begin{eqnarray*}\Gamma\colon {\cal D}} \def\ce{{\cal E}} \def\cf{{\cal F}_\chi({}\!^ {\textbf{C}} {\cal F}_{p,p+1})-mod \rightarrow \texttt{g} -mod\end{eqnarray*} is exact and faithful. \end{corollary} {\bf Proof.} Since the morphism $\bar q$ is projective and smooth $$\Gamma=\hat\Gamma\circ \bar q^*$$ where $\hat\Gamma$ is the functor of global sections on the category ${\cal D}} \def\ce{{\cal E}} \def\cf{{\cal F}_{\hat\chi}({}\!^ {\textbf{C}} {\cal F})-mod$. $\hat\Gamma$ is exact and faithful by Lemma \ref{obe3} and the Beilinson-Bernstein theorem. The functor $\bar q^*$ is exact and faithful too since $\bar q$ is a smooth morphism. Hence $\Gamma$ is also exact and faithful. \qed
Let us now remind, following \cite{alesker-gafa-01}, the construction of a $K$-equivariant ${\cal D}} \def\ce{{\cal E}} \def\cf{{\cal F}_\chi$-module ${\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}$ on ${}\!^ {\textbf{C}} {\cal F}_{p,p+1}$ such that the space of its global sections is isomorphic, as a $(\texttt{g},K)$-module, to the Harish-Chandra module of $\cx$ (defined in (\ref{4})).
Let $B$ be the complexification of the Euclidean form on $V^*$. Thus $B\colon {}\!^ {\textbf{C}} V\times{}\!^ {\textbf{C}} V\rightarrow \mathbb{C}$ is a symmetric non-degenerate bilinear form. Let $U\subset {}\!^ {\textbf{C}} {\cal F}_{p,p+1}$ denote the open $K$-orbit of ${}\!^ {\textbf{C}} {\cal F}_{p,p+1}$. Let $j\colon U\hookrightarrow {}\!^ {\textbf{C}} {\cal F}_{p,p+1}$ denote the identity imbedding. Explicitly one has \begin{eqnarray*}
U=\{(E,F)\in {}\!^ {\textbf{C}} {\cal F}_{p,p+1}|\, \mbox{the restrictions of } B \mbox{ to } E \mbox{ and to } F \mbox{ are non-degenerate}\}. \end{eqnarray*} Let us fix an element $(E_0,F_0)\in U$. The stabilizer $S\subset K$ of this element is isomorphic to the group $O(p,\mathbb{C})\times O(1,\mathbb{C})\times O(n-p-1,\mathbb{C})$. Note that $S$ is a reductive group, and hence $U=K/S$ is an affine variety.
The category of $K$-equivariant ${\cal D}} \def\ce{{\cal E}} \def\cf{{\cal F}_\chi$-modules on the orbit $U$ is equivalent to the category of representations of the group of connected components of $S$. Let ${\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_0$ be the $K$-equivariant ${\cal D}} \def\ce{{\cal E}} \def\cf{{\cal F}_\chi$-module on $U$ corresponding to the representation of $S\simeq O(p,\mathbb{C}) \times O(1,\mathbb{C})\times O(n-p-1,\mathbb{C})$ given by $$(A,B,C)\mapsto \det A\cdot \det B.$$ Let us define ${\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}:=j_*{\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_0$. It was shown in \cite{alesker-gafa-01} that the $(\texttt{g},K)$-module $\Gamma({}\!^ {\textbf{C}} {\cal F}_{p,p+1},{\cal M}} \def\cn{{\cal N}} \def\co{{\cal O})$ is isomorphic to the Harish-Chandra module of $\cx$.
Let us describe now the $K$-equivariant sub-${\cal D}} \def\ce{{\cal E}} \def\cf{{\cal F}_\chi$-module $\cn\subset{\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}$ corresponding to the Harish-Chandra module of ${\cal Y}} \def\cz{{\cal Z}$ (more precisely, $\Gamma({}\!^ {\textbf{C}} {\cal F}_{p,p+1},\cn)$ coincides with the Harish-Chandra module of ${\cal Y}} \def\cz{{\cal Z}\subset \cx$).
Let us denote by $V\subset {}\!^ {\textbf{C}} {\cal F}_{p,p+1}$ the (open) subvariety
$$V:=\{(E,F)\in {}\!^ {\textbf{C}} {\cal F}_{p,p+1}|\mbox{ s.t. } B|_F \mbox{ is non-degenerate}\}.$$ Then $U\subset V$. Let $j'\colon U\hookrightarrow V$ and $j''\colon V\hookrightarrow {}\!^ {\textbf{C}} {\cal F}_{p,p+1}$ denote the identity imbedding morphisms. Set $$\cn:=j''_*(j'_{!*}{\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_0)$$ where $j'_{!*}$ denotes the minimal (Goresky-Macpherson) extension of ${\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}$ under the open imbedding $j'$. It is easy to see that the morphism $j''\colon V\rightarrow {}\!^ {\textbf{C}} {\cal F}_{p,p+1}$ is affine. Hence the functor $j''_*$ is exact and we have $$\cn\subset j''_*(j'_{*}{\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_0)=j_*{\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_0={\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}.$$ As it was shown in \cite{alesker-gafa-01}, $\Gamma({}\!^ {\textbf{C}} {\cal F}_{p,p+1},\cn)$ coincides with the Harish-Chandra module of ${\cal Y}} \def\cz{{\cal Z}\subset \cx$. Also let us define $$\ck:=j_{!*}{\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_0.$$ Thus $\ck\subset \cn$. \begin{remark}\label{ny1} Since $Val_p^{-,sm}(V^*)\hookrightarrow \cx/{\cal Y}} \def\cz{{\cal Z}$, it corresponds to a sub-${\cal D}} \def\ce{{\cal E}} \def\cf{{\cal F}_\chi$-module of ${\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}/\cn$. Note also that $\supp ({\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}/\cn)\ne {}\!^ {\textbf{C}} {\cal F}_{p,p+1}$. \end{remark} \begin{lemma}\label{ny2} No irreducible subquotient of $\Gamma(\cn/\ck)=\Gamma(\cn)/\Gamma(\ck)$ has associated variety contained in the variety of complex symmetric nilpotent matrices of rank at most 1. \end{lemma} {\bf Proof.} Let $\cl\rightarrow {}\!^ {\textbf{C}} {\cal F}_{p,p+1}$ be the (algebraic) line bundle whose fiber over $(E,F)\in {}\!^ {\textbf{C}} {\cal F}_{p,p+1}$ is equal to $\wedge^pE^*$. Then the sheaf ${\cal D}} \def\ce{{\cal E}} \def\cf{{\cal F}_\chi$ is the sheaf of differential operators with values in $\cl$. 
Tensoring by $\cl^*$ establishes an equivalence of the categories ${\cal D}} \def\ce{{\cal E}} \def\cf{{\cal F}_\chi({}\!^ {\textbf{C}} {\cal F}_{p,p+1})-mod$ and ${\cal D}} \def\ce{{\cal E}} \def\cf{{\cal F}({}\!^ {\textbf{C}} {\cal F}_{p,p+1})-mod$ where ${\cal D}} \def\ce{{\cal E}} \def\cf{{\cal F}(X)$ denotes the sheaf of rings of usual (untwisted) differential operators on a variety $X$. Since the line bundle $\cl$ is $G$-equivariant, the analogous equivalence holds for $K$-equivariant versions of these categories. This equivalence preserves singular supports of the corresponding modules.
Let ${\cal R}$ be a ${\cal D}} \def\ce{{\cal E}} \def\cf{{\cal F}_\chi$-module. Let ${\cal R}':=\cl^*\otimes {\cal R}$ be the corresponding ${\cal D}} \def\ce{{\cal E}} \def\cf{{\cal F}$-module. The associated variety of $\Gamma({\cal R},{}\!^ {\textbf{C}} {\cal F}_{p,p+1})$ (resp. $\Gamma({\cal R}',{}\!^ {\textbf{C}} {\cal F}_{p,p+1})$) is equal to the image under the moment map of the singular support of ${\cal R}$ (resp. ${\cal R}'$) (see \cite{alesker-gafa-01} where the discussion follows \cite{borho-brylinski2}). Hence it follows that $\Gamma({\cal R},{}\!^ {\textbf{C}} {\cal F}_{p,p+1})$ and $\Gamma({\cal R}',{}\!^ {\textbf{C}} {\cal F}_{p,p+1})$ have the same associated variety.
Set ${\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_0':=\cl^*\otimes {\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_0$. Then ${\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_0'$ is a $K$-equivariant ${\cal D}} \def\ce{{\cal E}} \def\cf{{\cal F}$-module on the open orbit $U\subset {}\!^ {\textbf{C}} {\cal F}_{p,p+1}$ corresponding to the same representation of the group of connected components of the group $S\simeq O(p,\mathbb{C})\times O(1,\mathbb{C})\times O(n-p-1,\mathbb{C})$ as ${\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_0$, i.e. to $(A,B,C)\mapsto \det A \cdot \det B$.
Then \begin{eqnarray*} {\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}':=\cl^*\otimes {\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}=j_*{\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_0',\\ \cn':=\cl^*\otimes \cn=j''_*(j'_{!*}{\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_0') \end{eqnarray*} where now all the functors $j_*, j'_{!*},j''_*$ are in the category of ${\cal D}} \def\ce{{\cal E}} \def\cf{{\cal F}$-modules rather than ${\cal D}} \def\ce{{\cal E}} \def\cf{{\cal F}_\chi$-modules.
Let $f\colon {}\!^ {\textbf{C}} {\cal F}_{p,p+1}\rightarrow {}\!^ {\textbf{C}} Gr_{p+1}$ be the canonical projection $(E,F)\mapsto F$. Let us denote by $\co$ the open $K$-orbit in ${}\!^ {\textbf{C}} Gr_{p+1}$. Explicitly
$$\co=\{F\in {}\!^ {\textbf{C}} Gr_{p+1}|\, B|_F \mbox{ is non-degenerate}\}.$$ It is clear that $V=f^{-1}(\co)$.
The stabilizer $S'\subset K$ of a point from $\co$ is isomorphic to the group $O(p+1,\mathbb{C})\times O(n-p-1,\mathbb{C})$. Let us denote by ${\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}$ the ${\cal D}} \def\ce{{\cal E}} \def\cf{{\cal F}$-module on $\co$ corresponding to the representation of the group of connected components of $S'\simeq O(p+1,\mathbb{C})\times O(n-p-1,\mathbb{C})$ given by $(M,N)\mapsto \det M$.
\begin{claim}\label{ny3} The $K$-equivariant ${\cal D}} \def\ce{{\cal E}} \def\cf{{\cal F}$-modules $f^*{\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}$ and $j'_{!*}{\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_0'$ on $V=f^{-1}(\co)$ are isomorphic. \end{claim} {\bf Proof.} It is clear from the definitions of ${\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}$ and ${\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_0'$ that the restriction of $f^*{\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}$ to $U$ is isomorphic to ${\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_0'$. Hence we have a morphism of $K$-equivariant ${\cal D}} \def\ce{{\cal E}} \def\cf{{\cal F}$-modules $f^*{\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}\rightarrow j'_*{\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_0'$ which is an isomorphism over $U$. It is clear that $f^*{\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}$ is an irreducible ${\cal D}} \def\ce{{\cal E}} \def\cf{{\cal F}$-module, hence its image is equal to $j_{!*}'{\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_0'$. \qed
Let $l\colon \co\hookrightarrow {}\!^ {\textbf{C}} Gr_{p+1}$ denote the identity imbedding morphism. Since the morphism $f$ is smooth (in particular flat) by the flat base change theorem (see e.g \cite{hartshorne}, Proposition 9.3) we have \begin{eqnarray}\label{ny4} \cn':=\cl^*\otimes \cn=j''_*(j'_{!*}{\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_0')\simeq j''_*(f^*{\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C})=f^*(l_*{\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}). \end{eqnarray} Set $\ck':=\cl^*\otimes \ck=j_{!*}{\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_0'$. Since $j_{!*}=j''_{!*}\circ j'_{!*}$ we have \begin{eqnarray}\label{ny5} \ck'\simeq j''_{!*}(f^*{\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C})=f^*(l_{!*}{\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}) \end{eqnarray} where the last equality also follows from the smooth base change theorem for ${\cal D}} \def\ce{{\cal E}} \def\cf{{\cal F}$-modules \cite{??}.
Since the morphism $f$ is projective and smooth, one has $\Gamma({}\!^ {\textbf{C}} {\cal F}_{p,p+1},f^*(\bullet))=\Gamma({}\!^ {\textbf{C}} Gr_{p+1},\bullet)$. Then $\Gamma({}\!^ {\textbf{C}} {\cal F}_{p,p+1},\cn'/\ck')=\Gamma({}\!^ {\textbf{C}} {\cal F}_{p,p+1},f^*(l_*{\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}/l_{!*}{\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}))= \Gamma({}\!^ {\textbf{C}} Gr_{p+1},l_*{\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}/l_{!*}{\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C})$. Thus to finish the proof of Lemma \ref{ny2} it remains to show that the associated variety of any irreducible $(\texttt{g},K)$-subquotient of $\Gamma({}\!^ {\textbf{C}} Gr_{p+1}, l_*{\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}/l_{!*}{\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C})$ is {\itshape not} contained in the variety of complex symmetric nilpotent matrices of rank at most 1. But this statement was proved in fact in Theorem 4.3 in \cite{alesker-gafa-01}. \qed
To finish the proof of Proposition \ref{isomorphism} it remains to show that the Harish-Chandra module of $(Val_p^{-,sm}(V))^*$ cannot be isomorphic to the $(\texttt{g}, K)$-module $\Gamma({}\!^ {\textbf{C}} {\cal F}_{p,p+1},j_{!*}{\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_0)$.
Since $\bar q\colon {}\!^ {\textbf{C}} {\cal F}\rightarrow {}\!^ {\textbf{C}} {\cal F}_{p,p+1}$ is a smooth projective morphism we have \begin{eqnarray}\label{ny6} \Gamma({}\!^ {\textbf{C}} {\cal F},\bar q^*(\bullet))=\Gamma({}\!^ {\textbf{C}} {\cal F}_{p,p+1},\bullet). \end{eqnarray} Clearly $\bar q^*(j_{!*}{\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_0)$ is an irreducible ${\cal D}} \def\ce{{\cal E}} \def\cf{{\cal F}_{\hat\chi}$-module with support \begin{eqnarray}\label{ny6.1} \supp(\bar q^*(j_{!*}{\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_0))={}\!^ {\textbf{C}} {\cal F}. \end{eqnarray} It is clear from (\ref{ny6}) and from the fact that $\hat\chi$ is dominant and regular, that $\bar q^*(j_{!*}{\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}_0)$ is the Beilinson-Bernstein localization of $\Gamma({}\!^ {\textbf{C}} {\cal F}_{p,p+1},\bullet)$. Thus it suffices to prove the following claim. \begin{claim}\label{C:ny7} The support on ${}\!^ {\textbf{C}} {\cal F}$ of the Beilinson-Bernstein localization of the Harish-Chandra module of $(Val_p^{-,sm}(V))^*$ is not equal to ${}\!^ {\textbf{C}} {\cal F}$. \end{claim} {\bf Proof.} Remind that ${}\!^ {\textbf{C}} {\cal F}$ denotes the variety of complete flags in $V^*\otimes_\mathbb{R}\mathbb{C}$. By taking the orthogonal complement we can identify ${}\!^ {\textbf{C}} {\cal F}$ with the variety ${}\!^ {\textbf{C}} {\cal F}'$ of complete flags in $V\otimes_\mathbb{R}\mathbb{C}$. Also the group $G_0=GL(V^*)$ can be identified with the group $G_0'=GL(V)$ via the isomorphism $g\mapsto (g^t)^{-1}$. Let $G'$ denote the complexification of $G_0'$. Now we will work with $G_0',G'$ instead of $G_0,G$, and we will consider the Beilinson-Bernstein localization on ${}\!^ {\textbf{C}} {\cal F}'$. Let us denote similarly $\texttt{g}':=Lie(G')$, $K'\subset G'$ is the subgroup preserving the non-degenerate form on $V\otimes_\mathbb{R}\mathbb{C}$. Let us denote
$${}\!^ {\textbf{C}} {\cal F}'_{k,k+1}:=\{(E,F)|\, E\subset F, E\in {}\!^ {\textbf{C}} Gr_k(V\otimes_\mathbb{R}\mathbb{C}),F\in{}\!^ {\textbf{C}} Gr_{k+1}(V\otimes_\mathbb{R}\mathbb{C})\}.$$ Remind that we have the Poincar\'e duality isomorphism $(Val_p^{-,sm}(V))^{*,sm}\simeq Val_{n-p}^{-,sm}(V)\otimes Dens(V^*)$. Thus it suffices to prove that the support in ${}\!^ {\textbf{C}} {\cal F}'$ of the Beilinson-Bernstein localization of $Val_{n-p}^{-,sm}(V)$ is not equal to ${}\!^ {\textbf{C}} {\cal F}'$.
By Remark \ref{ny1} applied to $V$ instead of $V^*$ and $n-p$ instead of $p$, the Harish-Chandra module of $Val_{n-p}^{-,sm}(V)$ is isomorphic to the $(\texttt{g}',K')$-module of global section of certain $K'$-equivariant ${\cal D}} \def\ce{{\cal E}} \def\cf{{\cal F}_{\bar \chi}$-module $\tau$ on ${\cal F}_{n-p,n-p+1}'$ whose support is not equal to ${}\!^ {\textbf{C}} {\cal F}'_{n-p,n-p+1}$. Here we denote by $\bar \chi$ an appropriate character of a stabilizer of a Lie algebra $Q$ of a point from ${}\!^ {\textbf{C}} {\cal F}'_{n-p,n-p+1}$. Note that the lifting $\bar{\bar\chi}$ of $\bar \chi$ to the Cartan algebra $t'\subset \texttt{g}'$ is dominant and regular.
Let $g\colon {}\!^ {\textbf{C}} {\cal F}'\rightarrow {}\!^ {\textbf{C}} {\cal F}'_{n-p,n-p+1}$ denote the canonical projection. Then we have the functor $g^*\colon {\cal D}} \def\ce{{\cal E}} \def\cf{{\cal F}_{\bar\chi}({}\!^ {\textbf{C}} {\cal F}'_{n-p,n-p+1})-mod\rightarrow {\cal D}} \def\ce{{\cal E}} \def\cf{{\cal F}_{\bar{\bar\chi}}({}\!^ {\textbf{C}} {\cal F}')-mod$. Since $g$ is projective and smooth we have $$\Gamma({}\!^ {\textbf{C}} {\cal F}',g^*(\bullet))=\Gamma({}\!^ {\textbf{C}} {\cal F}'_{n-p,n-p+1},\bullet).$$ Hence the Beilinson-Bernstein localization of the Harish-Chandra module of $Val^{-,sm}_{n-p}(V)$ is isomorphic to $g^*\tau$. Since $\supp \tau\ne {}\!^ {\textbf{C}} {\cal F}'_{n-p,n-p+1}$ then $\supp g^*\tau\ne {}\!^ {\textbf{C}} {\cal F}'$. Thus Claim \ref{C:ny7} is proved. \qed
Hence Proposition \ref{isomorphism} is proved as well. \qed
\begin{remark}\label{R:mult1thm} Note that in Step 4 of the proof of Proposition \ref{isomorphism} we have actually proven the following result: for any $n$-dimensional vector space $W$ and any $p=1,\dots,n-1$ the Jordan-H\"older series of the $GL(W)$-module $C^\infty\left({\cal F}_{p,p+1}(W),\det{\cal M}} \def\cn{{\cal N}} \def\co{{\cal O}^*_{p,p+1}(W)\otimes or(\cn_{p,p+1}(W))\right)$ contains $Val^{-,sm}_{p}(W)$ with multiplicity one. This fact will be used below in the proof of Proposition \ref{P:w10}. \end{remark}
\def\underline{V}{\underline{V}} \def\ct(V){\ct(V)} \def\ct(V^*){\ct(V^*)}
\section{The two dimensional case.}\label{2-dim} The goal of this section is to construct the Fourier transform on two-dimensional spaces and to establish its homomorphism property. Let $V$ be a {\itshape two dimensional} real vector space.
\subsection{A canonical isomorphism in two dimensions.} First we are going to construct a {\itshape canonical} isomorphism \begin{eqnarray}\label{can-isomor} Val_1^{-,sm}(V)\tilde\rightarrow Val_1^{-,sm}(V^*)\otimes \det V^*. \end{eqnarray} Let $\ct(V)\rightarrow \mathbb{P}(V)$ be the tautological vector bundle, i.e. the fiber of $\ct(V)$ over $E\in \mathbb{P}(V)$ is equal to $E$. Let $\underline{V}\rightarrow \mathbb{P}(V)$ be the trivial bundle, i.e. $\underline{V}=\mathbb{P}(V)\times V$. Then we have a canonical imbedding of vector bundles $\ct(V)\hookrightarrow \underline{V}$.
We have the canonical epimorphism \begin{eqnarray}\label{2-1} C^\infty(\mathbb{P}(V),Dens\ct(V)\otimes or(\underline{V}/\ct(V)))\twoheadrightarrow Val_1^{-,sm}(V). \end{eqnarray} Recall that the kernel of this map is two dimensional irreducible $GL(V)$-module. Observe that \begin{eqnarray*} Dens \ct(V)=\ct(V)^*\otimes or(\ct(V)),\\ or(\underline{V}/\ct(V))=or(\underline{V})\otimes or(\ct(V)). \end{eqnarray*} Using these identifications we get a canonical epimorphism \begin{eqnarray}\label{2-2} C^\infty(\mathbb{P}(V),\det \ct(V)^*)\otimes or(V)\twoheadrightarrow Val_1^{-,sm}(V). \end{eqnarray} By taking the orthogonal complement, we have a natural identification $\mathbb{P}(V)=\mathbb{P}(V^*)$. Let $E\subset V$ be a line. Then $E=(V^*/E^\perp)^*=(\det V^*\otimes ( E^\perp)^*)^*.$ Hence \begin{eqnarray}\label{2-3} E= E^\perp\otimes \det V. \end{eqnarray} Similarly \begin{eqnarray}\label{2-4} or E=or E^\perp\otimes or V. \end{eqnarray} Hence \begin{eqnarray*} C^\infty(\mathbb{P}(V),\ct(V)^*)=C^\infty(\mathbb{P}(V^*),\det \ct(V^*)^*\otimes\underline{V}^*)=\\ C^\infty(\mathbb{P}(V^*),\ct(V^*)^*)\otimes\det V^*. \end{eqnarray*} Hence (\ref{2-2}) can be rewritten as \begin{eqnarray}\label{2-5} C^\infty(\mathbb{P}(V^*),\ct(V^*)^*)\otimes Dens(V)\twoheadrightarrow Val_1^{-,sm}(V). \end{eqnarray} Replacing $V$ by $V^*$ in (\ref{2-2}) we get an epimorphism \begin{eqnarray}\label{2-6} C^\infty(\mathbb{P}(V^*),\ct(V^*)^*)\otimes orV^*\twoheadrightarrow Val_1^{-,sm}(V^*). \end{eqnarray} Tensoring (\ref{2-6}) by $\det V^*$ and observing that $or V=or V^*$ we get \begin{eqnarray}\label{2-7} C^\infty(\mathbb{P}(V^*),\ct(V^*)^*)\otimes Dens (V)\twoheadrightarrow Val_1^{-,sm}(V^*)\otimes \det V^*. 
\end{eqnarray} Comparing (\ref{2-5}) and (\ref{2-7}) we conclude that there exists a unique isomorphism of $GL(V)$-modules \begin{eqnarray}\label{2-8} \tilde \mathbb{F}_V\colon Val_1^{-,sm}(V)\tilde\rightarrow Val_1^{-,sm}(V^*)\otimes \det V^*=Val_1^{-,sm}(V^*)\otimes Dens(V)\otimes or V \end{eqnarray} which makes the following diagram commutative: \def\aaa{C^\infty(\mathbb{P}(V^*),\ct(V^*)^*)\otimes Dens (V)} \defVal_1^{-,sm}(V){Val_1^{-,sm}(V)} \defVal_1^{-,sm}(V^*)\otimes \det V^*{Val_1^{-,sm}(V^*)\otimes \det V^*} \begin{eqnarray*} \Atriangle[\aaa`Val_1^{-,sm}(V)`Val_1^{-,sm}(V^*)\otimes \det V^*;``\tilde\mathbb{F}_V] \end{eqnarray*} Remind also that in the even case we have an isomorphism \begin{eqnarray}\label{2-9} \mathbb{F}_V\colon Val^{+,sm}(V)\tilde\rightarrow Val^{+,sm}(V^*)\otimes Dens(V). \end{eqnarray} Combining (\ref{2-8}) and (\ref{2-9}) together we obtain a canonical isomorphism \begin{eqnarray}\label{2-10} \bar\mathbb{F}_V\colon Val^{sm}(V)\tilde \rightarrow (Val^{+,sm}(V^*)\oplus (Val_1^{-,sm}(V^*)\otimes or V))\otimes Dens(V). \end{eqnarray}
\subsection{Construction of the convolution product on $Val(V^*)\otimes Dens(V)$, $\dim V=2$.}\label{construction} Let us denote \begin{eqnarray*} {\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}_0:=Val_2(V^*)\otimes Dens(V),\\ {\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}_2:=Dens(V),\\ {\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}_1^+:=Val_1^{+,sm}(V^*)\otimes Dens(V),\\ {\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}_1^-:=Val_1^{-,sm}(V^*)\otimes Dens(V)\otimes or V,\\ {\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}:=(Val^{+,sm}(V^*)\oplus (Val_1^{-,sm}(V^*)\otimes or V))\otimes Dens(V)={\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}_0\oplus {\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}_2\oplus {\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}_1^+\oplus {\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}_1^-. \end{eqnarray*}
Using the isomorphism $\bar \mathbb{F}_V$ we can define the product $\star$ on ${\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}$ by $$\phi\star\psi:=\bar\mathbb{F}_V(\bar\mathbb{F}_V^{-1}\phi\cdot \bar\mathbb{F}_V^{-1}\psi).$$ Then ${\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}$ becomes a commutative graded algebra such that the graded components are ${\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}_0,{\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}_1^+\oplus{\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}_1^-,{\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}_2$. Note that \begin{eqnarray}\label{2-11} {\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}_1^-\star{\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}_1^+={\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}_1^-\star{\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}_2=0,\\\label{2-11.5} {\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}^+\star{\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}_1^-\subset {\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}_1^-. \end{eqnarray} Let us denote $${\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}^+:={\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}_0\oplus{\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}_1^+\oplus {\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}_2.$$
Let us define now an algebra structure on $\cb:=Val^{sm}(V^*)\otimes Dens(V)$. First write \begin{eqnarray}\label{2-12} \cb={\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}_0\oplus({\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}_1^+\oplus ({\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}_1^-\otimes or (V)))\oplus {\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}_2. \end{eqnarray} Thus $\cb={\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}^+\oplus ({\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}_1^-\otimes or (V))$. Let us define $$\ast\colon \cb\otimes \cb\rightarrow \cb$$ as follows. First we have (using the equality $or V\otimes or V=\mathbb{C}$) $$\cb\otimes \cb=({\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}^+\otimes {\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}^+)\oplus({\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}^+\otimes {\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}_1^-\otimes or V)\oplus({\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}_1^-\otimes {\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}^+\otimes or V)\oplus({\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}_1^-\otimes {\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}_1^-).$$ We define $\ast$ to be equal to $\star$ on the first summand, to $\star\otimes id_{or V}$ on the second and the third summands, and to $-\star$ on the fourth summand (with the minus sign!).
It is easy to see that $(\cb,\ast)$ is a commutative associative graded algebra with unit (since $({\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C},\star)$ is). The decomposition of $\cb$ into the graded components is given by (\ref{2-12}).
\subsection{Explicit computation of the convolution.} In this subsection we show that in the two dimensional case the convolution $\ast$ defined in Section \ref{construction} coincides with the convolution introduced by Bernig and Fu \cite{bernig-fu}.
Let us fix a Lebesgue measure $vol$ on $V^*$. It gives an isomorphism $Dens(V)\tilde \rightarrow \mathbb{C}$. For $A\in \ck^{sm}(V^*)$ let us denote $\mu_A(K)=vol(K+A)\otimes vol^{-1}$ for any $K\in \ck(V^*)$. It is easy to see that $\mu_A\in Val^{sm}(V^*)\otimes Dens(V)$. \begin{proposition}\label{3-1} Let $V$ be a two dimensional real vector space. For the convolution product $\ast$ on $Val^{sm}(V^*)\otimes Dens(V)$ defined in Section \ref{construction} one has \begin{eqnarray}\label{3-2} \mu_A\ast\mu_B=\mu_{A+B} \end{eqnarray} for any $A,B\in\ck^{sm}(V^*)$. \end{proposition} This proposition implies that our convolution $\ast$ coincides with the Bernig-Fu convolution, and hence there is no abuse of notation. Before we prove this proposition we will prove another proposition. \begin{proposition}\label{3-3} Let us fix a Euclidean metric on $V$ and an orientation. Consider the isomorphisms $V\tilde\rightarrow V^*,\,Dens(V)\tilde\rightarrow \mathbb{C},\, or V\tilde \rightarrow\mathbb{C}$ induced by these choices. With these identifications consider $\bar\mathbb{F}_V\colon Val^{sm}(V)\tilde\rightarrow Val^{sm}(V)$. Then for any $A\in \ck^{sm}(V)$ \begin{eqnarray}\label{3-4} \bar\mathbb{F}_V(V(\bullet,A))=V(\bullet,J^{-1}A) \end{eqnarray} where $J\colon V\rightarrow V$ is the rotation by $\frac{\pi}{2}$ counterclockwise. \end{proposition} {\bf Proof.} First let us recall that for any smooth functions $h_1,h_2$ on the unit circle one can define the mixed volume $V(h_1,h_2)$ (see e.g. \cite{goodey-weil-84}) which is bilinear with respect to $h_1,h_2$. The idea is as follows. First if $h_1,h_2$ are supporting functions of convex sets $A_1,A_2$ respectively, let us define $V(h_1,h_2)$ to be equal to $V(A_1,A_2)$. Since every smooth function on the circle is a difference of supporting functions of convex bodies, let us extend this expression by bilinearity. We get a well defined notion. 
Thus we may and will identify a convex set $A$ with its supporting function. We may assume that it is smooth on $S^1=\mathbb{P}_+(V)$.
\underline{Case 1.} Let us assume first that $A$ is an even function. Then the value at $l\in \mathbb{P}(V)$ of the Klain imbedding of $V(\bullet,A)$ to $C^{+,\infty}(\mathbb{P}(V))$ is equal to $\kappa A(l^\perp)$ where $\kappa$ is a normalizing constant. By the definition of $\bar\mathbb{F}_V$ on even valuations, the value at $l\in \mathbb{P}(V)$ of the Klain imbedding of $\bar \mathbb{F}_V(V(\bullet,A))$ is equal to $\kappa A(l^\perp)=\kappa (J^{-1} A)(l)$. Thus the proposition is proved in the even case.
\underline{Case 2.} Let us assume now that $A$ is an odd function. Recall that for $K\in \ck(V)$ $$V(K,A)=\frac{1}{2} \int_{l\in \mathbb{P}_+(V)}A(l)dS_1(K,l)$$ where $S_1(K,\cdot)$ is the first area measure of $K$ (see e.g. \cite{schneider-book}, formula (5.1.18)). By the construction of $\bar\mathbb{F}_V$ on odd valuations $$\bar\mathbb{F}_V(V(\bullet,A))(K)=\frac{1}{2} \int_{l\in\mathbb{P}_+(V)}A(Jl)dS_1(K,l)=\frac{1}{2} \int_{l\in\mathbb{P}_+(V)}(J^{-1}A)(l)dS_1(K,l).$$ Thus the proposition is proved. \qed
{\bf Proof of Proposition \ref{3-1}.} Let us fix a Euclidean metric and an orientation on $V$. It is easy to check (see e.g. \cite{bernig-fu}, Corollary 1.3) that the equality (\ref{3-2}) for all $A,B\in \ck^{sm}(V^*)$ is equivalent to the following two conditions:
(a) $vol$ is the unit element with respect to $\ast$, i.e. $vol\ast x=x$ for any $x$;
(b) $V(\bullet,A)\ast V(\bullet, B)=\frac{1}{2}V(A,B)\chi(\bullet)$ for any $A,B\in \ck^{sm}(V^*)$.
The property (a) holds due to the corresponding property of the product $\star$ on ${\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}$. Let us prove (b). We will prove it in a more general form when $A$ and $B$ are smooth functions on the unit circle. Since by (\ref{2-11}) ${\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}_1^+\star{\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}_1^-=0$, the proof splits into two cases:
(a) both $A$ and $B$ are even;
(b) both $A$ and $B$ are odd.
Though the even case was considered in \cite{bernig-fu} (for all dimensions), we will prove it here in two dimensions for the sake of completeness. Thus let us first consider the case (a). Since $A,B$ are even, we have \begin{eqnarray*} V(\bullet,A)\ast V(\bullet,B)=\bar\mathbb{F}_V\left(\bar\mathbb{F}_V^{-1}(V(\bullet,A))\cdot\bar\mathbb{F}_V^{-1}(V(\bullet,B))\right)=\\ \bar\mathbb{F}_V\left(V(\bullet,J A)\cdot V(\bullet,J B)\right)\overset{\mbox{Example } \ref{E:product-2dim}}{=}\bar\mathbb{F}_V\left(\frac{1}{2}V(J A,-J B)\cdot vol(\bullet)\right)=\\ \frac{1}{2}V(A,-B)\cdot\chi(\bullet)=\frac{1}{2}V(A,B)\cdot \chi(\bullet). \end{eqnarray*} Now let us consider the case (b), i.e. $A,B$ are odd. Then $V(\bullet,A),\, V(\bullet,B)\in {\cal A}} \def\cb{{\cal B}} \def\cc{{\cal C}_1^-\otimes or V$. Hence we have by the definition of $\ast$ $$V(\bullet,A)\ast V(\bullet,B)=-\bar\mathbb{F}_V\left(\bar\mathbb{F}_V^{-1}(V(\bullet,A))\cdot\bar\mathbb{F}_V^{-1}(V(\bullet,B))\right).$$ Similarly to the previous case the last expression is equal to $$-\frac{1}{2}V(A,-B)\chi(\bullet)=\frac{1}{2}V(A,B)\chi(\bullet).$$ Proposition is proved. \qed
\defVal^{sm}{Val^{sm}} \subsection{Isomorphisms of algebras $Val^{sm}(V)$ and $Val^{sm}(V^*)\otimes Dens(V)$.}\label{two-dim-isomorphism} We are going to prove the following result. \begin{theorem}\label{T:ISO} Let $V$ be a two dimensional real vector space. There exists an isomorphism of topological vector spaces $$\mathbb{F}_V\colon Val^{sm}(V)\tilde \rightarrow Val^{sm}(V^*)\otimes Dens(V)$$ which satisfies the following properties:
(1) $\mathbb{F}_V$ commutes with the natural $GL(V)$-action;
(2) $\mathbb{F}_V$ is an isomorphism of algebras, i.e. $\mathbb{F}_V(\phi\cdot \psi)=\mathbb{F}_V(\phi)\ast \mathbb{F}_V(\psi)$ for any $\phi,\psi\in Val^{sm}(V)$;
(3) This isomorphism takes real valued valuations to real valued.
\end{theorem} {\bf Proof.}
The isomorphism on even valuations $\mathbb{F}_V\colon Val^{+,sm}(V)\tilde\rightarrow Val^{+,sm}(V^*)\otimes Dens(V)$ was defined by the author in \cite{alesker-jdg-03}. Its homomorphism property was proved by Bernig and Fu in \cite{bernig-fu}. We will extend this map to odd valuations. The construction will depend on a choice of orientation of $V$. Thus let us fix an orientation of $V$.
Consider the action of the subgroup $SL(V)\subset GL(V)$ on $Val^{-,sm}_1(V^*)\otimes Dens(V)$. Then \begin{eqnarray*} Val^{-,sm}_1(V^*)\otimes Dens(V)=W_1\oplus W_2 \end{eqnarray*} where $W_1,W_2\subset Val^{-,sm}_1(V^*)\otimes Dens(V)$ are closed $SL(V)$-invariant $SL(V)$-irreducible infinite dimensional subspaces. $W_1$ and $W_2$ are not isomorphic to each other as $SL(V)$-modules. Moreover one can choose a Cartan subalgebra and a root system of the Lie algebra $sl(V)$ (compatible in an appropriate way with the orientation of $V$) so that $W_1$ is a highest weight module, and $W_2$ is a lowest weight module. The property of being either highest or lowest module depends only on the orientation of $V$, and not on a Cartan subalgebra and a positive root system. \begin{lemma}\label{L:gl(2)} The map $$\mathbb{L}:=Id_{W_1}\oplus (-Id_{W_2})\colon Val^{-,sm}_1(V^*)\otimes Dens(V)\rightarrow Val^{-,sm}_1(V^*)\otimes Dens(V)\otimes or(V)$$ is an isomorphism of $GL(V)$-modules. \end{lemma} Here is a warning: the notation of the lemma is a bit misleading. The target space seems to be different from the source space since it is twisted by $or(V)$. The meaning is that we just consider the target space to be equal to the source space as a vector space, but the action of $GL(V)$ on the target is different: it is twisted by the sign of the determinant of a matrix.
Let us postpone the proof of this lemma and finish the proof of the theorem. Let us define $\mathbb{F}_V\colon Val^{-,sm}_1(V)\rightarrow Val^{-,sm}_1(V^*)\otimes Dens(V)$ by \begin{eqnarray*} \mathbb{F}_V:=\mathbb{L}^{-1}\circ \bar \mathbb{F}_V \mbox{ on } Val^{-,sm}_1(V)
\end{eqnarray*} where $\bar \mathbb{F}_V$ was defined in (\ref{2-10}).
In order to prove that $\mathbb{F}_V$ is an isomorphism of algebras it remains to show that for any $\phi,\psi\in Val^{-,sm}_1(V)$ one has $$\mathbb{F}_V(\phi\cdot \psi)=\mathbb{F}_V(\phi)\ast\mathbb{F}_V(\psi).$$ In the notation of Section \ref{construction} one has \begin{eqnarray*} \mathbb{F}_V(\phi\cdot \psi)=\bar\mathbb{F}_V\phi\star\bar\mathbb{F}_V\psi;\\ \mathbb{F}_V(\phi)\ast\mathbb{F}_V(\psi)=-\mathbb{F}_V(\phi)\star\mathbb{F}_V(\psi)= -\mathbb{L}^{-1}(\bar\mathbb{F}_V\phi)\star\mathbb{L}^{-1}(\bar\mathbb{F}_V\psi). \end{eqnarray*} Thus we have to show that for any $u,v\in Val^{-,sm}_1(V^*)\otimes Dens(V)$ one has \begin{eqnarray}\label{starrr} u\star v=-\mathbb{L}^{-1}(u)\star \mathbb{L}^{-1}(v). \end{eqnarray} It is clear that if $u\in W_1,v\in W_2$ then (\ref{starrr}) holds. For $u,v\in W_i$, $i=1,2$, the equality (\ref{starrr}) is equivalent to $u\star v=0$. Let us prove it for $i=1$; the case $i=2$ is considered similarly. Let us observe that $\star$ induces the $SL(V)$-equivariant map $W_1\rightarrow W_1^*\otimes Dens(V)$. Since $W_1$ is a highest weight irreducible infinite dimensional $SL(V)$-module, $W_1^*\otimes Dens(V)$ is a lowest weight irreducible infinite dimensional $SL(V)$-module. Hence they cannot be isomorphic. This proves that $\mathbb{F}_V$ is a homomorphism of algebras.
It is clear from the construction of $\mathbb{F}_V$ that $\mathbb{F}_V$ maps real valued valuations to real valued. Hence the theorem is proved modulo Lemma \ref{L:gl(2)}. \qed
{\bf Proof of Lemma \ref{L:gl(2)}.} It follows from general representation theory of the group $GL_2(\mathbb{R})$ (see e.g. \cite{jacquet-langlands}, Ch. I, Theorem 5.11(VI)) that the irreducible representations $Val^{-,sm}_1(V^*)\otimes Dens(V)$ and $Val^{-,sm}_1(V^*)\otimes Dens(V)\otimes or(V)$ of the group $GL(V)\simeq GL_2(\mathbb{R})$ are isomorphic. Let us fix such an isomorphism $\mathbb{L}'$. Since $W_1,W_2$ are irreducible non-isomorphic $SL(V)$-modules, $\mathbb{L}'$ must have the form $$\mathbb{L}'=\alpha Id_{W_1}\oplus \beta Id_{W_2}$$ where $\alpha,\beta\in \mathbb{C}^*$. Dividing by $\alpha$ we may assume that $\alpha=1$. The composition $$(\mathbb{L}'\otimes Id_{or(V)})\circ \mathbb{L}'\colon Val^{-,sm}_1(V^*)\otimes Dens(V)\rightarrow Val^{-,sm}_1(V^*)\otimes Dens(V)$$ is an automorphism of the $GL(V)$-module, hence it must have the form $\gamma (Id_{W_1}\oplus Id_{W_2})$. On the other hand this composition is equal to $Id_{W_1}\oplus \beta^2 Id_{W_2}$. Hence $\beta^2=1$, i.e. $\beta=\pm 1$. Since the identity map $Id_{W_1\oplus W_2}$ is {\itshape not} a morphism of $GL(V)$-modules $Val^{-,sm}_1(V^*)\otimes Dens(V)$ and $Val^{-,sm}_1(V^*)\otimes Dens(V)\otimes or(V)$, it implies that $\beta=-1$. The lemma is proved. Q.E.D. \subsection{Plancherel type formula in two dimensions.}\label{Ss:composition-F-2d} Let us consider the map \begin{eqnarray*} \mathbb{F}_{V^*}\otimes Id_{Dens(V)}\colon Val^{sm}(V^*)\otimes Dens(V)\tilde\rightarrow Val^{sm}(V). \end{eqnarray*} Let us denote by $\ce_V\colon Val^{sm}(V)\rightarrow Val^{sm}(V)$ the operator given by $(\ce_V\phi)(K)=\phi(-K)$ for any $\phi\in Val^{sm}(V)$, $K\in\ck(V)$. \begin{proposition}\label{P:composition-2-dim} Let $V$ be a two dimensional vector space. Then $$(\mathbb{F}_{V^*}\otimes Id_{Dens(V)})\circ \mathbb{F}_V=\ce_V.$$ \end{proposition} {\bf Proof.} Obviously, on 0- and 2-homogeneous valuations both operators are equal to the identity. 
For 1-homogeneous valuations the result follows from Proposition \ref{3-3}. \qed
\section{Fourier transform on valuations in higher dimensions.}\label{S:fourier-high-dim} The goal of this section is to construct the Fourier transform in higher dimensions and to prove its main properties.
\subsection{Construction of the Fourier transform.}\label{Ss:construction-fourier} Let $0\leq k\leq n$. Let us consider the (infinite dimensional) vector bundle $$\ct^0_{k,V}\rightarrow Gr_{n-k}(V)$$ whose fiber over $F\in Gr_{n-k}(V)$ is equal to $Val^{sm}(V/F)$. Similarly let $\ct_{k,V;i}^{0}\rightarrow Gr_{n-k}(V),\,\ct_{k,V;i}^{0, \pm}\rightarrow Gr_{n-k}(V)$ be the vector bundles whose fiber over $F\in Gr_{n-k}(V)$ is equal to $Val^{sm}_i(V/F)$ or $Val^{\pm,sm}_i(V/F)$ respectively. Let \begin{eqnarray}\label{w4}
\ct_{k,V}:=\ct^0_{k,V}\otimes |\omega_{Gr_{n-k}(V)}|,\\ \ct_{k,V;i}:=\ct_{k,V;i}^0\otimes
|\omega_{Gr_{n-k}(V)}|,\\\label{w5}
\ct^\pm_{k,V;i}:=\ct^{0,\pm}_{k,V;i}\otimes |\omega_{Gr_{n-k}(V)}|. \end{eqnarray} Also for a subspace $F\subset V$ let us denote by $p_F$ the canonical map $$p_F\colon V\rightarrow V/F.$$
Consider the natural map $$\Xi_{k,V}\colon C^\infty (Gr_{n-k}(V),\ct_{k,V;i})\rightarrow Val_i^{sm}(V)$$ defined by $\xi\mapsto \int_{F\in Gr_{n-k}(V)}p^*_F(\xi(F)).$ Note that the map $\Xi_{k,V}$ is $GL(V)$-equivariant, hence its image is indeed contained in $Val_i^{sm}(V)$. \begin{lemma}\label{L:pull-cosm} Let $f_i\colon V_i\rightarrow W_i,\, i=1,2$ be two linear maps such that $f_2$ is injective. Then for any $\phi_1\in Val(W_1),\, \phi_2\in Val^{sm}(W_2)$ the pull-back $f_2^*(\phi_2)\in Val^{sm}(V_2)$ and \begin{eqnarray}\label{E:pull-cosm} (f_1\boxtimes f_2)^*(\phi_1\boxtimes\phi_2)=f_1^*\phi_1\boxtimes f_2^*\phi_2. \end{eqnarray} \end{lemma} \begin{remark} In the statement of the lemma we use the construction of the product of a continuous valuation and a smooth one from the appendix. \end{remark} {\bf Proof} of Lemma \ref{L:pull-cosm}. It is clear that $f_2^*(\phi_2)\in Val^{sm}(V_2)$. Next both sides of (\ref{E:pull-cosm}) are continuous with respect to $\phi_1\in Val(W_1),\,\phi_2\in Val^{sm}(W_2)$. Hence by McMullen's conjecture we may assume that $$\phi_i(\bullet)=vol_i(\bullet +A_i),\, i=1,2,$$ where $vol_i$ is a Lebesgue measure on $W_i$, $A_i\in \ck^{sm}(W_i)$. Then one has \begin{eqnarray}\label{E:pc1} (\phi_1\boxtimes \phi_2)(K)=(vol_1\boxtimes vol_2)(K+(A_1\times A_2)),\\\label{E:pc2} (f_1\boxtimes f_2)^*(\phi_1\boxtimes \phi_2)(K)=(vol_1\boxtimes vol_2)\left((f_1\boxtimes f_2)(K)+(A_1\times A_2)\right). \end{eqnarray} On the other hand \begin{eqnarray*} (f_i^*\phi_i)(K)=vol_i(f_i(K)+A_i), i=1,2. \end{eqnarray*}
Note that $(f_1\boxtimes f_2)^*=(f_1\boxtimes Id)^*\circ (Id\boxtimes f_2)^*$. Let us compute first $(Id\boxtimes f_2)^*(\phi_1\boxtimes \phi_2)$. First we will identify $V_2$ with its image in $W_2$. Let us fix Lebesgue measures $vol_{V_2}$ on $V_2$ and $vol_{W_2/V_2}$ on $W_2/V_2$ such that $vol_2=vol_{V_2}\otimes vol_{W_2/V_2}$. We have \begin{eqnarray}\label{E:pc3} (f_2^*\phi_2)(\bullet)=\int_{z\in W_2/V_2} vol_{V_2}(\bullet +(A_2\cap z))dvol_{W_2/V_2}(z). \end{eqnarray}
Next for any $K\in \ck(V_1\times V_2)$ we have \begin{eqnarray} (Id\times f_2)^*(\phi_1\boxtimes \phi_2)(K)=\\\int_{z\in W_2/V_2}(vol_1\boxtimes vol_2)\left((K+(A_1\times A_2))\cap (V_1\times z)\right)dvol_{W_2/V_2}(z)=\\\label{E:pc4} \int_{z\in W_2/V_2}(vol_1\boxtimes vol_2)\left(K+A_1\times (A_2\cap z)\right)dvol_{W_2/V_2}(z). \end{eqnarray} Now observe that the map $W_2/V_2\rightarrow Val(V_2)$ defined by $$z\mapsto vol_{V_2}(\bullet +(A_2\cap z))=:\phi(z)(\bullet)$$ is a bounded map which is continuous almost everywhere (more precisely, this map is continuous in the interior of the image of $A_2$ in $W_2/V_2$). The expression (\ref{E:pc4}) is equal to \begin{eqnarray}\label{E:pc5} \int_{z\in W_2/V_2}(\phi_1\boxtimes \phi(z))(K)dvol_{W_2/V_2}(z), \end{eqnarray} and by the above-mentioned continuity we may rewrite the expression (\ref{E:pc5}) as $$\left(\phi_1\boxtimes \int_{z\in W_2/V_2}\phi(z)dvol_{W_2/V_2}(z)\right)(K).$$
By (\ref{E:pc3}) $\int_{z\in W_2/V_2}\phi(z) dvol_{W_2/V_2}(z)=f_2^*\phi_2$. Thus we have proven that $(Id\times f_2)^*(\phi_1\boxtimes \phi_2)=\phi_1\boxtimes f_2^*\phi_2$.
Now we may assume that $f_2=Id$, and it remains to show that $$(f_1\times Id)^*(\phi_1\boxtimes\phi_2)=f_1^*\phi_1\boxtimes \phi_2.$$ Let us decompose $f_1=g\circ h$ where $h$ is a surjection and $g$ is an injection. Then $(f_1\boxtimes Id)^*=(h\boxtimes Id)^*\circ (g\boxtimes Id)^*$. Since we have assumed that both $\phi_1$ and $\phi_2$ can be chosen smooth, by the case of injections proven above one has $(g\boxtimes Id)^*(\phi_1\boxtimes\phi_2)=g^*\phi_1\boxtimes \phi_2$.
Thus it remains to show that for any surjection $h\colon V_1\twoheadrightarrow W_1$, and any $A_i\in \ck^{sm}(W_i)$, $\phi_i(\bullet)=vol_i(\bullet +A_i),\, i=1,2,$ one has $$(h\boxtimes Id)^*(\phi_1\boxtimes\phi_2) =h^*\phi_1\boxtimes \phi_2.$$
Let $M:=Ker (h)$, $m:=\dim M$. We identify $W_1$ with $V_1/M$. Let us fix a Lebesgue measure $vol_M$ on $M$. Let $vol_{V_1}:=vol_M\otimes vol_1$. Let us fix $\tilde A_1\in \ck^{sm}(V_1)$ such that $h(\tilde A_1)=A_1$. Let us fix $S\in\ck(M)$ with $vol_M(S)=1$. Then by Lemma \ref{L:aLemma} $$(h^*\phi_1)(\bullet)=vol_1(h(\bullet)+A_1)=
\frac{1}{m!}\frac{d^m}{d\varepsilon^m}\big|_0 vol_{V_1}(\bullet+\tilde A_1+\varepsilon S).$$ Hence for $K\in \ck(V_1\times W_2)$ we have \begin{eqnarray}\label{E:pc10}(h^*\phi_1\boxtimes
\phi_2)(K)=\frac{1}{m!}\frac{d^m}{d\varepsilon^m}\big|_0 (vol_{V_1}\boxtimes vol_2)\left(K+((\tilde A_1+\varepsilon S)\times A_2)\right).\end{eqnarray} Again by Lemma \ref{L:aLemma} the right hand side of (\ref{E:pc10}) is equal to $$(vol_1\boxtimes vol_2)\left((h\boxtimes Id)(K)+(A_1\times A_2)\right)=(h\boxtimes Id)^*(\phi_1\boxtimes \phi_2)(K).$$ Hence $(h\boxtimes Id)^*(\phi_1\boxtimes \phi_2)=h^*\phi_1\boxtimes \phi_2$. Hence the lemma is proved. \qed
\begin{proposition}\label{exter-pull} Let $0\leq k,l\leq n$. Let \begin{eqnarray*} \phi=\Xi_{k,V}(\xi),\, \psi=\Xi_{l,W}(\eta) \end{eqnarray*} with $\xi\in C^\infty(Gr_{n-k}(V),\ct_{k,V}),\, \eta\in C^\infty(Gr_{n-l}(W),\ct_{l,W})$.
Then \begin{eqnarray}\label{w1} \phi\boxtimes\psi=\int_{F\in Gr_{n-k}(V)}\int_{E\in Gr_{n-l}(W)}(p_F\boxtimes p_E)^*(\xi(F)\boxtimes \eta(E)) \end{eqnarray} where $p_F\boxtimes p_E\colon V\times W\rightarrow V/F\times W/E$. \end{proposition} \begin{remark} In (\ref{w1}) $\xi(F)\boxtimes \eta(E)$ is considered as an element of $Val(V/F\times W/E)\otimes
|\omega_{Gr_{n-k}(V)}|\big|_F\otimes |\omega_{Gr_{n-l}(V)}|\big|_E$. \end{remark} {\bf Proof} of Proposition \ref{exter-pull}. We have \begin{eqnarray*} \phi\boxtimes \psi=\int_{F\in Gr_{n-k}(V)}p_F^*(\xi(F))\boxtimes\psi\overset{\mbox{Lemma }\ref{L:pull-cosm}}{=}\\ \int_{F\in Gr_{n-k}(V)}(p_F\boxtimes Id_W)^*\left(\xi(F)\boxtimes \psi\right)\overset{\mbox{Lemma } \ref{L:pull-cosm}}{=}\\ \int_{F\in Gr_{n-k}(V)}(p_F\boxtimes Id_W)^*\int_{E\in Gr_{n-l}(W)}(Id_F\boxtimes p_E)^*\left(\xi(F)\boxtimes \eta(E)\right)=\\ \int_{F\in Gr_{n-k}(V)}\int_{E\in Gr_{n-l}(W)}(p_F\boxtimes p_E)^*\left(\xi(F)\boxtimes \eta(E)\right). \end{eqnarray*} Proposition \ref{exter-pull} is proved. \qed
\begin{proposition}\label{P:w5} For any $0\leq k\leq n$ \begin{eqnarray*} \Xi_{k,V}\left(C^\infty(Gr_{n-k}(V),\ct^+_{k,V;k})\right)=Val^{+,sm}_k(V),\\ \Xi_{k+1,V}\left(C^\infty(Gr_{n-k-1}(V),\ct^-_{k+1,V;k})\right)=Val^{-,sm}_k(V). \end{eqnarray*} \end{proposition} {\bf Proof.} The map $\Xi_{k,V}$ is $GL(V)$-equivariant non-zero map. Hence the result follows from the Irreducibility theorem and the Casselman-Wallach theorem. \qed
Let us recall the construction of an isomorphism \begin{eqnarray*} \mathbb{F}_V\colon Val_k^{+,sm}(V)\tilde\rightarrow Val_{n-k}^{+,sm}(V^*)\otimes Dens(V) \end{eqnarray*} on even valuations from \cite{alesker-jdg-03} (where it was denoted by $\mathbb{D}$). First let us do it for $k=n$. Then $$Val^+_n(V)=Val_n(V)=Dens(V).$$ On the other hand $$Val_0^{+,sm}(V^*)\otimes Dens(V)=Val_0(V^*)\otimes Dens(V)=\mathbb{C}\otimes Dens(V)=Dens(V).$$ Take $$\mathbb{F}_V\colon Val_n(V)\tilde\rightarrow Val_0(V^*)\otimes Dens(V)$$ to be the identity isomorphism.
Let us consider the case $k<n$. Let $F\in Gr_{n-k}(V)$. We have \begin{eqnarray}\label{w6} \mathbb{F}_{V/F}\colon Val_k^{sm}(V/F)\tilde\rightarrow Val_0^{sm}(F^\perp)\otimes Dens(F^{\perp *}). \end{eqnarray} Let $p_F^\vee\colon F^\perp\hookrightarrow V^*$ be the map dual to the projection $p_F\colon V\rightarrow V/F$. Let us consider the map \begin{eqnarray}\label{w7} S^+_k\colon C^\infty(Gr_{n-k}(V),\ct^+_{k,V;k})\rightarrow Val_{n-k}^{+,sm}(V^*)\otimes Dens(V) \end{eqnarray} given by \begin{eqnarray*} S^+_k(\xi)=\int_{F\in Gr_{n-k}(V)}p^\vee_{F*} (\mathbb{F}_{V/F}(\xi(F))). \end{eqnarray*} \begin{theorem}[\cite{alesker-jdg-03}]\label{T:w8} There exists a unique map $$\mathbb{F}_V\colon Val_k^{+,sm}(V)\rightarrow Val_{n-k}^{+,sm}(V^*)\otimes Dens(V)$$ which makes the following diagram commutative \defC^\infty(Gr_{n-k}(V),\ct^+_{k,V;k}){C^\infty(Gr_{n-k}(V),\ct^+_{k,V;k})} \defVal^{+,sm}_k(V){Val^{+,sm}_k(V)} \defVal_{n-k}^{+,sm}(V^*)\otimes Dens(V){Val_{n-k}^{+,sm}(V^*)\otimes Dens(V)} \def\Xi_{k,V}{\Xi_{k,V}} \defS^+_k{S^+_k} \def\FF_V{\mathbb{F}_V} $$\Vtriangle[C^\infty(Gr_{n-k}(V),\ct^+_{k,V;k})`Val^{+,sm}_k(V)`Val_{n-k}^{+,sm}(V^*)\otimes Dens(V);\Xi_{k,V}`S^+_k`\FF_V].$$ This map $\mathbb{F}_V$ is a $GL(V)$-equivariant isomorphism of topological vector spaces. \end{theorem} Let us construct $\mathbb{F}_V$ on {\itshape odd} $k$-homogeneous valuations. Thus let $1\leq k\leq n-1$.
First let us consider the case $k=1$. We have $$\Xi_{2,V}\colon C^\infty(Gr_{n-2}(V),\ct^-_{2,V;1})\twoheadrightarrow Val_1^{-,sm}(V).$$ Consider the map \begin{eqnarray*} S^-_2\colon C^\infty(Gr_{n-2}(V),\ct^-_{2,V;1})\rightarrow Val^{-,sm}_{n-1}(V^*)\otimes Dens(V) \end{eqnarray*} given by $$S^-_2(\xi)=\int_{F\in Gr_{n-2}(V)}p^\vee_{F*}(\mathbb{F}_{V/F}(\xi(F)))$$ where $\mathbb{F}_{V/F}\colon Val_1^{-,sm}(V/F)\tilde\rightarrow Val_1^{-,sm}(F^\perp)\otimes Dens(F^{\perp *})$ is the Fourier transform defined in the two dimensional case by Theorem \ref{T:ISO}.
\begin{proposition}\label{P:w9} There exists a unique map $$\mathbb{F}_V\colon Val_1^{-,sm}(V)\rightarrow Val_{n-1}^{-,sm}(V^*)\otimes Dens(V)$$ making the following diagram commutative $$\Vtriangle[C^\infty (Gr_{n-2}(V),\ct^{-}_{2,V;1})`Val^{-,sm}_1(V)` Val^{-,sm}_{n-1}(V^*)\otimes Dens(V);\Xi_{2,V}`S_2^-`\mathbb{F}_V].$$ This map $\mathbb{F}_V$ is a $GL(V)$-equivariant isomorphism of topological vector spaces. \end{proposition} Before we prove this proposition let us prove the following result. \begin{proposition}\label{P:w10} Let $1\leq k\leq n-1$. The $GL(V)$-module $C^\infty(Gr_{n-k-1}(V),\ct^-_{k+1,V;k})$ is admissible of finite length, and the Jordan-H\"older series of it contains $Val_k^{-,sm}(V)$ with multiplicity one. \end{proposition} {\bf Proof.} Let us fix an isomorphism $V\simeq \mathbb{R}^n$. Let $Q_0$ be the subgroup of $GL_n(\mathbb{R})$ consisting of matrices \begin{eqnarray*}
Q_0=\left\{\left[\begin{array}{c|c|c}
A&*&*\\\hline
0&B&*\\\hline
0&0&c
\end{array}\right]\big| A\in GL_{n-k-1}(\mathbb{R}),B\in
GL_k(\mathbb{R}),c\in \mathbb{R}^*\right\}.
Let $p\colon {\cal F}_{n-k-1,n-1}(\mathbb{R}^n)\rightarrow Gr_{n-k-1}(\mathbb{R}^n)$ be the natural map, i.e. $p(F,E)=F$. For any $F\in Gr_{n-k-1}(\mathbb{R}^n)$ the space $Val_k^{-,sm}(\mathbb{R}^n/F)$ is canonically a quotient of $C^\infty(p^{-1}(F),{\cal V}} \def\cw{{\cal W}} \def\cx{{\cal X})$. Hence the $GL_n(\mathbb{R})$-module $C^\infty(Gr_{n-k-1}(V),\ct^-_{k+1,V;k})$ is a quotient of the
$GL_n(\mathbb{R})$-module $C^\infty(GL_n(\mathbb{R})/Q_0,{\cal V}} \def\cw{{\cal W}} \def\cx{{\cal X}\otimes p^*(|\omega_{Gr_{n-k}(V)}|))$.
Let now $P_0\subset GL_n(\mathbb{R})$ be the parabolic subgroup of matrices
$$P_0=\left\{\left[\begin{array}{cc|c}
B&*&*\\
0&c&*\\\hline
0&0&A
\end{array}\right]\big| A\in GL_{n-k-1}(\mathbb{R}),B\in
GL_k(\mathbb{R}),c\in \mathbb{R}^*\right\}.$$
Recall that $Val^{-,sm}_k(\mathbb{R}^n)$ is a subquotient of $Ind^{GL_n(\mathbb{R})}_{P_0}\xi$ where $\xi\colon P_0\rightarrow \mathbb{C}$ is the character given by
$$\xi\left(\left[\begin{array}{cc|c}
B&*&*\\
0&c&*\\\hline
0&0&A
\end{array}\right]\right)=sgn (c)\cdot |\det B|^{-1}.$$ Moreover the Jordan-H\"older series of $Ind^{GL_n(\mathbb{R})}_{P_0}\xi$ contains $Val^{-,sm}_k(\mathbb{R}^n)$ with multiplicity one by Remark \ref{R:mult1thm}. The $GL(V)$-modules
$C^\infty(GL_n(\mathbb{R})/Q_0,{\cal V}} \def\cw{{\cal W}} \def\cx{{\cal X}\otimes p^*(|\omega_{Gr_{n-k}(V)}|))$ and $Ind^{GL_n(\mathbb{R})}_{P_0}\xi$ have the same Jordan-H\"older series by Corollary \ref{C:arepr}. Hence Proposition \ref{P:w10} is proved. \qed
{\bf Proof} of Proposition \ref{P:w9}. It is clear that the map $$S^-_2\colon C^\infty(Gr_{n-2},\ct^-_{2,V;1})\rightarrow Val^{-,sm}_{n-1}(V^*)\otimes Dens(V)$$ is non-vanishing and $GL(V)$-equivariant. By Proposition \ref{isomorphism} the $GL(V)$-modules $Val^{-,sm}_1(V)$ and $Val^{-,sm}_{n-1}(V^*)\otimes Dens(V)$ are isomorphic. Hence by Proposition \ref{P:w10} all the irreducible subquotients of $C^\infty(Gr_{n-2},\ct^-_{2,V;1})$ which are non-isomorphic to $Val_1^{-,sm}(V)$, are contained in $Ker(S^-_2)$. Hence $\mathbb{F}_V$ does exist indeed. Moreover $\mathbb{F}_V$ is continuous by the open mapping theorem for Fr\'echet spaces (see e.g. \cite{schaefer}, Ch. III, \S 2). The rest of the statements of the theorem follow from the Irreducibility theorem and the Casselman-Wallach theorem. \qed
Now let us consider the case $k=n-1$. By Proposition \ref{P:w9} we have the isomorphism $$\mathbb{F}_{V^*}\colon Val_1^{-,sm}(V^*)\tilde\rightarrow Val_{n-1}^{-,sm}(V)\otimes Dens(V^*).$$ Define $$\mathbb{F}_V\colon Val_{n-1}^{-,sm}(V)\rightarrow Val_1^{-,sm}(V^*)\otimes Dens(V)$$ by \begin{eqnarray}\label{D:w11} \mathbb{F}_V:=\ce_{V^*}\circ (\mathbb{F}_{V^*}^{-1}\otimes Id_{Dens(V)}) \end{eqnarray} where $(\ce_{V^*}\phi)(K)=\phi(-K)$ (thus $\ce_{V^*}$ is just multiplication by -1 on $Val^{-,sm}(V^*)\otimes Dens(V)$). \begin{remark} For $n=2,k=1$ both definitions of $\mathbb{F}_V\colon Val_1^{-,sm}(V)\tilde\rightarrow Val_1^{-,sm}(V^*)\otimes Dens(V)$ coincide with the construction discussed in Section \ref{two-dim-isomorphism} due to Proposition \ref{P:composition-2-dim}. \end{remark} Now let us consider the general case $1\leq k\leq n-1$. We have the epimorphism $$\Xi_{k+1,V}\colon C^\infty(Gr_{n-k-1}(V),\ct^-_{k+1,V;k})\rightarrow Val_k^{-,sm}(V).$$ Consider the map $$S^-_{k+1}\colon C^\infty(Gr_{n-k-1}(V),\ct^-_{k+1,V;k})\rightarrow Val_{n-k}^{-,sm}(V^*)\otimes Dens(V)$$ defined by $S^-_{k+1}(\xi)=\int_{F\in Gr_{n-k-1}(V)}p^\vee_{F*}(\mathbb{F}_{V/F}(\xi))$ where $\mathbb{F}_{V/F}$ in the right hand side is defined by (\ref{D:w11}). \begin{theorem}\label{T:w12} Let $1\leq k\leq n-1$. There exists a unique map $\mathbb{F}_V\colon Val_k^{-,sm}(V)\rightarrow Val_{n-k}^{-,sm}(V^*)\otimes Dens(V)$ making the following diagram commutative $$\Vtriangle[C^\infty(Gr_{n-k-1}(V),\ct^-_{k+1,V;k})`Val_k^{-,sm}(V)` Val_{n-k}^{-,sm}(V^*)\otimes Dens(V);\Xi_{k+1,V}`S^-_{k+1}`\mathbb{F}_V].$$ Thus map $\mathbb{F}_V$ is a $GL(V)$-equivariant isomorphism of linear topological spaces. \end{theorem} \begin{remark} It is clear that the construction of $\mathbb{F}_V$ given in Theorem \ref{T:w12} coincides with the previous constructions for $k=1,n-1$. \end{remark} {\bf Proof} of Theorem \ref{T:w12}. 
It is clear that the map $$S^-_{k+1}\colon C^\infty(Gr_{n-k-1},\ct^-_{k+1,V,k})\rightarrow Val^{-,sm}_k(V^*)\otimes Dens(V)$$ is non-vanishing and $GL(V)$-equivariant. By Proposition \ref{isomorphism} the $GL(V)$-modules $Val^{-,sm}_k(V)$ and $Val^{-,sm}_{n-k}(V^*)\otimes Dens(V)$ are isomorphic. Hence by Proposition \ref{P:w10} all the irreducible subquotients of $C^\infty(Gr_{n-k-1},\ct^-_{k+1,V;k})$ which are non-isomorphic to $Val_k^{-,sm}(V)$, are contained in $Ker(S^-_{k+1})$. Hence $\mathbb{F}_V$ does exist indeed. Moreover $\mathbb{F}_V$ is continuous by the open mapping theorem for Fr\'echet spaces (see e.g. \cite{schaefer}, Ch. III, \S 2). The rest of the theorem follow from the Irreducibility theorem and the Casselman-Wallach theorem. \qed
\subsection{Relations of the Fourier transform to the pullback and pushforward.}
\begin{theorem}\label{T:z3} Let $i\colon L\hookrightarrow V$ be an injection of linear spaces. Let $\phi\in Val^{sm}(V)$. Then $i^*\phi\in Val^{sm}(L)$, and \begin{eqnarray}\label{z4} \mathbb{F}_L(i^*\phi)=i^\vee_*(\mathbb{F}_V\phi). \end{eqnarray} \end{theorem} {\bf Proof.} It is clear that $i^*\phi\in Val^{sm}(L)$. Let us prove the equality (\ref{z4}). Obviously this equality is true if $i$ is an isomorphism.
\underline{Case 1.} Assume that $\phi\in Val^{+,sm}_k(V)$. If $\dim L<k$ then both sides of (\ref{z4}) vanish.
\underline{Case 1a.} Let us assume in addition that $\dim L=k$. By Proposition \ref{P:w5} there exists $\xi\in C^\infty(Gr_{n-k}(V),\ct^+_{k,V;k})$ such that $$\phi=\Xi_{k,V}(\xi).$$ More explicitly $$\phi=\int_{F\in Gr_{n-k}(V)}p^*_{F}(\xi(F)).$$ Then \begin{eqnarray*} i^*\phi=\int_{F\in Gr_{n-k}(V)}(p_F\circ i)^*(\xi(F))=\int_{F\in Gr_{n-k}(V),F\cap L=\{0\}}(p_F\circ i)^*(\xi(F)). \end{eqnarray*} Now observe that for any $F\in Gr_{n-k}(V)$ such that $F\cap L=\{0\}$ the map $p_F\circ i\colon L\rightarrow V/F$ is an isomorphism. Hence \begin{eqnarray*} \mathbb{F}_L(i^*\phi)=\int_{F\in Gr_{n-k}(V),F\cap L=\{0\}}\mathbb{F}_L((p_F\circ i)^*(\xi(F)))=\\\int_{F\in Gr_{n-k}(V),F\cap L=\{0\}}(i^\vee_*\circ p^\vee_{F*})(\mathbb{F}_{V/F}(\xi(F)))=\\ i^\vee_*\left(\int_{F\in Gr_{n-k}(V),F\cap L=\{0\}}p_{F*}^\vee(\mathbb{F}_{V/F}(\xi(F)))\right)=\\i^\vee_*\left(\int_{F\in Gr_{n-k}(V)} p_{F*}^\vee(\mathbb{F}_{V/F}(\xi(F)))\right)=i^\vee_*(\mathbb{F}_V\phi) \end{eqnarray*} where the last equality holds by the definition of $\mathbb{F}_V$. Case 1a is proved.
\underline{Case 1b.} Let us assume that $\dim L>k$. Let $E\subset L$ be an arbitrary $k$-dimensional subspace of $L$, and let $j\colon E\hookrightarrow L$ denote the imbedding map. Using Case 1a we have \begin{eqnarray*} \mathbb{F}_E(j^*(i^*\phi))=\mathbb{F}_E((ij)^*\phi)= (ij)^\vee_*(\mathbb{F}_V\phi)=j^\vee_*(i^\vee_*(\mathbb{F}_V\phi)). \end{eqnarray*} On the other hand using again Case 1a, we have \begin{eqnarray*} \mathbb{F}_E(j^*(i^*\phi))=j^\vee_*(\mathbb{F}_L(i^*\phi)). \end{eqnarray*} Thus we get $$j^\vee_*(i^\vee_*(\mathbb{F}_V\phi))=j^\vee_*(\mathbb{F}_L(i^*\phi)).$$ Thus Theorem \ref{T:z3} in the even case follows from the following lemma. \begin{lemma}\label{L:z5} Let $L$ be an $l$-dimensional vector space. Let $0\leq k<l$. Let $\phi\in Val^+_{l-k}(L)\otimes Dens(L^*)$. Assume that for any surjection $p\colon L\twoheadrightarrow K$ of rank $k$ $$p_*\psi=0.$$ Then $\psi=0$. \end{lemma} {\bf Proof.} Let ${\cal V}} \def\cw{{\cal W}} \def\cx{{\cal X}$ be the subspace of $Val^+_{l-k}(L)$ consisting of $\psi$ such that $p_*\psi=0$ for any surjection $p$ of rank $k$. It is easy to see that ${\cal V}} \def\cw{{\cal W}} \def\cx{{\cal X}$ is $GL(V)$-invariant closed subspace, ${\cal V}} \def\cw{{\cal W}} \def\cx{{\cal X}\ne Val^+_{l-k}(L)$. Hence by the Irreducibility theorem ${\cal V}} \def\cw{{\cal W}} \def\cx{{\cal X}=0$. \qed
Theorem \ref{T:z3} is proved in the even case.
\underline{Case 2.} Assume that $\phi\in Val_k^{-,sm}(V)$. The proof of this case will be similar to the proof of Case 1. First observe that if $\dim L\leq k$ then both sides of (\ref{z4}) vanish.
\underline{Case 2a.} Let us assume in addition that $\dim L=k+1$. By Proposition \ref{P:w5} there exists $\eta\in C^\infty(Gr_{n-k-1}(V),\ct^-_{k+1,V;k})$ such that $$\phi=\Xi_{k+1,V}(\eta).$$ More explicitly $$\phi=\int_{F\in Gr_{n-k-1(V)}}p_F^*(\eta(F)).$$ Analogously to Case 1a we have \begin{eqnarray*} i^*\phi=\int_{F\in Gr_{n-k-1}(V),F\cap L=\{0\}}(p_F\circ i)^*(\eta(F)). \end{eqnarray*} For any $F\in Gr_{n-k-1}(V)$ such that $F\cap L=\{0\}$ the map $p_F\circ i\colon L\rightarrow V/F$ is an isomorphism. Hence \begin{eqnarray*} \mathbb{F}_L(i^*\phi)=\int_{F\in Gr_{n-k-1}(V),F\cap L=\{0\}}\mathbb{F}_L\left((p_F\circ i)^*(\eta(F))\right)=\\ \int_{F\in Gr_{n-k-1}(V),F\cap L=\{0\}}(p_F\circ i)^\vee_*\left(\mathbb{F}_{V/F}(\eta(F))\right)=\\i^\vee_*\left(\int_{F\in Gr_{n-k-1}(V)}p^\vee_{F*}(\mathbb{F}_{V/F}(\eta(F))\right)=i^\vee_*(\mathbb{F}_V\phi) \end{eqnarray*} where the last equality holds by the definition of $\mathbb{F}_V$.
\underline{Case 2b.} Let us assume now that $\dim L>k+1$. Let $E\subset L$ be an arbitrary subspace of dimension $k+1$, and let $j\colon E\hookrightarrow L$ be the imbedding map. By Case 2a we have \begin{eqnarray*} \mathbb{F}_E(j^*(i^*\phi))=\mathbb{F}_E((ij)^*\phi)=\\ (ij)^\vee_*(\mathbb{F}_V\phi)=j^\vee_*(i^\vee_*(\mathbb{F}_V\phi)). \end{eqnarray*} Using again Case 2a we have \begin{eqnarray*} \mathbb{F}_E(j^*(i^*\phi))=j^\vee_*(\mathbb{F}_L(i^*\phi)). \end{eqnarray*} Thus we obtain $$j^\vee_*(i^\vee_*(\mathbb{F}_V\phi))=j^\vee_*(\mathbb{F}_L(i^*\phi)).$$ Then the proof of Case 2 follows immediately from the following lemma. \begin{lemma}\label{L:z6} Let $L$ be an $l$-dimensional vector space. Let $1\leq k\leq l-1$. Let $\psi\in Val^-_{l-k}(L)\otimes Dens(L^*)$. Assume that for any surjection $p\colon L\twoheadrightarrow N$ of rank $k+1$ $$p_*\psi=0.$$ Then $\psi=0$. \end{lemma} {\bf Proof.} The proof is completely analogous to the proof of Lemma \ref{L:z5} and will be omitted. \qed
Thus Theorem \ref{T:z3} is proved. \qed
\begin{theorem}\label{T:z1} Let $\xi\in C^\infty(Gr_{n-k}(V),\ct_{k,V})$. Then \begin{eqnarray}\label{z2} \mathbb{F}_V(\Xi_{k,V}(\xi))=\int_{F\in Gr_{n-k}(V)}p^\vee_{F*}(\mathbb{F}_{V/F}(\xi(F))). \end{eqnarray} \end{theorem} {\bf Proof.} We may and will assume that $\xi\in C^\infty(Gr_{n-k}(V),\ct^\pm_{k,V;i})$.
\underline{Case 1.} Let us assume that $\xi\in C^\infty(Gr_{n-k}(V),\ct^+_{k,V;i})$. If $k<i$ then both sides of (\ref{z2}) vanish. If $k=i$ then (\ref{z2}) is just the definition of $\mathbb{F}_V$. Thus let us assume that $k>i$. \def\cf_{n-k,n-i}(V){{\cal F}_{n-k,n-i}(V)} Let us denote by ${\cal F}_{n-k,n-i}(V)$ the manifold of partial flags
$${\cal F}_{n-k,n-i}(V):=\{(F,E)|\, F\subset E,\dim F=n-k,\dim E=n-i\}.$$ Let \begin{eqnarray*} p_{n-k}\colon \cf_{n-k,n-i}(V)\rightarrow Gr_{n-k}(V),\\ p_{n-i}\colon \cf_{n-k,n-i}(V)\rightarrow Gr_{n-i}(V) \end{eqnarray*} be the natural projections defined by $p_{n-k}((F,E))=F,\, p_{n-i}((F,E))=E$. Recall that we denote by $\ct^{0,+}_{k,V;i}$ the vector bundle over $Gr_{n-k}(V)$ whose fiber over $F\in Gr_{n-k}(V)$ is equal to $Val_i^{+,sm}(V/F)$, and by
$$\ct^+_{k,V;i}:=\ct^{0,+}_{k,V;i}\otimes |\omega_{Gr_{n-k}(V)}|.$$
\tilde\ct^0_{i,V;i}:=\tilde\ct^{0,+}_{i,V;i}\otimes |\omega_{{\cal F}_{n-k,n-i}(V)}|. \end{eqnarray*} We have the canonical map \begin{eqnarray}\label{z7} \Psi^+_{k,i}\colon C^\infty({\cal F}_{n-k,n-i}(V),\tilde\ct^+_{i,V;i})\rightarrow C^\infty(Gr_{n-k}(V),\ct^+_{k,V;i}) \end{eqnarray} given by $$(\Psi^+_{k,i}(\zeta))(F)=\int_{L\in Gr_{n-i}(V),L\supset F} q^*_{L,F}(\zeta(L,F))$$ where $q_{L,F}\colon V/F\rightarrow V/L$ is the canonical map. It is clear that $\Psi^+_{k,i}$ is $GL(V)$-equivariant continuous map. \begin{claim}\label{C:z8} $\Psi^+_{k,i}$ is onto. \end{claim} {\bf Proof.} This claim easily follows from Propositions \ref{P:w5} and \ref{part1-epi}. \qed
We have \begin{eqnarray*} \Xi_{k,V}(\Psi^+_{k,i}(\zeta))=\int_{F\in Gr_{n-k}(V)}p_F^*\left(\int_{L\in Gr_{n-i}(V),L\supset F}q^*_{L,F}(\zeta(L,F))\right)=\\ \int_{F\in Gr_{n-k}(V)}\left(\int_{L\in Gr_{n-i}(V),L\supset F}p_L^*(\zeta(L,F))\right)=\int_{L\in Gr_{n-i}(V)}p_L^*\left(\int_{F\in Gr_{n-k}(L)}\zeta(L,F)\right)=\\ \int_{L\in Gr_{n-i}(V)}p_L^*(\eta(L)) \end{eqnarray*} where $\eta(L):=\int_{F\in Gr_{n-k}(L)}\zeta(L,F)$, thus $\eta\in C^\infty(Gr_{n-i}(V),\ct^+_{i,V;i})$. Hence by the definition of $\mathbb{F}_V$ one has \begin{eqnarray*} \mathbb{F}_V\left(\Xi_{k,V}(\Psi^+_{k,i}\zeta)\right)=\int_{L\in Gr_{n-i}(V)}p^\vee_{L*}(\mathbb{F}_{V/L}(\eta(L))). \end{eqnarray*} But by the continuity of the Fourier transform $\mathbb{F}_{V/L}(\eta(L))=\int_{F\in Gr_{n-i}(L)}\mathbb{F}_{V/L}(\zeta(L,F)).$ Also \begin{eqnarray*} p_L=q_{L,F}\circ p_F,\\ p_L^\vee=p_F^\vee\circ q^\vee_{L,F}. \end{eqnarray*} Hence \begin{eqnarray}\label{z9} \mathbb{F}_V(\Xi_{k,V}(\Psi_{k,i}(\zeta)))= \int_{{\cal F}_{n-k,n-i}(V)}p^\vee_{F*}\left(q^\vee_{L,F*}(\mathbb{F}_{V/L}(\zeta(L)))\right). \end{eqnarray} By the definition of $\mathbb{F}_{V/F}$ we have \begin{eqnarray*} \int_{L\in Gr_{n-i}(V),\, L\supset F} q^\vee_{L,F*}\left(\mathbb{F}_{V/L}(\zeta(L,F))\right)= \mathbb{F}_{V/F}\left(\int_{L\in Gr_{n-i}(V),\, L\supset F} q^*_{L,F}(\zeta(L,F))\right)=\\\mathbb{F}_{V/F}(\Psi^+_{k,i}(\zeta)(F)). \end{eqnarray*} Substituting this into (\ref{z9}) we get \begin{eqnarray*} \mathbb{F}_V\left(\Xi_{k,V}(\Psi^+_{k,V}(\zeta))\right)= \int_{F\in Gr_{n-k}(V)}p^\vee_{F*}\left(\mathbb{F}_{V/F}(\Psi^+_{k,V}(\zeta)(F))\right). \end{eqnarray*} Since $\Psi^+_{k,i}$ is onto (by Claim \ref{C:z8}), Case 1 is proved.
\underline{Case 2.} Now we assume that $\xi\in C^\infty(Gr_{n-k}(V),\ct^-_{k,V;i})$.
If $k<i+1$ then both sides of (\ref{z2}) vanish. If $k=i+1$ then the result is just the definition of $\mathbb{F}_V$. Let us assume that $k>i+1$. The proof will be analogous to Case 1. Similarly to Case 1 we denote by ${\cal F}_{n-k,n-i-1}(V)$ the manifold of partial flags
$${\cal F}_{n-k,n-i-1}(V):=\{(F,E)|\,F\subset E\subset V, \dim F=n-k,\, \dim E=n-i-1\}.$$ We denote \def\cf_{n-k,n-i-1}(V){{\cal F}_{n-k,n-i-1}(V)} \begin{eqnarray*} p_{n-k}\colon\cf_{n-k,n-i-1}(V)\rightarrow Gr_{n-k}(V),\\ p_{n-i-1}\colon \cf_{n-k,n-i-1}(V)\rightarrow Gr_{n-i-1}(V) \end{eqnarray*} the natural projections. Define \begin{eqnarray*} \tilde\ct^{0,-}_{i+1,V;i}:=p^*_{n-i-1}(\ct^{0,-}_{i+1,V;i}),\\ \tilde\ct^-_{i+1,V;i}:=\tilde\ct^{0,-}_{i+1,V;i}\otimes
|\omega_{\cf_{n-k,n-i-1}(V)}|. \end{eqnarray*} We have the canonical map \begin{eqnarray}\label{z10} \Psi^-_{k,i}\colon C^\infty(\cf_{n-k,n-i-1}(V),\tilde\ct^-_{i+1,V;i})\rightarrow C^\infty(Gr_{n-k},\ct^-_{k,V;i}) \end{eqnarray} given, for any $F\in Gr_{n-k}(V)$, by $$(\Psi^-_{k,i}(\zeta))(F)=\int_{L\in Gr_{n-i-1}(V),L\supset F}q^*_{L,F}(\zeta(L,F))$$ where as previously $q_{L,F}\colon V/F\rightarrow V/L$ is the canonical map. It is clear that $\Psi^-_{k,i}$ is a $GL(V)$-equivariant continuous map. \begin{claim}\label{C:z11} $\Psi^-_{k,i}$ is onto. \end{claim} {\bf Proof} easily follows from Proposition \ref{P:w5}. \qed
The rest of the proof is parallel to Case 1. We have \begin{eqnarray*} \Xi_{k,V}(\Psi^-_{k,i}(\zeta))=\int_{F\in Gr_{n-k}(V)}p^*_F\left(\int_{L\in Gr_{n-i-1}(V),L\supset F}q^*_{L,F}(\zeta(L,F))\right)=\\ \int_{F\in Gr_{n-k}(V)}\int_{L\in Gr_{n-i-1}(V),L\supset F}p_L^*(\zeta(L,F))=\int_{L\in Gr_{n-i-1}(V)}p_L^*\left(\int_{F\in Gr_{n-k}(L)}\zeta(L,F)\right)=\\ \int_{L\in Gr_{n-i-1}(V)}p_L^*(\eta(L)). \end{eqnarray*} where $\eta(L):=\int_{F\in Gr_{n-k}(L)}\zeta(L,F)$, thus $\eta\in C^\infty(Gr_{n-i-1}(V),\ct^-_{i+1,V;i})$. Hence by the definition of $\mathbb{F}_V$ we have \begin{eqnarray*} \mathbb{F}_V\left(\Xi_{k,V}(\Psi^-_{k,i}(\zeta))\right)=\int_{L\in Gr_{n-i-1}(V)}p^\vee_{L*}\left(\mathbb{F}_{V/L}(\eta(L))\right). \end{eqnarray*} But by the continuity of the Fourier transform one has $\mathbb{F}_{V/L}(\eta(L))=\int_{F\in Gr_{n-k}(L)}\mathbb{F}_{V/L}(\zeta(L,F))$. Hence \begin{eqnarray}\label{z12} \mathbb{F}_V\left(\Xi_{k,V}(\Psi^-_{k,i}(\zeta))\right)= \int_{\cf_{n-k,n-i-1}(V)}p^\vee_{F*}\left(q^\vee_{L,F*}(\mathbb{F}_{V/L}(\zeta(L,F)))\right). \end{eqnarray} By the definition of $\mathbb{F}_{V/F}$ we have \begin{eqnarray*} \int_{L\in Gr_{n-i-1}(V),L\supset F} q^\vee_{L,F*}\left(\mathbb{F}_{V/L}(\zeta(L))\right)=\mathbb{F}_{V/F}\left(\int_{L\in Gr_{n-i-1}(V),L\supset F}q^*_{L,F}(\zeta(L,F))\right)=\\ \mathbb{F}_{V/F}\left(\Psi^-_{k,i}(\zeta)(F)\right). \end{eqnarray*} Substituting this into (\ref{z12}) we get $$\mathbb{F}_V\left(\Xi_{k,V}(\Psi^-_{k,i}(\zeta))\right)=\int_{F\in Gr_{n-k(V)}}p^\vee_{F*}\left(\mathbb{F}_{V/F}(\Psi^-_{k,i}(\zeta)(F))\right).$$ Since $\Psi^-_{k,i}$ is onto (by Claim \ref{C:z11}), Case 2 is proved. Thus Theorem \ref{T:z1} is proved. \qed
\subsection{A Plancherel type formula in higher dimensions}\label{Ss:composition-F-high} The main result of this section is Theorem \ref{T:com02} below.
Let us introduce more notation. Let \begin{eqnarray}\label{D:com1} {\cal S}} \def\ct{{\cal T}} \def\cu{{\cal U}_{k,V;i}^{0,\pm}\rightarrow Gr_k(V) \end{eqnarray} denote the vector bundle whose fiber over $E\in Gr_k(V)$ is equal to $Val^{\pm,sm}_i(E)\otimes Dens(E^*)$. Let \begin{eqnarray}\label{D:com2}
{\cal S}} \def\ct{{\cal T}} \def\cu{{\cal U}_{k,V;i}^\pm:={\cal S}} \def\ct{{\cal T}} \def\cu{{\cal U}_{k,V;i}^{0,\pm}\otimes |\omega_{Gr_k(V)}|. \end{eqnarray} Similarly let \begin{eqnarray}\label{D:com1.1} {\cal S}} \def\ct{{\cal T}} \def\cu{{\cal U}_{k,V}^{0}\rightarrow Gr_k(V) \end{eqnarray} denote the vector bundle whose fiber over $E\in Gr_k(V)$ is equal to $Val^{sm}(E)\otimes Dens(E^*)$. Let \begin{eqnarray}\label{D:com1.2}
{\cal S}} \def\ct{{\cal T}} \def\cu{{\cal U}_{k,V}:={\cal S}} \def\ct{{\cal T}} \def\cu{{\cal U}_{k,V}^{0}\otimes |\omega_{Gr_k(V)}|. \end{eqnarray}
For a vector space $W$ we can consider the map \begin{eqnarray*} \mathbb{F}_W\otimes Id_{Dens(W^*)}\colon Val^{sm}(W)\otimes Dens(W^*)\rightarrow\\ (Val^{sm}(W^*)\otimes Dens(W))\otimes Dens(W^*)=Val^{sm}(W^*). \end{eqnarray*} \begin{lemma}\label{L:com3}
Let $V$ be an $n$-dimensional vector space. Let $\eta\in
C^\infty(Gr_2(V),{\cal S}} \def\ct{{\cal T}} \def\cu{{\cal U}^-_{2,V;1})$. Then \begin{eqnarray}\label{com4} (\mathbb{F}_V\otimes Id_{Dens(V^*)})\left(\int_{E\in Gr_2(V)}i_{E*}(\eta(E))\right)=\int_{Gr_2(V)}(i^\vee_E)^*(\mathbb{F}_E\otimes Id_{Dens(E^*)})(\eta(E)) \end{eqnarray} where $i_E\colon E\hookrightarrow V$ denotes the imbedding map. \end{lemma} {\bf Proof.} First note that $i_{E*}(\eta(E))\in Val_{n-1}^-(V)\otimes Dens(V^*)$. Recall that by the definition of the Fourier transform $\mathbb{F}_V\colon Val^{-,sm}_{n-1}(V)\rightarrow Val_1^{-,sm}(V^*)\otimes Dens(V)$ $$\mathbb{F}_V=\ce_V\circ (\mathbb{F}^{-1}_{V^*}\otimes Id_{Dens(V)}).$$ Hence the equality (\ref{com4}) is equivalent to \begin{eqnarray}\label{com5} \int_{E\in Gr_2(V)}i_{E*}(\eta(E))=(\ce_V\circ \mathbb{F}_{V^*})\left(\int_{E\in Gr_2(V)}(i^\vee_E)^*(\mathbb{F}_E\otimes Id_{Dens(E^*)})(\eta(E))\right). \end{eqnarray} The right hand side of (\ref{com5}) is equal to $$\mathbb{F}_{V^*}\left(\int_{E\in Gr_2(V)}(i^\vee_E)^*\left(\ce_E\circ(\mathbb{F}_E\otimes Id_{Dens(E^*)})\right)(\eta(E))\right).$$ But by the Plancherel formula in dimension two (Proposition \ref{P:composition-2-dim}) we know that $$\ce_E\circ(\mathbb{F}_E\otimes Id_{Dens(E^*)})=\mathbb{F}_{E^*}^{-1}.$$ Hence (\ref{com5}) is equivalent to \begin{eqnarray*} \int_{E\in Gr_2(V)}i_{E*}(\eta(E))=\mathbb{F}_{V^*}\left(\int_{E\in Gr_2(V)}(i^\vee_E)^*\mathbb{F}_E^{-1}(\eta(E))\right). \end{eqnarray*} The last equality is a special case of Theorem \ref{T:z1}. \qed.
\begin{lemma}\label{L:com01} The composition map $$Val_1^{-,sm}(V)\overset{\mathbb{F}_V}{\rightarrow} Val_{n-1}^{-,sm}(V^*)\otimes Dens(V)\overset{\mathbb{F}_{V^*}\otimes Id_{Dens(V)}}{\rightarrow} Val_1^{-,sm}(V)$$ is multiplication by -1. \end{lemma} {\bf Proof.} Let $\phi\in Val_1^{-,sm}(V)$. By Proposition \ref{P:w5} there exists $\xi\in C^\infty(Gr_2(V),\ct^-_{2,V;1})$ such that $\phi=\Xi_{2,V}(\xi)$. Then by the definition of $\mathbb{F}_V$ \begin{eqnarray*} \mathbb{F}_V(\phi)=\int_{F\in Gr_{n-2}(V)}p^\vee_{F*}(\mathbb{F}_{V/F}(\xi(F)))=\int_{E\in Gr_2(V^*)}i_{E*}(\mathbb{F}_{V/E^\perp}(\xi(E^\perp)). \end{eqnarray*} Then applying Lemma \ref{L:com3} we get \begin{eqnarray}\label{com7} (\mathbb{F}_{V^*}\otimes Id_{Dens(V)})(\mathbb{F}_V(\phi))=\int_{E\in Gr_2(V^*)}(i^\vee_E)^*\left((\mathbb{F}_E\otimes Id_{Dens(E^*)})\circ \mathbb{F}_{V/E^\perp}\right)(\xi(E^\perp)) \end{eqnarray} But $(\mathbb{F}_E\otimes Id_{Dens(E^*)})\circ \mathbb{F}_{V/E^\perp}=(\mathbb{F}_E\otimes Id_{Dens(E^*)})\circ \mathbb{F}_{E^*}=\ce_{E^*}$ by the two-dimensional case (Proposition \ref{P:composition-2-dim}). Substituting this into (\ref{com7}) we get \begin{eqnarray*} (\mathbb{F}_{V^*}\otimes Id_{Dens(V)})(\mathbb{F}_V(\phi))=-\int_{E\in Gr_2(V^*)}(i^\vee_E)^*(\xi(E^\perp))=-\int_{F\in Gr_{n-2}(V)}p_F^*(\xi(F))=-\phi. \end{eqnarray*} Lemma is proved. Q.E.D. \begin{lemma}\label{L:com01.1} The composition map $$Val^{+,sm}(V)\overset{\mathbb{F}_V}{\rightarrow}Val^{+,sm}(V^*)\otimes Dens(V)\overset{\mathbb{F}_{V^*}\otimes Id_{Dens(V)}}{\rightarrow} Val^{+,sm}(V)$$ is equal to the identity. \end{lemma} {\bf Proof.} Let $0\leq k\leq n$. Let $\cl_k(V)\rightarrow Gr_k(V)$ denote the line bundle whose fiber over $F\in Gr_k(V)$ is equal to $Dens(F)$. We have the Klain imbeddings \begin{eqnarray*} \tau\colon Val^{+,sm}_k(V)\hookrightarrow C^\infty(Gr_k(V),\cl_k(V)),\\ \tau'\colon Val_{n-k}^{+,sm}\otimes Dens(V)\hookrightarrow C^\infty(Gr_{n-k}(V^*),\cl_{n-k}(V^*))\otimes Dens(V). 
\end{eqnarray*} Taking the orthogonal complement we have the identification $$Gr_k(V)\simeq Gr_{n-k}(V^*).$$ Also for any $F\in Gr_k(V)$ we have $$Dens(F)=Dens(F^\perp)\otimes Dens(V).$$ Thus we have a $GL(V)$-equivariant isomorphism $$\gamma\colon C^\infty(Gr_k(V),\cl_k(V))\tilde\rightarrow C^\infty(Gr_{n-k}(V^*),\cl_{n-k}(V))\otimes Dens(V).$$ Let $\phi\in Val^{+,sm}_k(V)$. By \cite{alesker-jdg-03} $$\tau'(\mathbb{F}_V\phi)=\gamma(\tau\phi).$$ It is easy to see that for any $\psi\in Val_{n-k}^{+,sm}(V^*)\otimes Dens(V)$ one has $$\tau\left((\mathbb{F}_{V^*}\otimes Id_{Dens(V)})(\psi)\right)=\gamma^{-1}(\tau'\psi).$$ Hence \begin{eqnarray*} \tau\left(((\mathbb{F}_{V^*}\otimes Id_{dens(V)})\circ \mathbb{F}_V)(\phi)\right)=\gamma^{-1}(\tau'(\mathbb{F}_V\phi))= \gamma^{-1}(\gamma(\tau\phi))=\tau\phi. \end{eqnarray*} Lemma is proved. \qed
\begin{proposition}\label{T:fourier-id} Let $p\colon V\twoheadrightarrow W$ be a surjective linear map of vector spaces. Then the following diagram is commutative \begin{eqnarray}\label{fourier-id} \square<1`1`1`1;1700`600>[Val^{sm}(V)\otimes Dens(V^*)` Val^{sm}(V^*)`Val^{sm}(W)\otimes Dens(W^*)` Val^{sm}(W^*);\mathbb{F}_V\otimes Id_{Dens(V^*)}`p_*`p^{\vee*}`\mathbb{F}_W\otimes Id_{Dens(W^*)}] \end{eqnarray} \end{proposition} {\bf Proof.} \underline{Case 1.} Let us consider the case of even valuations.
By Lemma \ref{L:com01.1}, on even valuations $\mathbb{F}_V\otimes Id_{Dens(V^*)}=\mathbb{F}_{V^*}^{-1},\,\mathbb{F}_W\otimes Id_{Dens(W^*)}=\mathbb{F}_{W^*}^{-1}$. Hence the commutativity of the diagram (\ref{fourier-id}) is equivalent to commutativity of the following diagram \begin{eqnarray*} \square<1`1`1`1;1700`600>[Val^{+,sm}(V^*)`Val^{+,sm}(V)\otimes Dens(V^*)` Val^{+,sm}(W^*)`Val^{+,sm}(W)\otimes Dens(W^*);\mathbb{F}_{V^*}`p^{\vee*}`p_*`\mathbb{F}_{W^*}]. \end{eqnarray*} But the last diagram is a special case of Theorem \ref{T:z3}. Thus the theorem is proved in the even case.
\underline{Case 2.} Let us consider the odd case.
Let us denote $m:=\dim W$. Let $1\leq j\leq n-1$. We have to show that the following diagram is commutative: \begin{eqnarray}\label{fourier-id2} \square<1`1`1`1;1800`600>[Val^{-,sm}_j(V)\otimes Dens(V^*)` Val^{-,sm}_{n-j}(V^*)`Val^{-,sm}_{j-n+m}(W)\otimes Dens(W^*)` Val^{-,sm}_{n-j}(W^*);\mathbb{F}_V\otimes Id_{Dens(V^*)}`p_*`p^{\vee*}`\mathbb{F}_W\otimes Id_{Dens(W^*)}] \end{eqnarray}
\underline{Case 2a.} Let us assume in addition that $\dim W=2$.
Then necessarily $j=n-1$. In this case, by Lemma \ref{L:com01}, we have $\mathbb{F}_V\otimes Id_{Dens(V^*)}=-\mathbb{F}_{V^*}^{-1},\, \mathbb{F}_W\otimes Id_{Dens(W^*)}=-\mathbb{F}_{W^*}^{-1}$. Hence the commutativity of the diagram (\ref{fourier-id2}) is equivalent to the commutativity of the diagram \begin{eqnarray}\label{fourier-id3} \square<1`1`1`1;1600`600>[Val^{-,sm}_{1}(V^*)`Val^{-,sm}_{n-1}(V)\otimes Dens(V^*)`Val^{-,sm}_{1}(W^*)`Val^{-,sm}_{1}(W)\otimes Dens(W^*);\mathbb{F}_{V^*}`p^{\vee*}`p_*`\mathbb{F}_{W^*}]. \end{eqnarray} The last diagram is again a special case of Theorem \ref{T:z3}.
\underline{Case 2b.} Let us assume that $j-n+m=1$.
In other words we have to show that the following diagram is commutative \begin{eqnarray}\label{diag-case2b} \square<1`1`1`1;1800`600>[Val^{-,sm}_j(V)\otimes Dens(V^*)` Val^{-,sm}_{n-j}(V^*)`Val^{-,sm}_{1}(W)\otimes Dens(W^*)` Val^{-,sm}_{m-1}(W^*);\mathbb{F}_V\otimes Id_{Dens(V^*)}`p_*`p^{\vee*}`\mathbb{F}_W\otimes Id_{Dens(W^*)}]. \end{eqnarray}
Let us fix an arbitrary 2-dimensional subspace $i\colon E\hookrightarrow W$. Let us form a Cartesian square $$\square<1`3`3`1;500`400>[Z`V`E`W;\tilde i`\tilde p`p`i].$$ Note that $p,\tilde p$ are surjections, $i,\tilde i$ are injections. Set $z:=\dim Z=2+n-m$.
Let us consider the following cube diagram where we denote $Dens(\bullet)$ by $D(\bullet)$ for brevity:
$$ \bfig \putsquare<1`1`1`1;2250`1500>(0,1800)[Val^{-,sm}_j(V)\otimes D(V^*)`Val^{-,sm}_{n-j}(V^*)`Val_j^{-,sm}(Z)\otimes D(V^*)`Val^{-,sm}_{z-j}(Z^*)\otimes D(Z)\otimes D(V^*);\mathbb{F}_V\otimes Id_{D(V^*)}`\tilde i^*\otimes Id_{D(V^*)}`\tilde i^{\vee}_*\otimes Id_{D(V^*)}`\mathbb{F}_Z\otimes Id_{D(V^*)}] \putsquare<1`1`1`1;2250`1500>(825,900)[Val^{-,sm}_1(W)\otimes D(W^*) `Val^{-,sm}_{n-j}(W^*)`Val^{-,sm}_1(E)\otimes D(W^*)`Val^{-,sm}_1(E^*)\otimes D(E)\otimes D(W^*);\mathbb{F}_W\otimes Id_{D(W^*)}`i^*\otimes Id_{D(W^*)}`i^\vee_*\otimes Id_{D(W^*)}`\mathbb{F}_E\otimes Id_{D(W^*)}] \putmorphism(0,3300)(1,-1)[``p_*]{900}1r \putmorphism(2250,3300)(1,-1)[`` p^{\vee *}]{900}1r \putmorphism(2200,1800)(1,-1)[``"\tilde p_*"]{900}1l \putmorphism(0,1800)(1,-1)[``"\tilde p^{\vee }_*"]{900}1l\efig$$
The following facets of this cube commute:
$\bullet$ the left facet, by base change Theorem \ref{T:base-change2};
$\bullet$ the right facet, by base change Theorem \ref{T:base-change};
$\bullet$ the bottom facet, by Case 2a (since $\dim E=2$);
$\bullet$ the back facet, by Theorem \ref{T:z3};
$\bullet$ the front facet, by Theorem \ref{T:z3}.
These properties and a straightforward diagram chasing imply that \begin{eqnarray*} (i^\vee_*\otimes Id_{D(W^*)})\circ\left(p^{\vee *}\circ (\mathbb{F}_V\otimes Id_{D(V^*)})-(\mathbb{F}_W\circ Id_{D(W^*)})\circ p_*)\right)=0. \end{eqnarray*} Since $i^\vee$ is an arbitrary surjection of rank two, Lemma \ref{L:z6} implies that $$p^{\vee *}\circ (\mathbb{F}_V\otimes Id_{D(V^*)})-(\mathbb{F}_W\circ Id_{D(W^*)})\circ p_*)=0.$$ This means that the diagram (\ref{diag-case2b}) is commutative. Thus Case 2b is proved.
\underline{Case 2c.} Let us consider the general odd case.
Let us fix an {\itshape arbitrary} surjection $q\colon W\twoheadrightarrow X$ with $\dim X=n-j+1$. Let us consider the following diagram: $$\bfig \putsquare<1`1`1`1;1700`600>(0,750)[Val^{-,sm}_j(V)\otimes Dens(V^*)` Val^{-,sm}_{n-j}(V^*)`Val^{-,sm}_{j-n+m}(W)\otimes Dens(W^*)` Val^{-,sm}_{n-j}(W^*);\mathbb{F}_V\otimes Id_{Dens(V^*)}`p_*`p^{\vee*}`\mathbb{F}_W\otimes Id_{Dens(W^*)}] \putsquare<0`1`1`1;1700`600>(0,150)[\phantom{Val^{-,sm}_{j-n+m}(W)\otimes Dens(W^*)}`\phantom{Val^{-,sm}_{n-j}(W^*)}`Val^{-,sm}_1(X)\otimes Dens(X^*)`Val^{-,sm}_{n-j}(X^*);`q_*`q^{\vee*}`\mathbb{F}_X\otimes Id_{Dens(X^*)}] \efig$$ By Case 2b, the lower square of this diagram commutes. Also by Case 2b, the big exterior contour of this diagram commutes. It follows that $$q^{\vee *}\circ \left(p^{\vee ^*}\circ (\mathbb{F}_V\otimes Id_{Dens(V^*)})-(\mathbb{F}_W\otimes Id_{Dens(W^*)})\circ p_*\right)=0.$$ Since the last equality holds for an arbitrary surjection $q$ of rank $n-j+1$, $q^\vee$ is an arbitrary injection of an $(n-j+1)$-dimensional subspace. But by Schneider's theorem \cite{schneider-simple} odd $k$-homogeneous valuations are uniquely determined by their restrictions to all $(k+1)$-dimensional subspaces. Hence $$p^{\vee*}\circ(\mathbb{F}_V\otimes Id_{Dens(V^*)})-(\mathbb{F}_W\otimes Id_{Dens(W^*)})\circ p_*=0.$$ This means that the diagram (\ref{fourier-id2}) commutes. Proposition is proved. \qed
The following theorem generalizes Lemma \ref{L:com3}. \begin{theorem}\label{T:com8} Let $0\leq k\leq n$. Let $\eta\in C^\infty(Gr_k(V),{\cal S}} \def\ct{{\cal T}} \def\cu{{\cal U}_{k,V})$ (${\cal S}} \def\ct{{\cal T}} \def\cu{{\cal U}_{k,V}$ was defined in (\ref{D:com1.1}), (\ref{D:com1.2})). Then \begin{eqnarray}\label{tttt} (\mathbb{F}_V\otimes Id_{Dens(V^*)})\left(\int_{E\in Gr_k(V)}i_{E*}(\eta(E))\right)=\int_{E\in Gr_k(V)}(i_E^\vee)^*(\mathbb{F}_E\otimes Id_{Dens(E^*)})(\eta(E)). \end{eqnarray} \end{theorem} {\bf Proof.} \underline{Case 1.} Let us consider the even case, i.e. $\eta(E)\in Val_i^{+,sm}(E)\otimes Dens(E^*)$ for any $E\in Gr_k(V)$, $0\leq i\leq k$.
\underline{Case 1a.} Assume in addition that $i=0$. Thus $\eta(E)\in Dens(E^*)$ for any $E\in Gr_k(V)$.
We have the canonical identification \begin{eqnarray}\label{se1} Dens(E^*)=Dens(V/E)\otimes Dens(V^*). \end{eqnarray} It is easy to see from the definitions that under this identification the map $$i_{E*}\colon Dens(E^*)\rightarrow Val(V)\otimes Dens(V^*)$$ coincides with the map $$p_E^*\otimes Id_{Dens(V^*)}\colon Dens(V/E)\otimes Dens(V^*)\rightarrow Val(V)\otimes Dens(V^*)$$ where $p_E\colon V\rightarrow V/E$ is the canonical projection.
Let us denote by $\tilde \eta\in C^\infty(Gr_k(V),\ct_{k,V;k})\otimes Dens(V^*)$ the section corresponding to $\eta$ under the isomorphism (\ref{se1}). Thus \begin{eqnarray}\label{se2} \int_{E\in Gr_k(V)}i_{E*}(\eta(E))=\int_{E\in Gr_k(V)}(p_E^*\otimes Id_{Dens(V^*)})(\tilde \eta(E)). \end{eqnarray}
Let us fix a Lebesgue measure $vol_V$ on $V$. Set $$\hat \eta:=\tilde \eta\cdot vol_V\in C^\infty(Gr_k(V),\ct_{k,V;k}).$$ Then $$\int_{E\in Gr_k(V)}i_{E*}(\eta(E))=\left(\int_{E\in Gr_k(V)}p_E^*(\hat \eta(E))\right)\otimes vol_V^{-1}.$$ Hence by the definition of the Fourier transform \begin{eqnarray*} (\mathbb{F}_V\otimes Id_{Dens(V^*)})\left(\int_{E\in Gr_k(V)}i_{E*}(\eta(E))\right)= \left(\int_{E\in Gr_k(V)}p_{E*}^\vee(\mathbb{F}_{V/E}(\hat\eta(E)))\right)\cdot vol_V^{-1}. \end{eqnarray*} Hence it suffices to check that for any $E\in Gr_k(V)$ \begin{eqnarray}\label{se3} \left(p_{E*}^\vee(\mathbb{F}_{V/E}(\hat\eta(E)))\right)\cdot vol_V^{-1}=(i_{E*}^\vee)^*(\mathbb{F}_E\otimes Id_{Dens(E^*)})(\eta(E)). \end{eqnarray} To prove the identity (\ref{se3}) let us fix a Lebesgue measure $vol_E$ on $E$. Let $vol_{V/E}:=\frac{vol_V}{vol_E}$ be the corresponding Lebesgue measure on $V/E$. It is sufficient to prove (\ref{se3}) for $\eta(E)=vol_E^{-1}$. Then under the isomorphism (\ref{se1}) we have $$\eta(E)=vol_{V/E}\otimes vol_V^{-1}.$$ Hence $$\hat\eta(E)=vol_{V/E}.$$ We have $$(\mathbb{F}_E\otimes Id_{Dens(E^*)}(\eta(E))=vol_E^{-1}\in Dens(E^*).$$ Hence \begin{eqnarray}\label{se4} (i_{E*}^\vee)^*(\mathbb{F}_E\otimes Id_{Dens(E^*)})(\eta(E))=(i_{E*}^\vee)^*(vol_E^{-1}). \end{eqnarray} Next \begin{eqnarray*} \mathbb{F}_{V/E}(\hat \eta(E))=\mathbb{F}_{V/E}(vol_{V/E})=\chi_{(V/E)^*}\otimes vol_{V/E}\in Val_0((V/E)^*)\otimes Dens(V/E). \end{eqnarray*} Hence \begin{eqnarray*} \left(p_{E*}^\vee(\mathbb{F}_{V/E}(\hat\eta(E)))\right)\cdot vol_V^{-1}=\left(p_{E*}^\vee(\chi_{(V/E)^*}\otimes vol_{V/E})\right)\cdot vol_V^{-1}. \end{eqnarray*} It is easy to see that the last expression is equal to the right hand side of (\ref{se4}). This implies (\ref{se3}). Hence Case 1a is proved.
\underline{Case 1b.} Assume that $i>0$.
Recall that we denote by ${\cal F}_{k-i,k}(V)$ the partial flag manifold
$${\cal F}_{k-i,k}(V):=\{(L,E)|\, L\subset E,\, L\in Gr_{k-i}(V),\, E\in Gr_k(V)\}.$$ For $(L,E)\in {\cal F}_{k-i,k}(V)$ let us denote by $i_{E,L}$ the imbedding $L\hookrightarrow E$. Let us denote by ${\cal G}^0$ the line bundle over ${\cal F}_{k-i,k}(V)$ whose fiber over $(L,E)$ is equal to $Dens(L^*)$. Let
$${\cal G}:={\cal G}^0\otimes |\omega_{{\cal F}_{k-i,k}(V)}|.$$
\begin{claim}\label{Cl:seg3} For any $\eta\in C^\infty(Gr_k(V),{\cal S}^+_{k,V;i}),\, k>i,$ there exists $\xi\in C^\infty({\cal F}_{k-i,k}(V),{\cal G})$ such that for any $E\in Gr_k(V)$ \begin{eqnarray}\label{seg4} \eta(E)=\int_{L\in Gr_{k-i}(E)}i_{E,L*}(\xi(L,E)). \end{eqnarray} \end{claim} {\bf Proof.} Define the map $C^\infty({\cal F}_{k-i,k}(V),{\cal G})\rightarrow C^\infty(Gr_k(V),{\cal S}^+_{k,V;i})$ by the right hand side of (\ref{seg4}). This map is $GL(V)$-equivariant. Then the Irreducibility Theorem, the Casselman-Wallach theorem, and Proposition \ref{part1-epi} imply the claim. \qed
Let us continue proving Case 1b. We have \begin{eqnarray*} \int_{E\in Gr_k(V)}i_{E*}(\eta(E))=\int_{E\in Gr_k(V)}\int_{L\in Gr_{k-i}(E)}i_{E*}(i_{E,L*}(\xi(L,E)))=\\\int_{E\in Gr_k(V)}\int_{L\in Gr_{k-i}(E)}i_{L*}(\xi(L,E))= \int_{L\in Gr_{k-i}(V)}i_{L*}\left(\int_{E\in Gr_k(V),E\supset L}\xi(L,E)\right). \end{eqnarray*} Let us denote by $\hat\xi(L)$ the inner integral in the last expression. Thus $\hat\xi\in C^\infty(Gr_{k-i}(V),{\cal S}_{k-i,V;0})$. Hence by Case 1a \begin{eqnarray}\label{sss5} (\mathbb{F}_V\otimes Id_{Dens(V^*)})\left(\int_{E\in Gr_k(V)}i_{E*}(\eta(E))\right)=\\\label{sss6} (\mathbb{F}_V\otimes Id_{Dens(V^*)})\left(\int_{L\in Gr_{k-i}(V)}i_{L*}(\hat\xi(L))\right)=\\\label{sss7} \int_{L\in Gr_{k-i}(V)}(i_L^\vee)^*(\mathbb{F}_L\otimes Id_{Dens(L^*)})(\hat\xi(L)). \end{eqnarray} Using again Case 1a we obtain that the right hand side of the equality (\ref{tttt}) is equal to \begin{eqnarray*} \int_{E\in Gr_k(V)}(i_{E}^\vee)^*(\mathbb{F}_E\otimes Id_{Dens(E^*)})\left(\int_{L\in Gr_{k-i}(E)}i_{E,L*}(\xi(L,E))\right)=\\ \int_{E\in Gr_k(V)}(i_{E}^\vee)^*\left(\int_{L\in Gr_{k-i}(E)}(i_{E,L}^\vee)^*(\mathbb{F}_L\otimes Id_{Dens(L^*)})(\xi(L,E))\right)=\\ \int_{E\in Gr_k(V)}\int_{L\in Gr_{k-i}(E)}(i_L^\vee)^*(\mathbb{F}_L\otimes Id_{Dens(L^*)})(\xi(L,E))=\\ \int_{L\in Gr_{k-i}(V)}(i_L^\vee)^*(\mathbb{F}_L\otimes Id_{Dens(L^*)})(\hat\xi(L))\overset{\mbox{by } (\ref{sss7})}{=}\\ (\mathbb{F}_V\otimes Id_{Dens(V^*)})\left(\int_{E\in Gr_k(V)}i_{E*}(\eta(E))\right). \end{eqnarray*} Thus Case 1b is proved. Hence Case 1 is proved too.
\underline{Case 2.} Let us consider the odd case, i.e. $\eta(E)\in Val^{-,sm}_i(E)\otimes Dens(E^*)$ for any $E\in Gr_k(V)$, $1\leq i\leq k-1$.
Then the equality (\ref{tttt}) becomes equivalent to \begin{eqnarray*} \int_{E\in Gr_2(V)}i_{E*}(\eta(E))=\mathbb{F}_V\left(\int_{E\in Gr_2(V)}(i_E^\vee)^*(\mathbb{F}_E^{-1}(\eta(E)))\right). \end{eqnarray*} The last equality is a special case of Theorem \ref{T:z1}. Case 2a is proved.
\underline{Case 2a.} Assume that $i=1$.
Let us fix an arbitrary surjection $p\colon V\twoheadrightarrow W$ of rank $k$. We have \begin{eqnarray}\label{sem0.5} p^{\vee*}\left((\mathbb{F}_V\otimes Id_{Dens(V^*)})\left(\int_{E\in Gr_k(V)}i_{E*}(\eta(E))\right)\right)\overset{\mbox{Prop. }(\ref{T:fourier-id})}{=}\\ (\mathbb{F}_W\otimes Id_{Dens(W^*)})\left(p_*\left(\int_{E\in Gr_k(V)}i_{E*}(\eta(E))\right)\right)=\\ (\mathbb{F}_W\otimes Id_{Dens(W^*)})\left(\int_{E\in Gr_k(V)}(p\circ i_E)_*(\eta(E))\right)=\\ (\mathbb{F}_W\otimes Id_{Dens(W^*)})\left(\int_{E\in Gr_k(V),\, E\cap Ker (p)=\{0\}}(p\circ i_E)_*(\eta(E))\right)=\\\label{sem1} \int_{E\in Gr_k(V),\, E\cap Ker (p)=\{0\}}(p\circ i_E)^{\vee*}(\mathbb{F}_E\otimes Id_{Dens(E^*)})(\eta(E)) \end{eqnarray} where the last equality follows from the fact that $p\circ i_E\colon E\rightarrow W$ is an isomorphism for any $E\in Gr_k(V)$ such that $E\cap Ker (p)=\{0\}$. Next the expression (\ref{sem1}) is equal to \begin{eqnarray} \int_{E\in Gr_k(V)}(p\circ i_E)^{\vee*}\left((\mathbb{F}_E\otimes Id_{Dens(E^*)})(\eta(E))\right)=\\ \int_{E\in Gr_k(V)}p^{\vee*}\left(i_E^{\vee*}\left((\mathbb{F}_E\otimes Id_{Dens(E^*)})(\eta(E))\right)\right)=\\\label{sem2} p^{\vee*}\left(\int_{E\in Gr_k(V)}i_E^{\vee*}\left((\mathbb{F}_E\otimes Id_{Dens(E^*)})(\eta(E))\right)\right). \end{eqnarray} Thus we have shown that (\ref{sem0.5})$=$(\ref{sem2}). In other words for any surjection $p\colon V\twoheadrightarrow W$ of rank $k$ $$p^{\vee*}(\mbox{l.h.s. of }(\ref{tttt}))= p^{\vee*}(\mbox{r.h.s. of }(\ref{tttt})).$$ Since $i=1$, this implies (\ref{tttt}). Thus Case 2a is proved.
\underline{Case 2b.} Assume now that $i>1$.
Let us consider the (infinite dimensional) vector bundle $\ch^0$ over the partial flag manifold ${\cal F}_{k-i+1,k}(V)$ whose fiber over $(L,E)\in {\cal F}_{k-i+1,k}(V)$ is equal to $Val_1^{-,sm}(L)\otimes Dens(L^*)$. Let
$$\ch:=\ch^0\otimes |\omega_{{\cal F}_{k-i+1,k}(V)}|.$$ \begin{claim}\label{Cl:seg1} For any $\eta\in C^\infty(Gr_k(V),{\cal S}^-_{k,V;i}),\, k>i>1,$ there exists $\xi\in C^\infty({\cal F}_{k-i+1,k}(V),\ch)$ such that for any $E\in Gr_k(V)$ \begin{eqnarray}\label{seg2} \eta(E)=\int_{L\in Gr_{k-i+1}(E)}i_{E,L*}(\xi(L,E)). \end{eqnarray} \end{claim} {\bf Proof.} The map $C^\infty({\cal F}_{k-i+1,k}(V),\ch)\rightarrow C^\infty(Gr_k(V),{\cal S}^-_{k,V;i})$ given by the right hand side of (\ref{seg2}) is $GL(V)$-equivariant. Then the Irreducibility theorem, the Casselman-Wallach theorem, and Proposition \ref{part1-epi} imply the claim. \qed
Next we have \begin{eqnarray*} \int_{E\in Gr_k(V)}i_{E*}(\eta(E))=\int_{E\in Gr_k(V)}\int_{L\in Gr_{k-i+1}(E)}i_{E*}(i_{E,L*}(\xi(L,E)))=\\\int_{E\in Gr_k(V)}\int_{L\in Gr_{k-i+1}(E)}i_{L*}(\xi(L,E))= \int_{L\in Gr_{k-i+1}(V)}i_{L*}\left(\int_{E\in Gr_k(V),E\supset L}\xi(L,E)\right). \end{eqnarray*} Let us denote by $\hat\xi(L)$ the inner integral in the last expression. Thus $\hat\xi\in C^\infty(Gr_{k-i+1}(V),{\cal S}_{k-i+1,V;1}^-)$. Hence by Case 2a \begin{eqnarray}\label{sst5} (\mathbb{F}_V\otimes Id_{Dens(V^*)})\left(\int_{E\in Gr_k(V)}i_{E*}(\eta(E))\right)=\\\label{sst6} (\mathbb{F}_V\otimes Id_{Dens(V^*)})\left(\int_{L\in Gr_{k-i+1}(V)}i_{L*}(\hat\xi(L))\right)=\\\label{sst7} \int_{L\in Gr_{k-i+1}(V)}(i_L^\vee)^*(\mathbb{F}_L\otimes Id_{Dens(L^*)})(\hat\xi(L)). \end{eqnarray} Using again Case 2a we obtain that the right hand side of the equality (\ref{tttt}) is equal to \begin{eqnarray*} \int_{E\in Gr_k(V)}(i_{E}^\vee)^*(\mathbb{F}_E\otimes Id_{Dens(E^*)})\left(\int_{L\in Gr_{k-i+1}(E)}i_{E,L*}(\xi(L,E))\right)=\\ \int_{E\in Gr_k(V)}(i_{E}^\vee)^*\left(\int_{L\in Gr_{k-i+1}(E)}(i_{E,L}^\vee)^*(\mathbb{F}_L\otimes Id_{Dens(L^*)})(\xi(L,E))\right)=\\ \int_{E\in Gr_k(V)}\int_{L\in Gr_{k-i+1}(E)}(i_L^\vee)^*(\mathbb{F}_L\otimes Id_{Dens(L^*)})(\xi(L,E))=\\ \int_{L\in Gr_{k-i+1}(V)}(i_L^\vee)^*(\mathbb{F}_L\otimes Id_{Dens(L^*)})(\hat\xi(L))\overset{\mbox{by } (\ref{sst7})}{=}\\ (\mathbb{F}_V\otimes Id_{Dens(V^*)})\left(\int_{E\in Gr_k(V)}i_{E*}(\eta(E))\right). \end{eqnarray*} Thus Case 2b is proved. Hence Case 2 is proved too, and Theorem \ref{T:com8} is proved. \qed
The following immediate reformulation of Theorem \ref{T:com8} will be useful in the proof of Theorem \ref{T:hfh1} below. \begin{corollary}\label{Cor:com8.7} Let $0\leq k\leq n$. Let $\tilde\eta\in C^\infty(Gr_k(V),{\cal S}_{k,V})\otimes Dens(V)$. Then \begin{eqnarray} \mathbb{F}_V\left(\int_{E\in Gr_k(V)}(i_{E*}\otimes Id_{Dens(V)})(\tilde\eta(E))\right)=\\\label{wow}\int_{E\in Gr_k(V)}(i_E^{\vee*}\otimes Id_{Dens(V)})\left((\mathbb{F}_E\otimes Id_{Dens(V/E)})(\tilde\eta(E))\right) \end{eqnarray} where $i_{E*}\otimes Id_{Dens(V)}$ in the left hand side is the map $\left(Val^{sm}(E)\otimes Dens(E^*)\right)\otimes Dens(V)\rightarrow Val^{sm}(V)$, and $\mathbb{F}_E\otimes Id_{Dens(V/E)}$ in the right hand side is the map \begin{eqnarray*}Val^{sm}(E)\otimes Dens(E^*)\otimes Dens(V)=Val^{sm}(E)\otimes Dens(V/E)\overset{\mathbb{F}_E\otimes Id_{Dens(V/E)}}{\rightarrow} \\\left(Val^{sm}(E^*)\otimes Dens(E)\right)\otimes Dens(V/E)=Val^{sm}(E^*)\otimes Dens(V).\end{eqnarray*} \end{corollary}
\begin{theorem}[Plancherel formula]\label{T:com02} $$(\mathbb{F}_{V^*}\otimes Id_{Dens(V)})\circ \mathbb{F}_V=\ce_V.$$ \end{theorem} {\bf Proof.} In the even case this theorem is precisely Lemma \ref{L:com01.1}. Let us consider the odd $i$-homogeneous case $$(\mathbb{F}_{V^*}\otimes Id_{Dens(V)})\circ \mathbb{F}_V\colon Val_i^{-,sm}(V)\rightarrow Val_i^{-,sm}(V).$$ If $i=1$ the result is just Lemma \ref{L:com01}. If $i=n-1$ then the result follows immediately from the definition of the Fourier transform $\mathbb{F}_V\colon Val_{n-1}^{-,sm}(V)\rightarrow Val_1^{-,sm}(V^*)\otimes Dens(V)$ since $\mathbb{F}_V:=-(\mathbb{F}_{V^*}\otimes Id_{Dens(V)})^{-1}$.
Let us assume that $2\leq i\leq n-2$. Fix $\phi\in Val_i^{-,sm}(V)$. By Proposition \ref{P:w5} there exists $\xi\in C^\infty(Gr_{n-i-1}(V),\ct_{i+1,V;i}^-)$ such that $\phi=\Xi_{i+1,V}(\xi)$. By the definition of $\mathbb{F}_V$ in this case, $$\mathbb{F}_V(\phi)=\int_{F\in Gr_{n-i-1}(V)}p_{F*}^{\vee}(\mathbb{F}_{V/F}(\xi(F))).$$
By Theorem \ref{T:com8} we have \begin{eqnarray} (\mathbb{F}_V\otimes Id_{Dens(V^*)})(\mathbb{F}_V(\phi))=\\ \int_{F\in Gr_{n-i-1}(V)}p_F^*\left((\mathbb{F}_{(V/F)^*}\otimes Id_{Dens(V/F)})(\mathbb{F}_{V/F}(\xi(F)))\right)=\\\label{oo} -\int_{F\in Gr_{n-i-1}(V)}p_F^*(\xi(F)) \end{eqnarray} where the last equality holds by the proved above case corresponding to $n=i+1$. But the expression (\ref{oo}) is equal to $-\Xi_{i+1,V}(\xi)=-\phi$. Hence theorem is proved. \qed
Let us fix now a Euclidean metric on $V$. This induces isomorphisms \begin{eqnarray}\label{pppp} V^*\tilde\rightarrow V, Dens(V)\tilde\rightarrow\mathbb{C}.\end{eqnarray} Under these identifications $\mathbb{F}_V\colon Val^{sm}(V)\tilde \rightarrow Val^{sm}(V)$. From Theorem \ref{T:com02} we easily deduce \begin{corollary}\label{C:plancher} Under the identifications (\ref{pppp}) one has $\mathbb{F}_V^4=Id$. Moreover $\mathbb{F}_V^2\ne Id$ provided $\dim V>1$. \end{corollary}
\subsection{Homomorphism property of the Fourier transform in higher dimensions.}\label{Ss:homomorphism-F-high} \begin{theorem}\label{T:hfh1} The map $$\mathbb{F}_V\colon Val^{sm}(V)\rightarrow Val^{sm}(V^*)\otimes Dens(V)$$ is an isomorphism of algebras. \end{theorem} {\bf Proof.} It remains to show that $\mathbb{F}_V$ is a {\itshape homomorphism} of algebras, i.e. \begin{eqnarray}\label{pr-conv} \mathbb{F}_V(\phi\cdot \psi)=\mathbb{F}_V(\phi)\ast \mathbb{F}_V(\psi) \end{eqnarray} for any $\phi,\psi\in Val^{sm}(V)$. We will consider a number of cases.
\underline{Case 1.} Assume that $\phi$ and $\psi$ are even.
This case was proved by Bernig and Fu \cite{bernig-fu}. We will not prove this case here though it can be proved similarly to Case 2 below, but simpler.
\underline{Case 2.} Assume that $\phi\in Val^{+,sm}_i(V),\, \psi\in Val^{-,sm}_j(V)$.
We will need a general lemma. \begin{lemma}\label{L:prod-push} Let $F\subset V$ be a linear subspace. Let $i_F\colon F\hookrightarrow V$, $p_F\colon V\twoheadrightarrow V/F$ be the canonical imbedding and projection maps. Let $E\subset V$ be another linear subspace, and let $p_E\colon V\twoheadrightarrow V/E$ be the canonical projection.
Let $\mu\in Dens(V/F)\subset Val^{sm}(V/F)$, $\zeta\in Val^{sm}(V/E)$. Then \begin{eqnarray}\label{forl} (p_F\times p_E)^*(\mu\boxtimes \zeta)=(i_{F*}\otimes Id_{Dens(V)})((p_E\circ i_F)^*(\zeta)\otimes \mu) \end{eqnarray} where \begin{eqnarray*} p_F\times p_E\colon V\rightarrow V/F\times V/E,\\ (p_E\circ i_F)^*(\zeta)\otimes\mu\in Val(F)\otimes Dens(V/F)=\left(Val(F)\otimes Dens(F^*)\right)\otimes Dens(V),\\ i_{F*}\otimes Id_{Dens(V)}\colon\left(Val(F)\otimes Dens(F^*)\right)\otimes Dens(V)\rightarrow Val(V). \end{eqnarray*} \end{lemma} {\bf Proof.} By the McMullen's conjecture we may assume that $$\zeta(\bullet)=vol_{V/E}(\bullet+A)$$ where $vol_{V/E}$ is a Lebesgue measure on $V/E$, and $A\in \ck^{sm}(V/E)$. Let us compute first the left hand side of (\ref{forl}). One has \begin{eqnarray}\label{op1} (p_F\times p_E)^*(\mu\boxtimes \zeta)(K)=\\ (\mu\boxtimes\zeta)((p_F\times p_E)(K))=\\\label{op2} \int_{z\in V/F}vol_{V/E}\left(((p_F\times p_E)(K)\cap(\{z\}\times V/E))+A\right)d\mu(z). \end{eqnarray} Let us compute now the right hand side of (\ref{forl}). One has \begin{eqnarray*} (i_{F*}\otimes Id_{Dens(V)})\left((p_E\circ i_F)^*(\zeta)\otimes \mu\right)(K)=\\ \int_{z\in V/F}\left((p_E\circ i_F)^*\zeta\right)(K\cap z)d \mu(z)=\\ \int_{z\in V/F}\zeta\left((p_E\circ i_F)(K\cap z)\right)d \mu(z). \end{eqnarray*} Hence \begin{eqnarray}\label{op3} (i_{F*}\otimes Id_{Dens(V)})\left((p_E\circ i_F)^*(\zeta)\otimes \mu\right)(K)=\\\label{op4} \int_{z\in V/F}\zeta(p_E(K\cap z))d\mu(z)=\\\label{op5} \int_{z\in V/F}vol_{V/E}(p_E(K\cap z)+A)d\mu(z). \end{eqnarray} But obviously $$\{z\}\times p_E(K\cap z)=(p_F\times p_E)(K)\cap (\{z\}\times V/E).$$ Substituting this into (\ref{op5}) and comparing with (\ref{op2}) we get that $$(\ref{op1})=(\ref{op3}).$$ Thus lemma is proved. \qed
Let us return now to the proof of Case 2 of the theorem. First observe that if $i+j>n-1$ then both sides of (\ref{pr-conv}) vanish. Thus we will assume that $i+j\leq n-1$.
\underline{Case 2a.} Let us assume in addition that $i+j=n-1$.
By Proposition \ref{P:w5} there exist $\mu\in C^\infty(Gr_{n-i}(V),\ct^+_{i,V;i})$ and $\zeta\in C^\infty(Gr_{i}(V),\ct^-_{j+1,V;j})$ such that $\phi=\Xi_{i,V}(\mu),\psi=\Xi_{j+1,V}(\zeta).$ In other words \begin{eqnarray*} \phi=\int_{E\in Gr_{n-i}(V)}p_E^*(\mu(E)),\\ \psi=\int_{F\in Gr_{i}(V)}p_F^*(\zeta(F)). \end{eqnarray*} Then we have \begin{eqnarray}\label{2a1} \mathbb{F}_V(\phi\cdot \psi)\overset{\mbox{Prop. }\ref{exter-pull}} {=}\\\label{2a2}\mathbb{F}_V\left(\int_{E\in Gr_{n-i}(V)}\int_{F\in Gr_{i}(V)}(p_F\times p_E)^*(\mu(F)\boxtimes \zeta(E))\right)\overset{\mbox{Lemma } \ref{L:prod-push}}{=}\\\label{2a3} \mathbb{F}_V\left(\int_{E\in Gr_{n-i}(V)}\int_{F\in Gr_{i}(V)}(i_{F*}\otimes Id_{Dens(V)})\left((p_E\circ i_F)^*(\zeta(E))\otimes \mu(F)\right)\right) \end{eqnarray} Let us denote \begin{eqnarray}\label{locus}
\cz:=\{(E,F)\in Gr_{n-i}(V)\times Gr_i(V)|\, E\cap F=0\}. \end{eqnarray} Thus $\cz\subset Gr_{n-i}(V)\times Gr_i(V)$ is an open dense subset whose complement is a (singular) subvariety of positive codimension. Then clearly \begin{eqnarray*} (\ref{2a3})=\mathbb{F}_V\left(\int_\cz(i_{F*}\otimes Id_{Dens(V)})\left((p_E\circ i_F)^*(\zeta(E))\otimes \mu(F)\right)\right). \end{eqnarray*} Let us denote for brevity $$\omega(E,F):=(p_E\circ i_F)^*(\zeta(E))\otimes \mu(F).$$ It is easy to see that
$\bullet$ $\omega$ is a {\itshape continuous} section of the bundle $\ct^+_{i,V;i}\boxtimes \ct^-_{j+1,V;j}$ over $Gr_{n-i}(V)\times Gr_i(V)$;
$\bullet$ $\omega$ is smooth over $\cz$.
Let us fix a sequence $\gamma_N\colon Gr_{n-i}(V)\times Gr_i(V)\rightarrow [0,1]$, $N\in \mathbb{N}$, of $C^\infty$-smooth functions vanishing in a neighborhood (depending on $N$) of the complement of $\cz$, and converging uniformly on compact subsets of $\cz$ to the function 1 with all partial derivatives.
From the mentioned properties of $\omega$ and the choice of a sequence $\{\gamma_N\}$ it easily follows that $$\lim_{N\rightarrow \infty} \int_\cz \gamma_N\cdot (i_{F*}\otimes Id_{Dens(V)})\omega(E,F)= \int_\cz (i_{F*}\otimes Id_{Dens(V)})\omega(E,F)$$ where the convergence is understood in the space $Val^{sm}(V)$. Hence \begin{eqnarray*} (\ref{2a3})=\lim_{N\rightarrow \infty}\mathbb{F}_V\left(\int_\cz \gamma_N\cdot(i_{F*}\otimes Id_{Dens(V)})\left((p_E\circ i_F)^*(\zeta(E))\otimes \mu(F)\right)\right)=\\ \lim_{N\rightarrow \infty}\int_{E\in Gr_{n-i}(V)}\mathbb{F}_V\left(\int_{F\in Gr_i(V)}\gamma_N\cdot(i_{F*}\otimes Id_{Dens(V)})\left((p_E\circ i_F)^*(\zeta(E))\otimes \mu(F)\right)\right). \end{eqnarray*}
We may apply Corollary \ref{Cor:com8.7} to the last expression; it is equal to \begin{eqnarray*} \lim_{N\rightarrow \infty}\int_{E\in Gr_{n-i}(V)}\int_{F\in Gr_i(V)}\gamma_N\cdot (i_F^{\vee*}\otimes Id_{Dens(V)})\left((\mathbb{F}_F\otimes Id_{Dens(V/F)})((p_E\circ i_F)^*(\zeta(E))\otimes \mu(F))\right). \end{eqnarray*} The last limit is clearly equal to \begin{eqnarray} \int_{(E,F)\in \cz} (i_F^{\vee*}\otimes Id_{Dens(V)})\left((\mathbb{F}_F\otimes Id_{Dens(V/F)})((p_E\circ i_F)^*(\zeta(E))\otimes \mu(F))\right)=\\\label{2a4} \int_{(E,F)\in \cz} (i_F^{\vee*}\otimes Id_{Dens(V)})\left(\mathbb{F}_F\left((p_E\circ i_F)^*(\zeta(E))\right)\otimes \mu(F)\right). \end{eqnarray} Since for $(E,F)\in \cz$ the map $p_E\circ i_F\colon F\rightarrow V/E$ is an isomorphism, we have $$\mathbb{F}_F\circ (p_E\circ i_F)^*=(p_E\circ i_F)^\vee_*\circ \mathbb{F}_{V/E}.$$ Hence \begin{eqnarray*} (\ref{2a4})=\int_\cz(i_F^{\vee*}\otimes Id_{Dens(V)})\left(i^\vee_{F*}(p^\vee_{E*}(\mathbb{F}_{V/E}\zeta(E)))\otimes \mu(F)\right)=\\ \int_{F\in Gr_i(V)}(i_F^{\vee*}\otimes Id_{Dens(V)})\left(i^\vee_{F*}\left(\int_{E\in Gr_{n-i}(V)}(p^\vee_{E*}(\mathbb{F}_{V/E}\zeta(E)))\right)\otimes \mu(F)\right)=\\ \int_{F\in Gr_i(V)}(i_F^{\vee*}\otimes Id_{Dens(V)})\left(i^\vee_{F*}(\mathbb{F}_V\psi)\otimes\mu(F)\right). \end{eqnarray*} To summarize the above computation, we have obtained the equality \begin{eqnarray}\label{2a5} \mathbb{F}_V(\phi\cdot\psi)=\int_{F\in Gr_i(V)}(i_F^{\vee*}\otimes Id_{Dens(V)})\left(i^\vee_{F*}(\mathbb{F}_V\psi)\otimes\mu(F)\right). \end{eqnarray}
Hence to finish the proof of Case 2a it remains to prove the following lemma. \begin{lemma}\label{L:2a6} Let $\mu\in C^\infty(Gr_i(V),\ct_{i,V;i})$. Let $\phi=\int_{F\in Gr_i(V)}p_F^*(\mu(F))$. Let $\xi\in Val^{sm}(V^*)\otimes Dens(V)$. Then \begin{eqnarray}\label{2a7} \int_{F\in Gr_i(V)}(i_F^{\vee*}\otimes Id_{Dens(V)})\left(i^\vee_{F*}(\xi)\otimes\mu(F)\right)=\xi\ast \mathbb{F}_V(\phi). \end{eqnarray} \end{lemma} {\bf Proof.} First recall that $Dens(V/F)=Dens(F^*)\otimes Dens(V)$. Under this identification, for any $F\in Gr_i(V)$ we have $\mu(F)\in Dens(F^*)\otimes Dens(V)\otimes
|\omega_{Gr_i(V)}|\big|_F$. Next we have $$\mathbb{F}_V(\phi)=\int_{F\in Gr_i(V)}(i_F^{\vee*}\otimes Id_{Dens(V)})(\mu(F)).$$ Let us fix a Lebesgue measure $vol_V$ on $V$. Then by Proposition \ref{exter-pull} \begin{eqnarray*}\xi\boxtimes \mathbb{F}_V(\phi)=\int_{F\in Gr_i(V)}\xi\boxtimes (i_F^{\vee*}\otimes Id_{Dens(V)})(\mu(F))=\\ \int_{F\in Gr_i(V)}\xi\boxtimes \left(i_F^{\vee*}(\mu(F)\otimes vol_V^{-1})\otimes vol_V\right). \end{eqnarray*} Hence, using Proposition \ref{P:r2}, we get \begin{eqnarray*} \xi\ast\mathbb{F}_V(\phi)=a_*(\xi\boxtimes \mathbb{F}_V(\phi))=\\ \int_{F\in Gr_i(V)}a_*\left(\xi\boxtimes \left(i_F^{\vee*}(\mu(F)\otimes vol_V^{-1})\otimes vol_V\right)\right) \end{eqnarray*} where $a\colon V^*\times V^*\rightarrow V^*$ is the addition map. Thus in order to prove the lemma, it suffices to show that for any $F\in Gr_i(V)$, any $\xi\in Val^{sm}(V^*)\otimes Dens(V)$, and any $\mu\in Dens(V/F)=Dens(F^*)\otimes Dens(V)$ one has \begin{eqnarray}\label{14-1} a_*\left(\xi\boxtimes \left(i_F^{\vee*}(\mu\otimes vol_V^{-1})\otimes vol_V\right)\right)=(i_F^{\vee*}\otimes Id_{Dens(V)})\left(i^\vee_{F*}(\xi)\otimes\mu\right). \end{eqnarray}
Let us fix also a Lebesgue measure $vol_F$ on $F$. Let us denote $vol_{V/F}:=\frac{vol_V}{vol_F}$ the corresponding Lebesgue measure on $V/F$. It is enough to prove the equality (\ref{14-1}) for $\mu=vol_{V/F}$. Then $\mu\otimes vol_V^{-1}=vol_F^{-1}\in Dens(F^*)$. Furthermore, by the McMullen's conjecture we may assume that $\xi(\bullet)=vol_V^{-1}(\bullet +A)\otimes vol_V$ where $A\in \ck^{sm}(V^*)$.
Let us fix $K\in \ck(V^*)$. Then we have \begin{eqnarray} (\mbox{r.h.s. of } (\ref{14-1}))(K)=(i^\vee_{F*}(\xi)\otimes \mu)(i^\vee_F(K))=\\\left(vol_F^{-1}(i^\vee_F(K)+i_F^\vee(A))\otimes vol_F\right)\otimes vol_{V/F}=\\\label{14-2} vol_F^{-1}(i_F^\vee(K+A))\otimes vol_V. \end{eqnarray}
On the other hand we have \begin{eqnarray} \xi\boxtimes \left(i_F^{\vee*}(\mu\otimes vol_V^{-1})\otimes vol_V\right)=\\\label{14-3} \left(vol_V^{-1}(\bullet +A)\otimes vol_V\right)\boxtimes \left(i_F^{\vee*}(vol_F^{-1})\otimes vol_V\right). \end{eqnarray} Let us fix $S\in \ck(Ker(i_F^\vee))=\ck((V/F)^*)$ with $vol_{V/F}^{-1}(S)=1$. Then by Lemma \ref{L:aLemma} we have \begin{eqnarray}\label{14-4} (i_F^{\vee *}(vol_F^{-1}))(K)=vol_F^{-1}(i_F^\vee(K))=\frac{1}{(n-i)!}
\frac{d^{n-i}}{d\varepsilon^{n-i}}\big|_{\varepsilon=0}vol_V^{-1}(K+\varepsilon S). \end{eqnarray} Substituting (\ref{14-4}) into (\ref{14-3}) and using the definition of pushforward we get \begin{eqnarray}\label{14-5} (\mbox{l.h.s. of }(\ref{14-1}))= vol_V^{-1}(i_F^\vee(K+A))\otimes vol_V. \end{eqnarray} Comparing (\ref{14-5}) and (\ref{14-2}) we conclude the equality (\ref{14-1}). Hence Lemma \ref{L:2a6} is proved. \qed
Case 2a is proved as well.
\underline{Case 2b.} Let us assume now that $i+j+1<n$.
By Schneider's theorem \cite{schneider-simple} every odd $(i+j+1)$-homogeneous valuation is uniquely determined by its restrictions to all $(i+j+1)$-dimensional subspaces. Hence it is enough to check that for any $(i+j+1)$-dimensional subspace $i\colon L\hookrightarrow V$ one has $$i^*(\phi\cdot \psi)=i^*(\mathbb{F}_V^{-1}(\mathbb{F}_V\phi\ast \mathbb{F}_V\psi)).$$ Let us compute the right hand side of the above equality: \begin{eqnarray*} i^*\left(\mathbb{F}_V^{-1}(\mathbb{F}_V\phi\ast \mathbb{F}_V\psi)\right)\overset{\mbox{Thm. } \ref{T:z3}}{=} \mathbb{F}_L^{-1}\left(i_*^\vee(\mathbb{F}_V\phi\ast\mathbb{F}_V\psi)\right)\overset{\mbox{Prop. } \ref{P:homom-of-push}}{=}\\ \mathbb{F}_L^{-1}(i_*^\vee(\mathbb{F}_V\phi)\ast i_*^\vee(\mathbb{F}_V\psi))\overset{\mbox{Thm. }\ref{T:z3}}{=} \mathbb{F}_L^{-1}(\mathbb{F}_L(i^*\phi)\ast \mathbb{F}_L(i^*\psi))\overset{\mbox{Case 2a}}{=}\\ i^*\phi\cdot i^*\psi=i^*(\phi\cdot \psi). \end{eqnarray*} Thus Case 2b is proved. Hence Case 2 is proved too.
\underline{Case 3.} Assume that $\phi,\psi\in Val_1^{-,sm}(V)$.
We will use the homomorphism property of the Fourier transform in two dimensions proved in Section \ref{two-dim-isomorphism}. By Klain's theorem \cite{klain} it is enough to show that for any 2-dimensional space $E$ and an imbedding $i\colon E\hookrightarrow V$ one has \begin{eqnarray}\label{general-isomor} i^*(\phi\cdot \psi)=i^*\left(\mathbb{F}_V^{-1}(\mathbb{F}_V\phi\ast\mathbb{F}_V\psi)\right). \end{eqnarray} Let us compute the right hand side of (\ref{general-isomor}): \begin{eqnarray*} i^*\left(\mathbb{F}_V^{-1}(\mathbb{F}_V\phi\ast\mathbb{F}_V\psi)\right)\overset{\mbox{Thm. }\ref{T:z3}}{=} \mathbb{F}_E^{-1}\left(i^\vee_*(\mathbb{F}_V\phi\ast\mathbb{F}_V\psi)\right)\overset{\mbox{Prop. }\ref{P:homom-of-push}}{=}\\ \mathbb{F}_E^{-1}\left(i^\vee_*(\mathbb{F}_V\phi)\ast i^\vee_*(\mathbb{F}_V\psi)\right)\overset{\mbox{Thm. }\ref{T:z3}}{=} \mathbb{F}_E^{-1}\left(\mathbb{F}_E(i^*\phi)\ast \mathbb{F}_E(i^*\psi)\right)\overset{\mbox{Thm. }\ref{T:ISO}(2)}{=}\\ i^*\phi\cdot i^*\psi=i^*(\phi\cdot \psi). \end{eqnarray*} Case 3 is proved.
\underline{Case 4.} Let us prove the equality (\ref{pr-conv}) in general.
The only remaining case is $\phi\in Val^{-,sm}_i(V),\,\psi\in Val^{-,sm}_j(V)$, and either $i>1$ or $j>1$. For any $k\geq 1$ the subspace $Val_1^{-,sm}(V)\cdot Val_{k-1}^{+,sm}(V)$ is dense in $Val_k^{-,sm}(V)$ by the Irreducibility Theorem. Hence we may assume that $\phi=\phi^-\cdot \phi^+$ where $\phi^-\in Val_1^{-,sm}(V)$, $\phi^+\in Val_{i-1}^{+,sm}(V)$; and similarly $\psi=\psi^-\cdot \psi^+$ where $\psi^-\in Val_1^{-,sm}(V)$, $\psi^+\in Val_{j-1}^{+,sm}(V)$. Then we have \begin{eqnarray*} \mathbb{F}_V(\phi\cdot \psi)=\mathbb{F}_V((\phi^+\cdot\psi^+)\cdot(\phi^-\cdot\psi^-))\overset{\mbox{Case 1}}{=}\\ \mathbb{F}_V(\phi^+\cdot\psi^+)\ast\mathbb{F}_V(\phi^-\cdot\psi^-)\overset{\mbox{Cases 1,3}}{=}\mathbb{F}_V(\phi^+)\ast\mathbb{F}_V(\psi^+)\ast\mathbb{F}_V(\phi^-)\ast\mathbb{F}_V(\psi^-)\overset{\mbox{Case 2}}{=}\\ \mathbb{F}_V(\phi^+\cdot\phi^-)\ast \mathbb{F}_V(\psi^+\cdot \psi^-)=\mathbb{F}_V(\phi)\ast\mathbb{F}_V(\psi). \end{eqnarray*} Theorem \ref{T:hfh1} is proved. \qed
\begin{remark}\label{R:non-unique} The Fourier transform $\mathbb{F}_V$ we have constructed is not quite canonical. More precisely, let us fix $n>1$. Let $\cc_n$ denote the category whose objects $Ob(\cc_n)$ are $n$-dimensional real vector spaces, and morphisms between them are linear isomorphisms. Assume that for any object $V$ of $\cc_n$ we are given an isomorphism $\mathbb{F}_V\colon Val^{sm}(V)\tilde\rightarrow Val^{sm}(V^*)\otimes Dens(V)$ of linear topological spaces such that \newline $\bullet$ for any morphism $f\colon V\rightarrow W$ in $\cc_n$ (i.e. $f$ is just a linear isomorphism) the following diagram is commutative \begin{eqnarray*} \square<1`1`1`1;1300`400>[Val^{sm}(V)`Val^{sm}(W)`Val^{sm}(V^*)\otimes Dens(V)`Val^{sm}(W^*)\otimes Dens(W); `\mathbb{F}_V`\mathbb{F}_W`] \end{eqnarray*} where the horizontal arrows are obvious isomorphisms induced by the isomorphisms $V\overset{f}{\rightarrow}W$ and $W^*\overset{f^\vee}{\rightarrow}V^*$ where $f^\vee$ is the dual of $f$; \newline $\bullet$ for any $V\in Ob(\cc_n)$ the map $\mathbb{F}_V$ is an isomorphism of algebras when the source is equipped with the product and the target with the convolution; \newline $\bullet$ for any $V\in Ob(\cc_n)$ one has the Plancherel type formula as in Theorem \ref{T:main1}(3).
Then one can show that there exist exactly four families of maps $\{\mathbb{F}_V\}_{V\in Ob(\cc_n)}$ satisfying the above conditions. The difficult part (which is the main subject of this article) is to prove existence of at least one such a family. \end{remark}
\section{A hard Lefschetz type theorem for valuations.}\label{lefschetz}\setcounter{subsection}{1}\setcounter{theorem}{0} \setcounter{equation}{0} Let $V$ be a {\itshape Euclidean} $n$-dimensional space. Let $V_1\in Val_1(V)$ denote the first intrinsic volume (see e.g. \cite{schneider-book}, p. 210). This valuation is invariant under the orthogonal group and it is smooth. The main result of this section is the following theorem. \begin{theorem}[hard Lefschetz type theorem]\label{T:lefschetz} Let $0\leq i<n/2$. Then the map $$Val_i^{sm}(V)\rightarrow Val_{n-i}^{sm}(V)$$ given by $\phi\mapsto (V_1)^{n-2i}\cdot \phi$ is an isomorphism. \end{theorem} \begin{remark}\label{R:lefschetz-even} For even valuations this result was proved first by the author in \cite{alesker-gafa-sem-04}. \end{remark}
Before we prove Theorem \ref{T:lefschetz} we need some preparations. In \cite{alesker-jdg-03} the author has introduced an operator $$\Lambda\colon Val(V)\rightarrow Val(V)$$ defined by
$(\Lambda\phi)(K)=\frac{d}{d\varepsilon}\big|_{\varepsilon=0}\phi(K+\varepsilon \cdot D)$ for any $\phi\in Val(V),\, K\in \ck(V)$. Note that $\phi(K+\varepsilon \cdot D)$ is a polynomial in $\varepsilon\geq 0$ of degree at most $n$ by a result of McMullen \cite{mcmullen-euler}. The operator $\Lambda$ decreases the degree of homogeneity by 1. We are going to use the following theorem which was proved by the author \cite{alesker-jdg-03} for even valuations and by Bernig and Br\"ocker \cite{bernig-brocker} in general. \begin{theorem}\label{T:hlold} Let $n\geq i>n/2$. The operator $$\Lambda^{2i-n}\colon Val_i^{sm}(V)\rightarrow Val_{n-i}^{sm}(V)$$ is an isomorphism. \end{theorem}
Next, the Euclidean metric on $V$ induces the identifications $V\tilde\rightarrow V^*$ and $Dens(V)\tilde\rightarrow \mathbb{C}$. Under these identifications the Fourier transform acts $\mathbb{F}_V\colon Val^{sm}(V)\tilde \rightarrow Val^{sm}(V)$. We will need the following lemma which was observed by Bernig and Fu in \cite{bernig-fu}, Corollary 1.9, in the case of even valuations. \begin{lemma}\label{L:hlopers} For any $\phi\in Val^{sm}(V)$ one has $$V_1\cdot \phi=\kappa(\mathbb{F}_V^{-1}\circ \Lambda\circ \mathbb{F}_V)(\phi)$$ where $\kappa$ is a non-zero constant depending on $n$ only. \end{lemma} {\bf Proof.} The proof is essentially the same as in the even case \cite{bernig-fu}, once one has the Fourier transform. By the homomorphism property of the Fourier transform we have \begin{eqnarray}\label{opa4} \mathbb{F}_V(V_1\cdot\phi)=\mathbb{F}_V(V_1)\ast \mathbb{F}_V(\phi). \end{eqnarray} Observe that $\mathbb{F}_V(V_1)$ is an $O(n)$-invariant valuation homogeneous of degree $n-1$. Hence by the Hadwiger characterization theorem \cite{hadwiger-book} it must be proportional to the $(n-1)$-th intrinsic volume $V_{n-1}$, which is proportional to the valuation $K\mapsto
\frac{d}{d\varepsilon}\big|_{0}vol(K+\varepsilon D)$. Next observe that for any $A\in \ck^{sm}(V)$ and any $\phi\in Val^{sm}(V)$ \begin{eqnarray}\label{opa1} vol(\bullet+A)\ast \phi=\phi(\bullet+A). \end{eqnarray} Indeed the equality (\ref{opa1}) is easily checked for $\phi$ of the form $\phi(\bullet)=vol(\bullet+B)$, and the general case follows from the McMullen's conjecture. Hence $\mathbb{F}_V(V_1)\ast \mathbb{F}_V(\phi)$ is proportional to \begin{eqnarray}\label{opa3}
\frac{d}{d\varepsilon}\big|_{0}(\mathbb{F}_V\phi)(\bullet+\varepsilon D)=\Lambda(\mathbb{F}_V\phi). \end{eqnarray} Then Lemma \ref{L:hlopers} follows from (\ref{opa4}), (\ref{opa3}). \qed
{\bf Proof of Theorem \ref{T:lefschetz}.} It follows immediately from Theorem \ref{T:hlold} and Lemma \ref{L:hlopers}. \qed
\section{Appendix: a remark on exterior product on valuations.}\label{appendix-ext-product}\setcounter{subsection}{1}\setcounter{theorem}{0} \setcounter{equation}{0} In this appendix we will prove a slightly more refined statement on the exterior product of valuations than it appears in \cite{alesker-gafa-04}. Though for the purposes of this article we need only the case of translation invariant valuations, we will prove the result in a greater generality of polynomial valuations following \cite{alesker-gafa-04}.
Let us remind the definition of a {\itshape polynomial} valuation introduced by Khovanskii and Pukhlikov in \cite{khovanskii-pukhlikov1}. Let $V$ be an $n$-dimensional real vector space. \begin{definition} A valuation $\phi$ is called polynomial of degree at most $d$ if for every $K\in {\cal K}(V)$ the function $x\mapsto \phi (K+x)$ is a polynomial on $V$ of degree at most $d$. \end{definition} Note that valuations polynomial of degree 0 are just translation invariant valuations. Polynomial valuations have many nice combinatorial-algebraic properties (\cite{khovanskii-pukhlikov1}, \cite{khovanskii-pukhlikov2}).
Let $PVal_d(V)$ denote the space of continuous valuations on $V$ which are polynomial of degree at most $d$. It is a Fr\'echet space (in fact a Banach space) with the topology of uniform convergence on compact subsets of $\ck(V)$. Let $\Omega^{n}_{d}(V)$ denote the (finite dimensional) space of
$n$-densities on $V$ with polynomial coefficients of degree at most $d$ (clearly $\Omega^{n}_{d}(V)$ is canonically isomorphic to $(\oplus _{i=0}^{d}Sym^{i}V^*) \otimes |\wedge ^{n}V^*|$ where $|\wedge
^nV^*|$ denotes the space of Lebesgue measures on $V$).
The group $GL(V)$ acts naturally on $PVal_d(V)$ as usual: $(g\phi)(K)=\phi(g^{-1}K)$. This action is continuous. The subspace of $GL(V)$-smooth vectors is denoted by $PVal_d^{sm}(V)$.
For a vector space $U$, a smooth measure $\mu$ on $U$,and $A\in \ck(U)$ let us denote by $\mu_A$ the valuation $[K\mapsto \mu(K+A)]$. Now let us state the main result of this appendix which refines Proposition 1.10 from \cite{alesker-gafa-04}. \begin{proposition}\label{P:ext-prod} Let $V,W$ be finite dimensional real vector spaces. There exists a continuous bilinear map $$PVal_d^{sm}(V)\times PVal_{d'}(W)\rightarrow PVal_{d+d'}(V\times W)$$ which is uniquely characterized by the property that for any polynomial measures $\mu,\nu$ on $V,W$ respectively, and any $A\in \ck(V),\, B\in \ck^{sm}(W)$ one has $$(\mu_A,\nu_B)\mapsto (\mu\boxtimes\nu)_{A\times B}$$ where $\mu\boxtimes\nu$ denotes the usual product measure. This map is called the exterior product and is denoted by $\boxtimes$. \end{proposition} \begin{remark} An important difference of this proposition in comparison to \cite{alesker-gafa-04} is that now we can consider the exterior product of a smooth valuation by a continuous one (and not just a product of two smooth valuations). \end{remark}
Before we prove this proposition let us introduce more notation and remind some constructions from \cite{alesker-gafa-04}. Let us denote by $\PP_+(V^*)$ the manifold of oriented lines passing through the origin in $V^*$. Let $L$ denote the line bundle over $\PP_+(V^*)$ whose fiber over an oriented line $l$ consists of linear functionals on $l$.
We are going to remind the construction from \cite{alesker-gafa-04} of a natural linear map $$\Theta_{k,d}\colon \Omega^{n}_{d}(V) \otimes C^{\infty} ((\PP_+(V^*))^k, L^{\boxtimes k})\rightarrow PVal_d(V)$$ which commutes with the natural action of the group $GL(V)$ on both spaces and induces an epimorphism on the subspaces of smooth vectors.
The construction is as follows. Let $\mu\in \Omega^{n}_{d}(V),\, A_1,\dots ,A_k\in \ck(V)$. Then $\int_{\sum_{j=1}^k \lambda_jA_j} \mu$ is a polynomial in $\lambda_j\geq 0$ of degree at most $n+d$. This can be easily seen directly, but it was also proved in general for polynomial valuations by Khovanskii and Pukhlikov \cite{khovanskii-pukhlikov1}. Also it easily follows that the coefficients of this polynomial depend continuously on $(A_1,\dots ,A_k)\in \ck(V)^k$ with respect to the Hausdorff metric. Hence we can define a continuous map $\Theta_{k,d}'\colon\Omega^{n}_{d}(V) \times\ck(V)^k\rightarrow PVal_d(V)$ given by $$(\Theta_{k,d}'(\mu;A_1,\dots,A_k))(K):=\frac{\partial^k}{\partial
\lambda_1\dots\partial\lambda_k}\bigg |_{\lambda_j =0}\int_{K+\sum_{j=1}^k \lambda_jA_j}\mu.$$
It is clear that $\Theta_{k,d}'$ is Minkowski additive with respect to each $A_j$. Namely, say for $j=1, \, a,b\geq 0$, one has $$\Theta_{k,d}'(\mu;a A_1'+b A_1'',A_2,\dots,A_k)=a\Theta_{k,d}'(\mu;A_1',A_2,\dots,A_k)+ b\Theta_{k,d}'(\mu;A_1'',A_2,\dots,A_k).$$
Recall that for any $A\in \ck(V)$ one defines the supporting functional $h_A(y):=\sup_{x\in A}(y,x)$ for any $y\in V^*$. Thus $h_A\in C(\mathbb{P}_+(V^*),L)$. Moreover it is well known (and easy to see) that $A_N\rightarrow A$ in the Hausdorff metric if and only if $h_{A_N}\rightarrow h_A$ in $C(\mathbb{P}_+(V^*),L)$. Also any section $F\in C^2(\mathbb{P}_+(V^*),L)$ can be presented as a difference $F=G-H$ where
$G,\,H\in C^2(\mathbb{P}_+(V^*),L)$ are supporting functionals of some convex compact sets and $\max\{||G||_{C^2},||H||_{C^2}\}\leq c
||F||_{C^2}$ where $c$ is a constant. (Indeed one can choose $G=F+R\cdot h_D,\, H=R\cdot h_D$ where $D$ is the unit Euclidean ball, and $R$ is a large enough constant depending on
$||F||_{C^2}$.) Hence we can uniquely extend $\Theta_{k,d}'$ to a multilinear continuous map (which we will denote by the same letter): $$\Theta_{k,d}'\colon\Omega^{n}_{d}(V) \times (C^2(\mathbb{P}_+(V^*),L))^k\rightarrow PVal_d(V).$$ By Theorem \ref{T:bilinear-forms} it follows that this map gives rise to a continuous linear map $$\Theta_{k,d}\colon\Omega^{n}_{d}(V)\otimes C^{\infty}(\mathbb{P}_+(V^*)^k, L^{\boxtimes k})\rightarrow PVal_d(V).$$ Since $\Theta_{k,d}$ commutes with the action of $GL(V)$, its image is contained in $PVal_d^{sm}(V)$. Thus we obtain a continuous map $$\Theta_{k,d}\colon\Omega^{n}_{d}(V)\otimes C^{\infty}(\mathbb{P}_+(V^*)^k, L^{\boxtimes k})\rightarrow PVal_d^{sm}(V),$$ which is the map we wanted to construct.
We will study this map $\Theta_{k,d}$. Note that it depends on $k$ and $d$ which will be fixed from now on. Let us denote by $\Theta_d$ the sum of the maps $\bigoplus _{k=0}^{n}\Theta_{k,d}$. Thus $$\Theta_d\colon\Omega^{n}_{d}(V) \otimes\left(\bigoplus _{k=0}^{n}C^{\infty}(\PP_+(V^*)^k, L^{\boxtimes k})\right) \rightarrow PVal^{sm}_d(V).$$ The following result was proved by the author in \cite{alesker-gafa-04}, Corollary 1.9. \begin{lemma}[\cite{alesker-gafa-04}]\label{theta-onto} The map $\Theta_d$ is onto $PVal^{sm}_d(V)$. \end{lemma} Since the source and the target spaces of $\Theta_d$ are Fr\'echet spaces, by the open mapping theorem (see e.g. \cite{schaefer}, Ch. III, \S 2) the topology on $PVal^{sm}_d(V)$ is the quotient topology on $\Omega^{n}_{d}(V) \otimes\left(\bigoplus _{k=0}^{n}C^{\infty}(\PP_+(V^*)^k, L^{\boxtimes k})\right)$.
{\bf Proof} of Proposition \ref{P:ext-prod}. Denote $n:=\dim V,\, m:=\dim W$. We have the following claim whose proof is easy and is omitted. \begin{claim}\label{C:ap1} Let $\phi\in PVal_d(V)$. Let $\mu\in \Omega^m_{d'}(W)$. Then the map $\Psi_{\phi,\mu}\colon \ck(V\times W)\rightarrow \mathbb{C}$ given by
$$\Psi_{\phi,\mu}(K):=\int_{w\in W}\phi(K\cap(V\times \{w\}))d\mu(w)$$ is a continuous valuation polynomial of degree at most $d+d'$. \end{claim} Hence by a result of Khovanskii and Pukhlikov \cite{khovanskii-pukhlikov1}, $\Psi_{\phi,\mu}(\sum_{i=1}^s\lambda_iK_i)$ is a polynomial in $\lambda_1,\dots,\lambda_s\geq 0$ of degree at most $d+d'+n+m$ for any $K_1,\dots,K_s\in \ck(V\times W)$ (for translation invariant valuations this fact was proved earlier by McMullen \cite{mcmullen-euler}). Hence for any $A_1,\dots,A_k\in \ck(W)$, $K\in \ck(V\times W)$ the expression \begin{eqnarray*} \Psi_{\phi,\mu}\left(K+\left(\{0\}\times \sum_{i=1}^k\lambda_iA_i\right)\right)=\int_{w\in W}\phi\left(\left(K+\left(\{0\}\times\sum_{i=1}^k\lambda_iA_i\right)\right)\cap \left(V\times\{w\}\right)\right)d\mu(w) \end{eqnarray*} is a polynomial in $\lambda_1,\dots,\lambda_k\geq 0$ of degree at most $d+d'+n+m$ (in particular, there is a uniform bound on the degree).
It easily follows that
$\frac{\partial^k}{\partial\lambda_1\dots\partial\lambda_k}\big|_0 \Psi_{\phi,\mu}\left(K+\left(\{0\}\times\sum_{i=1}^k\lambda_iA_i\right)\right)$ is a continuous valuation with respect to $K\in \ck(V\times W)$. Moreover the map \begin{eqnarray}\label{ap2} PVal_d(V)\times\Omega_{d'}^m(W)\times \ck(W)^k\rightarrow PVal_{d+d'}(V\times W) \end{eqnarray} given by
$$(\phi,\mu;A_1,\dots,A_k)\mapsto \left[K\mapsto \frac{\partial^k}{\partial\lambda_1\dots\partial\lambda_k}\big|_0 \Psi_{\phi,\mu}\left(K+\left(\{0\}\times\sum_{i=1}^k\lambda_iA_i\right)\right)\right]$$ is continuous. Also this map is Minkowski additive with respect to each $A_j\in \ck(W)$. By the argument used in the construction of the map $\Theta_d$, the map (\ref{ap2}) extends (uniquely) to a multilinear {\itshape continuous} map \begin{eqnarray}\label{ap3} PVal_d(V)\times\Omega_{d'}^m(W)\times C^\infty(\mathbb{P}_+(W^*),L)^k\rightarrow PVal_{d+d'}(V\times W). \end{eqnarray} By Theorem \ref{T:bilinear-forms} the map (\ref{ap3}) gives rise to a bilinear {\itshape continuous} map \begin{eqnarray}\label{ap4} PVal_d(V)\times\Omega_{d'}^m(W)\otimes C^\infty(\mathbb{P}_+(W^*)^k,L^{\boxtimes k})\rightarrow PVal_{d+d'}(V\times W). \end{eqnarray} Summing up over $k=0,\dots, m$ we obtain a bilinear {\itshape continuous} map \begin{eqnarray}\label{ap5} PVal_d(V)\times\left(\Omega_{d'}^m(W)\otimes \left(\bigoplus_{k=0}^m C^\infty\left(\mathbb{P}_+(W^*)^k,L^{\boxtimes k}\right)\right)\right)\rightarrow PVal_{d+d'}(V\times W). \end{eqnarray} \begin{lemma}\label{L:ap6} The map (\ref{ap5}) factorizes (uniquely) as \begin{eqnarray*} \Vtriangle<1`1`-1;900>[PVal_d(V)\times\left(\Omega_{d'}^m(W)\otimes \left(\bigoplus_{k=0}^m C^\infty\left(\mathbb{P}_+(W^*)^k,L^{\boxtimes k}\right)\right)\right)`PVal_{d+d'}(V\times W)`PVal_d(V)\times PVal_{d'}^{sm}(W);`Id\boxtimes \Theta_d`]. \end{eqnarray*} \end{lemma} {\bf Proof.} If $PVal_d(V)$ is replaced by $PVal_d^{sm}(V)$, the corresponding result was proved in \cite{alesker-gafa-04}, and the obtained map $$PVal_d^{sm}(V)\times PVal_{d'}^{sm}(W)\rightarrow PVal_{d+d'}(V\times W)$$ was exactly the exterior product. Our lemma follows from this fact and the continuity of the map (\ref{ap5}) because $PVal^{sm}_d(V)\subset PVal_d(V)$ is a dense subspace. Lemma is proved. \qed
The map $$PVal_d(V)\times PVal^{sm}_{d'}(W)\rightarrow PVal_{d+d'}(V\times W)$$ from Lemma \ref{L:ap6} is the map we need. Proposition \ref{P:ext-prod} is proved. \qed
\end{document} |
\begin{document}
\title{Estimating the Sampling Distribution of Test-Statistics in Bayesian Clinical Trials}
\begin{abstract} Bayesian inference and the use of posterior or posterior predictive probabilities for decision making have become increasingly popular in clinical trials. The current approach toward Bayesian clinical trials is, however, a hybrid Bayesian-frequentist approach where the design and decision criteria are assessed with respect to frequentist operating characteristics such as power and type I error rate. These operating characteristics are commonly obtained via simulation studies. In this article we propose methodology to utilize large sample theory of the posterior distribution to define simple parametric models for the sampling distribution of the Bayesian test statistics, i.e., posterior tail probabilities. The parameters of these models are then estimated using a small number of simulation scenarios, thereby refining these models to capture the sampling distribution for small to moderate sample size. The proposed approach toward assessment of operating characteristics and sample size determination can be considered as simulation-assisted rather than simulation-based and significantly reduces the computational burden for design of Bayesian trials. \end{abstract}
\noindent {\it Keywords: Bayesian sample size determination, Bayesian test-statistic, operating characteristics, posterior probability, posterior predictive probability.}
\section{Introduction}
Bayesian inference and decision making have become increasingly popular in the design and analysis of clinical trials in recent years, specifically, in adaptive designs with stopping criteria, small early phase trials, or single arm trials where external information is utilized \citep{berry_case_1993}. Posterior and posterior predictive probabilities are commonly used to make decisions and draw conclusions in Bayesian clinical trials \citep{berry_bayesian_nodate}. Despite the popularity of Bayesian methods in clinical trials, regulatory agencies require the designs and procedures to be assessed with respect to frequentist operating characteristics \citep{fdacderbent_adaptive_nodate}. While the most common operating characteristics are power and type I error rate, other quantities, such as probability of stopping for efficacy or futility at interim analyses, may be of interest. Evaluation of frequentist criteria requires understanding the sampling behaviour of the Bayesian procedures, particularly, the sampling distribution of Bayesian ``test statistics". Therefore, the current approach for using Bayesian methods in clinical trials is not ``fully Bayesian", but a hybrid of Bayesian and frequentist philosophies \citep{berry_bayesian_nodate}. The hybrid approach allows for utilizing flexible and advanced Bayesian methodology so long as the frequentist error rates associated with the decisions made based on these procedures are quantified, controlled, and reported.
Frequentist properties of posterior functionals being used as test statistics may be studied and evaluated based on the asymptotic properties of the posterior itself. In fact, much of the Bayesian sample size determination (SSD) literature takes advantage of these asymptotic properties to obtain approximate sample size or power estimates \citep{ohagan_bayesian_2001, kunzmann_review_2021}. However, these approximations are poor for small sample sizes and improve at a slower rate for more complex and highly parametrized models. As a result, \cite{gelfand_simulation-based_2002} proposed a simulation-based approach toward Bayesian SSD. Following this seminal work, current practice in trials relies on Monte Carlo simulations to quantify frequentist operating characteristics of a proposed Bayesian design and analysis framework. This entails iteratively simulating trials, with a particular design, analysis model and decision procedure, to estimate and control the probability of making erroneous decisions over the sampling distribution.
These simulation studies can involve a large number of scenarios, arising from various configurations of design parameters (sample size, number and spacing of interim analysis, randomization techniques, etc.), analysis model parameters (baseline, effect, and hyper/nuisance parameters) and decision parameters (efficacy and futility thresholds). The simulations are most often designed to mirror a frequentist power analysis. A simulation scenario is defined by assuming fixed values of the model parameters (with a focus on the effect parameter) and a given set of design and decision parameters. For each simulation scenario, data is generated from the model with a given set of parameters and the trial design is simulated for a large number of iterations (10,000 iterations are recommended by the FDA \citep{fdacderbent_adaptive_nodate}). If the posterior distribution arising from the specified statistical model is analytically intractable, each simulation iteration involves sampling or approximation techniques for estimating the posterior resulting in a significant computational burden.
Incorporating the uncertainty in the analysis model parameters has been extensively discussed in the Bayesian SSD literature. See \cite{kunzmann_review_2021} for a review. One area of discussion specifically includes Bayesian assurance which is defined as the integrated probability of success over a ``design prior" (different from the analysis prior) on the parameter of interest, rather than power which is the probability of success conditioned on a fixed true value of the parameter. Although this approach can reduce the number of simulation scenarios, there remains a significant number of scenarios arising from the combination of design and decision parameter values. In addition, calculating Bayesian assurance in non-conjugate, multi-parameter models is computationally intensive.
As we explain in the following sections, the goal of this article is to propose a set of methods which efficiently execute many of the existing approaches in the Bayesian SSD literature for realistically complex models and designs. The proposed approach blends the theoretical and Monte Carlo-based approaches of Bayesian SSD in order to assess the frequentist operating characteristics of Bayesian analysis and decision procedures across the design/model/decision parameter space. This is achieved by taking advantage of the known asymptotic behaviour of the posterior distribution and utilizing Monte Carlo simulations at a small number of selected scenarios to learn the sampling distribution of the Bayesian test statistic as a function of the design/model parameters.
Previous work in this area include \cite{muller_optimal_1995} and \cite{muller_simulation_2005}, who propose a curve fitting approach to a target criterion, such as a utility or loss function, for Bayesian sample size determination. Also, \cite{golchi_estimating_2022} proposed a modelling approach for estimating the operating characteristics in Bayesian adaptive trials which was exclusively based on an initial set of simulations to estimate the sampling distribution. The present approach is different in that it combines asymptotic theory with simulations to learn the sampling distribution rather than target specific functionals of the distribution or rely solely on simulations. In general the proposed approach facilitates decision theoretic methods for clinical trials \citep{lewis_bayesian_2007, calderazzo_decision-theoretic_2020}, where the utility function to be optimized can be computed efficiently from the estimated sampling distribution.
The methods proposed in this article can be viewed as a Bayesian power analysis approach that does not rely on large sample size assumptions or a large number of simulation scenarios. Such an approach is specifically applicable to clinical trials where a complex analysis model is required. Considering the increasing complexity of research questions addressed in modern clinical trials, methodology that enables computationally efficient design of trials with multi-level and high-dimensional models is of increasing need. We anticipate that the present work will transform the statistical practice for clinical trials with complex designs and analysis.
The remainder of this article is organized as follows. Section \ref{Sec:Motiv} describes the problem setting and a motivating example where assessing the design operating characteristics is computationally intensive. The proposed approach is then presented in Section \ref{Sec:methods}. Section \ref{Sec:appl} includes the results for the motivating example and investigates the performance of the approach under different scenarios. The article is concluded with a discussion in Section \ref{Sec:dis}.
\section{Background and Motivation} \label{Sec:Motiv} \subsection{The problem setting}
Consider a clinical trial where a parametric analysis model is defined using a set of parameters denoted by $\boldsymbol{\theta} \in \Theta$. Suppose that the research hypotheses are formulated as \begin{equation}
H_0: \psi(\boldsymbol{\theta})\leq \psi_0 \hskip 10pt \text{vs} \hskip 10pt H_A: \psi(\boldsymbol{\theta})>\psi_0
\end{equation} where $\psi(\boldsymbol{\theta})$ is the scalar target of inference also known as the ``effect" parameter.
Within a Bayesian framework, prior distributions are assigned to $\boldsymbol{\theta}$, which in the context of clinical trials, are often non-informative or weakly informative. Informative priors may be used to reflect prior knowledge or external information about components of $\boldsymbol{\theta}$ that do not contribute to $\psi(\boldsymbol{\theta})$. Inference is then carried out based on the posterior distribution $$\pi\left(\psi(\boldsymbol{\theta})\mid \mathbf{y}\right).$$ Specifically, ``statistical significance" is commonly defined as the posterior probability of the alternative hypothesis exceeding a certain threshold, \begin{equation}
\label{eqn:pAlt}
\tau(\mathbf{y}) = \pi\left(\psi(\boldsymbol{\theta})>\psi_0\mid \mathbf{y}\right)>u.
\end{equation} Although the analysis is performed within a Bayesian framework, the statistical design follows the structure of frequentist hypothesis testing, where type I and II errors are defined with respect to the sampling distribution of a test statistic. In other words, while $\tau(\mathbf{y})$ is obtained by conditioning on the data, it is treated as a test statistic, and its distribution over all possible trial data that could have arisen under the data model is of interest.
Specifically, power can be written in terms of the sampling distribution of $\tau$ or, equivalently, the distribution of $\mathbf{y}$ given an assumed set of ``true'' values, $\boldsymbol{\theta}_d$, for the parameters under the alternative hypothesis, $
P(\tau(\mathbf{y})>u\mid \boldsymbol{\theta}_d) $. Current Bayesian trial design relies on estimating this probability via Monte Carlo simulations, i.e.\ by repeatedly sampling data from $\pi(\mathbf{y}\mid \boldsymbol{\theta}_d)$, evaluating $\tau(\mathbf{y})$, and computing the proportion $\frac{1}{M}\sum_{i=1}^{M} \mathbbm{1}(\tau_i>u)$ over $M$ simulated trials.
The procedure described above is purely frequentist, in that it assumes a fixed true value for the parameters. However, the concept of power has been generalized within a Bayesian framework to reflect uncertainty regarding $\boldsymbol{\theta}_d$. The Bayesian equivalent of power is referred to as Bayesian assurance \citep{ohagan_bayesian_2001} and is defined to incorporate uncertainty over $\boldsymbol{\theta}$ at the design stage via a prior $\pi_d(\boldsymbol{\theta})$: \begin{equation}
P\left(\tau>u\mid\boldsymbol{\theta} \sim \pi_d(\boldsymbol{\theta})\right) = \int_{\Theta}\int_u^1 \pi(\tau\mid \boldsymbol{\theta})\,\pi_d(\boldsymbol{\theta})\,d\tau\, d\boldsymbol{\theta}\label{BA}
\end{equation} Note that the distribution $\pi_d(\boldsymbol{\theta})$ is different from the prior distribution used in the analysis for a given trial. These two priors have been referred to as the design versus analysis priors, or sampling versus fitting priors, in the Bayesian SSD literature \citep{ohagan_bayesian_2001, gelfand_simulation-based_2002, pan_unifying_2021}. Here, we consider a point mass as the design prior which is the most common choice in practice as it mirrors a frequentist power analysis. But the methods we propose in the next section facilitate estimating Bayesian assurance in addition to frequentist operating characteristics.
Our goal is to obtain a parametric approximation of $\pi_y(\tau\mid\boldsymbol{\theta}_d)$ whose parameters are estimated as functions of $\boldsymbol{\theta}_d$ and the sample size, $n$, based on a number of simulated distributions at select values of $\boldsymbol{\theta}_d$ and $n$. This parametric approximation can then be used to obtain the Bayesian assurance by integrating over any design prior. We will describe the details of this procedure in Section~\ref{Sec:methods}. But first we will provide a brief discussion of Bayesian computation in trials.
\subsection{Bayesian computation in clinical trials} \label{Sec:comp} Given that design of Bayesian clinical trials and their extensions (e.g. adaptive and platform trials) relies heavily on simulation studies, it is often preferable to keep the analysis model simple and within the family of conjugate models, where an analytic posterior distribution is available. The simplicity of the analysis has been in line with the conventions of RCT, where one relies on randomization to be able to perform a simple comparison test without the need to correct for confounding factors. However, with the increasing number and complexity of research questions that modern clinical studies are designed to answer, more complex statistical models, such as hierarchical and high-dimensional models, are needed to efficiently estimate the quantities of interest.
The added complexity in most cases results in analytically intractable posterior distributions that require sampling or approximation methods. With the advancements in Bayesian computation, this is rarely an issue in the context of a single analysis, as there are a variety of techniques that can be employed to perform Bayesian inference with non-conjugate models. However, at the design stage, any additional computation in the analysis adds a significant computational cost since it is multiplied by the number of trial simulation iterations.
While the computational cost may be less of a concern in most problem settings due to the increasing access to parallel and cloud computing, it remains an issue in trial design and planning due to the scale and multi-faceted nature of these collaborations. Specifically, the statistical explorations are often viewed as a small part of the planning process and in many trials the allocated time and budget does not allow for extensive simulation studies.
Sampling techniques in general, and Markov chain Monte Carlo specifically, are among the most popular approaches in Bayesian inference. However, alternative methods have been used in Bayesian trial simulation with the goal of reducing the computational costs. Notably, \cite{psioda_practical_2018} use the approximate normality of the marginal posterior distribution of the effect resulting from the Bayesian central limit theorem \citep{ghosal_convergence_1995} to calculate the posterior probability of the alternative in (\ref{eqn:pAlt}) as a tail probability of a Gaussian distribution. This approximation eliminates the need for computation to evaluate the test statistic at any analysis point and therefore results in considerable computational savings in the design simulations. \cite{psioda_practical_2018} suggest using this approximation to explore the design space for near optimal settings, where a final MCMC can be run to obtain more precise estimates of design operating characteristics.
Another approximate approach is integrated nested Laplace approximation (INLA) \citep{rue_approximate_2009} that can be used for making approximate Bayesian inference for a class of models that can be expressed as latent Gaussian Markov random fields. Given that most analysis models used in clinical trials are within this class of models, INLA has become increasingly popular in Bayesian trials \citep{hosseini_designing_2023} since it can significantly reduce the computation time for evaluation of posterior functionals as compared to MCMC.
There exists numerous other techniques for computing posterior distributions, including other approximation methods such as variational Bayes (see \cite{tran_variational_2022} and references therein), and sampling approaches that may be suited for Bayesian computation in the context of trial simulation \citep{golchi_sequential_2018}. Our goal is to emphasize that the methods proposed in this article can be employed regardless of the computational method used to obtain the posterior distribution and the test statistic. In our examples, we rely on MCMC sampling for Bayesian analysis.
\subsection{A motivating example} \label{Sec:trial} Common practice in the primary analysis of clinical trials data is to ignore prognostic factors on the account of randomization having been performed. However, recently multiple researchers have demonstrated the benefits of covariate adjustment in clinical trials \citep{benkeser2021improving, van2022combining, lee2022benefits, willard_covariate_2022}.
Specifically, \cite{willard_covariate_2022} show the advantages of adjusting for covariates in Bayesian adaptive clinical trials. Incorporating covariates in the primary analysis will of course require that the simulations performed at the design stage also include these covariates (with assumed distributions and effects).
As an example, consider a two-arm clinical trial design with a dichotomous primary outcome $y_i \sim {\rm Bernoulli}(p_i)$ for patient $i = 1, \ldots, I$ and the hypothesis that the intervention reduces the probability of an event. Suppose that for each patient, the covariate vector is given as $\mathbf{x}_i$. The following logistic regression model is used for the primary analysis, \begin{equation}
\label{eqn:adj}
{\rm logit} (p_i) = \alpha + \theta A_i + \boldsymbol{\beta} \mathbf{x}_i \end{equation} where $A_i$ is the treatment assignment, with $A_i=1$ denoting assignment to the intervention arm, $\alpha$ is the intercept parameter, $\theta$ is the treatment effect, and $\boldsymbol{\beta}$ is a vector of covariate coefficients. The trial hypotheses are formulated as
\begin{equation*}
H_0: \psi(\alpha, \theta, \beta) \leq \psi_0 \hskip5pt \text{vs} \hskip5pt H_A: \psi(\alpha, \theta, \beta)>\psi_0
\end{equation*} where $\psi(\alpha, \theta, \beta)$ is an estimand such as the marginal risk or odds ratio. The Bayesian test statistic, $\tau$, and the decision rule is defined as in (\ref{eqn:pAlt}). The posterior distribution of $\psi$ cannot be obtained analytically using the logistic regression model in (\ref{eqn:adj}) since conjugate priors cannot be specified for the model parameters. However, as discussed in \cite{willard_covariate_2022}, the marginal estimand can be defined as a contrast of the treatment group specific risk parameters. Then samples from the posterior of $\psi$ are obtained by first marginalizing samples of the conditional risk parameters over the distribution of $\mathbf{x}$, and then forming the contrast using the resulting samples of the marginal risks.
Now consider a hypothetical design exercise where the frequentist operating characteristics of a design with the Bayesian decision rule in (\ref{eqn:pAlt}) are to be assessed for a range of values for the decision threshold, $u$, a range of sample sizes (with the goal of specifying the trial maximum size as well as interim sample sizes), and a range of effect sizes. For each of the scenarios arising from the combination of these values, an adjusted model as described in (\ref{eqn:adj}) is to be compared with an unadjusted model, \begin{equation}
\label{eqn:unadj}
{\rm logit} (p_i) = \alpha + \theta A_i. \end{equation}
While this is a relatively simple setting for trial design simulation, considering $U$ values for $u$, $N$ values for sample size, $T$ values for $\theta$ (keeping other model parameters fixed), $M$ simulation iterations (noting that $M=10,000$ is currently recommended by the \cite{fdacderbent_adaptive_nodate}) results in $\mathcal{O}(UNTM\mathcal{C})$, where $\mathcal{C}$ is the computational cost associated with posterior estimation and which varies depending on the method as discussed in Section~\ref{Sec:comp}.
In the next section we describe an approach that can reduce the computational complexity to $\mathcal{O}(N_0T_0M\mathcal{C})$, with $N_0\ll N$ and $T_0\ll T$. Then in Section~\ref{Sec:appl}, we apply the proposed approach to compare the operating characteristics of the adjusted and unadjusted models from (\ref{eqn:adj}) and (\ref{eqn:unadj}), respectively.
\section{Methodology} \label{Sec:methods} We propose an approach for approximating the sampling distribution of the test statistic, $\tau$, under the null and alternative hypotheses within an appropriate parametric family. Following \cite{golchi_estimating_2022}, we use the beta family of distributions where the parameters are in turn assigned probability distributions. Therefore, the model for the sampling distribution is in fact an infinite mixture of beta distributions weighted according to the distribution over the parameters. Instead of using Gaussian processes for the beta parameters as was proposed in \cite{golchi_estimating_2022}, we model these parameters based on the known asymptotic behaviour of the test statistic under the null and alternative hypotheses.
\subsection{Asymptotic behaviour of the test statistic} \label{sec:asymptotic} Consider $\psi = \psi_0$ as the point under $H_0$ where the probability of the type I error is maximized (i.e., the boundary of the null and alternative sets). The interest is in estimating (controlling) the type I error rate at $\psi_0$. Denote the maximum likelihood estimator for $\psi$ by $\hat{\psi}_n$. Under weak regularity conditions (compact parameter space, continuity and identifiability of the model) the following asymptotic results follow from the Bernstein--von Mises theorem and the classical Central Limit Theorem \citep{vaart_asymptotic_1998}, \begin{equation}
\label{eqn:von-Meses} \psi\mid \hat{\psi}_n \overset{.}{\sim} \mathcal{N}(\hat{\psi}_n, \frac{1}{n}I^{-1}_{\hat{\psi}_n}), \end{equation} \begin{equation}
\label{eqn:CLT} \hat{\psi}_n\mid \psi = \psi^* \overset{.}{\sim} \mathcal{N}( \psi^*, \frac{1}{n}I^{-1}_{\psi^*}), \end{equation} where $I^{-1}_{\hat{\psi}_n}$ and $I^{-1}_{\psi^*}$ denote the inverse of the Fisher information matrix at $\hat{\psi}_n$ and $\psi^*$, respectively. We can write the test statistic as \begin{align*}
\tau &= P(\psi>\psi_0\mid \hat{\psi}_n)\\
&=P(I^{1/2}_{\hat{\psi}_n}\sqrt{n}(\psi - \hat{\psi}_n)>I^{1/2}_{\hat{\psi}_n}\sqrt{n}(\psi_0 - \hat{\psi}_n)), \end{align*} which for large $n$, yields the following from (\ref{eqn:von-Meses}) \begin{equation*}
\tau \approx 1 - \Phi(I^{1/2}_{\hat{\psi}_n}\sqrt{n}(\psi_0 - \hat{\psi}_n))
= \Phi(I^{1/2}_{\hat{\psi}_n}\sqrt{n}( \hat{\psi}_n - \psi_0)), \end{equation*} where $\Phi$ is the standard normal cumulative distribution function (cdf). Then replacing $I^{1/2}_{\hat{\psi}_n}$ with $I^{1/2}_{\psi_0}$, we have \begin{equation*}
\tau\approx \Phi(I^{1/2}_{\psi_0}\sqrt{n}( \hat{\psi}_n - \psi_0)),
\end{equation*} which from (\ref{eqn:CLT}) is the cdf of a standard normal random variable and therefore follows a $U(0,1)$ distribution.
Similarly, under the (separable) alternative, $H_A: \psi = \psi^*$, where there exists $\epsilon$, such that $||\psi^*-\psi_0||\geq\epsilon$, from (\ref{eqn:von-Meses}) for large $n$ we have \begin{align*}
\tau &\approx \Phi(I^{1/2}_{\hat{\psi}_n}\sqrt{n}( \hat{\psi}_n - \psi_0)) \\
&= \Phi(I^{1/2}_{\hat{\psi}_n}\sqrt{n}( \hat{\psi}_n - \psi^* - (\psi_0 -\psi^*))),
\end{align*} resulting in the following after replacing $I^{1/2}_{\hat{\psi}_n}$ with $I^{1/2}_{\psi^*}$,
\begin{align*}
\tau&\approx \Phi(I^{1/2}_{\psi^*}\sqrt{n}( \hat{\psi}_n - \psi^*) - I^{1/2}_{\psi^*}\sqrt{n}( \psi_0- \psi^*)) \\
&=\Phi(Z + I^{1/2}_{\psi^*}\sqrt{n}( \psi^*- \psi_0)), \end{align*} whose distribution converges to a point mass at 1 as $n\rightarrow \infty$.
\subsection{Modelling the sampling distribution}
In this section we define low-parameter models for the sampling distribution of $\tau$ informed by the asymptotic behaviour of the test statistic described above. The parameters of these models will then be estimated from Monte Carlo simulated instances of the sampling distribution. Specifically, under the null distribution, we specify a beta distribution with the same shape and scale parameters, \begin{equation}
\label{eq:betaH0}
\tau\mid \psi = \psi_0 \sim \text{beta}\left(a_0(n), a_0(n)\right) \end{equation} where $n$ is the sample size and $a_0(n)$ is a stochastic function of $n$ that is assigned a distribution such that as $n$ gets large the above distribution will converge to a $\text{beta}(1,1)$. For small $n$, however, $\tau$ is more likely to take values closer to 0 or 1 as the posterior is more sensitive to data variability. This results in a distribution that is better captured by a beta with parameters $a_0(n)<1$. The following log-normal distribution on $a_0$ is defined to mimic the evolution of the sampling distribution as $n$ grows, \begin{equation}
\label{eq:aH0}
\log\left(a_0(n)\right) \sim \mathcal{N}\left(-\frac{\alpha_1}{n} + \frac{\alpha_2}{n^2}, \sigma^2_0\right) \end{equation} where $0<\alpha_1<n$ and $0<\alpha_2<n\alpha_1$ are tuning parameters that are estimated from the ``data'', i.e., the simulated sampling distribution at select values of $n$. The role of $\alpha_1$ and $\alpha_2$ is to adjust the rate of convergence of the sampling distribution to a uniform distribution. This rate depends on the complexity of the analysis model and therefore the posterior distribution. Specifically, the second term $\alpha_2/n^2$ can be considered a correction term enabling better fit for cases where $\log(a_0)$ is not linear in $1/n$. The variance parameter $\sigma^2_0$ accounts for the uncertainty in the beta parameters that are estimated from the Monte Carlo samples of the distribution of $\tau$ and for the fact that the beta distribution remains a misspecification in finite samples.
Under the alternative hypothesis, the true value of $\psi$ is denoted by $\psi^*$ and, without loss of generality, is assumed to be positive, $\psi^* - \psi_0>0$. The sampling distribution of $\tau$ in this case is modelled as the following beta distribution, \begin{equation}
\label{eq:betaHA}
\tau\mid \psi = \psi^* \sim \text{beta}\left(a_A(\sqrt{n}(\psi^* - \psi_0)), \frac{1}{a_A(\sqrt{n}(\psi^* - \psi_0))}\right) \end{equation} where the shape and the inverse scale parameters depend on the distance from the null scaled by the sample size. As $\sqrt{n}(\psi^* - \psi_0)$ gets large the sampling distribution is expected to become further skewed with peak at 1, resembling a beta distribution with a large shape parameter and a small scale parameter. Therefore, these parameters are modelled as follows, \begin{equation}
\label{eq:aHA}
\log\left(a_A\left(\sqrt{n}(\psi^* - \psi_0)\right)\right) \sim \mathcal{N}\left(\phi_1\sqrt{n}(\psi^* - \psi_0) + \phi_2 n(\psi^* - \psi_0)^2, \sigma^2_1\right) \end{equation} where $\phi_1$ and $ \phi_2$, similar to the $\alpha$'s above, are estimated from instances of the simulated distribution of $\tau$ at select $\psi^*$ and $n$, and adjust for varying rates of convergence of this distribution for different analysis models.
We note that the beta distribution is in fact a misspecification (even asymptotically) for the sampling distribution under the alternative. However, since the end goal is to estimate the operating characteristics as the tail probabilities of this distribution, this approximation only needs to capture the relevant portion of the sampling distribution and thus the misspecification of the overall sampling distribution will be of little consequence. The relevant tail probabilities will depend on the relevant range of decision threshold $u$, which motivates the quantile matching approach that is described in the following \citep{sgouropoulos_matching_2015}.
For a given $n$ under the null hypothesis and for a pair of values $\psi^*$ and $n$ under the alternative hypothesis, let $q^e_1< \ldots < q^e_J$ be $J$ quantiles of the empirical distribution of $\tau$ generated via Monte Carlo simulations, and $q^{a}_1< \ldots < q^{a}_J$ be the corresponding theoretical quantiles of a beta distribution using either of the parametrizations presented in the previous section. The goal is to obtain an approximation for the sampling distribution in the beta family whose upper quantiles best match the empirical upper quantiles. This is done via Bayesian least squares, i.e., by minimizing the sum of scaled squared loss through the general Bayes framework of \cite{bissiri_general_2016}, which is equivalent to making inference via the following posterior distribution,
\begin{equation}
\label{eqn:postab}
\pi(a\mid \mathbf{q}^e) \propto \exp\left(-\frac{1}{\sigma_\epsilon}\sum_{j = 1}^J (q^e_j - q^{a}_j)^2\right) \pi(a),
\end{equation} where $\pi(a)$ is specified as a uniform prior over (0,1) under the null hypothesis, and a weakly informative prior supporting $a>1$ under the alternative hypothesis.
To account for the uncertainty in these parameters arising from the Monte Carlo error in the empirical sampling distribution, as well as the suboptimal fit of the beta distribution in certain settings, we sample 1000 draws from the posterior distribution in (\ref{eqn:postab}) as ``data'' that will be used to fit the models in (\ref{eq:betaH0})-(\ref{eq:aHA}). We have not performed a formal sensitivity analysis to the number of posterior draws but a few experiments proved that a smaller number of draws would achieve satisfactory results.
\section{Application to trials with covariate adjustment} \label{Sec:appl}
In this section, we apply the proposed methods to a simplified version of the simulation study of \cite{willard_covariate_2022}, where the operating characteristics were compared for a variety of sample sizes and effect sizes for several models. We only compare the fully adjusted model to the unadjusted model, as the goal is not to replicate those simulation results but to showcase the applicability of the proposed approach to such simulation scenarios. Additionally, we simulate a ``fixed" design without stopping criteria at interim analyses so that we can obtain the sampling distribution of $\tau$ at a sequence of points throughout the trial which is unaffected by stopping decisions at previous interim analyses.
\subsection{Type I error rate and power curves for model comparison} In this section, we use select simulation scenarios (Table~\ref{table1}) for dichotomous outcomes similar to those in \cite{willard_covariate_2022}. These scenarios are used to generate power and type I error rates as functions of $n$ and the effect parameters. The data generating model is the logistic regression model in (\ref{eqn:adj}) with $\mathbf{x} = (x_1, x_2, x_3, x_3^2, x_5)$, $\boldsymbol{\beta} = (1, -0.5, 1, -0.1, 0.5)$ and $\alpha = -1.26$.
Figure~\ref{fig:t1e} shows the estimated curves and 95\% credible intervals for type I error rate as a function of sample size for the adjusted and unadjusted models. These results are obtained by fitting the models in Section~\ref{Sec:methods} to the simulated sampling distribution at the 14 sample sizes. The black solid circles are simulation-based estimates of the type I error rate. The goal of this exercise is to demonstrate that although these curves are generated by fitting a model to the sampling distribution and not directly to the simulated tail probabilities, they are able to capture the type I error rates obtained from the simulations and produce corresponding uncertainty estimates. The full curves exhibit the slower rate of convergence to the nominal type I error rate in the highly parametrized adjusted model, a useful detail in specifying sample sizes and scheduling interim analyses when covariate adjustment is considered.
Figure~\ref{fig:power} shows the estimated curves and 95\% credible intervals for power as a function of sample size and a range of effect sizes which are different from those included in the simulation scenarios. The black solid circles are simulation-based estimates of power for the 25 scenarios listed in rows 2-6 of Table~\ref{table1}. This is to demonstrate that power curves and their associated uncertainty may be estimated for any given effect assumption of interest by modelling the sampling distribution using only 25 simulation scenarios. These estimates provide an overall understanding of power corresponding to each analysis and can be used to specify a design with the preferred analysis model.
We calculate Bayesian assurance for the design prior, $\theta \sim \mathcal{N}(-1.25, 0.25)$ which can be obtained at negligible additional cost. Figure~\ref{fig:BA} shows the Bayesian assurance as a function of sample size for the adjusted and unadjusted logistic models. As expected, similar to power, the Bayesian assurance converges to 1 at a faster rate for the model that adjusts for the covariates. We emphasize again that the goal here is not to draw conclusions about these models but to showcase the capabilities of the proposed approach in efficiently obtaining comparison criteria that would be expensive to compute by solely relying on simulations.
\subsection{Prediction performance}
The results of the previous section showcase the use of the proposed approach but do not quantify the prediction/estimation performance as compared to simulation-based estimates. In this section we divide the simulation scenarios into a training set and a test set. The training set is used to fit the models proposed for the sampling distribution, which are then used to estimate the operating characteristics in the test set and compare them with those obtained from the simulations.
Figure~\ref{fig:test} shows these results for the type I error rate, where the training set includes sample sizes $n = 20, 40, 60, 80, 100, 200, 1000$; the test set is defined as the remaining sample sizes listed in the first row of Table~\ref{table1}. The grey dots and error bars show point estimates and 95\% credible intervals obtained from modelling the sampling distribution, while the triangles and squares show the simulation-based type I error rates in the training and test sets, respectively. For both models, type I error rate is estimated with less than 0.002 bias and in most cases, the 95\% credible intervals include the simulation-based estimates.
The concentration of small sample sizes in the training set is intentional, as the evolution of the sampling distribution (and, as a result, type I error rate) is more abrupt when $n$ is small and is best learnt from simulation results, while for larger sample sizes, the sampling distribution is predictable by theoretical asymptotic properties.
To assess the prediction accuracy and precision for power, we use the full set of simulation scenarios in \cite{willard_covariate_2022} that includes 15 additional effect and sample size value combinations, $n = (20, 40, 60, 80, 100)$ with $\theta = -1.24$; $n = (40, 80, 120, 160, 200)$ with $\theta = -0.88$; and $n= (200, 400, 600, 800, 1000) $ with $\theta = -0.55$. This results in a total of 40 simulation scenarios, which is divided into a training set of size 12 given in Table~\ref{table2}, and a test set that includes the 28 remaining scenarios.
Instead of presenting graphs analogous to that of type I error rate, we plot the bias and root mean squared error (RMSE) for the 40 sample size/effect size scenarios in Figure~\ref{fig:test_power_adj} for both models. The results show that, across all scenarios and the two models, both the absolute bias and the RMSE remain below 0.05. The same pattern is present for both models, where power is slightly overestimated for small to moderate $n$ and slightly underestimated for larger sample sizes. As mentioned earlier, under the alternative hypothesis the beta distribution is a misspecification, even asymptotically. This misspecification, however, results in an estimation error that is considered inconsequential from a practical perspective.
We note that the covariate adjusted logistic regression model is a very simple model where many efficient off-the-shelf MCMC samplers can be used to perform Bayesian analysis. However, even within the context of this simple example the computational savings are significant. Suppose that 10,000 trial simulations take $t$ units of time. To rely on simulations for estimating the operating characteristics for the 54 total scenarios and, say 3 candidate decision criteria, it would take $162t$ units of time and any additional scenarios arising in trial planning would cost an additional $t$ units. With the proposed approach, 19 initial simulation scenarios are used to estimate the sampling distribution at any future simulation scenario and with any given decision criterion. Therefore the total computation time with the proposed methods amounts to $19t + t_{fit}$ where $t_{fit}$ is the one-time cost of fitting the models to the sampling distribution which is about 10 minutes on a 1.4 GHz quad-core processor.
\section{Discussion} \label{Sec:dis} In this article, we propose a simulation-assisted approach for the assessment of frequentist operating characteristics in Bayesian clinical trials that employ posterior or posterior predictive probability of the alternative hypothesis as the test statistic. The proposed approach relies on modelling the sampling distribution of the test statistic. Parametric models are specified to capture the theoretical large sample properties of the posterior distribution and the expected behaviour of the test statistic as a function of sample size and effect size. The model parameters are then estimated using ``observed" instances of the sampling distribution obtained via Monte Carlo simulations at select simulation scenarios.
Theoretical results for the sampling distribution require large $n$, as they rely on asymptotic theory. However, in complex designs, as well as early phase trials, the operating characteristics for small to moderate sample sizes are of interest. Therefore, existing methods for Bayesian sample size determination and assessment of design operating characteristics in clinical trials have mainly relied on Monte Carlo simulations. The novelty of the proposed approach is in utilizing asymptotic properties together with Monte Carlo simulations to learn the operating characteristics across a range of model/design parameters.
While the present article is focused on the posterior probability of the alternative hypothesis as the test statistic, it can be modified to accommodate any test statistic that is derived from the marginal posterior distribution of the parameter of interest in Bayesian trials. Moreover, similar methods may be developed to learn the sampling distribution of test statistics in non-Bayesian clinical trials.
The methods proposed in this article are developed particularly for the assessment of operating characteristics in Bayesian trials with relatively complex analysis models, where relying on Monte Carlo simulations would be time consuming or infeasible. We use logistic regression with covariate adjustment in trials with dichotomous outcomes as an example to showcase the implementation and utility of the proposed methodology. Other settings where this methodology can make a notable difference include clinical trials with Bayesian hierarchical models (e.g. \cite{liu_bayesian_2020}, \cite{carragher_bayesian_2020}, and \cite{zhao_bayesian_nodate}), trials that employ information borrowing techniques resulting in analytically intractable posterior distributions (for example, \cite{psioda_practical_2018} and \cite{zhou_incorporating_2021}), and trials that incorporate time trends in the analysis (e.g. \cite{saville_bayesian_2022}).
The proposed approach can be paired with decision-theoretic approaches (e.g., \cite{lewis_bayesian_2007} and \cite{calderazzo_decision-theoretic_2020}). Learning the sampling distribution across the model/design space enables efficient exploration of a variety of decision criteria derived from any number of loss or utility functions which are implicitly or explicitly defined by assigning weights to type I and II error rates.
The selection of simulation scenarios at which the sampling distribution is simulated is important, as these simulations play the role of the \emph{data} in the proposed approach. The selection of these scenarios is therefore a \emph{design} problem. For type I error rate, this boils down to selecting a sequence of sample size values. We recommend concentrating the simulations at small $n$, where the type I error rate changes (decreases) quickly with sample size. At large $n$ the probability of a type I error converges to $1-u$, where $u$ is the decision threshold, and the additional refinement from simulations is negligible.
A similar rationale is used for determining the simulation scenarios for estimating power curves. One is typically interested in estimating power for a range of effect sizes. An equally-spaced sequence over the range of interest can be used as the effect sizes at which simulations are performed. For each effect size, the interest is then in the sample size values where power changes at a higher rate. For example, performing simulations at effect/sample size pairs where power is close to 1 is a waste of simulations. We recommend using approximate frequentist sample size determination to obtain sample sizes that result in, e.g. 30\%, 50\% and 80\% power for each effect size.
The total number of simulation scenarios depends on a variety of factors, such as the range of effect sizes and sample sizes to be explored, which are in turn determined by the complexity of the design. For example, if early interim analyses are not of interest, estimating power where it is below 50\% would not be of interest, and one can focus on the upper end of the power curves (top half of the plots in Figure~\ref{fig:power}). However, in general, given the low dimensionality and smoothness of the models used to learn the sampling distribution, the full range of operating characteristics may be estimated with reasonable precision using fewer than 25 simulation scenarios in total.
In this article we focus on the sampling distribution of the test statistic in trials without stopping criteria. The adoption of the proposed approach in the context of adaptive designs requires further investigation and is therefore a direction for future research.
At present we perform power analysis that explores various settings of the effect parameter but holds the rest of the model parameters (nuisance parameters) fixed. While this reflects the common practice of trial design, there may be scenarios where exploration of the nuisance parameter space is also of interest. Therefore, another direction for future research involves extending the proposed methodology to model the sampling distribution across the parameter space. This extension will require incorporating a spatial correlation within the parameter space similar to the approach of \cite{golchi_estimating_2022}.
To conclude, we would like to emphasize that the proposed approach is one that facilitates and complements various existing methods in the Bayesian SSD and trial design literature. We anticipate that this work will greatly contribute to the practice of statistical trial design in trials with complex analysis models as well as innovative and flexible designs.
\section*{Code} The code for implementation of the methods of this article together with simulation data and examples is available at \href{https://github.com/sgolchi/BayesSampDist}{https://github.com/sgolchi/BayesSampDist}.
\begin{table}[h!]
\centering
\begin{tabular}{ l l }
\hline
$n$
& $\theta$ \\
\hline
(20, 40, 60, 80, 100, 120, 160, 200, 300, 400, 500, 600, 800, 1000)& 0\\
(20, 40, 60, 80, 100) & -1.03\\
(40, 80, 120, 160, 200) & -1.36\\
(100, 200, 300, 400, 500) &-0.56\\
(100, 200, 300, 400, 500) &-0.83\\
(200, 400, 600, 800, 1000) & -0.39\\
\hline
\end{tabular}
\caption{Simulation scenarios arising from sample size and effect parameter value combinations.}
\label{table1} \end{table}
\begin{table}[h!]
\centering
\begin{tabular}{ l l }
\hline
$n$
& $\theta$ \\
\hline
(20, 60, 100) & -1.24\\
(40, 120, 200) & -0.88\\
(100, 300, 500) &-0.56\\
(200, 600, 1000) & -0.39\\
\hline
\end{tabular}
\caption{Simulation scenarios in the training set to assess prediction performance for power.}
\label{table2} \end{table}
\begin{figure}
\caption{Estimated curves and 95\% credible intervals for type I error rate as a function of sample size in (a) unadjusted and (b) adjusted model.}
\label{fig:t1e}
\end{figure}
\begin{figure}
\caption{Estimated curves and 95\% credible intervals for power as a function of sample size for a range of effect sizes in the (a) unadjusted and (b) adjusted model.}
\label{fig:power}
\end{figure}
\begin{figure}
\caption{Estimated Bayesian assurance curves as a function of sample size for the (a) unadjusted and (b) adjusted models.}
\label{fig:BA}
\end{figure}
\begin{figure}
\caption{Unadjusted analysis model}
\label{fig:test_unadj}
\caption{Adjusted analysis model}
\label{fig:test_adj}
\caption{Point estimates and 95\% credible intervals for type I error rate obtained from a training set of size 7 indicated by triangles. The squares represent the simulation based estimate of the type I error rate to be compared with the simulation-assisted estimate (dots).}
\label{fig:test}
\end{figure}
\begin{figure}
\caption{bias in the adjusted model}
\label{fig:power_bias_adj}
\caption{bias in the unadjusted model}
\label{fig:power_bias_unadj}
\caption{RMSE in the adjusted model}
\label{fig:power_RMSE_adj}
\caption{RMSE in the unadjusted model}
\label{fig:power_RMSE_unadj}
\caption{Bias and RMSE in estimating power for a test set of size 28 from a training set of size 12 in the adjusted ((a) and (c)) and unadjusted ((b) and (d)) model.}
\label{fig:test_power_adj}
\end{figure}
\end{document}
\begin{document}
\title{Shortcut to adiabatic passage in two and three level atoms}
\author{Xi Chen$^{1,2}$, I. Lizuain$^{1}$, A. Ruschhaupt$^{3}$, D. Gu\'ery-Odelin$^{4}$, J. G. Muga$^{1}$}
\affiliation{
$^{1}$ Departamento de Qu\'{\i}mica-F\'{\i}sica, UPV-EHU, Apartado 644, 48080 Bilbao, Spain \\
$^{2}$ Department of Physics, Shanghai University, 200444 Shanghai, P. R. China\\
$^{3}$ Institut f\"ur Theoretische Physik, Leibniz Universit\"{a}t Hannover, Appelstra\ss e 2, 30167 Hannover, Germany\\ {$^{4}$Laboratoire Collisions Agr\'egats R\'eactivit\'e, CNRS UMR 5589, IRSAMC, Universit\'e Paul Sabatier, 118 Route de Narbonne, 31062 Toulouse CEDEX 4, France} }
\date{\today}
\begin{abstract}
We propose a method to transfer the population and control the state of two-level and three-level atoms speeding-up Adiabatic Passage techniques while keeping their robustness versus parameter variations. The method is based on supplementing the standard laser beam setup of Adiabatic Passage methods with auxiliary steering laser pulses of orthogonal polarization. This provides a shortcut to adiabaticity driving the system along the adiabatic path defined by the standard setup. \end{abstract}
\pacs{32.80.Xx, 33.80.Be, 32.80.Qk, 03.65.Ge}
\maketitle
{\it Introduction.---}Two major routes for manipulating the state of a quantum system with interacting fields are based on resonant pulses
or on adiabatic methods, such as ``Rapid'' Adiabatic Passage (RAP), Stimulated Raman Adiabatic Passage (STIRAP) and their many variants. In general terms simple fixed-area resonant pulses may be fast if intense enough, but quite unstable with respect to errors or fluctuations of the parameters, whereas adiabatic passage is robust but slow. For many applications, from Nuclear Magnetic Resonance (NMR) to quantum information processing, the ideal method should be fast and robust, combining the best of the two worlds. These two requirements are particularly demanding if quantum computing is to become feasible at all. It is possible to make the pulses more stable by combining them into pulse sequences, but in practice their use is limited by the longer times required with respect to the single pulse, the need to control phase angles and pulse durations accurately, or off-resonant excitations due to sharp pulse edges \cite{NMR}.
Moreover the error compensating properties of square-pulse sequences are not preserved when substituting them with smooth pulses so that the design of good sequences requires ``a good portion of experience and magic'' \cite{Molmer}.
In NMR, composite pulses are increasingly superseded by adiabatic passage methods \cite{NMR}, which have also been very successful in chemical reaction dynamics \cite{Kral}, laser cooling \cite{lc}, atom optics \cite{RM}, metrology \cite{Salomon}, interferometry \cite{Chu}, or cavity quantum electrodynamics \cite{Lambro,Bergmann}. When robustness is the primary concern, they are quite sufficient, and have as well become basic operations for quantum information processing, either to design robust gates \cite{JJ,gates} or in quantum adiabatic computing \cite{AC1,AC2}, which relies on an adiabatic evolution of the ground state from an initial to a final Hamiltonian. If speed is also important, however, the limitations may be severe \cite{AC2}. Given the stated difficulties of composite pulses, it is then quite natural to look for robustness and high operation velocities taking the adiabatic methods as the starting point and shortening their duration somehow. Our objective here is to propose a shortcut to adiabatic passage (abbreviated as ``SHAPE'' hereafter) using a recent formulation by Berry \cite{Berry09} of ``transitionless quantum driving'', related to work by Kato \cite{Kato} on the adiabatic theorem. The specific applications we shall discuss are speeded-up versions of (2-level) RAP and (3-level) STIRAP schemes, as canonical examples of other adiabatic methods. Variants such as fractional RAP or STIRAP, and multilevel schemes may be treated along similar lines.
The philosophy of the transitionless quantum driving algorithm \cite{Berry09} is to supplement the Hamiltonian $H_0(t)$ of a reference system
with an auxiliary term $H_1(t)$ to steer the dynamics exactly along the instantaneous eigenstates $|\lambda_n(t)\rangle$ of $H_0(t)$ without transitions among them, formally in an arbitrarily short time,
\begin{equation} H_1 (t)= i \hbar \sum_n(|\partial_t \lambda_n \rangle \langle \lambda_n |
- \langle\lambda_n |\partial_t \lambda_n \rangle | \lambda_n \rangle\langle \lambda_n|).
\label{Berry's Hamiltonian}
\end{equation}
At variance with Lyapunov-control methods \cite{Wang}, the extra term is independent of the time-dependent state so it leads to simpler, linear dynamics, and moreover it provides systematically exact solutions for adiabatic following without the need for a trial and error approach to find a good control field \cite{Wang}. Regarding its physical realizability, in general there is no guarantee that $H_1$ may be easy to implement and each case needs a separate study. For example, for an $H_0$ describing a particle in a time-dependent harmonic potential, $H_1$ turns out to be a non-local interaction, and its realizability in a useful parameter domain for cold atoms remains an open question \cite{JPB2}. (Transitionless dynamics for the harmonic oscillator may be achieved with a local interaction by inverse engineering the frequency dependence with the aid of Ermakov-Riesenfeld invariants \cite{Xi}, or with state acceleration techniques \cite{Nakamura}.) For a particle with spin in a time dependent magnetic field, $H_1$ becomes a complementary, time-dependent magnetic field \cite{Berry09}. For the atomic two- and three-level systems studied here, $H_1$ will involve laser interactions added to the original laser setup implied by $H_0$ as discussed below.
{\it Rapid Adiabatic Passage.---}Let us consider first the speeding-up of a standard Rapid Adiabatic Passage that inverts the population of two-levels of an atom,
$|0\rangle$ and $|1\rangle$, by sweeping the radiation through resonance.
This broadspread technique originated in Nuclear Magnetic Resonance \cite{Bloch} but is used in virtually all fields where 2-level systems may be controlled by external interactions, such as laser-chemistry, modern quantum optics or quantum information processing.
When the frequency sweep is much shorter than the life-time for spontaneous emission and other relaxation times, it is termed rapid adiabatic passage (RAP).
Using the rotating wave approximation, the Hamiltonian ${H}_0 (t)$ in a laser-adapted interaction picture can be written as
\begin{eqnarray} \label{H0} {H}_0 (t)= \frac{\hbar}{2} \left(\begin{array}{cc} \Delta (t) & \Omega_{R}(t) \\ \Omega_{R} (t)& - \Delta (t) \end{array}\right), \end{eqnarray}
where $\Omega_{R}(t)$ is the Rabi frequency, which we take to be real, and $\Delta (t)= \omega_0- \omega_L$ the detuning, assumed to change slowly on the scale of the optical period. It is the difference between the Bohr transition frequency and the laser carrier frequency $\omega_L$, due to a change in the carrier frequency or a controlled alteration of the Bohr frequency by Zeeman or Stark shifts.
The instantaneous eigenvectors are
\begin{eqnarray} \label{instantaneuous states}
|\lambda_{+}(t)\rangle &=& \cos[\theta(t)/2]
|1 \rangle - \sin[\theta(t)/2] |0 \rangle,\\
|\lambda_{-}(t)\rangle &=& \sin[\theta(t)/2]
|1 \rangle + \cos[\theta(t)/2]
|0 \rangle, \end{eqnarray}
with the mixing angle $\theta (t)\equiv \arccos [-\Delta(t)/\Omega(t)]$ and eigenvalues $E_{\pm}(t)= \pm \hbar \Omega /2$, where $\Omega = \sqrt{\Delta^2 (t) + \Omega^2_R (t)}$. If the adiabaticity condition
\begin{equation} \label{adiabatic condition}
\frac{1}{2}|\Omega_a| \ll |\Omega(t)|, \end{equation}
where $\Omega_a\equiv[\Omega_R(t)\dot{\Delta}(t)-\dot{\Omega}_R(t)\Delta(t)]/\Omega^2$,
is satisfied, the state evolving from $|\psi(t=0)\rangle=|\lambda_\pm(0)\rangle$ follows the adiabatic approximation
\begin{eqnarray} \label{adiabatic states}
|\psi_{\pm}(t)\rangle= \exp{\left\{-\frac{i}{\hbar}\int^{t}_{0} dt' E_\pm(t')\right\}} |\lambda_\pm(t)\rangle, \end{eqnarray}
whereas transitions will occur otherwise. Different adiabatic passage schemes correspond to different specifications of $\Omega_R$ and $\Delta$ for which $\psi_{\pm}$ passes from one bare state to the other. The simplest one is the Landau-Zener scheme with constant $\Omega_R$ and linear-in-time $\Delta$. For the examples below we shall use the more adiabatic (and thus potentially faster) Allen-Eberly scheme \cite{AE,VG}: $\Omega_R=\Omega_0\sech(\pi t/2 t_0)$, $\Delta=(2\beta^2 t_0/\pi) \tanh(\pi t/2 t_0)$.
Regardless of the scheme chosen ${H}_1(t)$ takes the form
\begin{eqnarray} {H}_1 (t) = \frac{\hbar}{2} \left(\begin{array}{cc} 0 & -i \Omega_{a} \\ i \Omega_{a} & 0 \end{array}\right), \end{eqnarray}
where (up to a phase factor) $\Omega_a$ plays the role of the Rabi frequency of the auxiliary field.
In principle $H=H_0+H_1$ drives the dynamics along the $H_0$-adiabatic path in arbitrarily short times, but there are practical limitations such as the laser power available. Moreover, a comparison with $H_0$-dynamics is only fair if $|\Omega_a|$ is smaller or approximately equal to the peak Rabi frequency with the original laser setup. Independently of the scheme chosen and in a range of interaction times that break down the adiabaticity condition, it is remarkable that the dynamics can be driven along the $H_0$-adiabatic path while fulfilling the inequalities $|\Omega_{a}|\le|\Omega|\le |\Omega_0|$.
The physical meaning and realizability of the auxiliary term are determined by going back to the Schr\"odinger picture: it represents a laser with the same time dependent frequency of the original one, but a differently shaped time-dependent intensity, and perpendicular polarization.
For the Allen-Eberly scheme the population of the excited state $P_1$ starting from the ground state depends on the dimensionless parameters $\tau=t_0\beta$ and $\omega=\Omega_0/\beta$ \cite{VG}: $P_1= 1 - \sech^2(2 \tau^2/\pi)\cos^2[\tau (\omega^2 - 4 \tau^2/\pi^2)^{1/2}]$.
A population transfer near to one ($P_1>0.999$) and stable versus parameter variations is achieved for $\omega\ge 3$ and $\tau\ge 3$. We may calculate $\Omega_a$ and the minimal time for which the maximum of $\Omega_a$ with respect to $t$ is $\le\Omega_0$. In the stated range this is accurately given by $\tau_m=\pi/(4\omega)$, or
$t_{0,m}=\pi/(4 \Omega_0)$.
The reduction factor with respect to the adiabatic time $\tau_a\approx 3$ may be very significant, $t_{0,m}/t_a\approx{\pi}/{(12\omega)}$; this is $0.09$ for $\omega=3$, or $0.01$ for $\omega=20$. Of course the SHAPE Hamiltonian $H$ may also drive the system along the adiabatic path outside the $\omega,\tau>3$ domain as illustrated in Fig. \ref{figAE}.
\begin{figure}
\caption{ (color online). (Dimensionless) energies $\varepsilon=E/(\beta \hbar)$ versus (dimensionless) time $T=t\beta$ for the AE scheme: Diabatic energies, dot-dashed (blue) lines; adiabatic energies, dashed (orange) lines; average energy evolving with $H_0$, solid (purple) line; average energy evolving with $H=H_0+H_1$, dotted (red) line, indistinguishable from the lower adiabatic energy. $\omega=5$, $\tau=1.22$.}
\label{figAE}
\end{figure}
For comparison, the population of the excited state due to a square $\pi$ pulse with on-resonance Rabi frequency $\Omega_0$ is
\begin{eqnarray} P_{1} = \left|\frac{\Omega_0}{\Omega}\sin \left(\frac{\Omega t}{2}\right)\right|^2. \end{eqnarray}
Complete population transfer requires $\Delta=0$, and a pulse time
$t_R= \frac{\pi}{\Omega_{0}}$.
For the same $\Omega_0$ and limiting the auxiliary laser by $|\Omega_a|\le \Omega_0$, the minimal characteristic time $t_{0,m}$ of the SHAPE method is of the order of $t_R$, $t_{0,m}=t_R/4$. In fact the actual interaction time to implement a successful population inversion with the AE scheme (SHAPE corrected or not) should be a few times $t_{0}$; this may be estimated from the dependence of the excited population of the adiabatic state with time, which is $>.999$ for $t\ge 8 t_{0}$.
Figures 2 and 3 show examples of the fidelity ($P_1$) with respect to variations in the Rabi frequency and detuning with SHAPE (AE scheme for $H_0$), the evolution with $H_0$ (AE scheme), a Rabi $\pi$-pulse, and a composite $\frac{\pi}{2}(x)\pi(y)\frac{\pi}{2}(x)$ pulse, a fault-tolerant combination where $x,y$ refer to the laser polarization (and Pauli matrix) involved. Clearly SHAPE provides a fast, robust and efficient population inversion compared to all other methods. All cases are for the same $\Omega_0$, and in SHAPE $|\Omega_a|\le \Omega_0$.
\begin{figure}
\caption{ (color online). Dependence of the fidelity ($P_1$) on the changes of (a) the Rabi frequency from $\Omega_0$ to $\Omega_0(1+\eta)$ (b): the detuning by $\delta$. SHAPE, AE scheme (solid, red line) with $\Omega_a\le\Omega_0$; ordinary Adiabatic Passage (dotted, black line); Rabi pulse (dashed, blue line); Composite pulse $\frac{\pi}{2}(x)\pi (y)\frac{\pi}{2}(x)$ (dot-dashed, purple line). $\Omega_0=2 \pi\times 5$ MHz; $t_0=25$ ns; $\beta=2\pi$ MHz.}
\label{fig.3}
\end{figure}
{\it Stimulated Raman Adiabatic Passage.---}Similar ideas can be applied to three-level STIRAP. The Hamiltonian ${H}_0 (t)$ for the two-photon resonance case within the rotating wave approximation (RWA) and in a laser adapted interaction picture reads \cite{Bergmann}
\begin{eqnarray} {H}_0 (t)= \frac{\hbar}{2} \left(\begin{array}{ccc} 0 & \Omega_{p}(t) & 0 \\ \Omega_{p}(t) & 2 \Delta & \Omega_{s}(t) \\ 0 & \Omega_{s}(t) & 0 \end{array}\right), \end{eqnarray}
in terms of the Rabi frequencies for the Stokes, $\Omega_{s}(t)$, and pumping lasers, $\Omega_{p}(t)$, and the laser detuning $\hbar \Delta= (E_2-E_1)- \hbar \omega_p=(E_2-E_3)- \hbar \omega_s$.
The instantaneous eigenstates $|\lambda_n \rangle$ are
\begin{eqnarray} \label{instantaneuous states-for 3-level system} \nonumber
|\lambda_{+} (t) \rangle &=& \sin \theta \sin \phi |1\rangle + \cos \phi |2\rangle + \cos \theta \sin \phi |3\rangle, \\ \nonumber
|\lambda_{-} (t) \rangle &=& \sin \theta \cos \phi |1\rangle - \sin \phi |2\rangle + \cos \theta \cos \phi |3\rangle, \\
|\lambda_{0} (t) \rangle &=& \cos \theta |1\rangle - \sin \theta |3\rangle, \end{eqnarray}
with eigenvalues given by $E_{+} (t)=\hbar \Omega \cot(\phi/2)$, $E_{0}=0$, and $E_{-} (t) = -\hbar \Omega \tan(\phi/2)$. The time-dependent mixing angles $\theta$ and $\phi$ are respectively defined by $\tan \theta = \Omega_{p} (t)/ \Omega_{s} (t)$ and $\tan (2\phi) = \Omega / \Delta(t)$, whereas $\Omega =\sqrt{\Omega^2_{p}(t) + \Omega^2_{s}(t)}$. The population transfer $1\rightarrow 3$
is realized by the ``dark state'' $|\lambda_0\rangle$.
The Hamiltonian ${H}_1 (t)$, takes now the form
\begin{eqnarray} \label{Berry3} {H}_1 (t)= i \hbar \left(\begin{array}{ccc} 0 &\dot{\phi}\sin \theta & \dot{\theta} \\ -\dot{\phi} \sin \theta & 0 & -\dot{\phi}\cos \theta \\ -\dot{\theta} & \dot{\phi} \cos \theta& 0 \end{array}\right), \end{eqnarray}
with
$ \dot{\theta}= [{\dot{\Omega}_{p} (t) \Omega_{s} (t)- \dot{\Omega}_{s} (t) \Omega_{p} (t)}]/{\Omega^2}, $ and $ \dot{\phi}=[{(\dot{\Omega}_{p} (t) \Omega_{p} (t) + \dot{\Omega}_{s} (t) \Omega_{s} (t)) \Delta (t) }]/[{2 \Omega (\Delta^2+\Omega^2)}]. $ We would need in principle three new lasers to implement this Hamiltonian. The ones connecting levels $1$-$2$ and $2$-$3$ should have the same frequency as the original ones but orthogonal polarization, and the field connecting levels $1$-$3$ should be on resonance with this transition to get an interaction picture Hamiltonian like (\ref{Berry3}) (The RWA approximation is assumed in all cases.) If $1$-$3$ is electric-dipole-forbidden, a magnetic dipole transition may be used instead.
If we are only interested in performing a full passage from 1 to 3 and do not want to reproduce all the effects of the full Hamiltonian $H_0+H_1$, $H_1$ may be simplified by retaining just the $1$-$3$ interaction,
\begin{equation} {H}'_1 (t)= \frac{\hbar}{2} \left(\begin{array}{ccc} 0 & 0 & i\Omega'_a \\ 0 & 0 & 0 \\ -i \Omega'_a & 0 & 0 \end{array}\right), \label{13} \end{equation}
where $\Omega'_a = 2 \dot{\theta}$. That this is so may be seen by working out the Schr\"odinger equation in the adiabatic basis: $d\langle \lambda_0(t)|\psi(t)\rangle/dt$ does not depend on $\dot{\phi}$ so that the $1$-$2$ and $2$-$3$ auxiliary lasers can be left out without affecting
$\langle\lambda_0(t)|\psi(t)\rangle$.
In the examples below we have chosen the pulse shapes \cite{Fewell}
\begin{eqnarray} \nonumber \Omega_{p}(t) &=& \Omega_{0}(t) f(t-\tau) ;~~ \Omega_{s}(t) = \Omega_{0}(t) f(t), \\ \label{oms} f(t) &=& \left\{ \begin{array}{ll} \sin^4(\pi t/{\sf{T}}) ~~ &(0 < t < {\sf T}), \\ 0 ~~~~~&(\mbox{otherwise).} \end{array} \right. \end{eqnarray}
Fig. \ref{fig.4} shows a STIRAP Stokes-Pump pulse sequence where ${\sf T}$ is too short for complete population transfer because adiabaticity breaks down, see Fig. \ref{fig.5}a.
We can remedy that with the auxiliary interaction in (\ref{13}), see Figs. \ref{fig.4} and \ref{fig.5}b. Keeping $|\Omega_a'|\le \Omega_0$ the process duration is reduced approximately ten times with respect to the ordinary STIRAP scheme.
\begin{figure}
\caption{ (color online). Time evolution of Rabi frequencies in a STIRAP sequence of laser pulses defined by Eq. (\ref{oms}) with $\Omega_0 = 2 \pi \times 5$ MHz, $\Delta = 2 \pi \times 0.5$ MHz, and ${\sf T} = 0.26$ $\mu$s. Dashed blue line: $\Omega_s/\Omega_0$; Solid red line: $\Omega_p/\Omega_0$; The dotted orange curve represents $\Omega'_a/\Omega_0$.}
\label{fig.4}
\end{figure}
\begin{figure}
\caption{ (color online). Time evolution of the populations of levels 1, 2 and 3 for STIRAP: (a) Hamiltonian ${H}_0 (t)$; (b) Hamiltonian ${H} (t)$ with additional ${H}_1 (t)$. Same parameters as in Fig. \ref{fig.4}.}
\label{fig.5}
\end{figure}
{\it Discussion and conclusions.---}A method to achieve fast and robust population transfer in two-level and three-level atomic systems has been presented, based on supplementing the laser setup of standard adiabatic passage methods (RAP or STIRAP) by additional, properly time-shaped pulses with orthogonal polarization.
The Hamiltonian $H_1(t)$ that describes the additional steering pulses providing a shortcut to adiabaticity is given by a general algorithm to drive quantum systems without transitions \cite{Berry09}. Other states (such as superpositions of two levels) may be prepared by speeded-up (SHAPE) versions of fractional RAP or STIRAP, and, if necessary, the phases may be controlled thanks to the freedom to choose the reference Hamiltonian $H_0(t)$ or phase gates. As an outlook, similar techniques may provide a way to carry out adiabatic computation in a finite time \cite{Aha,bra}, or to speed up logic gates based on adiabatic processes \cite{CBZ,Ion}, interferometric techniques in superconducting qubits \cite{Shevchenko} or quantum dots \cite{Jong}, and the creation of entangled pairs of two-state systems \cite{Unanyan}. The SHAPE method is compatible with approaches that optimize $H_0$ such as the quantum brachistochrone approach \cite{Zan}, since, after optimizing the adiabatic process, it leads to the design of even faster processes. Other adiabatic techniques \cite{SCRAP} may also benefit from these ideas speeding up the avoided level crossings and keeping their stability versus parameter variations.
We thank M. V. Berry and J. H. Eberly for discussions, and acknowledge funding by Projects No. GIU07/40, No. FIS2009-12773-C02-01, No. NSFC 60806041, No. 08QA14030, No. 2007CG52, No. S30105, No. ANR-09-BLAN-0134-01, and Juan de la Cierva Program.
\end{document} |
\begin{document}
\title{Enhanced estimation of loss in the presence of Kerr nonlinearity}
\author{Matteo A. C. Rossi} \email{matteo.rossi@unimi.it} \homepage{http://users.unimi.it/aqm} \affiliation{Quantum Technology Lab, Dipartimento di Fisica, Universit\`a degli Studi di Milano, 20133 Milano, Italy}
\author{Francesco Albarelli} \email{francesco.albarelli@unimi.it} \homepage{http://users.unimi.it/aqm} \affiliation{Quantum Technology Lab, Dipartimento di Fisica, Universit\`a degli Studi di Milano, 20133 Milano, Italy}
\author{Matteo G. A. Paris} \email{matteo.paris@fisica.unimi.it} \homepage{http://users.unimi.it/aqm} \affiliation{Quantum Technology Lab, Dipartimento di Fisica, Universit\`a degli Studi di Milano, 20133 Milano, Italy} \affiliation{CNISM, Unit\`a Milano Statale, I-20133 Milano, Italy} \affiliation{INFN, Sezione di Milano, I-20133 Milano, Italy}
\date{\today}
\begin{abstract} We address the characterization of dissipative bosonic channels and show that estimation of the loss rate by Gaussian probes (coherent or squeezed) is improved in the presence of Kerr nonlinearity. In particular, enhancement of precision may be substantial for short interaction time, i.e. for media of moderate size, e.g. biological samples. We analyze in detail the behaviour of the quantum Fisher information (QFI), and determine the values of nonlinearity maximizing the QFI as a function of the interaction time and of the parameters of the input signal. We also discuss the precision achievable by photon counting and quadrature measurement and present additional results for truncated, few-photon, probe signals. Finally, we discuss the origin of the precision enhancement, showing that it {\em cannot} be linked quantitatively to the non-Gaussianity of the interacting probe signal. \end{abstract} \pacs{03.65.Ta, 42.50.Dv} \maketitle \section{Introduction} \label{sec:introduction} The characterization of quantum channels is a relevant task in quantum technology \cite{LoPresti01,Fujiwara2001,LoPresti03,Sarovar2004,Lobino563,Oli07}. In particular, characterizing lossy channels in continuous variable systems is crucial to quantify decoherence \cite{Serafini2005}, to assess quantum illumination protocols \cite{yuen09,Tan2008,Guha09,Brida10} and to realize quantum reading of classical memories \cite{Pirandola2011a}. In some specific cases, the task is simply to discriminate between the presence or the absence of losses \cite{sasaki97,paris01,Invernizzi2011}, whereas, in general, a strategy to estimate the exact value of the loss is needed. \par The loss rate in optical media and, in turn, the overall loss of the corresponding channels, are not observable quantities in a strict sense. As a consequence, one has to infer their value indirectly, i.e. by assessing the influence of loss on a given probing signal by measuring a suitably chosen observable. 
The overall choice of the probe, of the measurement, and of the data processing is usually referred to as an estimation strategy. Optimization of the estimation strategy, i.e. minimization of intrinsic and extrinsic fluctuations of the estimate, may be pursued upon employing quantum estimation theory \cite{HelstromBook,Braunstein94,Paris2009,Escher2011}, which provides constructive tools to determine the initial state of the quantum probe and the optimal measurement to be performed at the output. The ultimate bound on precision is set by the quantum Cramér-Rao inequality, written in terms of the so-called quantum Fisher information. \par In the last decades, much attention has been devoted to the estimation of loss with different initial preparations of the probes. Optimization over Gaussian input states has been performed \cite{Monras2007}, showing that ultimate precision may be achieved using photon counting and Gaussian operations at the output. Fock states have also been shown to saturate the ultimate bound on precision \cite{Sarovar2004,Adesso2009}, whereas the performances of thermal states have been recently investigated \cite{Gea16}. The general scenario of lossy media probed by Gaussian signals at finite temperature has been considered \cite{Monras2011a}, showing that a two-mode squeezed vacuum state is optimal for estimating both the loss parameter and the thermal noise. The benefit of using entanglement in a specific interferometric setup has also been discussed \cite{Venzl2007}. Recently the problem of estimating both the loss and the phase shift in interferometry has been addressed \cite{Crowley2014}, as well as the related problem of estimating the efficiency of realistic detectors \cite{Barbieri2015,Grandi2015}. \par So far, attention has been focused on Gaussian lossy channels where dissipation is due to linear coupling of a radiation mode to the environment, modeled as a bath of external oscillators. 
On the other hand, optical media where light propagates, such as gases, biological samples or optical fibers, may be characterized also by a (usually small) non-linear response to the electromagnetic field. A question thus arises on whether estimation of linear loss in the presence of nonlinearity is enhanced, or not, compared to the pure linear case. Here, we address this question, by considering systems where besides dissipation due to linear coupling to the environment, some form of nonlinearity is present. In particular, we focus on self-Kerr interaction \cite{boyd2008nonlinear}, occurring during propagation of radiation in a nonlinear medium with non negligible cubic nonlinearity. The Kerr effect has been widely studied in quantum optics either at zero \cite{Milburn1986a} or at finite temperature \cite{Stobinska2008}, and attracted interest because it can be employed to generate Schr\"odinger cat-like states \cite{Yurke1986,Yurke1988,Miranowicz2011,Paris1999,Jeong2004}. Nonlinearity of optical fibers has been discussed for its negative impact on the channel capacity \cite{Essiambre2012}, whereas its role as a resource in the estimation of losses has not been assessed so far. \par As a matter of fact, the presence of non-linear effects has been already recognized as a resource for quantum estimation, since it allows one to achieve high precision by using robust classical probe states, instead of fragile nonclassical states \cite{Luis2004,Rivas2010,Luis2010c}. In particular, Kerr-type nonlinearity may be exploited for estimation of squeezing and displacement of a Gaussian state \cite{Genoni2009} and to improve Michelson interferometry \cite{Luis2015}. \par In this paper, we analyze in detail estimation of loss in the presence of Kerr nonlinearity. We focus mostly on estimation strategies based on Gaussian probes (coherent and squeezed vacuum states), while also briefly examining the use of few-photon probes, the simplest nontrivial ones being optical qutrits. 
Overall, our results indicate that the presence of Kerr nonlinearity always enhances estimation, improving precision compared to the pure linear case. \par In particular, by focussing attention on the estimation of the loss rate parameter of the channel rather than the overall loss (which also includes the interaction time), we make the time dependence explicit. This is a relevant feature of our analysis since dissipation and nonlinearity set two different time scales in the evolution of the probe state. In this way, we address both regimes of ``short'' and ``long'' interaction times, showing that i) nonlinearity always improves estimation; ii) enhancement of precision may be substantial for short interaction time, i.e. for media of moderate size. \par The paper is structured as follows: in Section \ref{sec:quantum_estimation_theory} we briefly review the main tools of quantum estimation theory in order to establish the notation. In Section \ref{sec:model} we present in detail the interaction model we are dealing with, whereas in Section \ref{sec:absence_of_non_linear_effects} we discuss the solution of the problem in the absence of non-linearities. In Section \ref{sec:solution_in_presence_of_kerr_effect} we give an approximate, analytic, solution for the estimation problem with coherent probes, which holds when the Kerr coupling is much smaller than the loss parameter, and present a detailed numerical study for the general case. We also briefly analyze the use of optical qutrit probes and discuss whether non-Gaussianity plays a role in the estimation procedure. Section \ref{sec:conclusions} closes the paper with some concluding remarks.
\section{Quantum estimation theory} \label{sec:quantum_estimation_theory} Here we briefly review local estimation theory and its generalization to quantum systems \cite{Paris2009}. In an estimation procedure we want to infer the value of a parameter, say $\gamma$, from the data collected by $N$ measurements, $\{x_1,\ldots,x_N\}$. We thus build an estimator $\hat\gamma (\{x_1,\ldots,x_N\})$, that is a function of the outcomes of the measurements. The estimated value of the parameter will be characterized by a statistical error $\delta \gamma$, which is bounded from below by the Cramér-Rao inequality \cite{Cramer1946} \begin{equation}\label{eq:cramer_rao}
\delta \gamma^2 \geq \frac 1 {N F(\gamma)}, \end{equation} where $N$ is the size of the sample data and $F(\gamma)$ is the classical Fisher information (FI), defined as \begin{equation}\label{eq:fisher_information_definition}
F(\gamma) = \expected{\left(\frac{\partial \ln p(x|\gamma)}{\partial \gamma}\right)^2}. \end{equation}
In Eq. \eqref{eq:fisher_information_definition} $p(x|\gamma)$ is the probability that the outcome of a measurement is $x$ when the value of the parameter is $\gamma$, and $\expected{\cdot}$ is the expected value over the probability distribution $p(x|\gamma)$.
\par If the system is quantum, then $p(x|\gamma) = \text{Tr} (\rho_\gamma \Pi_x)$, where $\rho_\gamma$ is the density operator and $\Pi_x$ is the POVM operator for the outcome $x$. By introducing the logarithmic symmetric derivative $L_\gamma$, satisfying $2\partial_\gamma \rho_\gamma = L_\gamma \rho_\gamma + \rho_\gamma L_\gamma$, we can rewrite Eq. \eqref{eq:fisher_information_definition} as \begin{equation}
F(\gamma) = \expected{\frac{\Re[\text{Tr}(\rho_\gamma \Pi_x L_\gamma)]^2}{\text{Tr}(\rho_\gamma \Pi_x)}}. \end{equation} By maximizing $F(\gamma)$ over all possible quantum measurements on the systems we obtain the quantum Fisher information (QFI) $H(\gamma)$, which has the following expression \cite{Paris2009}: \begin{equation}
H(\gamma) = \text{Tr}(\rho_\gamma L_\gamma^2). \end{equation} We can thus write a quantum version of the Cramér-Rao bound, \begin{equation}\label{eq:quantum_cramer_rao}
\delta\gamma^2 \geq \frac 1 {N H(\gamma)}, \end{equation} which gives the ultimate precision achievable on the estimation of $\gamma$ with a quantum measurement. The QFI can be calculated explicitly after a diagonalization of the density operator. Upon writing $\rho_\gamma = \sum_n p_n \ket{\psi_n}\bra{\psi_n}$, we get \begin{equation}\label{eq:qfi}
H(\gamma) = 2 \sum_{n,m} \frac{|\braket{\psi_m|\partial_\gamma \rho_\gamma|\psi_n}|^2}{p_n+p_m}, \end{equation} where the sum is carried out over all $n$ and $m$ such that $p_n+p_m\neq 0$. If the state of the quantum system is pure, $\rho_\gamma = \ket{\psi_\gamma}\bra{\psi_\gamma}$, Eq. \eqref{eq:qfi} reduces to \begin{equation}\label{eq:qfi_pure_state}
\begin{split}
H(\gamma) = 4 & \left[ \braket{\partial_\gamma \psi_\gamma| \partial_\gamma\psi_\gamma} + \braket{\partial_\gamma\psi_\gamma|\psi_\gamma}^2 \right. \\
& \left. + \braket{\psi_\gamma|\partial_\gamma\psi_\gamma}^2 + |\braket{\partial_\gamma\psi_\gamma|\psi_\gamma}|^2\right].
\end{split} \end{equation}
\section{The interaction model} \label{sec:model} In this work we consider a lossy bosonic channel with a loss rate parameter $\gamma$, which is the quantity that we want to estimate, where non-linear Kerr effect with coupling $\tilde\lambda$ is present. In the absence of any non-linear effect and working in the interaction picture, the density operator $\rho$ for a single bosonic mode in the channel satisfies a Lindblad master equation of the form \begin{equation}\label{eq:master_eq_loss}
\begin{split}
\frac{d\rho}{dt} & = \frac{\gamma}{2} \mathcal{L}[a]\rho \\
& = \gamma( a \rho a^\dagger - \frac 12 a^\dagger a \rho - \frac 12 \rho a^\dagger a),
\end{split} \end{equation} where $a$ is the annihilation operator in the Fock space of the bosonic mode and $\mathcal L$ is the Lindblad operator. This equation can be obtained, for instance, from the interaction of the bosonic mode with a bath of harmonic oscillators at zero temperature. The evolution through a Gaussian lossy channel can also be represented as the interaction of the input state with a beam splitter \cite{DAriano1994}, i.e. a bilinear evolution operator $U(\phi) = \exp [ i \phi(a^{\dag}b + a b^{\dag} )]$; the auxiliary mode $b$ is traced out at the end and it is initially in its vacuum state. This picture is connected to the master equation \eqref{eq:master_eq_loss} by the relation $\tan^2 \phi = e^{\gamma t} - 1$; as a matter of fact in previous works \cite{Monras2007,Adesso2009} the estimation of $\gamma$ was recast as the estimation of $\phi$. \par The Kerr interaction is described by a non-linear term in the Hamiltonian of the system, namely \begin{equation}\label{eq:Kerr_Hamiltonian}
H_K = \tilde\lambda (a^\dagger a)^2. \end{equation} To take into account this effect, the master equation in Eq. \eqref{eq:master_eq_loss} now becomes \begin{equation}\label{eq:Kerr_master_equation}
\frac{d\rho}{dt} = -i [H_K,\rho] + \frac{\gamma}{2} \mathcal{L}[a]\rho. \end{equation} Upon rescaling the quantities with respect to the loss parameter $\gamma$ \begin{equation}
\tau = \gamma t, \qquad \lambda =\tilde \lambda/\gamma, \end{equation} we arrive at \begin{equation}
\frac{d\rho}{d\tau} = -i \lambda [(a^\dagger a)^2,\rho] + a \rho a^\dagger - \frac 12 a^\dagger a \rho - \frac 12 \rho a^\dagger a, \end{equation} which corresponds to the following system of equations for the matrix elements of $\rho$: \begin{equation}\label{eq:Kerr_master_equation_matrix_el}
\begin{split}
\frac{d\rho_{p,q}}{d\tau} = & - \left[ i {\lambda} (p^2-q^2) + \frac 12 (p+q)\right]\rho_{p,q} \\ & + \sqrt{(1+p)(1+q)} \rho_{p+1,q+1}.
\end{split} \end{equation} The solution for the $\rho_{p,q}$ can be found easily if the initial state is a coherent state, $\rho_0 = \ket{\alpha}\bra{\alpha}$. It reads \begin{equation}\label{eq:matrix_el_exact}
\begin{split}
\rho_{p,q}&(\tau) = \frac{\alpha^p\overline\alpha^q}{\sqrt{p!q!}} \\ & \exp\left\{-\frac 12 (p+q) \Delta \tau - |\alpha|^2\left[ 1-\frac{1-e^{-\Delta \tau}}{\Delta} \right]\right\},
\end{split} \end{equation} where $ \Delta = 1 + 2 i \lambda (p-q) $. \par We will also consider the case of a squeezed vacuum initial state
$\rho_0=|r\rangle \langle r |$, where we restrict to a real squeezing parameter $r$, so that the squeezing operator reads $S(r) = \exp\left( \frac{r}{2} (a^{\dag 2} - a^{2}) \right)$. The explicit analytical expression of the matrix elements of the solution with this initial state can be found in Refs. \cite{Milburn1989,Perinova1988}, but the matrix elements are known also for arbitrary initial states \cite{Peinova1990,Chaturvedi1991}. Notice that for the lossy channel (i.e. a thermal bath at zero temperature) these analytical expressions of the matrix elements are suitable for a numerical computation of the values of the relevant observables. As a matter of fact it is possible to work in a truncated Hilbert space in the Fock basis, since the loss only drives the system into smaller subspaces; this would not be possible if we considered both loss and noise (i.e. a bath with finite temperature). Notice also that $\rho(\tau)$ is in general a mixed state and cannot be diagonalized explicitly, such that an analytic expression for the quantum Fisher information is not available.
\section{Solution in the absence of non-linear effects} \label{sec:absence_of_non_linear_effects}
\begin{figure}
\caption{(Color online) Plot of the QFI in the absence of
non-linearity as a function of the rescaled time $\tau$ for different
probe states at the fixed mean input energy $\bar{n}=1$. The solid
blue line represents the optimal Fock state $|1\rangle$, the dashed
orange line represent a coherent state, while the dot-dashed green
line represent the squeezed vacuum. The graph reflects the general
fact that a Fock state is always optimal and for $\tau \to 0$ the
optimal Gaussian state is the squeezed vacuum, while for greater
values a coherent state allows for a better estimation.}
\label{fig:comparison_input_states}
\end{figure}
When $ \lambda = 0$, i.e. the non-linear effects are absent, the channel is Gaussian and in particular a coherent probe state remains pure and coherent during the evolution: \begin{equation}\label{eq:state_no_kerr}
\ket{\psi_\gamma(\tau)} = \ket{\alpha e^{-\frac 12 \tau}}. \end{equation} An analytic expression for the QFI is easily obtained using Eq. \eqref{eq:qfi_pure_state}: \begin{equation}\label{eq:qfi_no_kerr}
H_\gamma^{\text{c}}(\tau) = \frac{\bar{n}}{\gamma^2} \tau^2 e^{-\tau}, \end{equation} while for the squeezed vacuum the solution is \cite{Monras2007}: \begin{equation}\label{eq:qfi_no_kerr_sq}
H_\gamma^{\text{sv}} (\tau)= \frac{\left(-2 e^{\tau }+e^{2 \tau }+2\right) \tau ^2 \bar{n}}{\gamma ^2 \left(e^{\tau }-1\right) \left(2 e^{\tau } \bar{n}-2 \bar{n}+e^{2 \tau }\right)}, \end{equation}
where $\bar{n}=|\alpha|^2$ for the coherent state and $\bar{n}=\sinh^2 r$ for the squeezed vacuum. We also report the QFI for Fock probe states $|n\rangle$, which is optimal when the mean energy is an integer ($\bar{n}=n$): \begin{equation}\label{eq:qfi_no_kerr_fock}
H_\gamma^{\text{F}}(\tau)=\frac{\bar{n} \tau^2}{\gamma^2 (e^{\tau}-1)}. \end{equation} \par Notice that in general the quantum signal-to-noise ratio (QSNR) $\gamma^2 H_\gamma(\tau)$ does not depend on $\gamma$: this means that the bound on the relative error on the estimation of $\gamma$ is constant.
\begin{figure}
\caption{(Color online) Plot of the QFI as a function of the rescaled
time $\tau$ for different probe states at the fixed mean input energy
$\bar{n}=1$. The solid curves are obtained in the absence of
nonlinearity, while the dashed curves are obtained in the presence of Kerr
nonlinearity (with $\lambda=0.5$). The solid blue and dashed orange
curves which lie on top in the region $\tau \approx 2$ refer to the
coherent state probe, while the solid green and dashed orange curves
which lie on top in the region $\tau \approx 0$ refer to the squeezed
vacuum probe. In the inset panel we represent the relative gain
$G(\tau) \equiv H_{\lambda,\gamma}(\tau)/H_\gamma(\tau) - 1$ of the
QFI in the presence of non-linearity over the QFI without Kerr effect,
shown in percentage. The solid blue line represents the coherent
probe, while the dashed green line represents the squeezed vacuum. In
both cases there is a peak in gain at $\tau \lesssim 1$, much more
pronounced for the squeezed vacuum state. The gain vanishes for
increasing $\tau$, but a second, smaller peak can be observed for the
coherent state.}
\label{fig:gain_time}
\end{figure} In Figure \ref{fig:comparison_input_states} we represent the plots of the QFI for the three probe states; this also sums up previous results \cite{Monras2007,Adesso2009} by showing that for small losses the optimal Gaussian state is the squeezed vacuum, for higher losses a coherent state is better, while a Fock state is optimal for every $\tau$. Moreover, we observe that in general $H_\gamma(\tau)$ vanishes for $\tau \gg 1$ and has a global maximum at a certain time $\overline \tau$. This means that if one is able to control the interaction time in an experiment, setting it to $\overline\tau$ allows for optimal estimation of $\gamma$. In particular for the coherent state the optimal time is $\overline \tau = 2$, with the following optimal value: \begin{equation}\label{eq:qfi_no_kerr_opt}
\overline H_\gamma^{\text{c}} = \frac{4 |\alpha|^2}{e^2 \gamma ^2}. \end{equation} As a matter of fact, for coherent states the QFI is saturated by photon-number and quadrature measurements. Let us compute the Fisher information (FI) for these two measurements:
The probability distribution for a photon counting experiment for the state is a Poisson distribution with mean $\mu = |\alpha e^{-\tau/2}|^2$. The FI for a Poissonian is $\mu^{-1}$, hence, using the chain rule of derivatives, we get \begin{equation}
\begin{split}
F_n(\gamma,\tau) & = \left(\frac{\partial \tau}{\partial \gamma}\right)^2 \left(\frac{\partial \mu}{\partial \tau}\right)^2 \frac 1 \mu =\frac{|\alpha|^2}{\gamma^2} \tau^2e^{-\tau}.
\end{split} \end{equation} The probability distribution for the quadrature measurement $x= (a + a^\dagger) / \sqrt{2}$ is \begin{equation}
p(x|\gamma) = |\braket{x|\alpha e^{-\tau / 2}}|^2 = \frac{e^{-\left(x-\sqrt{2} \Re(\alpha) e^{-\tau/2}\right)^2}}{\sqrt{\pi }} \end{equation} and hence the Fisher information, Eq. \eqref{eq:fisher_information_definition}, is \begin{equation}
F_x(\gamma,t) = \frac{\tau^2 e^{-\tau}\Re(\alpha)^2}{\gamma^2}. \end{equation} We see that $F_x(\gamma,t)= H_\gamma(t)$ as long as $\alpha$ is chosen to be real. If $\alpha$ has a complex phase it suffices to choose the proper quadrature or to apply a phase shift to the coherent state to saturate the QFI.
\section{Solution in the presence of Kerr effect} \label{sec:solution_in_presence_of_kerr_effect}
As stated in Section \ref{sec:model}, with $\lambda \neq 0$ the state $\rho(\tau)$ is a mixed state and not explicitly diagonalizable. In the following, we present an approximate solution for the coherent probe state, valid in the regime of small $\lambda$ and $\tau$, in which the state of the system remains pure and it is thus possible to get an analytical expression for the QFI. Then we show numerical results obtained from a truncation of the Fock space for both coherent and squeezed vacuum probe states. The results are presented both for the optimal time and small time cases; at optimal time only the coherent input is considered since the optimal value of the QFI is always greater than the optimal value of the squeezed vacuum QFI. This fact can be seen in Fig. \ref{fig:gain_time}, where we show the behavior of the QFI with and without Kerr interaction for both the Gaussian probes we are considering. From the particular choice of parameters in Fig. \ref{fig:gain_time} we see that the QFI with nonlinear interaction always has a greater value: we will show that this is true in general.
\subsection{Pure state approximation} \label{sub:pure_state_approximation}
\begin{figure}
\caption{(Color online) Fidelity between the pure state of Eq.
\eqref{eq:pure_state_approx} and the exact state (truncation at
10 photons), for $\alpha = 0.5$ (orange), $\alpha = 0.75$ (blue)
and $\alpha = 1$ (green). The fidelity decreases with increasing
$\lambda$ and $\alpha$. It temporarily decreases with time, but
it tends asymptotically to one as the system reaches the state
$\ket{0}$. For small values of $\alpha$ and $\lambda$ the pure
state approximation has fidelity above 0.99, which then
decreases as the energy of the state increases.}
\label{fig:fidelity}
\end{figure} When we work with a coherent input state and the non-linear effect is small compared to the loss parameter, i.e. when $ \lambda \ll 1$, the state of the system can still be approximated with a pure state for small $\tau$. Expansion of the exponent of $e$ in Eq. \eqref{eq:matrix_el_exact} to the first order in $ \lambda$ and then expansion to the second order of $\tau$ yields \begin{equation}\label{eq:pure_state_approx}
\begin{split}
\rho_{p,q}(\tau) = & \frac{\alpha^p\overline\alpha^q}{\sqrt{p!q!}} \exp\left\{-\frac 12 (p+q) \tau - e^{-\tau} |\alpha|^2 \right. \\
& \left.- i \lambda (p^2 - q^2) \tau - i \lambda |\alpha|^2 (p - q)\tau^2 \right\}.
\end{split} \end{equation} This is the lowest order of expansion for which we obtain a correction to the quantum Fisher information of Eq. \eqref{eq:qfi_no_kerr}.
The QFI computed for $\rho_{p,q}(\tau)$ of Eq. \eqref{eq:pure_state_approx} is \begin{equation}\label{eq:qfi_pure_state_approx}
H_{\lambda,\gamma}^{\text{c}}(\tau) = \frac {\left| \alpha \right| ^2}{\gamma^2} \tau^2 e^{- \tau} \left(1+4 \lambda ^2 \tau^2 \left| \alpha \right| ^4\right) + O(\lambda^3). \end{equation}
We notice that $H_{\lambda,\gamma}^{\text{c}}(\tau)$ adds a correction of second order in $\lambda$ and in $\tau$ to $H_\gamma^{\text{c}}(\tau)$ of Eq. \eqref{eq:qfi_no_kerr}. If we define the \emph{relative gain} in the estimation of $\gamma$ as $G_{\lambda}(\tau)\equiv H_{\lambda,\gamma}(\tau)/H_\gamma(\tau) - 1$, then using the pure state approximation it reads: \begin{equation}\label{eq:relgain_coh}
G_{\lambda}^{\text{c}}(\tau)=4\lambda^2 \tau^2 |\alpha|^4 + O(\lambda^3). \end{equation}
The optimal time, up to the second order in $\lambda$, is \begin{equation}
\overline{\tau}(\lambda) = 2 + 32 \lambda^2 |\alpha|^4 + O(\lambda^3) \end{equation} and the corresponding optimal QFI is \begin{equation}\label{eq:qfi_opt_pure_state_approx}
\overline{H}_\gamma^{\text{c}}(\lambda) = \frac{4 \left| \alpha \right| ^2}{e^2\gamma^2}(1 + 16 \lambda ^2 \left| \alpha \right| ^4)+O(\lambda^3); \end{equation} so the \emph{optimal relative gain} $\overline{G}_{\lambda} \equiv \overline{H}_{\lambda,\gamma}/\overline{H}_\gamma - 1$ is \begin{equation}\label{eq:relgain_opt_coh}
\overline{G}_{\lambda}^{\text{c}}=16 \lambda^2 |\alpha|^4 + O(\lambda^3). \end{equation}
Equations \eqref{eq:relgain_coh} and \eqref{eq:relgain_opt_coh} show that the correction to the QFI due to the presence of a small non-linear effect is positive and increases with $\lambda^2$. This means that the nonlinearity of the dispersive medium can be a resource in the estimation of the loss parameter.
The fidelity of the approximate state of Eq. \eqref{eq:pure_state_approx} to the exact state (after a truncation of the density matrix) is shown in Fig. \ref{fig:fidelity} as a function of $\tau$ and $\lambda$, for three values of $\alpha$. The pure state approximation is good for a wide range of parameters only if the energy of the initial state is not too big, so that fidelity is close to one \cite{Bina2014,Mandarino2014}. This means that the analytical expression of the optimal relative gain \eqref{eq:relgain_opt_coh} is good only for small energies, while at a fixed small time $\tau \ll 1$ the relative gain \eqref{eq:relgain_coh} is a good approximation even for higher input energies.
In Subsection \ref{sub:numerical_results} we calculate the QFI numerically for general values of $\lambda$ and $\alpha$, in order to verify the increase of the QFI also for regions where the pure-state approximation does not hold.
\subsection{Numerical results} \label{sub:numerical_results} As the density matrix cannot be diagonalized in general and the Fock space is infinite-dimensional, in order to evaluate the QFI we resort to numerical diagonalization of the density matrix in a truncated Fock space. The truncation size, which depends on the input energy, is chosen in such a way that the difference between the analytical and the numerical QFI for $\lambda=0$ must be less than $0.001\%$.
\begin{figure}
\caption{(Color online) Optimal relative gain $\overline{G}\equiv \overline{H}_{\lambda,\gamma}/\overline{H}_\gamma - 1$ of the optimal QFI in presence of non-linearity over the optimal QFI without Kerr effect for different regions of $\alpha$ and $\lambda$, shown in percentage. On the left, a 3D plot, on the right the corresponding contour plot. We can see that the gain is always greater than zero, vanishing for large $\lambda$ and $\alpha$. We can identify two regimes: The first regime, visible in the upper panels when $\alpha \lesssim 2$ is characterized by the presence of local maxima of the gain, which reaches values of about $2 \%$. For large $\lambda$ the improvement reaches a non-vanishing asymptotical value. In the second regime, visible in the lower panels, at fixed $\alpha$ the gain has a single maximum with respect to $\lambda$. As $\alpha$ increases, the maximum moves to smaller values of $\lambda$, but $G$ increases.}
\label{fig:improvement}
\end{figure}
\begin{figure*}
\caption{(Color online) Relative gain $G(\tau) \equiv H_{\lambda,\gamma}(\tau)/H_\gamma(\tau) - 1$ of the QFI in presence of non-linearity over the QFI without Kerr effect at fixed time for a coherent probe state (top) and for a squeezed vacuum probe state (bottom), shown in percentage. From left to right we have the results for $\tau=0.5$, $0.1$, $0.01$. For coherent states we can see a structure similar to that of Fig. \ref{fig:improvement}: the relative gain increases with $\alpha$ and $\lambda$ until it reaches a maximal value, but at small $\tau$ the relative gain is much higher than at the optimal time. For the squeezed vacuum state the gain is smaller as $\tau$ gets smaller (cfr. Fig. \ref{fig:gain_time}).}
\label{fig:improvement_smallt}
\end{figure*}
\subsubsection{Optimal QFI} The behavior of the QFI as a function of time for fixed $\lambda$ and $\alpha$ is shown in Fig. \ref{fig:gain_time}. The QFI starts from zero and reaches a maximum, then vanishes as $\tau$ increases and the system reaches the zero-photon state $\ket 0$. Assuming that we are able to control the interaction time of the probe with the channel, we can consider as a figure of merit the optimal QFI, i.e. the maximum of $H_{\lambda,\gamma}(t)$ over time.
In Fig. \ref{fig:improvement} we show the optimal relative gain in the estimation of $\gamma$. The first notable result is the confirmation of the results obtained in the pure state approximation: the optimal QFI in presence of non-linearity is always greater than without Kerr effect, i.e. the optimal relative gain is always greater than zero. It vanishes for increasing $\alpha$ and $\lambda$ and for $\alpha \rightarrow 0$.
By looking at the panels of Fig. \ref{fig:improvement}, we can identify two regimes. The first regime, for $\alpha \lesssim 2$, is characterized by the presence of local maxima of the gain. At fixed $\alpha$, the maxima occur periodically, with $G$ reaching an asymptotic value for $\lambda \rightarrow \infty$. In the second regime, for $\alpha \gtrsim 2$, there is a single local maximum for the gain at fixed $\alpha$. For increasing $\alpha$, the optimal $\lambda$ decreases, but $G$ increases. It is not clear if there is a local maximum for $\alpha$ greater than the values under investigation or if this behavior will persist for $\alpha \rightarrow \infty$, and, in the latter case, if $G$ increases indefinitely or saturates with $\alpha$.
\subsubsection{Small time QFI}
Now, instead of studying the QFI maximized over time, we look at the behavior at a fixed time; in particular, we focus on times smaller than the characteristic time of the loss, i.e. $\tau < 1$. As an example, we study three cases: $\tau=0.5,0.1,0.01$. This regime is of interest for media of moderate size, such as biological samples.
In this setting the improvement brought by the nonlinear interaction can be substantial. In Fig. \ref{fig:improvement_smallt} we show the results for a coherent probe state (top row) and for a squeezed vacuum probe state (bottom row). For the squeezed probe we restricted the computation to a smaller range of mean input energies, as the dimension of the truncated Hilbert space needed to obtain a good approximation grows much more rapidly.
By looking at the top-left panel in Fig. \ref{fig:improvement_smallt}, the one for $\tau=0.5$, we notice a similar structure to the one in Fig. \ref{fig:improvement}, albeit rescaled. We found that fixing the time parameter $\tau$ changes the scaling in the $\alpha-\lambda$ (or $\bar{n}-\lambda$) plane; however, it was not possible to explicitly see this scaling from the analytical expressions of the states.
The improvement due to the Kerr nonlinearity is much more relevant at times which do not correspond to the optimal time, indeed in Fig. \ref{fig:gain_time} we see that the maxima of the graph in the inset panel do not correspond to the ones in the main graph. Moreover, even if the behavior of different input states is slightly different, the most relevant improvement is always obtained for $\tau<1$, this is due to the fact that the value of the QFI at those times is smaller, so that a slight improvement in the absolute value brings a great relative gain.
\subsubsection{FI for the quadrature measurement with coherent probe} \label{ssub:fi_for_the_quadrature_measurement}
Although the optimal QFI is improved by the Kerr effect, we need to find the actual measurement that reaches the quantum bound. In Section \ref{sec:absence_of_non_linear_effects} we showed that for a coherent probe both photon counting and quadrature measurement are optimal when $\lambda = 0$, however they are not optimal if the nonlinear term is present. Indeed, photon counting is not affected at all by the Kerr effect, as the diagonal elements of the density matrix are independent of $\lambda$. For this reason we study numerically the effect of nonlinearity on a quadrature measurement. We present the results for a coherent probe state; the analysis is less interesting for a squeezed vacuum probe as the optimal measurement in the linear case is not just a quadrature measurement, but is given by Gaussian operations and photon counting \cite{Monras2007}.
We found that in general the quadrature measurement is not optimal, i.e. the Fisher information is always lower than the QFI. This fact is presented in the left panel of Fig. \ref{fig:FI_vs_QFI}, for measurements at the optimal time, where the ratio $\overline R = F_x(\overline \tau)/\overline H_\gamma(\lambda)$ is shown. Here $\overline H_\gamma(\lambda)$ is the optimal QFI and $F_x(\overline \tau)$ is the FI of the quadrature measurement at the time $\overline\tau$ that optimizes the QFI, after an optimization over the quadrature phase (the optimal quadrature phase depends on $\alpha$ and $\lambda$). The ratio is close to one only for $\lambda$ close to zero or $\alpha \ll 1$. For increasing $\alpha$ and $\lambda$ the ratio appears to tend asymptotically to $1/3$.
In the small time regime a quadrature measurement is still sub-optimal in presence of nonlinearity, however in some cases such a measurement can perform better than the best possible measurement in the linear case, because the relative improvement of the QFI in this regime is substantial.
In particular, this behaviour seems to increase with increasing nonlinearity $\lambda$ and increasing input energy $\alpha$, however we can see from the right panel of Fig. \ref{fig:FI_vs_QFI} that oscillations are present and there are small regions where a quadrature measurement does not give an improvement, i.e. $R < 1$.
\begin{figure}
\caption{(Color online) In the left panel we show the ratio $\overline R =
F_x(\overline \tau)/\overline H_\gamma^c(\lambda)$ between the
FI of the
quadrature at the time $\overline\tau$, $F_x(\overline \tau)$,
after an optimization over the quadrature phase
and the
optimal QFI $\overline H_\gamma(\lambda)$,
for various
values of $\lambda$ and $\alpha$. The quadrature measurement is
optimal only for $\lambda = 0$ and for vanishing energy of the
probe ($\alpha \rightarrow 0$). For $\alpha \lesssim 2$ the
ratio oscillates with $\lambda$. For large $\alpha$ and
$\lambda$ the ratio reaches asymptotically the value of $1/3$.
In the right panel we show the ratio
$R=F_x(\tau)/H_{\gamma}^{c}(\tau)$ for fixed small $\tau=0.1$;
the quantity $H_{\gamma}^c$ is the QFI without nonlinearities
(Eq. \eqref{eq:qfi_no_kerr}). The quadrature measurement in
presence of the Kerr effect achieves increasingly better
performances for increasing values of $\lambda$ and $\alpha$,
even if the ratio has a slightly oscillating behaviour and there are some regions in which $R < 1$, i.e. the Kerr effect is slightly detrimental.}
\label{fig:FI_vs_QFI}
\end{figure}
\subsection{Results with optical qutrit states} \label{subsec:qutrit}
One may wonder what happens if the optimal Fock states are used as probes, instead of Gaussian states.
The obvious answer is that the Kerr nonlinear term $(a^{\dag} a)^2$ does not affect single Fock states, but even a simple superposition of the form $a |0\rangle + b |n \rangle$ is not affected. The simplest superposition affected by the nonlinear evolution is the optical qutrit state \begin{equation} \label{eq:qutrit}
\cos\theta |0\rangle + e^{i \mu }\sin\theta\sin\varphi
|1\rangle + e^{i \nu }\sin\theta \cos\varphi
|2 \rangle, \end{equation} where $\theta$ is fixed by choosing the mean energy $\bar{n}$ as the relevant parameter, so that $\theta = \arcsin \sqrt{2 \bar{n}/(3 + \cos 2 \varphi)}$.
In the Gaussian lossy evolution, without Kerr nonlinearity, these qutrit states approximate the optimal non-Gaussian states when the mean energy $\bar{n}$ is not an integer; this is particularly important for the low energy regime $\bar{n}<1$ \cite{Adesso2009}.
In general, the maximum value of the QFI obtainable with the state \eqref{eq:qutrit} is the same regardless of the Kerr term in the evolution, but the maximum happens for different values of the initial parameters and at a different time. This is due to the fact that during the evolution the system is constrained to remain in the subspace of dimension three; so if we optimize on every possible parameter there is no room for improvement left.
However in order to achieve the maximal QFI one should be able to tune the value of the initial parameters for every mean energy $\bar{n}$, and in the nonlinear case also for every value of $\lambda$. In particular in the linear case the result must be optimized only over the parameter $\varphi$, since the relative phases $\mu$ and $\nu$ give an optimal result for the value $\pi$.
We thus resort to work in a setting similar to the one used to study the optimal gain for the coherent states: given a \emph{fixed} initial state we check if the nonlinear evolution brings an improvement. In particular we fix $\mu=\nu=\pi$ and we check the behaviour of the quantum Fisher information for different values of $\varphi$, while optimizing over time $t$. The results are in Fig. \ref{fig:improvement_qutrit}: we find that on average the nonlinear term brings an improvement for values of $\lambda \approx 1$, i.e. when the nonlinear parameter is approximately equal to the loss parameter to estimate. For higher values of $\lambda$ we have an oscillatory behaviour and on average the nonlinearity can also be detrimental.
We also found that at fixed small times the nonlinear Kerr term does not always bring an improvement on average when using qutrit states.
\begin{figure}
\caption{(Color online) Average relative gain of the optimal QFI in presence of non-linearity over the optimal QFI without Kerr effect for qutrit states, shown in percentage. The range of the parameters are $0<\bar{n}<1$ and $0<\lambda<\pi$. On the left, a 3D plot, on the right the corresponding contour plot. Every point in the plot is the average improvement obtained by generating 1000 random values of the parameter $\varphi$ of the state \eqref{eq:qutrit} in the range $\left(0,\frac{\pi}{2}\right)$, while the phases are fixed $\mu=\nu=\pi$ and $\theta$ is fixed by the choice of the mean energy $\bar{n}$.}
\label{fig:improvement_qutrit}
\end{figure}
\subsection{Discussion} The nonlinear Kerr interaction makes the initial Gaussian probe non-Gaussian during the evolution and a question arises on whether the observed increase of the QFI may be quantitatively linked to some quantifier of non-Gaussianity \cite{Genoni2010}. Indeed, it would be desirable to identify the proper resource which guarantees the improvement in the estimation by means of a nonlinear interaction, since this would represent a guideline to engineer optimal estimation schemes. On the other hand also a qualitative indicator to assess the effectiveness of Kerr interaction to enhance precision may be useful. \par In previous works it has been conjectured \cite{Adesso2009} that a family of optimal non-Gaussian states exists for any fixed energy, but the authors remark that non-Gaussianity in itself cannot be a resource since there are non-Gaussian states which are far less efficient probes than the optimal Gaussian ones. Hereby we confirm that result. In fact, during its evolution a Gaussian input state first becomes non-Gaussian and then it evolves towards the Gaussian state $\ket{0}$, which is the stationary state. This qualitative behaviour is also shown by the relative gain in the estimation of $\gamma$, as can be seen in Fig. \ref{fig:gain_time}. These two quantities, however, do not have a quantitative relation in general, e.g. states leading to the largest improvement at optimal time are not the most non-Gaussian. \par Overall, our results show that while the evolution drives the Gaussian input into a set of non-Gaussian states which are more sensitive to loss detection, non-Gaussianity is not a resource in itself. This idea is confirmed by looking at the behaviour of qutrit probe states, which are already highly non-Gaussian: there we find evidence that the Kerr interaction may be detrimental in some regimes, whereas when an improvement is present, the states are not necessarily more non-Gaussian.
\section{Conclusions} \label{sec:conclusions} In conclusion, we have addressed the characterization of dissipative bosonic channels in the presence of nonlinearity and shown that the estimation of the loss rate by coherent or squeezed probes is improved in the presence of Kerr nonlinearity. In particular, enhancement of precision may be substantial for short interaction time, i.e. for media of moderate size, whereas for larger media the improvement is asymptotically negligible. \par We have analyzed in detail the behaviour of the quantum Fisher information (QFI), and have found the values of nonlinearity maximizing the QFI as a function of the interaction time and of the parameters of the input signal. We have also shown that Kerr nonlinearity may be helpful also when using few-photon probes such as optical qutrits. \par We have discussed the precision achievable by photon counting and quadrature measurement, showing that they cannot, in general, achieve the QFI in the presence of nonlinearity. On the other hand, for short interaction times even this suboptimal measurement offers a precision improvement compared to the linear case. \par Finally, we have discussed the possible origin of the precision enhancement, showing that it cannot be linked quantitatively to the non-Gaussianity of the interacting probe signal.
\begin{acknowledgments} The authors thank Benoit Vallet for his contribution in the early stage of this project. This work has been supported by EU through the Collaborative Project QuProCS (Grant Agreement 641277) and by UniMI through the H2020 Transition Grant 15-6-3008000-625. \end{acknowledgments}
\end{document}
\begin{document}
\title{Maximum likelihood estimation in constrained parameter spaces for mixtures of factor analyzers}
\author{Francesca Greselin \and Salvatore Ingrassia} \institute{Francesca Greselin \at Department of Statistics and Quantitative Methods\\ Milano-Bicocca University\\ Via Bicocca degli Arcimboldi 8 - 20126 Milano (Italy). \email{francesca.greselin@unimib.it} \and Salvatore Ingrassia \at Department of Economics and Business\\ University of Catania\\ Corso Italia 55 - Catania (Italy). \email{s.ingrassia@unict.it} }
\date{Received: date / Accepted: date}
\maketitle
\begin{abstract} Mixtures of factor analyzers are becoming more and more popular in the area of model based clustering of high-dimensional data. According to the likelihood approach in data modeling, it is well known that the unconstrained log-likelihood function may present spurious maxima and singularities and this is due to specific patterns of the estimated covariance structure, when their determinant approaches 0. To reduce such drawbacks, in this paper we introduce a procedure for the parameter estimation of mixtures of factor analyzers, which maximizes the likelihood function in a constrained parameter space.
We then analyze and measure its performance, compared to the usual non-constrained approach, via some simulations and applications to real data sets.
\keywords{Constrained estimation \and Factor Analyzers Modeling \and Mixture Models \and Model-Based Clustering.}
\end{abstract}
\section{Introduction and motivation} Finite mixture distributions have been receiving a growing interest in statistical modeling. Their central role is mainly due to their double nature: they combine the flexibility of non-parametric models with the strong and useful mathematical properties of parametric models. According to this approach, when we know that a sample of observations has been drawn from different populations, we assume a specific distributional form in each of the underlying populations. The purpose is to decompose the sample into its mixture components, which, for quantitative data, are usually modeled as a multivariate Gaussian distribution, and to estimate parameters. The assumption of underlying normality, besides the elegant analytic properties, allows also to employ the EM algorithm for the ML estimation of the parameters. On the other side, when considering a large number of observed variables, Gaussian mixture models can provide an over-parameterized solution as, besides the mixing weights, it is required to estimate the mean vector and the covariance matrix for each component \citep{Peel:McLa:2000}. As a consequence, we observe at the same time an undue load of computationally intensive procedures for the estimation.
This is the reason why a number of strategies have been introduced in the literature to avoid over-parameterized solutions. Among the various proposals, some authors developed methodologies for variable selection (see, f.i., \citet{Liu:2003} and \citet{Hoff:2005} in the Bayesian framework, \citet{Pan:Shen:2007} and \citet{Raft:Dean:Vari:2006} in the frequentist one). They further motivate their approach from the observation that the presence of non-informative variables can be strongly misleading for some clustering methods. With the same purpose of parsimony, but a completely different approach, \citet{Banf:Raft:mode:1993} devised a methodology to identify common patterns among the component-covariance matrices; their proposal attracted great attention in the literature. Along a slightly different line of thinking, \citet{Ghah:Hilt:1997} and \citet{McLa:Peel:Bean:2003} proposed to employ latent variables to perform dimensional reduction in each component, starting from the consideration that in many phenomena a few unobserved features could be explained by the many observed ones.
In this paper we address mixtures of factor analyzers by assuming that the data have been generated by a linear factor model with latent variables modeled as Gaussian mixtures. Our purpose is to improve the performance of the EM algorithm, by addressing some of its issues and giving practical recipes to overcome them. It is well known that the EM algorithm generates a sequence of estimates, starting from an initial guess, so that the corresponding sequence of the log-likelihood values is not decreasing. However, the convergence toward the MLE is not guaranteed, because the log-likelihood is unbounded and presents local maxima. Another critical point is that the parameter estimates as well as the convergence of the whole estimation process may be affected by the starting values (see, f.i., \citet{McKr:theE:2007}) so that the final estimate crucially depends on the initial guess. This issue has been investigated by many authors, starting from the seminal paper of \cite{Redn:Walk:1984}. Along the lines of \citep{Ingr:2004}, in this paper we introduce and implement a procedure for the parameter estimation of mixtures of factor analyzers, which maximizes the likelihood function in a constrained parameter space, having no singularities and a reduced number of spurious local maxima. We then analyze and measure its performance, compared to the usual non-constrained approach.
We have organized the rest of the paper as follows. In Section \ref{sec:GaussianFA} we summarize main ideas about Gaussian Mixtures of Factor Analyzer model; in Section \ref{sec:The AECM algorithm} we provide fairly extensive notes concerning the likelihood function and the AECM algorithm. Some well known considerations \citep{Hath:Acon:1985} related to spurious maximizers and singularities in the EM algorithm are recalled in Section \ref{ConstrainedML}, and motivate our proposal to introduce constraints on factor analyzers. Further, we give a detailed methodology to implement such constraints into the EM algorithm. In Section \ref{sec:numerical results} we show and discuss the improved performance of our procedure, on the ground of some numerical results based on both simulated and real data. Section \ref{sec:concluding} contains concluding notes and provides ideas for future research.
\section{The Gaussian Mixture of Factor analyzers}\label{sec:GaussianFA}
Within the Gaussian Mixture (GM) model-based approach to density estimation and clustering, the density of the $d$-dimensional random variable $\mathbf{X}$ of interest is modelled as a mixture of a number, say $G$, of multivariate normal densities in some unknown proportions $\pi_1,\ldots, \pi_G$. That is, each data point is taken to be a realization of the mixture probability density function, \begin{equation} f(\mathbf{x};\mbox{\boldmath $\theta$})=\sum_{g=1}^G \pi_g \phi_d(\mathbf{x};\mbox{\boldmath $\mu$}_g,\mbox{\boldmath $\Sigma$}_g)\label{mixt-gaussian} \end{equation} where $\phi_d(\mathbf{x};\mbox{\boldmath $\mu$},\mbox{\boldmath $\Sigma$})$ denotes the $d$-variate normal density function with mean $\mbox{\boldmath $\mu$}$ and covariance matrix $\mbox{\boldmath $\Sigma$}$. Here the vector $\mbox{\boldmath $\theta$}_{GM}(d,G)$ of unknown parameters consists of the $(G-1)$ mixing proportions $\pi_g$, the $G \times d$ elements of the component means $\mbox{\boldmath $\mu$}_g$, and the ${1 \over 2} G d (d+1) $ distinct elements of the component-covariance matrices $\mbox{\boldmath $\Sigma$}_g$. Therefore, the $G$-component normal mixture model (\ref{mixt-gaussian}) with unrestricted component-covariance matrices is a highly parametrized model. We crucially need some method for parsimonious parametrization of the matrices $\mbox{\boldmath $\Sigma$}_g$, because they require $O(d^2)$ parameters. Among the various proposals for dimensionality reduction, we are interested here in considering Mixtures of Gaussian Factor Analyzers (MGFA), which allows one to explain data by explicitly modeling correlations between variables in multivariate observations. We postulate a finite mixture of linear sub-models for the distribution of the full observation vector $\mathbf{X}$, given the (unobservable) factors $\mathbf{U}$. 
That is we can provide a local dimensionality reduction method by assuming that the distribution of the observation $\mathbf{X}_i$ can be given as \begin{equation} \mathbf{X}_i=\mbox{\boldmath $\mu$}_g+\mbox{\boldmath $\Lambda$}_g\mathbf{U}_{ig}+\mathbf{e}_{ig} \quad\textrm{with probability }\quad \pi_g \: (g=1,\ldots,G) \quad \textrm{for} \,\, i=1,\ldots,n, \label{factor_an} \end{equation} where $\mbox{\boldmath $\Lambda$}_g$ is a $d \times q$ matrix of \textit{factor loadings}, the \textit{factors} $\mathbf{U}_{1g},\ldots, \mathbf{U}_{ng}$ are $\mathcal{N}(\mathbf{0},\mathbf{I}_q)$ distributed independently of the \textit{errors} $\mathbf{e}_{ig}$, which are independently $\mathcal{N}(\mathbf{0},\mbox{\boldmath $\Psi$}_g)$ distributed, and $\mbox{\boldmath $\Psi$}_g$ is a $d \times d$ diagonal matrix $(g=1,\ldots,G)$. We suppose that $q<d$, which means that $q$ unobservable factors are jointly explaining the $d$ observable features of the statistical units. Under these assumptions, the mixture of factor analyzers model is given by (\ref{mixt-gaussian}), where the $g$-th component-covariance matrix $\mbox{\boldmath $\Sigma$}_g$ has the form \begin{equation} \mbox{\boldmath $\Sigma$}_g=\mbox{\boldmath $\Lambda$}_g \mbox{\boldmath $\Lambda$}'_g+\mbox{\boldmath $\Psi$}_g \quad (g=1,\ldots,G). \label{Sigmag} \end{equation} The parameter vector $\mbox{\boldmath $\theta$}_{MGFA}(d,q,G)$ now consists of the elements of the component means $\mbox{\boldmath $\mu$}_g$, the $\mbox{\boldmath $\Lambda$}_g$, and the $\mbox{\boldmath $\Psi$}_g$, along with the mixing proportions $\pi_g$ $(g=1,\ldots,G-1)$, on putting $\pi_G=1-\sum_{i=1}^{G-1}\pi_g$. Note that in the case of $q>1$, there is an infinity of choices for $\mbox{\boldmath $\Lambda$}_g$, since model (\ref{factor_an}) is still satisfied if we replace $\mbox{\boldmath $\Lambda$}_g$ by $\mbox{\boldmath $\Lambda$}_g \mathbf{H}'$, where $\mathbf{H}$ is any orthogonal matrix of order $q$. 
As $q(q-1)/2$ constraints are needed for $\mbox{\boldmath $\Lambda$}_g$ to be uniquely defined, the number of free parameters, for each component of the mixture, is \[ dq+d-{1 \over 2 } q (q-1) . \]
Comparing the two approaches and willing now to measure the gained parsimony when we use mixtures of factor analyzers, with respect to the more usual gaussian mixtures, and denoting by $|\mbox{\boldmath $\theta$}_{CovGM}(d,G)|$ and $|\mbox{\boldmath $\theta$}_{CovMGFA}(d,q,G)|$, the number of the estimated parameters for the covariance matrices in the GM and MGFA models, respectively, we have to choose values of $q$ such that the following quantity $P$ is positive
\[P=|\mbox{\boldmath $\theta$}_{CovGM}(d,G)|-|\mbox{\boldmath $\theta$}_{CovMGFA}(d,q,G)|={ G \over 2 } d (d+1)-G[dq+d-{1 \over 2 } q (q-1)]\] i.e.: \[P={ G \over 2 } [(d-q)^2-(d+q) ] . \] This is the only requirement for parsimony. Now, we can express the relative reduction $RR(d,q,G)=RR(d,q)$ given by
\begin{align*}
RR(d,q) &=\frac{|\mbox{\boldmath $\theta$}_{CovGM}(d,G)|-|\mbox{\boldmath $\theta$}_{CovMGFA}(d,q,G)|}{|\mbox{\boldmath $\theta$}_{CovGM}(d,G)|} = \frac{(d-q)^2-(d+q)}{d(d+1)}. \end{align*}
In Table \ref{tab:RR} we report the relative reduction, in term of lower number of estimated parameters for the covariance matrices in the MGFA models, with respect to the GM models.
\begin{scriptsize}
\begin{table}[h!] \begin{center} \caption{Relative reduction $RR(d,q)$}\label{tab:RR}
\begin{tabular}{ c | ccc ccc ccc ccc ccc}
\hline
$q|d$ &1 &2 &3 &4 &5 &6 &7 &8 &9 &10 &11 &12 &13 &14 &15\\
1 &- &- &- &0.20 &0.33 &0.43 &0.50 &0.56 &0.60 &0.64 &0.67 &0.69 &0.71 &0.73 &0.75\\ 2 &- &- &- &- &0.07 &0.19 &0.29 &0.36 &0.42 &0.47 &0.52 &0.55 &0.58 &0.61 &0.63\\ 3 &- &- &- &- &- &- &0.11 &0.19 &0.27 &0.33 &0.38 &0.42 &0.46 &0.50 &0.53\\ 4 &- &- &- &- &- &- &- &0.06 &0.13 &0.20 &0.26 &0.31 &0.35 &0.39 &0.43\\ 5 &- &- &- &- &- &- &- &- &0.02 &0.09 &0.15 &0.21 &0.25 &0.30 &0.33 \\
\hline \end{tabular} \end{center} \end{table} \end{scriptsize} The relative reduction represents the extent to which the factor model offers a simpler interpretation for the behaviour of $\mathbf{x}$ than the alternative assumption given by the gaussian mixture model.
\section{The likelihood function and the EM algorithm for MGFA}\label{sec:The AECM algorithm}
In this section we summarize the main steps of the EM algorithm for mixtures of Factor analyzers, see e.g. \cite{McLa:Peel:fini:2000} for details.
Let ${\underset{\sim}{\bX}}=(\mathbf{x}_1, \ldots, \mathbf{x}_n)$ be a sample of size $n$ from density (\ref{mixt-gaussian}), and let $\mathbf{x}_i$ ($i=1, \ldots, n$) denotes the realization of $\mathbf{X}_i$ in (\ref{factor_an}). For given data ${\underset{\sim}{\bX}}$, parameters in (\ref{mixt-gaussian}) can be estimated according to the likelihood approach via the EM algorithm, where the likelihood function is given by: \begin{align*} L(\mbox{\boldmath $\theta$}; {\underset{\sim}{\bX}}) & = \prod_{i=1}^n \left\{ \sum_{g=1}^G \phi_d(\mathbf{x}_i; \mbox{\boldmath $\mu$}_g,\mbox{\boldmath $\Sigma$}_g) \, \pi_g \right\} = \prod_{i=1}^n \left\{ \sum_{g=1}^G \phi_d(\mathbf{x}_i; \mbox{\boldmath $\mu$}_g,\mbox{\boldmath $\Lambda$}_g,\mbox{\boldmath $\Psi$}_g) \, \pi_g \right\} \, , \end{align*} where we set $\mbox{\boldmath $\Sigma$}_g = \mbox{\boldmath $\Lambda$}_g \mbox{\boldmath $\Lambda$}'_g + \mbox{\boldmath $\Psi$}_g$ ($g=1, \ldots, G$). Consider the augmented data $\{ (\mathbf{x}_i, \mathbf{u}_{ig}, \mathbf{z}_i), \, i=1, \ldots, n \}$, where $\mathbf{z}_i = (z_{i1}, \ldots, z_{ig})'$, with $z_{ig}=1$ if $\mathbf{x}_i$ comes from the $g$-th population and $z_{ig}=0$ otherwise. Then, the complete-data likelihood function can be written in the form: \begin{equation}
L_c(\mbox{\boldmath $\theta$}; {\underset{\sim}{\bX}}) = \prod_{i=1}^n\prod_{g=1}^G\left[ \phi_d\left(\mathbf{x}_i|\mathbf{u}_i;\mbox{\boldmath $\mu$}_g,\boldsymbol{\Lambda}_g,\boldsymbol{\Psi}_g\right)\phi_q(\mathbf{u}_{ig}) \pi_g \right]^{z_{ig}}. \label{eq:complete-data log-likelihood gen} \end{equation} In particular, due to the factor structure of the model, see \citet{Meng:VanD:TheE:1997}, we have to consider the alternating expectation-conditional maximization (AECM) algorithm. Such a procedure is an extension of the EM algorithm that uses different specifications of missing data at each stage. The idea is to partition $\mbox{\boldmath $\theta$}=(\mbox{\boldmath $\theta$}'_1, \mbox{\boldmath $\theta$}'_2)'$ in such a way that $L(\mbox{\boldmath $\theta$}; {\underset{\sim}{\bX}})$ is easy to maximize for $\mbox{\boldmath $\theta$}_1$ given $\mbox{\boldmath $\theta$}_2$ and vice versa. Then, we can iterate between these two conditional maximizations until convergence. In this case $\mbox{\boldmath $\theta$}_1=\{ \pi_g, \mbox{\boldmath $\mu$}_g, \, g=1, \ldots, G \}$ where the missing data are the unobserved group labels $\underset{\widetilde{}}{\bZ}=(\mathbf{z}'_1, \ldots, \mathbf{z}'_n)$, and the second part of the parameters vector is given by $\mbox{\boldmath $\theta$}_2=\{ (\mbox{\boldmath $\Lambda$}_g, \mbox{\boldmath $\Psi$}_g), \, g=1, \ldots, G \}$ where the missing data are the group labels $\mathbf{Z}$ and the unobserved latent factors $\mathbf{U}=(\mathbf{U}_{11}, \ldots, \mathbf{U}_{nG})$. Hence, the application of the AECM algorithm consists of two cycles, and there is one E-step and one CM-step alternatively considering $\mbox{\boldmath $\theta$}_1$ and $\mbox{\boldmath $\theta$}_2$ in each pair of cycles.
\paragraph{First Cycle.} Here it is $\mbox{\boldmath $\theta$}_1=\{ \pi_g, \mbox{\boldmath $\mu$}_g, \, g=1, \ldots, G \}$ where the missing data are the unobserved group labels $\mathbf{Z}=(\mathbf{z}'_1, \ldots, \mathbf{z}'_n)$. The complete data likelihood is \begin{align} L_{c1}(\mbox{\boldmath $\theta$}_1) & =\prod_{i=1}^n\prod_{g=1}^G\left[ \phi_d\left(\boldsymbol{\mathbf{x}}_i;\mbox{\boldmath $\mu$}_g,\mbox{\boldmath $\Sigma$}_g\right)\pi_g \right]^{z_{ig}} . \label{eq:complete-data likelihood1} \end{align}
The E-step on the first cycle on the $(k+1)$-th iteration requires the calculation of
$Q_1(\mbox{\boldmath $\theta$}_1; \mbox{\boldmath $\theta$}^{(k)}) = \mathbb{E}_{\mbox{\boldmath $\theta$}^{(k)}} \{\mathcal{L}_{c} (\mbox{\boldmath $\theta$}_1) | {\underset{\sim}{\bX}} \}$ which is the expected complete-data log-likelihood given the data ${\underset{\sim}{\bX}}$ and using the current estimate $\mbox{\boldmath $\theta$}^{(k)}$ for $\mbox{\boldmath $\theta$}$. In practice it requires calculating
$\mathbb{E}_{\mbox{\boldmath $\theta$}^{(k)}} \{Z_{ig}| {\underset{\sim}{\bX}} \}$ and usual computations show that this step is achieved by replacing each $z_{ig}$ by its current conditional expectation given the observed data $\mathbf{x}_i$, that is, we replace $z_{ig}$ by $z_{ig}^{(k+1)}$, where \begin{equation}
z_{ig}^{(k+1)} =\frac{\phi_d\left(\mathbf{x}_i|\mbox{\boldmath $\mu$}_g^{(k)},\mbox{\boldmath $\Lambda$}_g^{(k)},\mbox{\boldmath $\Psi$}^{(k)}_g\right) \pi_g^{(k)} }{\sum_{j=1}^G \phi_d \left(\mathbf{x}_i|\mbox{\boldmath $\mu$}_j^{(k)},\mbox{\boldmath $\Lambda$}_j^{(k)},\mbox{\boldmath $\Psi$}_j^{(k)}\right) \pi_j^{(k)}} . \end{equation}
On the M-step, the maximization of this complete-data log-likelihood yields \begin{align*} \pi_g^{(k+1)} & =\frac{\sum_{i=1}^n z_{ig}^{(k+1)}}{n} \\ \mbox{\boldmath $\mu$}_g^{(k+1)} &=\frac{1}{n_g} \sum_{i=1}^n z_{ig}^{(k+1)} \mathbf{x}_i \end{align*} where $n_g^{(k+1)}=\sum_{i=1}^n z_{ig}^{(k+1)}$. According to notation in \citet{McLa:Peel:fini:2000}, we set $\mbox{\boldmath $\theta$}^{(k+1/2)}=(\mbox{\boldmath $\theta$}_1^{(k+1)'}, \mbox{\boldmath $\theta$}_2^{(k)'})'$.
\paragraph{Second Cycle.} Here it is $\mbox{\boldmath $\theta$}_2=\{ \mbox{\boldmath $\Sigma$}_g, \, g=1, \ldots, G \}= \{ (\mbox{\boldmath $\Lambda$}_g$, $\mbox{\boldmath $\Psi$}_g), \, g=1, \ldots, G \}$ where the missing data are the unobserved group labels $\mathbf{Z}$ and the latent factors $\mathbf{U}$. Therefore, the complete data likelihood is \begin{align}
L_{c2}(\mbox{\boldmath $\theta$}_2) & =\prod_{i=1}^n\prod_{g=1}^G\left[\phi_d\left(\mathbf{x}_i|\mathbf{u}_{ig};\mbox{\boldmath $\mu$}_g^{(k+1)},\mbox{\boldmath $\Sigma$}_g\right)\phi_q\left(\mathbf{u}_{ig}\right) \pi_g^{(k+1)} \right]^{z_{ig}} \nonumber \\
& = \prod_{i=1}^n\prod_{g=1}^G\left[\phi_d\left(\mathbf{x}_i|\mathbf{u}_{ig};\mbox{\boldmath $\mu$}_g^{(k+1)},\mbox{\boldmath $\Lambda$}_g,\mbox{\boldmath $\Psi$}_g\right) \phi_q\left(\mathbf{u}_{ig}\right) \pi_g^{(k+1)} \right]^{z_{ig}} , \label{eq:complete-data likelihood2} \end{align} where \begin{align*}
\phi_d\left(\mathbf{x}_i|\mathbf{u}_{ig};\mbox{\boldmath $\mu$}_g^{(k+1)},\mbox{\boldmath $\Lambda$}_g,\mbox{\boldmath $\Psi$}_g\right) &= \frac{1}{|2 \pi \mbox{\boldmath $\Psi$}_g|^{1/2}} \exp \left\{ - \frac{1}{2} (\mathbf{x}_i - \mbox{\boldmath $\mu$}_g^{(k+1)}-\mbox{\boldmath $\Lambda$}_g \mathbf{u}_{ig})' \mbox{\boldmath $\Psi$}_g^{-1} (\mathbf{x}_i - \mbox{\boldmath $\mu$}_g^{(k+1)}- \mbox{\boldmath $\Lambda$}_g \mathbf{u}_{ig}) \right\}. \\ \phi_q (\mathbf{u}_{ig}) & = \frac{1}{(2 \pi)^{q/2}} \exp \left\{ - \frac{1}{2}\mathbf{u}_{ig}' \mathbf{u}_{ig} \right\}. \end{align*} Now the complete data log-likelihood is given by \begin{align}
\mathcal{L}_{c2} (\mbox{\boldmath $\theta$}_2)& =-\frac{nd}{2} \ln 2 \pi +\sum_{g=1}^G n_g \ln \pi_g +\frac{1}{2}\sum_{i=1}^n \sum_{g=1}^G z_{ig} \ln | \mbox{\boldmath $\Psi$}^{-1}_g| \nonumber \\
& \quad -\frac{1}{2} \sum_{i=1}^n\sum_{g=1}^G z_{ig}\mathrm{tr}\left\{ (\mathbf{x}_i - \mbox{\boldmath $\mu$}_g^{(k+1)}- \mbox{\boldmath $\Lambda$}_g \mathbf{u}_{ig}) (\mathbf{x}_i - \mbox{\boldmath $\mu$}_g^{(k+1)}- \mbox{\boldmath $\Lambda$}_g \mathbf{u}_{ig})' \mbox{\boldmath $\Psi$}_g^{-1} \right\}. \label{L2(theta2)} \end{align}
Some algebra leads to the following estimate of $\{ (\mbox{\boldmath $\Lambda$}_g$, $\mbox{\boldmath $\Psi$}_g), \, g=1, \ldots, G \}$:
\begin{align*} \hat{\mbox{\boldmath $\Lambda$}}_g & = \mathbf{S}^{(k+1)}_g \mbox{\boldmath $\gamma$}^{(k)'}_g [\mbox{\boldmath $\Theta$}_g^{(k)}]^{-1} \\
\hat{\mbox{\boldmath $\Psi$}}_g & =\text{diag}\left\{ \mathbf{S}^{(k+1)}_g- \hat{\mbox{\boldmath $\Lambda$}}_g \mbox{\boldmath $\gamma$}_g^{(k)} \mathbf{S}^{(k+1)}_g\right\} \, . \end{align*} where we set \begin{align*} \mathbf{S}_g^{(k+1)} & =(1/n_g^{(k+1)})\sum_{i=1}^n z_{ig}^{(k+1)}(\mathbf{x}_i - \mbox{\boldmath $\mu$}_g^{(k+1)}) (\mathbf{x}_i - \mbox{\boldmath $\mu$}_g^{(k+1)})' \\ \mbox{\boldmath $\gamma$}^{(k)}_g & =\mbox{\boldmath $\Lambda$}^{(k)'}_g (\mbox{\boldmath $\Lambda$}^{(k)}_g\mbox{\boldmath $\Lambda$}^{(k)'}_g+\mbox{\boldmath $\Psi$}^{(k)}_g)^{-1} \\ \mbox{\boldmath $\Theta$}^{(k)}_{ig} & =\mathbf{I}_q-\mbox{\boldmath $\gamma$}^{(k)}_g \mbox{\boldmath $\Lambda$}^{(k)}_g +\mbox{\boldmath $\gamma$}^{(k)}_g (\mathbf{x}_i-\mbox{\boldmath $\mu$}_g)(\mathbf{x}_i-\mbox{\boldmath $\mu$}_g)' \mbox{\boldmath $\gamma$}^{(k)'}_g . \end{align*} Hence the maximum likelihood estimates $\hat{\mbox{\boldmath $\Lambda$}}_g$ and $\hat{\mbox{\boldmath $\Psi$}}_g$ for $\mbox{\boldmath $\Lambda$}$ and $\mbox{\boldmath $\Psi$}$ can be obtained by alternatively computing the update estimates $\mbox{\boldmath $\Lambda$}_g^{+} $ and $\mbox{\boldmath $\Psi$}^{+}_g$, by \begin{align} \mbox{\boldmath $\Lambda$}_g^{+} & = \mathbf{S}^{(k+1)}_g \mbox{\boldmath $\gamma$}^{(k)'}_g [\mbox{\boldmath $\Theta$}_g^{(k)}]^{-1} \qquad \mbox{and} \qquad \mbox{\boldmath $\Psi$}^{+}_g =\text{diag}\left\{ \mathbf{S}^{(k+1)}_g- \mbox{\boldmath $\Lambda$}_g^{+} \mbox{\boldmath $\gamma$}^{(k)}_g\mathbf{S}^{(k+1)}_g\right\} \, , \label{LambdaAndPsi} \end{align} and, from the latter, evaluating the update estimates $\mbox{\boldmath $\gamma$}_g^{+}$ and $\Theta_g^{+}$ by \begin{align}
\mbox{\boldmath $\gamma$}_g^{+} = \mbox{\boldmath $\Lambda$}_g^{'} ( \mbox{\boldmath $\Lambda$}_g \mbox{\boldmath $\Lambda$}_g^{'} +\mbox{\boldmath $\Psi$}_g)^{-1} \qquad \mbox{and} \qquad \mbox{\boldmath $\Theta$}_g^{+}=\mathbf{I}_q-\mbox{\boldmath $\gamma$}_g \mbox{\boldmath $\Lambda$}_g+ \mbox{\boldmath $\gamma$}_g \mathbf{S}_g^{(k+1)} \mbox{\boldmath $\gamma$}^{'}_g, \label{gammaAndTheta} \end{align} iterating these two steps until convergence on $\hat{\mbox{\boldmath $\Lambda$}}_g$ and $\hat{\mbox{\boldmath $\Psi$}}_g$, so giving ${\mbox{\boldmath $\Lambda$}}^{(k+1)}_g$ and ${\mbox{\boldmath $\Psi$}}^{(k+1)}_g$ .
In summary, the procedure can be described as follows. For a given initial random clustering $\mathbf{z}^{(0)}$, on the $(k+1)-th$ iteration, the algorithm carries out the following steps, for $g=1, \ldots, G$: \begin{enumerate} \item Compute $z_{ig}^{(k+1)}$ and consequently obtain $\pi^{(k+1)}_g$, $\mbox{\boldmath $\mu$}^{(k+1)}_g$, $n_g^{(k+1)}$ and $\mathbf{S}^{(k+1)}_g$; \item Set a starting value for $\mbox{\boldmath $\Lambda$}_g $ and $\mbox{\boldmath $\Psi$}_g $ from $\mathbf{S}^{(k+1)}_g$; \item Repeat the following steps, until convergence on $\hat{\mbox{\boldmath $\Lambda$}}_g$ and $\hat{\mbox{\boldmath $\Psi$}}_g$: \begin{enumerate} \item Compute $\mbox{\boldmath $\gamma$}_g^{+}$ and $\mbox{\boldmath $\Theta$}_g^{+}$ from (\ref{gammaAndTheta}); \item Set $\mbox{\boldmath $\gamma$}_g \leftarrow \mbox{\boldmath $\gamma$}^{+}_g$ and $\mbox{\boldmath $\Theta$}_g \leftarrow \mbox{\boldmath $\Theta$}^{+}_g$; \item Compute $\mbox{\boldmath $\Lambda$}_g^{+} \leftarrow \mathbf{S}^{(k+1)}_g \mbox{\boldmath $\gamma$}^{'}_g (\mbox{\boldmath $\Theta$}_g^{-1})$ and $\mbox{\boldmath $\Psi$}^{+}_g \leftarrow \text{diag}\left\{ \mathbf{S}^{(k+1)}_g- \mbox{\boldmath $\Lambda$}_g^{+} \mbox{\boldmath $\gamma$}^{}_g \mathbf{S}^{(k+1)}_g\right\}$; \item Set $\mbox{\boldmath $\Lambda$}_g \leftarrow \mbox{\boldmath $\Lambda$}_g^{+}$ and $\mbox{\boldmath $\Psi$}_g \leftarrow \mbox{\boldmath $\Psi$}_g^+$;
\end{enumerate} \end{enumerate}
To completely describe the algorithm, here we give more details on how to specify the starting values for $\mbox{\boldmath $\Lambda$}_g $ and $\mbox{\boldmath $\Psi$}_g $ from $\mathbf{S}^{(k+1)}_g$, as it is needed in Step 2.
Starting from the eigen-decomposition of $\mathbf{S}^{(k+1)}_g$, say $\mathbf{S}^{(k+1)}_g=\mathbf{A}_g \mathbf{B}_g \mathbf{A}_g'$, computed on the basis of $z^{(k+1)}_{ig}$, the main idea is that $\mbox{\boldmath $\Lambda$}_g$ has to synthesize the ``more important'' relations between the $d$ observed features, see \cite{McNi:Murp:Pars:2008}. Then, looking at the equality $\mbox{\boldmath $\Sigma$}_g=\mbox{\boldmath $\Lambda$}_g\mbox{\boldmath $\Lambda$}_g'+\mbox{\boldmath $\Psi$}_g$, the initial values of $\mbox{\boldmath $\Lambda$}_g$ were set as \begin{equation} \lambda_{ij}=\sqrt{d_j}a_{ij} \end{equation} where $d_{j}$ is the $j$th largest eigenvalue of $\mathbf{S}^{(k+1)}_g$ and $a_{ij}$ is the $i$th element of the corresponding eigenvector $\mathbf{a}_j$ (the $j$th column in $A_g$), for $i \in\{1,2,\ldots,d\}$ and $j \in\{1,2,\ldots,q\}$. Finally the $\mbox{\boldmath $\Psi$}_g$ matrices can be initialized by the position $\mbox{\boldmath $\Psi$}_g=\mathrm{diag} \{\mathbf{S}^{(k+1)}_g-\mbox{\boldmath $\Lambda$}_g\mbox{\boldmath $\Lambda$}_g'\}$.
\section{Likelihood maximization in constrained parametric spaces} \label{ConstrainedML}
Properties of maximum likelihood estimation for normal mixture models have been deeply investigated. It is well known that $\mathcal{L}(\theta)$ is unbounded on $\mbox{\boldmath $\Theta$}$ and may present many local maxima. Day (1969) was perhaps the first to note that any small number of sample points, grouped sufficiently close together, can give rise to spurious maximizers, corresponding to parameter points with greatly differing component standard deviations. To overcome this issue and to prevent $\mathcal{L}(\theta)$ from singularities, \citet{Hath:Acon:1985} proposed a constrained maximum likelihood formulation for mixtures of univariate normal distributions, suggesting a natural extension to the multivariate case. Let $c \in (0,1]$, then the following constraints \begin{equation} \min_{1 \leq h \neq j \leq k} \lambda (\mbox{\boldmath $\Sigma$}_h \mbox{\boldmath $\Sigma$}_j^{-1}) \geq c \label{constlambda} \end{equation} on the eigenvalues $\lambda$ of $\mbox{\boldmath $\Sigma$}_h \mbox{\boldmath $\Sigma$}_j^{-1}$ lead to properly defined, scale-equivariant, consistent ML-estimators for the mixture-of-normal case, see Hennig (2004). It is easy to show that a sufficient condition for (\ref{constlambda}) is \begin{equation} a \leq \lambda_{ig} \leq b , \qquad i=1,\ldots,d; \qquad g= 1,\ldots,G \label{alambdab} \end{equation} where $\lambda_{ig}$ denotes the $i$th eigenvalue of $\mbox{\boldmath $\Sigma$}_g$ i.e. $\lambda_{ig}=\lambda_i(\mbox{\boldmath $\Sigma$}_g)$, and for $a,b \in \mathbb{R}^{+}$ such that $a/b \geq c$, see \citet{Ingr:2004}. Differently from (\ref{constlambda}), condition (\ref{alambdab}) can be easily implemented in any optimization algorithm. 
Let us consider the constrained parameter space $\mbox{\boldmath $\Theta$}_c$ of $\mbox{\boldmath $\Theta$}$: \begin{align} \mbox{\boldmath $\Theta$}_c =& \{ (\pi_1, \ldots, \pi_G, \mbox{\boldmath $\mu$}_1, \ldots, \mbox{\boldmath $\mu$}_G, \mbox{\boldmath $\Sigma$}_1, \ldots, \mbox{\boldmath $\Sigma$}_G) \in \mathbb{R}^{k[1+d+(d^2+d)/2]} \: : \: \nonumber \\& \pi_g \geq 0, \: \pi_1+\cdots+\pi_G =1, \: a \leq \lambda_{ig} \leq b, \quad g=1, \ldots, G \; \; \; i=1, \ldots, d\}. \label{Psi_c} \end{align} Due to the structure of the covariance matrix $\mbox{\boldmath $\Sigma$}_g$ given in \eqref{Sigmag}, bound in \eqref{alambdab} yields \begin{equation}
\lambda_{\rm min} (\mbox{\boldmath $\Lambda$}_g \mbox{\boldmath $\Lambda$}'_g+\mbox{\boldmath $\Psi$}_g) \geq a \qquad \mbox{and} \qquad \lambda_{\rm max} (\mbox{\boldmath $\Lambda$}_g \mbox{\boldmath $\Lambda$}'_g+\mbox{\boldmath $\Psi$}_g) \leq b , \qquad g=1,\ldots,G \label{alambdabSigma} \end{equation} where $\lambda_{\rm min} (\cdot)$ and $\lambda_{\rm max} (\cdot)$ denote the smallest and the largest eigenvalue of $(\cdot)$ respectively. Since $\mbox{\boldmath $\Lambda$}_g \mbox{\boldmath $\Lambda$}'_g$ is symmetric positive semidefinite and $\mbox{\boldmath $\Psi$}_g$ is symmetric positive definite, it results: \begin{align} \lambda_{\rm min} (\mbox{\boldmath $\Lambda$}_g \mbox{\boldmath $\Lambda$}'_g+\mbox{\boldmath $\Psi$}_g) & \geq \lambda_{\rm min} (\mbox{\boldmath $\Lambda$}_g \mbox{\boldmath $\Lambda$}'_g) + \lambda_{\rm min} (\mbox{\boldmath $\Psi$}_g) \geq a \label{lminSigma}\\ \lambda_{\rm max} (\mbox{\boldmath $\Lambda$}_g \mbox{\boldmath $\Lambda$}'_g+\mbox{\boldmath $\Psi$}_g) & \leq \lambda_{\rm max} (\mbox{\boldmath $\Lambda$}_g \mbox{\boldmath $\Lambda$}'_g) + \lambda_{\rm max} (\mbox{\boldmath $\Psi$}_g) \leq b \, , \label{lmaxSigma}
\end{align} see \citet{Lutk:Matrix:1996}.
Moreover, being $\mbox{\boldmath $\Psi$}_g$ a diagonal matrix, then \begin{alignat}{2} \lambda_{\rm min} (\mbox{\boldmath $\Psi$}_g) & = \min_i \psi_{ig} & \qquad \mbox{and} \qquad \lambda_{\rm max} (\mbox{\boldmath $\Psi$}_g) &= \max_i \psi_{ig}, \label{lminmaxPsi} \end{alignat} where $\psi_{ig}$ denotes the $i$-th diagonal entry of the matrix $\mbox{\boldmath $\Psi$}_{g}$.
Concerning the square $d \times d$ matrix $\mbox{\boldmath $\Lambda$}_g \mbox{\boldmath $\Lambda$}'_g$ ($g=1,\ldots, G$), we can get its eigenvalue decomposition, i.e. we can find $\mbox{\boldmath $\Gamma$}_g$ and $\mbox{\boldmath $\Delta$}_g$ such that \begin{equation} \mbox{\boldmath $\Lambda$}_g \mbox{\boldmath $\Lambda$}'_g =\mbox{\boldmath $\Gamma$}_g \mbox{\boldmath $\Delta$}_g \mbox{\boldmath $\Gamma$}'_g \label{decompSigma1} \end{equation} where $\mbox{\boldmath $\Gamma$}_g$ is the orthogonal matrix whose columns are the eigenvectors of $\mbox{\boldmath $\Lambda$}_g \mbox{\boldmath $\Lambda$}'_g$ and $\mbox{\boldmath $\Delta$}_g=\mathrm{diag}(\delta_{1g}, \ldots, \delta_{dg})$ is the diagonal matrix of the eigenvalues of $\mbox{\boldmath $\Lambda$}_g \mbox{\boldmath $\Lambda$}'_g$, sorted in non-increasing order, i.e. $\delta_{1g}\geq \delta_{2g} \geq \ldots \geq \delta_{qg} \geq 0$, and $\delta_{(q+1)g} = \cdots = \delta_{dg}=0$.
Now, we can apply the singular value decomposition to the $d \times q$ rectangular matrix $\mbox{\boldmath $\Lambda$}_g$, so giving $\mbox{\boldmath $\Lambda$}_g = \mathbf{U}_g \mathbf{D}_g \mathbf{V}'_g$, where $\mathbf{U}_g$ is a $d \times d$ unitary matrix (i.e., such that $\mathbf{U}'_g \mathbf{U}_g = \mathbf{I}_d$) and $\mathbf{D}_g$ is a $d \times q$ rectangular diagonal matrix with $q$ nonnegative real numbers on the diagonal, known as \textit{singular values}, and $\mathbf{V}_g$ is a $q \times q$ unitary matrix. The $d$ columns of $\mathbf{U}$ and the $q$ columns of $\mathbf{V}$ are called the \textit{left singular vectors} and \textit{right singular vectors} of $\mbox{\boldmath $\Lambda$}_g$, respectively. Now we have that \begin{equation} \mbox{\boldmath $\Lambda$}_g \mbox{\boldmath $\Lambda$}'_g = (\mathbf{U}_g \mathbf{D}_g \mathbf{V}'_g)(\mathbf{V}_g \mathbf{D}'_g \mathbf{U}'_g) = \mathbf{U}_g \mathbf{D}_g \mathbf{I}_q \mathbf{D}'_g \mathbf{U}'_g = \mathbf{U}_g \mathbf{D}_g \mathbf{D}'_g \mathbf{U}'_g \label{decompSigma2} \end{equation} and equating \eqref{decompSigma1} and \eqref{decompSigma2} we get $\mbox{\boldmath $\Gamma$}_g= \mathbf{U}_g$ and $\mbox{\boldmath $\Delta$}_g = \mathbf{D}_g\mathbf{D}'_g $, that is \begin{equation} \mathrm{diag}(\delta_{1g}, \ldots, \delta_{qg}) = \mathrm{diag}(d_{1g}^2, \ldots, d_{qg}^2) \, . \label{eqeigen} \end{equation} with $d_{1g} \geq d_{2g} \geq \cdots \geq d_{qg} \geq 0$. In particular, it is known that only the first $q$ values of $\mathbf{D}_g$ are non negative, and the remaining $d-q$ terms are null. Thus it results \begin{equation} \lambda_{\rm max} (\mbox{\boldmath $\Lambda$}_g \mbox{\boldmath $\Lambda$}'_g) = d_{1g}^2 .\label{lmaxLambda} \end{equation}
Suppose now that the upper bound $b$ is chosen in such a way that ${b \geq \psi_{ig}}$ for $g=1, \ldots, G$ and $i=1, \ldots, q$; then constraints \eqref{lminSigma} and \eqref{lmaxSigma} are satisfied when \begin{align} d_{ig}^2+ \psi_{ig} & \geq a \quad \quad \quad \quad &i=1,\ldots, d \label{lminLambdaPsia} \\ d_{ig} & \leq \sqrt{b - \psi_{ig}} & i=1,\ldots,q \label{lmaxPsib} \\ \psi_{ig} & \leq b \quad & i=q+1,\ldots,d \label{lmaxPsi} \end{align} for $g=1, \ldots, G$. In particular, we remark that condition \eqref{lminLambdaPsia} reduces to $\psi_{ig}\geq a$ for $i=(q+1), \ldots, d$.
\section{Constraints on the covariance matrix for factor analyzers}\label{sec:constraintsFA}
The two-fold (eigenvalue and singular value) decomposition of the $\mbox{\boldmath $\Lambda$}_g$ presented above, suggests how to modify the EM algorithm in such a way that the eigenvalues of the covariances $\mbox{\boldmath $\Sigma$}_g$ (for $g=1,\dots,G$) are confined into suitable ranges. To this aim we have to implement constraints \eqref{lminLambdaPsia}, \eqref{lmaxPsib} and \eqref{lmaxPsi}.
We proceed as follows on the $(k+1)$th iteration: \begin{enumerate} \item Decompose $\mbox{\boldmath $\Lambda$}_g$ according to the singular value decomposition as $\mbox{\boldmath $\Lambda$}_g = \mathbf{U}_g \mathbf{D}_g \mathbf{V}'_g$; \item Compute the squared singular values $(d_{1g}^2, \ldots, d_{qg}^2)$ of $\mbox{\boldmath $\Lambda$}_g$; \item Create a copy $\mathbf{D}^*_g$ of $\mathbf{D}_g^{(k+1)}$ and a copy $\mbox{\boldmath $\Psi$}^*_g$ of $\mbox{\boldmath $\Psi$}^{(k+1)}_g$; \item For $i=1$ to $q$, if $d_{ig}^2 + \psi_{ig}^{(k+1)} < a$, then if $a-\psi_{ig}^{(k+1)} \geq 0$ set $d_{ig} \leftarrow \sqrt{a-\psi_{ig}^{(k+1)}}$ else $d_{ig} \leftarrow \sqrt{a}$ into $\mathbf{D}^*_g$; \item For $i=q+1$ to $d$, if $\psi_{ig}^{(k+1)} < a$ then set $\psi_{ig}^{(k+1)} \leftarrow a$ into $\mbox{\boldmath $\Psi$}^*_g$; \item For $i=1$ to $q$, if $d_{ig}^2+\psi_{ig}^{(k+1)} > b $, then if $b-\psi_{ig}^{(k+1)} \geq 0$ set $d_{ig} \leftarrow \sqrt{b - \psi_{ig}^{(k+1)} }$ into $\mathbf{D}^*_g$ else $d_{ig} \leftarrow \sqrt{b}$ into $\mathbf{D}^*_g$; \item For $i=q+1$ to $d$, if $\psi_{ig}^{(k+1)} > b$ then set $\psi_{ig}^{(k+1)} \leftarrow b$ into $\mbox{\boldmath $\Psi$}^*_g$; \item Set $\mbox{\boldmath $\Lambda$}^{(k+1)}_g \leftarrow \mathbf{U}_g \mathbf{D}^*_g \mathbf{V}'_g$; \item Set $\mbox{\boldmath $\Psi$}^{(k+1)}_g \leftarrow \mbox{\boldmath $\Psi$}^*_g$. \item Stop. \end{enumerate} It is important to remark that the resulting EM algorithm is monotone, once the initial guess, say $\mbox{\boldmath $\Sigma$}_g^0$, satisfies the constraints. Further, as shown in the case of gaussian mixtures in \cite{Ingr:Rocc:2007}, the maximization of the complete loglikelihood is guaranteed. From the other side, it is apparent that the above recipes require some a priori information on the covariance structure of the mixture, throughout the bounds $a$ and $b$.
\section{Numerical studies}\label{sec:numerical results}
In this section we present numerical studies, based on both simulated and real data sets, in order to show the performance of the constrained EM algorithm with respect to unconstrained approaches.
\subsection{Artificial data}\label{sec:simdata} We consider here three mixtures of $G$ components of $d$-variate normal distributions, for different values of the parameter $\mbox{\boldmath $\theta$}_0$.
First, we point out that the point of local maximum corresponding to the consistent estimator $\mbox{\boldmath $\theta$}^*$, has been chosen to be the limit of the EM algorithm using the true parameter $\mbox{\boldmath $\theta$}_0$ as initial estimate, i.e. considering the true classification. In other words, we set $z_{ig}=1$ if the $i$th unit comes from the $g$th component and $z_{ig}=0$ otherwise. In the following, such estimate will be referred to as the right maximum of the likelihood function.
To begin with, we generate a set of 100 different random initial clusterings to initialize the algorithm at each run. To this aim, for a fixed number $G$ of components of the mixture, we draw each time a set of random starting values for the $z_{ig}$ from the multinomial distribution with values in $(1, 2, \ldots, G)$ with parameters $(p_1, p_2, \ldots, p_g) = (1/G, 1/G,\ldots, 1/G)$. Then we run a hundred times both the unconstrained and the constrained AECM algorithms (for different values of the constraints $a,b$) using the same set of initial clusterings in both cases.
The initial values for the elements of $\mbox{\boldmath $\Lambda$}_g$ and $\mbox{\boldmath $\Psi$}_g$ can be obtained as described at the end of Section \ref{sec:The AECM algorithm} from the eigen-decomposition of $\mathbf{S}_g$, and the algorithms run until convergence or until the fixed maximum number of iterations is reached.
The stopping criterion is based on the Aitken acceleration procedure \citep{Aitk:OnBe:1926}, to estimate the asymptotic maximum of the log-likelihood at each iteration of the EM algorithm (in such a way, a decision can be made regarding whether or not the algorithm reaches convergence; that is, whether or not the log-likelihood is sufficiently close to its estimated asymptotic value). The Aitken acceleration at iteration $k$ is given by \begin{displaymath}
a^{\left(k\right)}=\frac{\mathcal{L}^{\left(k+1\right)}-\mathcal{L}^{\left(k\right)}}{\mathcal{L}^{\left(k\right)}-\mathcal{L}^{\left(k-1\right)}}, \end{displaymath} where $\mathcal{L}^{\left(k+1\right)}$, $\mathcal{L}^{\left(k\right)}$, and $\mathcal{L}^{\left(k-1\right)}$ are the log-likelihood values from iterations $k+1$, $k$, and $k-1$, respectively. Then, the asymptotic estimate of the log-likelihood at iteration $k + 1$ is given by \begin{displaymath} \mathcal{L}_{\infty}^{\left(k+1\right)}=\mathcal{L}^{\left(k\right)}+\frac{1}{1-a^{\left(k\right)}}\left(\mathcal{L}^{\left(k+1\right)}-\mathcal{L}^{\left(k\right)}\right), \end{displaymath} see \citet{Bohn:Diet:Scha:Schl:Lind:TheD:1994}. In our analyses, the algorithms stop when $\mathcal{L}_{\infty}^{\left(k+1\right)}-\mathcal{L}^{\left(k\right)}<\epsilon$, with $\epsilon=0.001$. Programs have been written in the R language; the different cases and the obtained results are described below.
\paragraph{\textsc{Mixture 1: $G=3$, $d=6$, $q=2$, $N=150$.}} \ \\
The sample has been generated with weights $\mbox{\boldmath $\alpha$} = (0.3, 0.4, 0.3)'$ according to the following parameters:
\begin{align*}
\mbox{\boldmath $\mu$}_1 &= (0,0,0,0,0,0)' & \mbox{\boldmath $\Psi$}_1 &= \mbox{diag}(0.1,0.1,0.1,0.1,0.1,0.1) \\
\mbox{\boldmath $\mu$}_2 &= (5,5,5,5,5,5)' & \mbox{\boldmath $\Psi$}_2 &= \mbox{diag}(0.4,0.4,0.4,0.4,0.4,0.4) \\
\mbox{\boldmath $\mu$}_3 &= (10,10,10,10,10,10)' & \mbox{\boldmath $\Psi$}_3 &= \mbox{diag}(0.2,0.2,0.2,0.2,0.2,0.2)
\end{align*}
\\
\begin{gather*} \mbox{\boldmath $\Lambda$}_1 = \begin{pmatrix} 0.50 & 1.00 \\ 1.00 & 0.45 \\ 0.05 & -0.50 \\ -0.60 & 0.50 \\ 0.50 & 0.10 \\ 1.00 & -0.15 \end{pmatrix} \quad \quad \mbox{\boldmath $\Lambda$}_2 = \begin{pmatrix} 0.10 & 0.20 \\ 0.20 & 0.50 \\ 1.00 & -1.00 \\ -0.20 & 0.50 \\ 1.00 & 0.70 \\ 1.20 & -0.30 \end{pmatrix} \quad \quad
\mbox{\boldmath $\Lambda$}_3 = \begin{pmatrix} 0.10 & 0.20 \\ 0.20 & 0.00 \\ 1.00 & 0.00 \\ -0.20 & 0.00 \\ 1.00 & 0.00 \\ 0.00 & -1.30 \end{pmatrix}.\\ \end{gather*}
Hence, the covariance matrices $\mbox{\boldmath $\Sigma$}_g = \mbox{\boldmath $\Lambda$}_g \mbox{\boldmath $\Lambda$}'_g + \mbox{\boldmath $\Psi$}_g$ ($g=1, 2, 3$) have the following eigenvalues: \begin{align*} \lambda(\mbox{\boldmath $\Sigma$}_1) &=(3.17, 1.63, 0.10, 0.10, 0.10, 0.10)' \\ \lambda(\mbox{\boldmath $\Sigma$}_2) &=(4.18, 2.27,0.40, 0.40, 0.40,0.40)' \\ \lambda(\mbox{\boldmath $\Sigma$}_3) &=(2.29, 1.93, 0.20,0.20, 0.20, 0.20)', \end{align*} whose largest value is given by $\max_{i,g} \lambda_i(\mbox{\boldmath $\Sigma$}_g)=4.18 \, . $
First we run the unconstrained algorithm: the right solution has been attained in 24\% of cases, without incurring in singularities. Summary statistics (minimum, first quartile $Q_1$, median $Q_2$, third quartile $Q_3$ and maximum) about the distribution of the misclassification error over the 100 runs are reported in Table \ref{tab:MisclassCase1}. Due to the choice on parameters, we rarely expect too small eigenvalues in the estimated covariance matrices: we set $a=0.01$ to protect from them; conversely, as local maxima are quite often due to large estimated eigenvalues, we consider setting also a constraint from above, taking into account some values for $b$, the upper bound. To compare how the choice of the bounds $a$ and $b$ influences the performance of the constrained EM, we experimented with different pairs of values, and in Table \ref{tab:case1} we report the more interesting cases. Further results are reported in Figure \ref{fig:BoxPlotMixture1}, which provides the boxplots of the distribution of the misclassification errors obtained in the sequence of $100$ runs, showing the poor performance of the unconstrained algorithm compared with the good behaviour of its constrained version. For all values of the upper bound $b$, the third quartile of the misclassification error is steadily equal to $0$. Indeed, for $b=6,10$ and 15 we had no misclassification error, while we observed very low and rare misclassification errors only for $b=20$ and $b=25$ (respectively 3 and 11 not null values, over 100 runs). Moreover, the robustness of the results with respect to the choice of the upper constraint is apparent. \begin{table}[h] \begin{center} \caption{Mixture 1: Summary statistics of the distribution of the Misclassification Error over 100 runs of the unconstrained EM algorithm}\label{tab:MisclassCase1} \begin{tabular}{ccccc} \hline
\multicolumn{5}{c}{Misclassification Error} \\
min & $Q_1$& $Q_2$ & $Q_3$ & max \\ \hline
0\% & 17\% & 36\% & 45.3\% & 60\% \\
\hline\hline \end{tabular} \end{center} \end{table} \begin{table}[h] \begin{center} \caption{Mixture 1: Percentage of convergence to the right maximum of the constrained EM algorithms for $a=0.01$ and some values of the upper constraint $b$}\label{tab:case1} \begin{tabular}{cccccc} \hline
\multicolumn{6}{c}{$b$} \\ $+\infty$ & 6 & 10 & 15 & 20 & 25 \\ \hline 24\% & 100\% & 100\% & 100\% & 97\% & 89\% \\
\hline\hline \end{tabular} \end{center} \end{table} \begin{figure}
\caption{\rm Mixture 1: Boxplots of the misclassification error. From left to right, the first boxplot refers to the unconstrained algorithm, then the following boxplots correspond to the constrained algorithm, for $a=0.01$ and $b$ respectively set to the values $b=6,10,15,20,25$.}
\label{fig:BoxPlotMixture1}
\end{figure}
In Figure \ref{fig:Mixture_1A} we plot the classified data on the three factor spaces given by $\hat{\mathbf{U}}_{i1}, \hat{\mathbf{U}}_{i2}$ and $\hat{\mathbf{U}}_{i3}$ under the true maximum of the likelihood function (first row of plots), while in the second row we give the classification obtained according to a spurious maximum of the likelihood function.
We recall that an original data point $\mathbf{x}_i$ can be represented in $q$ dimensions by the posterior distribution of its associated $q$-dimensional latent factor $\mathbf{U}_i$. A convenient summary of this distribution is its mean. Hence we can portray the $\mathbf{x}_i$ in $q$-dimensional space by plotting the estimated conditional expectation of each $\mathbf{U}_i$ given $\mathbf{x}_i$, that is, the (estimated) posterior mean of the factor $\mathbf{U}_i$ (for $i=1,\ldots,n$). We have that \begin{align*}
\hat{\mathbf{u}}_i &= \mathbb{E}_{\hat{\mbox{\boldmath $\theta$}}} \{\mathbf{U}_i \,|\, \mathbf{x}_i \} =\hat{\mbox{\boldmath $\gamma$}}(\mathbf{x}_i-\overline{\mathbf{x}}) \end{align*} where $\mathbb{E}_{\hat{\mbox{\boldmath $\theta$}}} $ denotes expectation using the estimate $\hat{\mbox{\boldmath $\theta$}}$ instead of $\mbox{\boldmath $\theta$}$, and $\hat{\mbox{\boldmath $\gamma$}}$ has been computed following (\ref{gammaAndTheta}).\\ In the particular case of $q=2$, as in this simulation experiment, we can draw the data in a bidimensional plot in Figure \ref{fig:Mixture_1A}. From the two series of plots, it can be seen that the appropriate factor space allows for the right classification, while a spurious likelihood maximizer leads to unsuitable factor spaces, which in turn generate serious issues in classification. \begin{figure}
\caption{\rm Mixture 1: plot of the classified data on the three factor spaces, under the true maximum of the likelihood function (upper row) and, conversely, under a spurious maximum of the likelihood function (row below)}
\label{fig:Mixture_1A}
\end{figure}
\paragraph{\textsc{Mixture 2: $G=4$, $d=7$, $q=2$, $N=100$.}}\ \\
The sample has been generated with weights $\mbox{\boldmath $\alpha$} = (0.2,0.3,0.35,0.15)'$ according to the following parameters:
\begin{align*} \mbox{\boldmath $\mu$}_1 &= (0,0,0,0,0,0,0)' & \mbox{\boldmath $\Psi$}_1 &= \mbox{diag}(0.2,0.2,0.2,0.2,0.2,0.2,0.2)\\
\mbox{\boldmath $\mu$}_2 &= (5,5,5,5,5,5,5)' & \mbox{\boldmath $\Psi$}_2 &= \mbox{diag}(0.25,0.25,0.25,0.25,0.25,0.25,0.25) \\ \mbox{\boldmath $\mu$}_3 &= (10,10,10,10,10,10,10)' & \mbox{\boldmath $\Psi$}_3 &= \mbox{diag}(0.15,0.15,0.15,0.15,0.15,0.15,0.15) \\ \mbox{\boldmath $\mu$}_4 &= (15,15,15,15,15,15,15)' & \mbox{\boldmath $\Psi$}_4 &= \mbox{diag}(0.1,0.1,0.1,0.1,0.1,0.1,0.1) \end{align*}
\begin{gather*} \small \mbox{\boldmath $\Lambda$}_1 = \begin{pmatrix} 0.30 & 0.60 \\ 0.60 & 0.27 \\ 0.03 & -0.30 \\ -0.36 & 0.30 \\ 0.30& 0.06 \\ 0.60 &-0.09 \\ -0.63 & 1.50 \end{pmatrix} \, \mbox{\boldmath $\Lambda$}_2 = \begin{pmatrix} 0.08 &0.16 \\ 0.16 & 0.40 \\ 0.80 & -0.80\\ -0.16 & 0.40 \\ 0.80 & 0.56 \\ 0.96 &-0.24\\ 1.60 & -0.24\end{pmatrix} \, \mbox{\boldmath $\Lambda$}_3 = \begin{pmatrix} 0.07 & 0.14\\ 0.14 & 0.00 \\ 0.70 & 0.00 \\ -0.14 & 0.00 \\ 0.70 & 0.00 \\ 0.00 & -0.91\\ 0.70 & -0.70 \end{pmatrix}\, \mbox{\boldmath $\Lambda$}_4 = \begin{pmatrix} 0.04 & 0.08\\ 0.08 & 0.00 \\ 0.40 & 0.00 \\ -0.08 & 0.00 \\ 0.40 & 0.00 \\ 0.00 & -0.52\\ -0.40 & 0.80 \end{pmatrix}. \normalsize \end{gather*} The covariance matrices $\mbox{\boldmath $\Sigma$}_g = \mbox{\boldmath $\Lambda$}_g \mbox{\boldmath $\Lambda$}'_g + \mbox{\boldmath $\Psi$}_g$ ($g=1, 2, 3$) have respectively the following eigenvalues: \begin{align*}
\lambda(\mbox{\boldmath $\Sigma$}_1) &=(4.10, 1.14, 0.33, 0.21, 0.15, 0.09, 0.04)'\\ \lambda(\mbox{\boldmath $\Sigma$}_2) &=(7.62, 1.18, 0.34, 0.20, 0.18, 0.12, 0.05)' \\ \lambda(\mbox{\boldmath $\Sigma$}_3) &=(3.36, 1.36, 0.24, 0.17, 0.14, 0.10, 0.09)' \\ \lambda(\mbox{\boldmath $\Sigma$}_4) &=(2.08, 0.48, 0.11, 0.09, 0.07, 0.06, 0.02)', \end{align*} whose largest value is given by $\max_{i,g} \lambda_i(\mbox{\boldmath $\Sigma$}_g)=7.62 \,.$
First we run the unconstrained algorithm: the right solution has been attained only once, over 100 runs. Afterwards, we run the constrained algorithm for different values of the upper bound $b$ on the largest eigenvalue, while maintaining $a=0.01$, and using the same random starting values as before, to compare how the choice of the bounds influences the performance of the constrained EM. In Table \ref{tab:case2} we collected the percentage of times in which the algorithm attained the right maximum (where $b=+\infty$ denotes the unconstrained procedure), showing a great improvement with respect to the previous 1\% obtained through the unconstrained version. \begin{table}[h] \begin{center} \caption{Mixture 2: Percentage of convergence to the right maximum of the constrained EM algorithms for $a=0.01$ and different values for the upper bound $b$.}\label{tab:case2} \begin{tabular}{ccccc} \hline
\multicolumn{5}{c}{$b$} \\
$+\infty$ &10 & 15 & 20 & 25 \\ \hline
1\%&69\% & 60\% & 46\% & 33\% \\
\hline\hline \end{tabular} \end{center} \end{table} Further details are given in Figure \ref{fig:BoxPlotMixture2} which shows the boxplots of the distribution of the misclassification error in the $5$ sequences of $100$ runs, corresponding to the different values of the constraint $b$. Also in this case the unconstrained algorithm had a bad performance, with a median misclassification error of 0.53, while its constrained version, for $b=10$ and 15, in more than 50\% of the runs had no misclassification error. Furthermore, the unconstrained algorithm did not attain convergence in 4 out of the 100 runs. \begin{figure}
\caption{\rm Mixture 2: Boxplots of the misclassification error: from left to right, the first boxplot refers to the unconstrained algorithm, then the following boxplots correspond to the constrained algorithm, for $a=0.01$ and $b$ respectively set to the values $b=10,15,20,25$.}
\label{fig:BoxPlotMixture2}
\end{figure}
Finally, in Figure \ref{fig:Mixture_2A} we plot the classified data on the factor spaces, under the true maximum of the likelihood function, while in Figure \ref{fig:Mixture_2B} we give the classification in some wrong factor spaces, obtained according to a spurious maximum of the likelihood function. \begin{figure}
\caption{\rm Mixture 2: plot of the classified data on the factor spaces, under the ``right'' solution given by the algorithm}
\label{fig:Mixture_2A}
\end{figure} \begin{figure}
\caption{\rm Mixture 2: plot of the classified data on the factor spaces, giving an example of the wrong classification, which is obtained when the algorithm converges to a spurious maximum of the loglikelihood}
\label{fig:Mixture_2B}
\end{figure}
\paragraph{\textsc{Mixture 3: $G=2$, $d=3$, $q=2$, $N=100$.}} \ \\
The third study concerns an artificial dataset analysed in \citet{Baek:McLa:MixtFA:2010}. It has been generated with weights $\mbox{\boldmath $\alpha$} = (0.5,0.5)'$ according to the following parameters:
\begin{align*} \mbox{\boldmath $\mu$}_1 &= (0,0,0)' & \mbox{\boldmath $\mu$}_2 &= (2,2,6)'
\end{align*}
\begin{gather*} \mbox{\boldmath $\Sigma$}_1 = \begin{pmatrix} 4 & -1.8 & -1 \\ -1.8 & 2 & 0.9 \\ -1 & 0.9 & 2 \end{pmatrix} \quad \quad \mbox{\boldmath $\Sigma$}_2 = \begin{pmatrix} 4 & 1.8 & 0.8 \\ 1.8 & 2 & 0.5 \\ 0.8 & 0.5 & 2 \end{pmatrix} \end{gather*} The covariance matrices $\mbox{\boldmath $\Sigma$}_g $ ($g=1, 2$) have respectively the following eigenvalues: \begin{align*}
\lambda(\mbox{\boldmath $\Sigma$}_1) &=(5.55, 1.61, 0.84)'\\ \lambda(\mbox{\boldmath $\Sigma$}_2) &=(5.33, 1.73, 0.94)' \end{align*} We run the unconstrained algorithm and its constrained version with the choices of $a=0.01$ and $b=6,10,15,20,25$ as before, and also we compare our proposal to the Mixture of Common Factor Analyzers (MCFA) approach of \citet{Baek:McLa:MixtCT:2011}. The percentages of convergence to the right maximum for the seven different cases are reported in Table \ref{tab:case3}. We recall that MCFA requires a common pattern between covariance matrices. This model is greatly employed in the literature, for parsimony and to avoid potential singularities with small clusters. \begin{table}[h] \begin{center} \caption{Mixture 3: Percentage of convergence to the right maximum of the unconstrained EM, the constrained EM algorithm and the MCFA EM algorithm}\label{tab:case3} \begin{tabular}{c ccccc c} \hline unconstrained &\multicolumn{5}{c}{constrained} & MCFA \\ \cline{2-6}
&$b=6$ & $b= 10$ & $b=15$ & $b=20$ & $b=25$ \\
\hline
95\% & 100\% & 96\% & 96\%&97\% & 97\%& 36\%\\
\hline\hline \end{tabular} \end{center} \end{table} Over the 100 runs, the MCFA EM algorithm did not converge in 36 cases, while it always reached convergence in the other cases. With respect to the performance of the different algorithms in terms of misclassification error, the corresponding boxplots are shown in Figure \ref{fig:BoxPlotMcLch}. We also note that the misclassification error was steadily equal to 1\% over the 100 runs for the constrained algorithm with $b=6$, it was always equal to 1\% except 5 runs for the unconstrained algorithm, while in the case of MCFA we have $Q_1=Me=1\%$, but $Q_3= 34.5\%$ and $Max=50\%$. All these results show that, to attain good performance and robustness in estimation, our proposal works quite better. Furthermore, it allows for a more general solution in comparison to the rigid requirement of a common pattern between covariance matrices. As a consequence, also the log-likelihood of the model obtained by our constrained algorithm ($\mathcal{L}=-1032.218$) is fairly greater than the log-likelihood obtained in MCFA model ($\mathcal{L}=-1147.396$). \begin{figure}
\caption{\rm Mixture 3: Boxplots of the misclassification error. From left to right, the first boxplot refers to the unconstrained algorithm, then the following boxplots correspond to the constrained algorithm, for $a=0.01$ and $b$ respectively set to the values $b=6,10,15,20,25$, and finally to the MCFA algorithm.}
\label{fig:BoxPlotMcLch}
\end{figure}
\subsection{Real data}\label{sec:realdata}
\paragraph{\textsc{The Wine data set }}\ \\
Now we consider the wine data, proposed in \citet{Forina:etal:1986}, consisting of
$d=27$ chemical and physical properties of three different cultivars of Italian wine: Barolo, Grignolino and Barbera. This dataset is often used to test and compare the performance of various classification algorithms: among them, in \citet{McNi:Murp:Pars:2008} using parsimonious Gaussian mixture models and in \citet{Andr:McNi:Exte:2011} using parsimonious mixtures of multivariate $t$-factor analyzers.
Consider first the complete dataset, with $d=27$. We run the EM algorithm starting from the true classification, and using the maximum likelihood estimate $\hat{\mbox{\boldmath $\theta$}}$ we get 3 misclassified units (i.e. Misclassification Error $1.69\%$). Based on estimates of $\mbox{\boldmath $\Lambda$}_g$ and $\mbox{\boldmath $\Psi$}_g$, we get \begin{alignat*}{3} \lambda_{\rm max}(\hat{\mbox{\boldmath $\Lambda$}}_1) & = 28513 \qquad \lambda_{\rm max}(\hat{\mbox{\boldmath $\Lambda$}}_2) & = 6345 \qquad \lambda_{\rm max}(\hat{\mbox{\boldmath $\Lambda$}}_3) & = 9045 \\ \lambda_{\rm max}(\hat{\mbox{\boldmath $\Psi$}}_1) & = 27830 \qquad \lambda_{\rm max}(\hat{\mbox{\boldmath $\Psi$}}_2) & = 22532 \qquad \lambda_{\rm max}(\hat{\mbox{\boldmath $\Psi$}}_3) & = 21573 . \end{alignat*} With the aim at comparing our results with the above findings in the literature, we first scaled the original data, and applied the \textit{Pgmm} package \citep{McNi:Pgmm:2011}. Using a set of three random starts, the best model (BIC) for the given range of factors and components (from 1 up to 4) is a CUU model with $q$ = 4 and $G$ = 3. The CUU acronym stands for a MGFA with patterned covariance matrices, with a common (C) volume and unconstrained (U) shapes and orientations among the $G$=3 components in the mixture. Factors for the best model are of dimension $q$=4, with BIC= -11427.65. The obtained classification is given by Table \ref{tab:MisclWine}, showing only 2 misclassified units. \begin{table}[h] \begin{center} \caption{Pgmm package applied on the Wine dataset }\label{tab:MisclWine}
\begin{tabular}{c|ccc} \hline \multicolumn{4}{c}{Classification table } \\ \hline
& 1 & 2& 3\\
\hline
1& 59 &0 &0\\
2 & 1 &69 &1\\
3 & 0 &0 &48 \\
\hline \end{tabular} \end{center} \end{table}
Then we employed our approach, after scaling the data and using hierarchical clustering for initialization (as in the previously cited work). We obtained 5 misclassified units (which means a misclassification error of $2.8\%$). If we initialize the EM algorithm with the true belonging of units and considering still 4-dimensional factors, we obtain a perfect classification. We also obtain a better fit of the model to the data, assessed by a greater penalized likelihood value, namely BIC= -10814.68, due to the lighter constraints we are imposing here. Finally, we employed a mixture of $t$-factor analyzers, applying the teigen R-package \citep{Andr:McNi:2011}, on the scaled data. We considered patterned models, whose label is a sequence of four letters: each letter can be "C" or "U" or "I" denoting "Constrained to be equal", "Unconstrained" and "Isotropic" patterns on group covariances, and the four letters in the model label are respectively referred to volume, shape, orientation, and the degrees of freedom of the $t$-distribution. We got that the best fit (BIC =-11939.94) is given by CICC model with $G$=5, and this is somehow surprising as we always obtained 3 groups, by all the methods seen so far, in particular also in the proposed constrained EM approach for gaussian factors.
\paragraph{\textsc{The Flea Beetles data set }}\ \\
The flea beetles data were introduced by \citet{Lubi:DiTa:1962} and are available within the GGobi software, see \citet{Sway:Cook:GGob:2006}. Data were collected on $74$ specimens of flea beetle of the genus \textit{Chaetocnema}, which contains three species: {\em concinna}, {\em heptapotamica}, or {\em heikertingeri}. Measurements were collected on the width (in the fore-part and from the side) and angle of the aedeagus, on the width of the first and second joint of the tarsus, and on the width of the head between the external edges of the eyes of each beetle.
The goal of the original study was to form a classification rule to distinguish the three species. To this aim, we considered $q=2$ factors, according to the results of \citet{Andr:McNi:Exte:2011}, and we run firstly the unconstrained algorithms. Over the 100 runs, the unconstrained EM algorithm never reached the true solution, and summary statistics (minimum, first quartile $Q_1$, median $Q_2$, third quartile $Q_3$ and maximum) about the distribution of the misclassification error over the 100 runs are reported in Table \ref{tab:MisclassFlea}. \begin{table}[h] \begin{center} \caption{Flea Beetles data: Summary statistics of the distribution of the Misclassification Error over 100 runs of the unconstrained EM algorithm}\label{tab:MisclassFlea} \begin{tabular}{ccccc} \hline
\multicolumn{5}{c}{Misclassification Error} \\
min & $Q_1$& $Q_2$ & $Q_3$ & max \\ \hline
4.1\% & 28.0\% & 36.5\% & 41.9\% & 51.4\% \\
\hline\hline \end{tabular} \end{center} \end{table} The first results motivated us to run also the constrained EM algorithm, to see if it improves convergence to the right maximum and consequent classification. Taking into account that \[ \min_{i,g} \lambda_i(\mbox{\boldmath $\Sigma$}_g)=0.64 \qquad \max_{i,g} \lambda_i(\mbox{\boldmath $\Sigma$}_g)=191.55,\] we considered constrained estimation with \begin{center} lower bound $a$ set to 0.1, 0.05 or 0.5, $\quad \quad$ and $\quad \quad$ upper bound $b$ either 200 or 300.\\ \end{center}
Over the 100 runs, the constrained algorithm steadily improves all results, as it can be seen in Table \ref{tab:Flea}, which shows also that the best results can be obtained with the tightest constraints, i.e. $a=0.05, b= 200$. \begin{table}[h] \begin{center} \caption{Flea Beetles data: Percentage of convergence to the right maximum of the unconstrained EM and the constrained EM algorithm}\label{tab:Flea} \begin{tabular}{c cccc} \hline unconstrained &\multicolumn{4}{c}{constrained} \\ \cline{2-5}
&$a=0.1, b=200$ & $a=0.05, b= 200$ & $a=0.1, b=300$ & $a=0.5, b=300$ \\
\hline
0\% & 31\% & 34\% & 21\%&17\% \\
\hline\hline \end{tabular} \end{center} \end{table}
Figure \ref{fig:BoxPlotFlea} provides the boxplots of the distribution of the $100$ misclassification errors in the sequences of $100$ runs for both unconstrained and constrained algorithms. The impact of the lower bound $a$ on the estimation is critical, but it seems not to depend too much on its value (remember that its purpose is to protect against divergence of the algorithm) while the upper bound $b$ crucially drives the classification results, showing the best performance when it mimics the value of the largest eigenvalue of the $\mbox{\boldmath $\Sigma$}_g$'s. \begin{figure}
\caption{\rm Flea Beetles data: Boxplots of the misclassification error. From left to right, the first boxplot refers to the unconstrained algorithm, then the following boxplots correspond to the constrained algorithm, for each pair of bounds $(a,b)$ }
\label{fig:BoxPlotFlea}
\end{figure} As a final comment, it is worth mentioning that, when dealing with EM estimation based on random starts, authors in the literature usually give results in terms of ``best outcome over a small number of runs'', say 10 runs for instance. Therefore, we can conclude that the constrained algorithm (having a performance of $31\%$) provides the true solution and the perfect classification for the Flea Beetles dataset.
\section{Concluding remarks}\label{sec:concluding} Mixtures of factor analyzers are commonly used to explain data, in particular, correlation between variables in multivariate observations, allowing also for dimensionality reduction. For these models, as well as for gaussian mixtures, however, the loglikelihood function may present spurious maxima and singularities and this is due to specific patterns of the estimated covariance structure. It is known, from the literature, that a constrained formulation of the EM algorithm considerably reduces such drawbacks for gaussian mixtures. Motivated by these considerations, in this paper we introduced a constrained approach for gaussian mixtures of factor analyzers. In particular we implemented a methodology to maximize the likelihood function in a constrained parameter space, having no singularities and a reduced number of spurious local maxima. The performance of the newly introduced estimation approach has been shown and compared to the usual non-constrained one, as well as to the approach based on common factors. To this purpose we presented numerical simulations on synthetic samples and applications to real data sets widely employed in the literature. The results show that the problematic convergence of the EM, even more critical when dealing with factor analyzers, can be greatly improved.
\end{document} |
\begin{document}
\title{ A note on injective envelopes and von Neumann algebras} \vbox{\hfil {\Large\bf }\hfil} \author{U. Haag} \date{\today \\ \texttt{\hfil Contact:haag@mathematik.hu-berlin.de}} \maketitle \par\noindent \begin{abstract} The article exhibits certain relations between the injective envelope $\, I ( A )\, $ of a $C^*$-algebra $\, A\, $ and the von Neumann algebra generated by a representation $\,\lambda\, $ of $\, A\, $ provided it is injective. More specifically we show that there exist positive retractions $\, \sigma : \lambda ( A )'' \twoheadrightarrow I ( A )\, $ which are close to being $*$-homomorphisms in the sense that they are Jordan homomorphisms of the underlying Jordan algebras, and the kernel of $\,\sigma\, $ is given by a twosided ideal. \end{abstract} \par
\noindent If $\, A\, $ is a $C^*$-algebra its injective envelope is denoted $\, I ( A )\, $. \par
\par\noindent {\bf Theorem.}\quad (i) Let $\, \lambda : A\rightarrow \mathcal B ( \mathcal H )\, $ be a faithful $*$-representation of the unital $C^*$-algebra $\, A\, $ with strong closure $\, A'' \, $ acting on the separable Hilbert space $\,\mathcal H\, $. If $\, A''\, $ is injective then there is a canonical $*$-ideal $\, \mathcal J \vartriangleleft A''\, $ such that the kernel of every completely positive projection $\, \Phi : A'' \rightarrow A''\, $ extending the identity map of $\, A\, $ and with range completely isometric to the injective envelope of $\, A\, $ contains $\, \mathcal J\, $. The quotient $\, A'' / \mathcal J\, $ contains a canonical monotone complete subspace (complete sublattice) completely isometric with $\, I ( A )\, $ which is a Jordan subalgebra of the quotient algebra. If $\, J ( A )\, $ denotes the preimage of $\, I ( A )\, $ in $\, A''\, $ modulo $\, \mathcal J\, $ then $\, J ( A )\, $ is a Jordan subalgebra. The von Neumann algebra $\, A''\, $ is an injective envelope for $\, A\, $ if and only if $\,\mathcal J\, $ is trivial. Given an arbitrary completely isometric inclusion $\, \iota : I ( A ) \hookrightarrow A''\, $ extending the identity map of $\, A\, $ there exists a (nonunique) positive retraction $\, \sigma : A'' \twoheadrightarrow I ( A )\, $ for the inclusion $\,\iota\, $ which satisfies the Schwarz equality $\, \sigma ( x x^* ) = \sigma ( x ) \sigma ( x )^*\, $ for every normal element $\, x\in A''\, $, in particular $\, \sigma\, $ is a Jordan homomorphism, i.e. $\, \sigma ( x y + y x ) = \sigma ( x ) \sigma ( y ) + \sigma ( y ) \sigma ( x )\, $, and maps projections to projections and unitaries to unitaries. 
Moreover every selfadjoint element $\, x\in I ( A )^{sa}\, $ is the monotone decreasing limit of a net $\, ( y_{\mu } ) \searrow x\, $ such that each $\, y_{\mu }\in I ( A )^{sa}\, $ is the monotone increasing limit $\, ( a_{\mu\nu } ) \nearrow y_{\mu }\, $ of elements $\, a_{\mu\nu }\in A^{sa}\, $ (Up-Down-Theorem for $\, I ( A )\, $). \par
\noindent (ii) If $\,\mathfrak X\, $ is an operator system contained in an abelian $C^*$-algebra then each selfadjoint element $\, x\in I ( \mathfrak X )^{sa}\, $ is the least upper bound of the subset $\, \{ a_{\lambda }\in {\mathfrak X}^{sa}\,\vert\, a_{\lambda } \leq x \}\, $, and the greatest lower bound of the subset $\, \{ a_{\mu }\in {\mathfrak X}^{sa}\,\vert\, x\leq a_{\mu } \} \, $. In particular any monotone complete abelian $C^*$-algebra is injective. \par
\noindent {\it Proof.}\quad We begin with the last statement for an operator subsystem of an abelian $C^*$-algebra. The assumption implies that also $\, I ( \mathfrak X )\, $ is abelian. Let $\, x\in I ( \mathfrak X )^{sa}\, $ be given and $\, \{ a_{\lambda }\,\vert\, a_{\lambda }\in \mathfrak X\, ,\, a_{\lambda } \leq x \}\, $ be the subset of elements in $\, {\mathfrak X}^{sa}\, $ which are smaller than $\, x\, $ (or equal to if $\, x\in \mathfrak X\, $). Let $\, \overline x\, $ be the least upper bound of this set in $\, I ( \mathfrak X )\, $ which exists by monotone completeness of the injective envelope (Theorem 6.1.3 of \cite{E-R}). Then $\,\overline x \leq x\, $. Consider the subspaces $\, A_x = \mathfrak X + \mathbb C\, x \subseteq I ( \mathfrak X )\, $ and $\, A_{\overline x} = \mathfrak X + \mathbb C\, \overline x\subseteq I ( \mathfrak X )\, $ and define a map $\, \nu : A_x \rightarrow A_{\overline x}\, $ extending the identity map of $\, \mathfrak X\, $ in the obvious way by sending $\, x\, $ to $\,\overline x\, $. We claim that $\,\nu\, $ is positive (and hence completely contractive since unital with $\, I ( \mathfrak X )\, $ abelian). To see this let a positive element in $\, A_x\, $ be given which can be written as $$\, y\, =\, a\, +\, \gamma \, x \geq 0 $$ with $\, \gamma \in \mathbb R\, $ and $\, a\in {\mathfrak X}^{sa}\, $. Suppose that $\, \gamma < 0\, $. Then since $\,\overline x \leq x\, $ one has $\, \nu ( y ) \geq y \geq 0\, $. We may therefore assume $\, \gamma > 0\, $. Then $\, \nu ( y )\, $ is equal to the least upper bound of the set $\, \{ \gamma a_{\lambda } + a\,\vert\, a_{\lambda }\in \mathfrak X\, ,\, a_{\lambda } \leq x \}\, $ which equals the least upper bound of the set $\, \{ b_{\lambda }\in\mathfrak X\,\vert\, b_{\lambda } \leq a + \gamma x \}\, $, hence $\, \nu ( y ) \geq 0\, $ as desired. 
Extending $\,\nu\, $ to a completely positive map of $\, I ( \mathfrak X )\, $ into $\, I ( \mathfrak X )\, $ and using rigidity gives that $\, x = \overline x\, $. The case of $\, x\, $ being equal to the greatest lower bound of elements in $\, {\mathfrak X}^{sa}\, $ which are larger follows by symmetry. This proves the special Up/Down-property of $\, I ( \mathfrak X )\, $. If $\, A\, $ is a monotone complete $C^*$-algebra then by the foregoing argument each element $\, x\in I ( A )^{sa}\, $ is the least upper bound of all elements $\, \{ a\in A\,\vert\, a\leq x \}\, $. But this set also has a least upper bound $\, \overline x\, $ in $\, A\, $ with $\, \overline x \geq x\, $ whereas the set $\, \{ b\in A^{sa}\,\vert\, b\geq x \}\, $ has a greatest lower bound $\, \underline x\, $ in $\, A\, $, so that $\, \overline x \leq \underline x \leq x\leq \overline x\, $ and equality follows in each instance, i.e. $\, A = I ( A )\, $ so $\, A\, $ must be injective. This proves (ii). \par
\noindent Let $\, \lambda : A \rightarrow \mathcal B ( \mathcal H )\, $ be a representation of $\, A\, $ as in part (i) of the theorem with strong closure given by the injective von Neumann algebra $\, A''\, $. From injectivity there exists a completely positive projection $\, \Phi : A'' \rightarrow A''\, $ with range completely isometric to $\, I ( A )\, $. The map $\, \Phi\, $ factors as the product of a completely positive retraction $\, \rho : A'' \twoheadrightarrow I ( A )\, $ and a completely isometric inclusion $\, \iota : I ( A ) \hookrightarrow A''\, $. To prove the first statement choose a dense $*$-linear subspace $\, \mathfrak Y\, $ of $\, A''\, $ together with a basis $\, \{ c_{\omega } {\}}_{\omega\in\Omega }\, $ consisting of selfadjoint elements (of norm one say) which is assumed to be well ordered by a corresponding index set such that for each fixed $\, \omega \in \Omega\, $ the element $\, c_{\omega }\, $ is linear independent from the closure of the linear span of the set $\, \{ c_{\kappa }\,\vert\, \kappa < \omega \}\, $ and of norm one in the corresponding quotient space. The existence of such a dense subspace is guaranteed from Zorn's Lemma. Let $\, J_{\iota } ( A )\, $ denote the canonical subspace of $\, A''\, $ consisting of elements having a unique image under every positive projection $\, \Psi : A'' \rightarrow A''\, $ with range equal to $\, \Phi ( A'' )\, $. Let $\, J_+\, $ denote the subset $\,\{ b \}\, $ of elements of $\, (A'')^{sa}\, $ which are monotone increasing limits $\, ( a_{\nu } ) \nearrow b\, $ of some monotone increasing net of elements $\, a_{\nu }\in A^{sa}\, $, and $\, J_- = - J_+\, $. Then the subspace $\, J = J_+ + J_-\, $ is contained in $\, J_{\iota } ( A )\, $. 
Indeed if $\, \overline b\, $ is the supremum of the same net $\, ( a_{\nu } )\, $ in $\, I ( A )\, $ (which exists by monotone completeness of $\, I ( A )\, $), then $\, \overline b \geq b\, $ and $\, \Psi ( b ) = \iota ( \overline b )\, $ for every positive projection $\,\Psi\, $ with range $\, \Phi ( A'' )\, $. Also $\, J_{\pm }\, $ contains $\, A\, $ by choosing nets consisting of a single element. One may now assume that the subset $\,\{ c_{\omega } \} \cap J\, $ generates a dense subspace $\, \mathfrak J \subseteq J\, $ and exhausts the leading halfopen interval of all indices $\, 1 \leq \omega < {\omega }_0\, $ beginning with the first element and bounded above by a least index $\, {\omega }_0\, $. To save notation we let $\, {\omega }_0 = 0\, $ and start the numbering with this index omitting the indication of any basis element in $\,\mathfrak J\, $. For each positive element $\, b\in J_+\, ,\, b\geq 0\, $ one checks the following reverse Schwarz inequality $$ \rho ( b^2 )\> =\> \rho ( \sup\, \{ a^2\,\vert\, a \leq b\, ,\, a\in A \} )\> =\> \sup\, \{ a^2\,\vert\, a\leq b\, ,\, a\in A \}\> \leq\> \rho ( b )^2 $$ where on the right side the supremum in $\, I ( A )\, $ is understood. Since $\,\rho\, $ is completely positive the ordinary Schwarz inequality gives an equality. If $\, b\in J_+\, $ is arbitrary then $$ \rho ( b^2 )\> =\> \rho ( ( \Vert b \Vert + b )^2 ) - \Vert b {\Vert }^2 - 2 \Vert b \Vert \rho ( b )\> =\> \rho ( b )^2 \> . $$ For $\, a\, ,\, b\in J_+\, $ one gets $$ \rho ( a b + b a )\> =\> \rho ( ( a + b )^2 ) - \rho ( a^2 ) - \rho ( b^2 )\> =\> \rho ( a ) \rho ( b ) + \rho ( b ) \rho ( a ) $$ and hence $$ \rho ( ( a - b )^2 )\> =\> \rho ( a - b )^2 \> , $$ i.e. the Schwarz equality holds for arbitrary $\, b\in J\, $. One inductively constructs a positive projection $\, \overline\Psi : A'' \rightarrow A''\, $ over the identity map of $\, A\, $ depending on the chosen basis as follows. From the Up-Down Theorem (cf. 
\cite{Pe}, Theorem 2.4.3) every element $\, c\in (A'')^{sa}\, $ is the infimum of the set $\, \{ b\,\vert\, b \geq c\, ,\, b\in J_+ \}\, $ and correspondingly the supremum of the set $\,\{ a\,\vert\, a \leq c\, ,\, a \in J_- \}\, $ by the symmetry $\, c \mapsto - c\, $. For $\, c_0\, $ define $$ \overline\Psi ( c_0 )\> =\> \inf\, \{ \Phi ( b )\,\vert\, b \geq c_0\, ,\, b\in J \} \> $$ where the infimum in $\, \Phi ( A'' )\, $ is understood. On decomposing $\, b = b_+ + b_-\, $ with $\, b_+\in J_+\, ,\, b_-\in J_-\, $ and since $\, b_-\, $ is the infimum of elements in $\, A \subseteq J_+\, $ one has $$ \overline\Psi ( c_0 )\> =\> \inf\, \{ \Phi ( a )\,\vert\, a\geq c_0\, ,\, a\in J_+ \} \> . $$ One checks positivity of $\,\overline\Psi\, $. Let $\, x = a + \gamma c_0 \geq 0\, $ be given with $\, a\in J\, $. If $\, \gamma > 0\, $ then $\, \overline\Psi ( x ) = \inf\, \{ \Phi ( b )\,\vert\, b \geq x\, ,\, b\in J \} \geq 0\, $. On the other hand if $\,\gamma < 0\, $ then $\, \overline\Psi ( x ) = \sup\, \{ \Phi ( a )\,\vert\, 0 \leq a \leq x\, ,\, a \in J \} \geq 0\, $. To specify the restriction of $\, \overline\Psi\, $ to the domain $\, J_0 = J + \mathbb R\, c_0\, $ this map will also be denoted by $\, {\overline\Psi }_0\, $. Put $\, J_{0 , -} = J - {\mathbb R}_+\, c_0\, $. For the sucessor $\, c_1\, $ of $\, c_0\, $ one defines $$ \overline\Psi ( c_1 )\> =\> \inf\, \{ {\overline\Psi }_0 ( b )\,\vert\, b \geq c_1\, ,\, b\in J_0 \}\> =\> \inf\, \{ {\overline\Psi }_0 ( b )\, \vert\, b\geq c_1\, ,\, b\in J_{0 , -} \} \> . $$ Indeed if $\, b = b_- + \alpha c_0\, $ with $\, \alpha \geq 0\, $ then $\, {\overline\Psi }_0 ( \alpha c_0 ) = \inf\, \{ \alpha \Phi ( a )\,\vert\, a\geq c_0\, ,\, a\in J_+ \}\, $ so one may reduce to considering only elements in $\, J_{0 , -}\, $. Then one similarly checks positivity of the induced extended linear map $\, {\overline\Psi }_1\, $ with domain $\, J_1 = J + \mathbb R\, c_0 + \mathbb R\, c_1\, $. One proceeds by induction. 
Assume that given $\,\omega\in\Omega\, $ one has already constructed the positive map $\, {\overline\Psi }_{\kappa < \omega }\, $ with domain $\, J_{\kappa < \omega }\, $ generated by $\, J\, $ and all basis elements $\, \{ c_{\kappa }\,\vert\, \kappa < \omega \}\, $. Let $\, J_{\kappa < \omega , - }\, $ be the subcone generated by $\, J\, $ and arbitrary linear combinations $\, \sum_{\kappa < \omega }\, {\alpha }_{\kappa } c_{\kappa }\, $ with negative coefficients $\, {\alpha }_{\kappa } \leq 0\, $. Define $$ \overline\Psi ( c_{\omega } )\> =\> \inf\, \{ {\overline\Psi }_{\kappa < \omega } ( b )\,\vert\, b\geq c_{\omega }\, ,\, b\in J_{\kappa < \omega } \}\> =\> \inf\, \{ {\overline\Psi }_{\kappa < \omega } ( b )\,\vert\, b\geq c_{\omega }\, ,\, b\in J_{\kappa < \omega , -} \} $$ and check as above that this gives a positive extension of $\, {\overline\Psi }_{\kappa < \omega }\, $ to the subspace $\, J_{\omega } = J_{\kappa < \omega } + \mathbb R\, c_{\omega }\, $. If one has exhausted all basis elements by this procedure one only needs to extend $\, \overline\Psi\, $ to a positive projection on $\, A''\, $ by continuity. The map $\,\overline\Psi\, $ then factors as a product of a positive retraction $\, \overline\sigma : A'' \twoheadrightarrow I ( A )\, $ and the completely isometric inclusion $\, \iota\, $ as above. It is clear that $\, \overline\Psi ( c_0 )\, $ gives the maximal value for the image of $\, c_0\, $ under any positive projection $\, \Psi : A'' \rightarrow A''\, $ with range $\, \Phi ( A'' )\, $, and that $\, \overline\Psi ( c_{\omega } )\, $ gives the maximal value for $\, \Psi ( c_{\omega } )\, $ subject to the condition that $\, \Psi ( c_{\kappa } ) = \overline\Psi ( c_{\kappa } )\, $ for $\, \kappa < \omega\, $. 
The construction above gives $$ \overline\Psi ( c_0 )\> =\> \inf\, \{ \Phi ( b )\,\vert\, b\geq c_0\, ,\, b\in J_+ \}\> \geq\> \inf\, \{ b\,\vert\, b\geq c_0\, ,\, b\in \Phi ( A'' ) \} $$ where the infimum in $\, \Phi ( A'' )\, $ is understood, since $\, b\geq c_0\, $ implies $\, \Phi ( b ) \geq c_0\, $ if $\, b\in J_+\, $. On the other hand the value of $\,\Psi ( c_0 )\, $ certainly must be smaller than the value to the right of the inequality so equality follows. Correspondingly there is for each chosen basis as above a positive projection $\,\underline\Psi : A'' \rightarrow A''\, $ with same range such that the values of $\, \underline\Psi ( c_{\omega } )\, $ are conditionally minimal, and in particular the value of $\,\underline\Psi ( c_0 )\, $ is the absolutely minimal value any positive projection can take in $\, c_0\, $, which follows from the symmetry $\, c_{\omega } \mapsto - c_{\omega }\, $ plus the above construction. If $\, b \in J_{0 , +} = J + {\mathbb R}_+\, c_0\, $ with $\, b \geq 0\, $ one checks the following reverse Schwarz inequality $$ \overline\sigma ( b^2 )\> \leq\> \inf\,\{ \rho ( a^2 )\,\vert\, a\geq b\, ,\, a\in J_+ \}\> =\> \inf\, \{ \rho ( a )^2\,\vert\, a\geq b\, ,\, a\in J_+ \} $$ $$ \> =\> \inf\, \{ \rho ( \Phi ( a )^2 )\,\vert\, a\geq b\, ,\, a\in J_+ \}\> =\> \> \tau ( \inf\, \{ \Phi ( a )^2\,\vert\, a\geq b\, ,\, a\in J_+ \} ) $$ $$ \> =\> \tau ( ( \inf\, \{ \Phi ( a )\, ,\, a\geq b\, ,\, a\in J_+ \} )^2 )\> =\> \tau ( \overline\Psi ( b )^2 ) =\> \overline\sigma ( b )^2 \> . $$ This needs some explanation. The first inequality follows from the general scheme that the image of an infimum of a decreasing net of elements under a positive map must be smaller than the infimum of its images, then the second is the Schwarz equality for $\, a\in J\, $ proved above, the third is given by definition of the $C^*$-product in $\, I ( A )\, $ which can be retrieved (cf. 
\cite{E-R}, Theorem 6.1.3) from the completely positive projection $\,\Phi\, $ by the formula $$ x\circ y\> =\> \rho ( \iota ( x ) \iota ( y ) )\> . $$ One finds that if $\, \tau : A'' \twoheadrightarrow I ( A )\, $ is any positive retraction for $\,\iota\, $ then $$ x^2\> =\> \tau ( \iota ( x ) ) \tau ( \iota ( x ) )\> \leq\> \tau ( \iota ( x ) \iota ( x ) )\> \leq\> \rho ( \iota ( x ) \iota ( x ) )\> = x^2 $$ which follows from the Schwarz inequality for the completely positive map $\,\iota\, $ and the Kadison-Schwarz inequality for the positive map $\,\tau\, $. The second equality in the second line follows by choosing a suitable positive retraction $\,\tau\, $ taking precisely the chosen value for the element $\, z = \inf\, \{ \Phi ( a )^2\,\vert\, a\geq b\, ,\, a\in J_+ \}\, $, namely $\, \tau ( z ) = \inf\,\{ \rho ( \Phi ( a )^2 )\,\vert\, a\geq b\, ,\, a\in J_+ \}\, $. Clearly no positive retraction may take a larger value in $\, z\, $. To see that $\, \tau\, $ exists one makes use of the above construction choosing $\, c_0 = z\, $ unless $\, z\in J\, $ in which latter case one may take $\,\tau = \rho\, $. Indeed since $\, z \geq 0\, $ one has $$ z\> =\> \inf\, \{ c\,\vert\, c\geq z\, ,\, c = d^2\, ,\, c , d\in J_+ \} $$ so there exists a positive retraction $\,\tau : A'' \twoheadrightarrow I ( A )\, $ with $$ \> \tau ( z )\> =\> \inf\, \{ \rho ( c )\, \vert\, c\geq z\, ,\, c\in J_+ \}\> =\> \inf\, \{ \rho ( \Phi ( d )^2 )\,\vert\, d^2 \geq z\, ,\, d\in J_+ \} $$ $$ \geq\> \inf\,\{ \rho ( \Phi ( d )^2)\,\vert\, d \geq b\, ,\, d\in J_+ \} \qquad\qquad\qquad\qquad\qquad \>\> $$ since $\, z \geq b^2\, $ by definition ($\, a\in J_+\, $ implies $\, \Phi ( a ) \geq a\, $). Then equality follows from the maximality argument above. Then the first two equalities in the third line follows from weak continuity of the $C^*$-product in $\, A''\, $ and the definition of $\, \overline\Psi ( b )\, $. 
The last inequality is implied by the general consideration above since $\, \tau ( \overline\Psi ( b )^2 ) = \tau ( \iota ( \overline\sigma ( b ) ) \iota ( \overline\sigma ( b ) ) ) = \rho ( \overline\Psi ( b )^2 )\, $. Since $\, \overline\sigma\, $ is positive the ordinary Kadison-Schwarz inequality applies to get the converse statement so that $$ \overline\sigma ( b^2 )\> =\> \overline\sigma ( b )^2 \> . $$ Then if $\, b = b_1 + b_2\, $ with $\, b_1\, ,\, b_2\, $ as above one has $$ {\overline\Psi }_0 ( b_1 b_2 + b_2 b_1 )\> =\> {\overline\Psi }_0 ( ( b_1 + b_2 )^2 ) - {\overline\Psi }_0 ( b_1^2 ) - {\overline\Psi }_0 ( b_2^2 )\> =\> {\overline\Psi }_0 ( b_1 ) {\overline\Psi }_0 ( b_2 ) + {\overline\Psi }_0 ( b_2 ) {\overline\Psi }_0 ( b_1 )\> . $$ Then if $\, b = b_1 - b_2\, $ with $\, b_1\, ,\, b_2\, $ as above one gets $$ \overline\sigma ( b^2 )\> =\> \overline\sigma ( b_1^2 ) - \overline\sigma ( b_1 b_2 + b_2 b_1 ) + \overline\sigma ( b_2^2 )\> =\> \overline\sigma ( b )^2 $$ and the Schwarz equality continues to hold for such differences. Let now $\, b\in J_0 + {\mathbb R}_+ c_1\, ,\, b\geq 0\, $. As above one gets $$ \overline\sigma ( b^2 )\> \leq\> \inf\, \{ \overline\sigma ( a^2 )\,\vert\, a\geq b\, ,\, a\in J_0 \} \> =\> \inf\, \{ \overline\sigma ( a )^2\,\vert\, a\geq b\, ,\, a\in J_0 \} $$ $$ \> =\> \inf \{ \rho ( \overline\Psi ( a )^2 )\,\vert\, a\geq b\, ,\, a\in J_0 \}\> =\> \tau ( \inf\, \{ \overline\Psi ( a )^2\,\vert\, a\geq b\, ,\, a\in J_0 \} ) $$ $$\> =\> \tau ( ( \inf\, \{ \overline\Psi ( a )\,\vert\, a\geq b\, ,\, a\in J_0 \} )^2 )\> =\> \tau ( \overline\Psi ( b )^2 )\> = \overline\sigma ( b )^2\> . $$ The argument is completely analogous to the previous case, but maybe just a little bit more tricky. One notes first that each element of the form $\, \Phi ( a )^2\, $ is in $\, J_{\iota } ( A )\, $ since $\, \tau ( \Phi ( a )^2 ) = \rho ( \Phi ( a )^2 )\, $ as shown above. 
Define $\, z = \inf\, \{ \overline\Psi ( a )^2\,\vert\, a\geq b\, ,\, a\in J_0 \}\, $ and considering $\, z\, $ as the first element of a suitable well ordered basis there exists a positive retraction $\, \tau : A'' \twoheadrightarrow I ( A )\, $ taking the maximal possible value in $\, z\, $. This value is given by $$ \tau ( z )\> =\> \inf\, \{ a\,\vert\, a\geq z\, ,\, a\in \Phi ( A'' ) \}\> \geq\> \inf\, \{ d^2\,\vert\, d^2\geq z\, ,\, d\in \Phi ( A'' ) \} $$ where the infimum is in $\,\Phi ( A'' )\, $, i.e in the second case it is the supremum of all elements in $\, \Phi ( A'' )\, $ which are smaller than each element $\, d^2\geq z\, $ with $\, d\in \Phi ( A'' )\, $. The inequality follows since $\, d^2 \geq z\, $ implies $\, \Phi ( d^2 )\geq d^2\geq z\, $ and $$ \inf\, \{ d^2\,\vert\, d^2\geq z\, ,\, d\in \Phi ( A'' ) \}\> =\> \inf\, \{ \Phi ( d^2 )\,\vert\, d^2\geq z\, ,\, d\in \Phi ( A'' ) \} \> . $$ Indeed, $\, \Phi ( d^2 )\, $ is the smallest element in $\, \Phi ( A'' )\, $ larger than $\, d^2\, $. This accounts for the second equation in the middle line, and the rest of the argument is much the same as before. One proceeds by induction to prove the Schwarz equality $\, \overline\sigma ( x^2 ) = \overline\sigma ( x )^2\, $ for all selfadjoint elements in $\, \mathfrak Y\, $, and by continuity for all elements in $\, (A'')^{sa}\, $. Then the Schwarz inequality holds for arbitrary normal elements. Indeed, for $\, x = a + i b\, $ a normal element the ordinary Schwarz inequality applies to give $$ \overline\sigma ( x x^* )\> =\> \overline\sigma ( a )^2 + \overline\sigma ( b )^2\> \geq\> \overline\sigma ( x ) \overline\sigma ( x )^*\> =\> \overline\sigma ( a )^2 + \overline\sigma ( b )^2 + i ( \overline\sigma ( b ) \overline\sigma ( a ) - \overline\sigma ( a ) \overline\sigma ( b ) ) $$ which implies that $\, i ( \overline\sigma ( b ) \overline\sigma ( a ) - \overline\sigma ( a ) \overline\sigma ( b ) ) \leq 0\, $. 
By symmetry one also gets $\, -i ( \overline\sigma ( b ) \overline\sigma ( a ) - \overline\sigma ( a ) \overline\sigma ( b ) ) \leq 0\, $ and hence $\, \overline\sigma ( a ) \overline\sigma ( b ) = \overline\sigma ( b ) \overline\sigma ( a )\, $ proving the Schwarz equality $\, \overline\sigma ( x x^* ) = \overline\sigma ( x ) \overline\sigma ( x )^*\, $ for any normal element. From this one easily induces for $\, x = x_+ - x_-\, $ selfadjoint with $\, x_+ = x\vee 0\, $ and $\, - x_- = x\wedge 0\, $ that $\overline\sigma\, $ sends $\, x_+\, $ to $\, \overline\sigma ( x )_+\, $ and $\, x_-\, $ to $\,\overline\sigma ( x )_-\, $ (since $\,\overline\sigma ( x_+ ) \overline\sigma ( x_- ) = \overline\sigma ( x_- )\overline\sigma ( x_+ ) = 0\, $). In particular if $\, x = x_+ - x_-\in (A'')^{sa} \, $ is an element of the kernel of $\,\overline\sigma\, $ with $\, x_+ x_- = x_- x_+ = 0\, $ then both $\, x_+\, $ and $\, x_-\, $ are contained in the kernel, i.e. the kernel is linearly generated by positive elements. From this one gets that the kernel of $\,\overline\sigma\, $ is a twosided ideal, for if $\, a\geq 0\, $ is contained in the kernel then also $\,\sqrt a\, $ which follows directly from the Schwarz equality $$ \overline\sigma ( \sqrt a )^2\> =\> \overline\sigma ( a )\> =\> 0 \> . $$ Let $\, b\, ,\, c \in A''\, $ be arbitrary elements. Then $$ \overline\sigma ( ba ) + \overline\sigma ( \sqrt a b \sqrt a )\> =\> \overline\sigma ( b\sqrt a ) \overline\sigma ( \sqrt a ) + \overline\sigma ( \sqrt a ) \overline\sigma ( b \sqrt a )\> =\> 0 $$ and since obviously $\, \overline\sigma ( \sqrt a b \sqrt a ) \leq \Vert b \Vert \overline\sigma ( a ) = 0\, $ one gets $\, \overline\sigma ( b a ) = 0\, $. 
By the same line of argument $$ \overline\sigma ( b a c ) + \overline\sigma ( c b a )\> =\> \overline\sigma ( b a ) \overline\sigma ( c ) + \overline\sigma ( c ) \overline\sigma ( b a )\> =\> 0 $$ and since $\, \overline\sigma ( c b a ) = 0\, $ from the previous argument one concludes that $\, \overline\sigma ( b a c ) = 0\, $, which implies the assertion. It is not unlikely that $\,\overline\sigma\, $ is a $*$-homomorphism in general. Indeed, one may define an associative Banach algebra product on $\, I ( A )\, $ by the formula $$ x * y\> =\> \overline\sigma ( \iota ( x ) \iota ( y ) ) \> . $$ Associativity is readily checked from the fact that the kernel of $\,\overline\sigma\, $ is an ideal. If this product should also have the $C^*$-property $\, \Vert x * x^* \Vert = \Vert x {\Vert }^2\, $ then from uniqueness of the $C^*$-product on $\, I ( A )\, $ the product must be the usual one and $\,\overline\sigma\, $ must be a $*$-homomorphism. However it does not seem very easy to prove the $C^*$-property for a general (nonnormal) element $\, x\, $. For a normal element the result follows of course immediately from the Schwarz equality. The property of $\,\overline\sigma\, $ being a $*$-homomorphism is in fact equivalent to it being $2$-positive. In this case one gets for selfadjoint elements $\, a\, $ and $\, b\, $ $$ {\overline\sigma }_2 \left( {\begin{pmatrix} a & b \\ b & 0 \end{pmatrix} }^2\right)\> \geq\> {\overline\sigma }_2 \left( \begin{pmatrix} a & b \\ b & 0 \end{pmatrix} \right)^2\> $$ and hence $$ \begin{pmatrix} 0 & \overline\sigma ( a b ) - \overline\sigma ( a )\overline\sigma ( b ) \\ \overline\sigma ( b a ) - \overline\sigma ( b ) \overline\sigma ( a ) & 0 \end{pmatrix}\> \geq\> 0 $$ implying $\, \overline\sigma ( a b ) = \overline\sigma ( a ) \overline\sigma ( b )\, $. 
Let $\, {\mathcal J}_{\iota }\, $ be the canonical subspace consisting of those elements which are in the kernel of every positive retraction $\,\sigma : A'' \twoheadrightarrow I ( A )\, $ giving a left inverse for $\,\iota\, $. This space is just the intersection of all kernels of positive retractions of the above type, since there is for every selfadjoint element $\, x\, $ a maximal and a minimal possible value which are taken by retractions of the form considered above, so if these are both zero then $\, x\, $ is contained in $\, {\mathcal J}_{\iota }\, $. Being the intersection of a given class of ideals, $\, {\mathcal J}_{\iota }\, $ is a twosided ideal itself. Also from the Schwarz equality for normal elements $\,\overline\sigma\, $ maps projections to projections and unitaries to unitaries. A similar argument shows that the subspace $\, J_{\iota } ( A )\, $ of elements with unique image in $\, I ( A )\, $ is a Jordan subalgebra, so that its image in the quotient $\, A'' / \mathcal J\, $ is also a Jordan subalgebra and canonically completely isometric with $\, I ( A )\, $. We claim that it is (relatively) monotone complete. Let an increasing net $\, ( x_{\lambda } )_{\lambda }\, $ be given with $\, \{ x_{\lambda } \} \subseteq I ( A )\, $ and $\, x\in I ( A )\, $ its least upper limit. Suppose that $\, x\, $ is not the least upper limit of the same net in $\, A'' / \mathcal J\, $. Then there exists an element $\, y\in A'' / \mathcal J\, ,\, y < x\, $ such that $\, y\, $ is an upper bound for the net $\, ( x_{\lambda } )_{\lambda }\, $. The positive projection $\, \Psi\, $ with range $\,\iota ( I ( A ) )\, $ induces a positive projection $\,\widetilde\Psi : A'' / \mathcal J \rightarrow A'' / \mathcal J\, $ with range equal to $\, I ( A )\, $. As in the argument above the value of $\, \widetilde\Psi ( y )\, $ is necessarily given by $\, x\, $ no matter the choice of $\,\Psi\, $. 
Therefore any preimage of $\, y\, $ is contained in $\, J ( A )\, $ which implies $\, y\in I ( A )\, $, hence $\, y = x\, $. To prove the Up-Down Theorem for $\, I ( A )\, $ choose for given $\, x\in I ( A )^{sa}\, $ a preimage $\,\overline x \in J_{\iota } ( A )^{sa}\, $. From the Up-Down Theorem in $\, A''\, $ one gets a monotone decreasing net $\, ( {\overline b}_{\mu } ) \searrow \overline x\, $ with each $\, b_{\mu }\, $ the limit of a monotone increasing net $\, ( a_{\mu \nu } ) \nearrow b_{\mu }\, $ of elements $\, a_{\mu \nu }\in A^{sa}\, $. Then each positive retraction $\, \sigma : A'' \twoheadrightarrow I ( A )\, $ sends each element $\, {\overline b}_{\mu }\, $ to a corresponding element $\, b_{\mu }\, $ which is the monotone increasing limit of the net $\, ( a_{\mu \nu } )\, $ in $\, I ( A )\, $ and $\, x\, $ is the limit of the monotone decreasing net $\, ( b_{\mu } )\, $. If $\, {\mathcal J}_{\iota }\, $ is trivial then the subspace $\, J\, $ generated by limits of monotone increasing (or decreasing) nets of elements in $\, A^{sa}\, $ is contained in $\, \Phi ( A'' )\, $ because the difference between the supremum of such a net in $\, A''\, $ and its supremum in $\,\Phi ( A'' )\, $ is in the kernel of any positive retraction and hence in $\, {\mathcal J}_{\iota }\, $. Then again by the same argument since every element in $\, (A'')^{sa}\, $ can be represented as the infimum of a monotone decreasing net of elements in $\, \Phi ( A'' )^{sa}\, $ it must be contained in $\, \Phi ( A'' )\, $ itself so $\,\Phi\, $ is the identity map and $\, A'' \simeq I ( A )\, $ follows. Put $\, \mathcal J = \sum_{\iota }\, {\mathcal J}_{\iota }\, $ which is a twosided ideal of $\, A''\, $. Each element $\, x\in\mathcal J\, $ is contained in the kernel of every completely positive projection $\,\Phi : A'' \rightarrow A''\, $ extending the identity map of $\, A\, $ and with range completely isometric to $\, I ( A )\, $. 
To see this let two different completely isometric projections $\, \Phi\, ,\, {\widetilde\Phi} : A'' \rightarrow A''\, $ as above be given. Then it is easy to see that $\, \Phi \circ\widetilde\Phi\, $ is also a projection (since $\, \Phi\circ\widetilde\Phi\circ\Phi = \Phi\, $ from rigidity) with range $\, \Phi ( A'' )\, $ and kernel equal to the kernel of $\,\widetilde\Phi\, $. Thus for each different choice of kernel there is a corresponding projection onto a fixed copy of $\, I ( A ) \subseteq A''\, $. This implies that if $\, x\, $ is contained in the kernel of any completely positive projection with fixed range $\, \iota ( I ( A ) )\, $ it is necessarily also in the kernel of any such projection having a different range $\, {\widetilde\iota} ( I ( A ) )\, $. In particular this accounts for all elements in $\,\mathcal J\, $. Since the embedding of $\, I ( A )\, $ into $\, A'' / {\mathcal J}_{\iota }\, $ is completely canonical, the same is true for the embedding into $\, A'' / \mathcal J\, $ and in particular any two completely isometric inclusions $\, \iota\, ,\, {\widetilde\iota }\, $ will agree modulo $\,\mathcal J\, $. This implies that the preimage $\, J ( A )\, $ of $\, I ( A )\, $ modulo $\,\mathcal J\, $ is a Jordan subalgebra of $\, A''\, $. \qed \par
\noindent The statement of the theorem extends to the case where $\, A\, $ is separable, but is represented on a nonseparable Hilbert space $\,\mathcal H\, $ for in this case the representation decomposes into a direct sum of separable representations so the Up-Down Theorem applies. Note also that the proof uses the injectivity of $\, A''\, $ only insofar as to obtain a completely isometric inclusion $\, I ( A )\subseteq A''\, $ extending the identity of $\, A\, $, so that it also applies in case that such an inclusion exists without $\, A''\, $ being injective (it is conceivable that it is always possible to embed $\, I ( A )\, $ into $\, A^{**}\, $ and hence into the strong closure of $\, A\, $ in any representation but this needs a proof). \par
\noindent {\it Remark.}\quad Since the map $\,\overline\sigma\, $ constructed in the proof of the theorem maps projections to projections it is natural to ask whether it induces a map of the $K_0$-groups of the respective $C^*$-algebras. Clearly it sends a pair of homotopic projections $\, p {\sim }_h q\, $ in $\, A''\, $ to homotopic projections in $\, I ( A )\, $ but more is true. It is always possible to extend the map $\, \overline\sigma\, $ to a Jordan homomorphism $$ {\overline\sigma }_n : M_n ( A'' ) \twoheadrightarrow M_n ( I ( A ) ) $$ for any $\, n\, $ such that this map reduces to $\,\overline\sigma\, $ restricted to the left upper corner. This implies that $\,\overline\sigma\, $ maps stably homotopic projections to stably homotopic projections and hence induces a homomorphism of the subgroup of $\, K_0 ( A'' )\, $ generated by the projections in $\, A''\, $ into $\, K_0 ( I ( A ) )\, $. Then choosing any ascending sequence of natural numbers $\, 1= n_1 \leq n_2 \leq \cdots \leq n_k\leq\cdots\, $ and compatible Jordan homomorphisms $$ {\overline\sigma }_{n_k} : M_{n_k} ( A'' ) \twoheadrightarrow M_{n_k} ( I ( A ) ) $$ in the sense above will give a well defined map $$ K_0 ( A'' ) \longrightarrow K_0 ( I ( A ) )\> . $$ \par
\noindent {\it Example.}\quad If $\, A\, $ is commutative then any positive retraction $\, \sigma : \lambda ( A )'' \twoheadrightarrow I ( A )\, $ as constructed in the proof of the theorem is a $*$-homomorphism. The homomorphism $\, \sigma \, $ cannot be normal in general. To see this let $\, A = C ( X )\, $ be the algebra of continuous functions on the interval $\, X = [ 0 , 1 ]\, $ and, choosing some countable dense subset $\, X_S\subseteq X\, $, consider the representation $\, {\lambda }_S\, $ of $\, A\, $ on the Hilbert space $\, l^2 ( X_S )\, $ by pointwise multiplication so that $\, A'' = l^\infty ( X_S )\, $. If $\, {\sigma }_{{\lambda }_S}\, $ would be normal then $\, I ( A )\, $ would be $*$-isomorphic to the von Neumann algebra $\, l^\infty ( Y_S )\, $ for some dense subset $\, Y_S \subseteq X_S\, $ by \cite{Pe}, Corollary 2.5.5. But then, for any given point $\, x_0\in Y_S\, $, $\, Y_S\backslash \{ x_0 \}\, $ is dense in $\, X\, $ so that the natural projection $\, l^\infty ( Y_S ) \twoheadrightarrow l^\infty ( Y_S \backslash \{ x_0 \} )\, $ is faithful on $\, A\, $. From rigidity it would also have to be faithful on $\, I ( A )\, $ (there always exists an extension of the identity map of $\, A\, $ in the reverse direction) which gives a contradiction. The same example serves to show that $\,\rho\, $ is not unique in this case. Divide $\, X_S = Y_S \cup Z_S\, $ into a disjoint sum of dense subsets so that $\, l^\infty ( X_S ) \simeq l^\infty ( Y_S ) \oplus l^\infty ( Z_S )\, $. Then for each summand there is a corresponding surjection $\, {\rho }_{Y , Z} : l^\infty ( X_S ) \twoheadrightarrow I ( C ( X ) )\, $ which annihilates the complementary summand, so these must be different. From part (ii) of the theorem one obtains for commutative $\, A\, $ a simple characterization of the elements in $\, J ( A )\, $ which in this case is a canonical $C^*$-subalgebra of $\, A''\, $. 
For a given selfadjoint element $\, f\, $ in $\, A''\, $ define $\, A_f = \{ a\in A\,\vert\, a\geq f \}\, $ and $\, B_f = \{ b\in A\,\vert\, b \leq f \}\, $. Also define for any subset $\, \mathcal A \subset A^{sa}\, $ which is bounded from below, resp. for any subset $\, \mathcal B \subset A^{sa}\, $ which is bounded from above, its complement by $\, {\mathcal A}^c = \{ c\in A\,\vert\, c \leq \inf\, \mathcal A \}\, $, resp. $\, {\mathcal B}^c = \{ d\in A^{sa}\,\vert\, d\geq \sup\, \mathcal B \}\, $, so that $\, \mathcal A \subseteq ({\mathcal A}^c)^c\, $ and $\, {\mathcal A}^c = (({\mathcal A}^c)^c)^c\, $. The proof of the theorem shows that for any selfadjoint element $\, f\, $ in $\, A''\, $ there exists a $*$-homomorphism $\, \overline\sigma : A'' \twoheadrightarrow I ( A )\, $ extending the identity map of $\, A\, $ such that $\, \overline\sigma ( f ) = \inf\, \{ x\,\vert\, x\in I ( A )\, ,\, \iota ( x ) \geq f \}\, $ with respect to some positive inclusion $\,\iota : I ( A ) \hookrightarrow A''\, $ extending the identity map of $\, A\, $. If $\, f\in J ( A )\, $ then the image of $\, f\, $ in $\, I ( A )\, $ is the same for any choice of positive retraction $\, \sigma : A'' \twoheadrightarrow I ( A )\, $ (whatever the choice of $\, \iota\, $ is). This implies by part (ii) of the theorem that the value of $\, \sigma ( f )\, $ must be equal to $\, \inf\, A_f\, $ (the infimum taken in $\, I ( A )\, $) and also equal to $\, \sup\, B_f\, $. This then implies the identities $\, (A_f)^c = ((B_f)^c)^c\, $ or equivalently $\, (B_f)^c = ((A_f)^c)^c\, $. On the other hand it is easy to see that these identities can be satisfied only for $\, f\in J ( A )\, $. 
Since there is a unique positive retraction $\, \sigma : J ( A ) \twoheadrightarrow I ( A )\, $ extending the identity map of $\, A\, $ which in fact is a $*$-homomorphism this map induces by duality a canonical embedding $\, s : \mathrm{Spec} ( I ( A ) ) \hookrightarrow \mathrm{Spec} ( J ( A ) )\, $ of the spectrum (or the state space) of $\, I ( A )\, $ as a closed subspace of the spectrum (resp. state space) of $\, J ( A )\, $, the image of which may be called the {\it rigid states with respect to $\, A\, $}. They have the following rigidity property: given any positive extension $\, j : J ( A ) \rightarrow J ( A )\, $ extending the identity map of $\, A\, $ and $\, \phi\, $ a state in the image of $\, s\, $ one gets $\, \phi ( j ( f ) ) = \phi ( f )\, $ for all $\, f\in J ( A )\, $. Note also that $\, J ( A )\, $ has the following injectivity property relative to $\, A\, $: given any subspace $\, E\subseteq J ( A )\, $ containing $\, A\, $ and positive map $\, \rho : E \rightarrow J ( A )\, $ which reduces to the identity map on $\, A\, $ there is a positive extension of $\, \rho\, $ to all of $\, J ( A )\, $. This follows since by injectivity of $\, A''\, $ there exists a positive extension of $\,\rho\, $ to a map $\, \overline\rho : J ( A ) \rightarrow A''\, $ which must necessarily send $\, J ( A )\, $ into itself. \par
\noindent
\par
\noindent
\end{document} |
\begin{document}
\title{Global Solution for Gas-Liquid Flow of 1-D van der Waals Equation of State with Large Initial Data} \author{ Qiaolin He$^1$,
Ming Mei$^{2,3}$, Xiaoding Shi$^{4}$\thanks{\scriptsize{Corresponding author, shixd@mail.buct.edu.cn}}, Xiaoping Wang$^5$ \\
\scriptsize{$^1$School of Mathematics, Sichuan University, Chengdu, 610064, China}\\
\scriptsize{$^{2}$Department of Mathematics, Champlain College St.-Lambert, St.-Lambert, Quebec, J4P 3P2, Canada}\\
\scriptsize{$^{3}$Department of Mathematics and Statistics, McGill University, Montreal, Quebec,H3A 2K6, Canada }\\
\scriptsize{$^{4}$Department of Mathematics, School of Science, Beijing University of Chemical Technology, Beijing, 100029, China}\\
\scriptsize{$^{5}$Department of Mathematics, Hong Kong University of Science and Technology, Hong Kong, China}} \date{} \maketitle
\noindent\textbf{Abstract}. This paper is concerned with a diffuse interface model for the gas-liquid phase transition. The model consists of the compressible Navier-Stokes equations with van der Waals equation of state and a modified Allen-Cahn equation. The global existence and uniqueness of strong solution with the periodic boundary condition (or the mixed boundary condition) in one-dimensional space is proved for large initial data. Furthermore, the phase variable and the density of the gas-liquid mixture are proved to stay in the physically reasonable interval. The proofs are based on the elementary energy method and the maximum principle, but with new development, where some techniques are introduced to establish the uniform bounds of the density and to treat the non-convexity of the pressure function.
\
\noindent{\bf Keywords:} global solution, Navier-Stokes equations, Allen-Cahn equation, gas-liquid flow, van der Waals equation of state \
\noindent{\bf MSC:} 35M10,35Q30
\section {\normalsize Introduction and Main Result} \setcounter{equation}{0} In the last few decades, there has been much progress on the modelling and analysis of the multiphase and phase transition problems, in particular on the phase field models of the phenomena, see \cite{AC1979}-\cite{CH1958}, \cite{HMR2012}, \cite{LT-1998}, \cite{V1894}, \cite{QWS}, \cite{WQS} and the references therein. In this paper, we investigate the Navier-Stokes-Allen-Cahn system proposed by Blesgen \cite{B1999} which describes the compressible two-phase flow with a diffuse interface. The system consists of the compressible Navier-Stokes equations and a modified Allen-Cahn equation, and it
is especially useful for analyzing the phase transition properties of gas-liquid flow. It allows phases to shrink or grow due to changes of density in the fluid and incorporates their transport with the current. The Navier-Stokes-Allen-Cahn system is commonly expressed as follows (see \cite{B1999}, \cite{FPRS2008}, \cite{DLL2013}, \cite{CG2017}, \cite{STY} etc.) \begin{equation}\label{3dNSAC} \left\{\begin{array}{llll} \displaystyle \partial_t\rho+\textrm{div}(\rho \mathbf{u})=0, \\
\displaystyle \displaystyle \partial_t(\rho \mathbf{u})+\textrm{div}(\rho \mathbf{u}\otimes \mathbf{u})+\nabla p-(\nu\Delta\mathbf{u}+\eta\nabla\textrm{div}\mathbf{u})=-\epsilon\mathrm{div}\big(\nabla\chi\otimes\nabla\chi-\frac{|\nabla\chi|^2}{2}\mathbb{I}\big),\\ \displaystyle\partial_t\big(\rho\chi\big)+\mathrm{div}(\rho \chi \mathbf{u})=-\frac{1}{\epsilon}\frac{\partial f(\rho,\chi)}{\partial\chi}+\frac{\epsilon}{\rho} \Delta\chi, \end{array}\right. \end{equation} where $\rho=\rho(\mathbf{x},t)$, $\mathbf{u}=\mathbf{u}(\mathbf{x},t)$ and $\chi=\chi(\mathbf{x},t)$ are the density, the velocity and the concentration difference of the gas-liquid mixture. The constants $\nu>0, \ \eta\geq0$ are viscosity coefficients, and the constant $\epsilon>0$ is defined as the thickness of the diffuse interface of the gas-liquid mixture. The potential energy density $f=f(\rho,\chi)$, satisfying the Ginzburg-Landau double-well potential model (see \cite{HW}, \cite{DLL2013}, \cite{CG2017} and the references therein), follows that:
\begin{equation}\label{potential energy density}
f(\rho,\chi)=-3\rho+\frac{8\Theta}{3}\ln\frac{\rho}{3-\rho}+\frac{1}{4}\big(\chi^2-1\big)^2,
\end{equation}
where $\Theta>0$ is a positive constant related to the ratio of the actual temperature to the critical temperature.
The pressure $p$ is given by the following van der Waals equation of state (see \cite{V1894}, \cite{HK}, \cite{HW}, \cite{MLW1}, \cite{MLW2}, \cite{HSWZ}, \cite{HLS} and the references therein)
\begin{equation}\label{the formula of pressure} p(\rho)=\left\{\begin{array}{llll}
\displaystyle\rho^2\frac{\partial f}{\partial\rho}=-3\rho^2+\frac{8\Theta\rho}{3-\rho}\quad & \mathrm{if}\ 0\leq\rho<3,\\
\displaystyle+\infty,\quad & \mathrm{if}\ \rho\geq3.
\end{array}\right. \end{equation} We have the following properties of the pressure $p$:
\begin{enumerate} \item[(i)] $p(\rho)>0$ for $\rho>0$, $p(0)=0$;
\item[(ii)] When $\Theta\geq1$, $p(\rho)$ is a monotone increasing function. When $0<\Theta<1$, there exist two positive densities $3>\beta>\alpha>0$ such that $p(\rho)$ is increasing on $[0,\alpha]$ and on $[\beta,3)$, $p(\rho)$ is decreasing on $(\alpha,\beta)$;
\item[(iii)] $p'(\rho)=\frac{-6(\rho^3-6\rho^2+9\rho-4\Theta)}{(3-\rho)^2}$. When $0<\Theta<1$, there exists a positive density $\gamma$ such that $p(\gamma)=p(\beta)$, and $p(\rho)>p(\gamma)$ for $\rho>\gamma$, $p$ is increasing on $[0,\gamma]$. \end{enumerate} \begin{rmk}The van der Waals state equation \eqref{the formula of pressure} was proposed by the Dutch physicist J. D. van der Waals \cite{V1894}. It is a thermodynamic equation of state which is based on the theory that fluids are composed of particles with non-zero volumes, and subject to an inter-particle attractive force. Over the critical temperature (i.e. $\Theta\geq1$ in \eqref{the formula of pressure}), this equation of state is an improvement over the ideal gas law. Moreover, below the critical temperature (i.e. $0<\Theta<1$ in \eqref{the formula of pressure}), this equation is also qualitatively reasonable for the low-pressure gas-liquid states. \end{rmk} \begin{rmk} The concentration difference $\chi$ of the gas-liquid mixture can be understood as
$\chi=\chi_1-\chi_2$, where $\chi_i=\frac{M_i}{M}$ is the mass concentration of the fluid $i~(i=1,2)$, $M_i$ is the mass of the components in the representative material volume $V$. The item of $\epsilon\Big(\nabla\chi\otimes\nabla\chi-\frac{|\nabla\chi|^2}{2}\mathbb{I}\Big)$ in the momentum equation \eqref{3dNSAC} can be seen as an additional stress contribution in the stress tensor. This describes the capillary effect associated with free energy $E_{\mathrm{free}}(\rho,\chi)=\int_\Omega\Big(\frac{\rho}{\epsilon} f(\rho,\chi)+\frac{\epsilon}{2}|\nabla\chi|^2\Big)d\mathbf{x}$, (see \cite{AF2008}, \cite{FPRS2008}, \cite{DLL2013},\cite{CG2017}, \cite{CHMS-2018} and the references therein). \end{rmk}
There are a lot of works on the well-posedness of the solutions to compressible Navier-Stokes system. We refer to the work of Matsumura-Nishida \cite{MN1980}, Matsumura-Nishihara \cite{MN1985}-\cite{MN1986}, Lions \cite{Lions1998}, Huang-Li-Xin \cite{HLX2012},
Mei \cite{M1997}-\cite{M1999}, Huang-Li-Matsumura \cite{HLM2010}, Huang-Matsumura-Xin \cite{HMX2006}, Huang-Wang-Wang-Yang \cite{HWWY2015}, Shi-Yong-Zhang \cite{SYZ2016} and the references therein.
The study of interfacial phase changing in mixed fluids can be traced back to the work by van der Waals (1894). van der Waals described the interface between two immiscible fluids as a layer in the pioneer paper \cite{V1894}. His idea was successfully applied by Cahn-Hilliard \cite{CH1958} and Allen-Cahn \cite{AC1979} to describe the complicated phase separation and coarsening phenomena, the motion of anti-phase boundaries in the mixture respectively. Lowengrub-Truskinovsky \cite{LT-1998} added the effect of the motion of the particles and the interaction with the diffusion into the Cahn-Hilliard equation, and the Navier-Stokes-Cahn-Hilliard system was put forward. Blesgen \cite{B1999} then combined the compressible Navier-Stokes system with the modified Allen-Cahn equation to describe the behavior of cavitation in a flowing liquid, which was known as Navier-Stokes-Allen-Cahn system. The difference between Navier-Stokes-Allen-Cahn system and Navier-Stokes-Cahn-Hilliard system is that, for the former, the diffusion fluxes are neglected and the development of the constitutive equation for mass conversion of any of the considered phases is focused. This leads to that the latter conserves the volume fractions while the former does not.
Nowadays, Navier-Stokes-Allen-Cahn system and Navier-Stokes-Cahn-Hilliard system are widely used in the interfacial diffusion problems of fluid mechanics and material science. Comparatively speaking, the numerical treatment to the former is simpler than that of the latter which involves fourth-order differential operators. However, because the concentration difference $\chi$ in \eqref{3dNSAC} does not preserve overall volume fraction, a Lagrange multiplier is usually introduced in \eqref{3dNSAC}$_3$ as a constraint to conserve the volume, see Yang-Feng-Liu-Shen \cite{YFLS2006}, Zhang-Wang-Mi \cite{ZWM} and the references therein. Feireisl-Petzeltov$\mathrm{\acute{a}}$-Rocca-Schimperna \cite{FPRS2008} obtained the global existence of weak solutions for the isentropic case, where the method they used is the framework introduced by Lions \cite{Lions1998}. Along the way proposed by Feireisl \textit{et al.}, Ding-Li-Luo \cite{DLL2013} proved the global existence of one-dimensional strong solution in the bounded domain for initial density without vacuum states. Chen-Guo \cite{CG2017} generalized Ding-Li-Luo's result to the case that the initial vacuum is allowed.
However, all the results above are for the ideal fluid. In order to study the gas-liquid phase transition, we need to consider the non-ideal viscous fluid in which, there is an interval of the density $\rho$ where the pressure $p$ decreases as $\rho$ increases, and the phase transition takes place. The equations of state \eqref{the formula of pressure} proposed by van der Waals is quite satisfactory in describing this phenomena. Hsieh-Wang \cite{HW} solved the isentropic compressible Navier-Stokes system model by the van der Waals state equation numerically by a pseudo-spectral method with a form of artificial viscosity. They showed that the phase transition depends on the selection of the initial density. He-Liu-Shi \cite{HLS} investigated the large time behavior for van der Waals fluid in 1-D by using a second order TVD Runge-Kutta splitting scheme combined with Jin-Xin relaxation scheme.
Mei-Liu-Wong \cite{MLW1,MLW2} studied Navier-Stokes system with additional artificial viscosity
and $p(\rho)=\rho^{-3}-\rho^{-1}$. By using the Liapunov functional method, they proved the existence, uniqueness, regularity and uniform boundedness of the periodic solution in 1-D. Hoff and Khodia \cite{HK} considered the dynamic stability of certain steady-state weak solutions of system (1.1) for compressible van der Waals fluids in 1-D whole space with the small initial disturbance.
In this paper, we study the global existence of the solution for the system \eqref{3dNSAC} with the van der Waals state equation \eqref{the formula of pressure} in one dimension. More precisely, for general initial conditions without vacuum states, our purpose is to study the existence and uniqueness of the global strong solution for the isentropic Navier-Stokes-Allen-Cahn system \eqref{3dNSAC} even with large initial data. Moreover, we show that the phase variable $\chi$ belongs to the physical interval $[-1,1]$. Some new techniques are developed to establish the upper and lower bounds of the density $\rho$, and to treat the non-convexity of the pressure $p(\rho)$, both of which are crucial steps in the proof.
We now present our main result. The 1-D isentropic Navier-Stokes-Allen-Cahn system in the Euler coordinates is expressed in the following
\begin{equation}\label{NSAC} \left\{\begin{array}{llll} \displaystyle \rho_t+(\rho u)_x=0, \ \ & x\in\mathbb{R},t>0, \\ \displaystyle \rho u_t+\rho uu_x+p_x=\nu u_{xx}-\frac{\epsilon}{2}\big(\chi_x^2\big)_x,\ \ & x\in\mathbb{R},t>0,\\ \displaystyle\rho\chi_t+\rho u\chi_x=-\frac{1}{\epsilon}(\chi^3-\chi)+\frac{\epsilon}{\rho}\chi_{xx},\ \ & x\in\mathbb{R},t>0, \end{array}\right. \end{equation} with the $L$-periodic boundary value condition: \begin{equation}\label{periodic boundary for Euler} \left\{\begin{array}{llll} (\rho,u,\chi)(x,t)=(\rho,u,\chi)(x+L,t),\ \ & x\in\mathbb{R},t>0,\\
(\rho,u,\chi)\big|_{t=0}=(\rho_0,u_0,\chi_0),\ \ & x\in\mathbb{R}. \end{array}\right. \end{equation} We introduce the Hilbert space $L^2_{\mathrm{per}}$ of square integrable functions with the period $L$: \begin{equation}\label{periodic function sobolev space}
L^2_{\mathrm{per}}=\Big\{g(x)\big|g(x+L)=g(x)\ \mathrm{for\ all}\ x\in\mathbb{R},\ {\mathrm{and}\ } g(x)\in L^2(0,L) \Big\}, \end{equation}
with the norm denoted also by $\|\cdot\|$ (without confusion) which is given by $\|g\|=(\int_0^L|g(x)|^2dx )^{\frac{1}{2}}$. $H_{\mathrm{per}}^l \ (l\geq0)$ denotes the space of $L_{\mathrm{per}}^2$-functions $g$ on $\mathbb{R}$ whose derivatives $\partial^j_x g, j=1,\cdots,l$, are $L_{\mathrm{per}}^2$
functions, with the norm
$ \|g\|_l=(\sum_{j=0}^l\|\partial^j_x g\|^2)^{\frac{1}{2}}$. The initial and boundary data for the density, velocity and concentration difference of two components are assumed to be: \begin{equation}\label{initial data of v} (\rho_0,u_0)\in H_{\mathrm{per}}^1,\ \ \chi_0\in H_{\mathrm{per}}^2;\quad 0<\rho_0<3, \quad-1\leq\chi_0\leq1;
\end{equation} \begin{eqnarray}\label{Compatibility condition of chi} \chi_t(x,0)=-u_0\chi_{0x}+\frac{\epsilon}{\rho_0^2}\chi_{0xx}-\frac{1}{\epsilon\rho_0}\Big(\chi_0^3-\chi_0\Big). \end{eqnarray}
\begin{thm} \label{main thm-1} Assume that $(\rho_0,u_0,\chi_0)$ satisfies \eqref{initial data of v}-\eqref{Compatibility condition of chi}, then there exists a unique global strong solution $(\rho,u,\chi)$ of the system \eqref{NSAC}-\eqref{periodic boundary for Euler} such that for any $T>0$, \begin{eqnarray}\label{global solution for periodic boundary} &&\rho\in L^\infty(0,T;H_{\mathrm{per}}^1)\cap L^2(0,T;H_{\mathrm{per}}^1),\notag\\ &&u\in L^\infty(0,T;H_{\mathrm{per}}^1)\cap L^2(0,T;H_{\mathrm{per}}^2),\notag\\ &&\chi\in L^\infty(0,T;H_{\mathrm{per}}^2)\cap L^2(0,T;H_{\mathrm{per}}^3), \\ && -1\leq\chi\leq1,\ 0< \rho<3,\ \mathrm{for \ all}\ (x,t)\in\mathbb{R}\times[0,T],\notag \end{eqnarray} and \begin{eqnarray}\label{energy estimate for periodic boundary problem} \left.\begin{array}{llll}
\displaystyle\sup_{t\in [0,T]}\big\{\|(\rho,u)(t)\|^2_1+\|\chi\|_2^2\big\}+\int_0^T\big(\|\rho\|_1^2+\|u\|_2^2+\|\chi\|_3^2\big)dt \leq C, \end{array}\right.
\end{eqnarray}
where $C$ is a positive constant depending only on the initial data and $T$. \end{thm}
\noindent\begin{rmk}
There are two difficulties to overcome in proving Theorem 1.1. One is the upper and lower bounds of the density $\rho$, the other is the non-convexity of the pressure. For the former, we use the singularity of the pressure and the energy estimate of $\|\frac{1}{\rho}\|_{L^{\infty}([0,L]\times[0,T])}$. For the latter, we decompose the pressure according to its convexity. The results of Theorem 1.1 are valid even for large initial data. They also match well with the existing numerical studies in \cite{HW} and \cite{HLS}. \end{rmk}
Moreover, we consider the following mixed boundary value problem: \begin{equation}\label{mixed boundary problem} \left\{\begin{array}{llll} \displaystyle \rho_t+(\rho u)_x=0, \\ \displaystyle \rho u_t+\rho uu_x+p_x=\nu u_{xx}-\frac{\epsilon}{2}\big(\chi_x^2\big)_x,\\ \displaystyle\rho\chi_t+\rho u\chi_x=-\frac{1}{\epsilon}(\chi^3-\chi)+\frac{\epsilon}{\rho}\chi_{xx},\\
\displaystyle(u,\chi_x)\big|_{x=0,L}=(0,0),\\
\displaystyle(\rho,u,\chi)\big|_{t=0}=(\rho_0,u_0,\chi_0). \end{array}\right. \end{equation} Similarly, we have the following existence theorem for the mixed boundary problem \eqref{mixed boundary problem}. The proof will be omitted. \begin{thm} \label{main thm-2} Assume that $(\rho_0,u_0,\chi_0)$ satisfies
\begin{equation}\label{initial data of rho u and chi} (\rho_0,u_0)\in H^1,\ \ \chi_0\in H^2,\quad 0<\rho_0<3,\quad-1\leq\chi_0\leq1,
\end{equation} \begin{eqnarray}\label{Compatibility condition of rho u and chi} \chi_t(x,0)=-u_0\chi_{0x}+\frac{\epsilon}{\rho_0^2}\chi_{0xx}-\frac{1}{\epsilon\rho_0}\Big(\chi_0^3-\chi_0\Big), \end{eqnarray} then there exists a unique global strong solution $(\rho,u,\chi)$ of the system \eqref{mixed boundary problem}, such that for any $T>0$, \begin{eqnarray}\label{global solution for mixed boundary} &&\rho\in L^\infty(0,T;H^1)\cap L^2(0,T;H^1),\notag\\ &&u\in L^\infty(0,T;H^1)\cap L^2(0,T;H^2),\notag\\ &&\chi\in L^\infty(0,T;H^2)\cap L^2(0,T;H^3), \\ && -1\leq\chi\leq1,\ 0< \rho<3,\ \mathrm{for \ all}\ (x,t)\in[0,L]\times[0,T],\notag \end{eqnarray} and \begin{eqnarray}\label{energy estimate for mixed boundary problem} \left.\begin{array}{llll}
\displaystyle\sup_{t\in [0,T]}\big\{\|(\rho,u)(t)\|^2_1+\|\chi\|_2^2\big\}+\int_0^T\big(\|\rho\|_1^2+\|u\|_2^2+\|\chi\|_3^2\big)dt \leq C, \end{array}\right.
\end{eqnarray}
where $C$ is a positive constant depending only on the initial data and $T$. \end{thm}
The outline of this paper is as follows. In Section 2, we first give the local existence of the solution for the system \eqref{NSAC}-\eqref{periodic boundary for Euler}. Then, we give a series of lemmas which lead us to the desired a priori estimates. Finally, Theorem 1.1 is proved by the well-known alternative result and the maximum principle for parabolic equations.
\section {Proofs of the main theorem} \setcounter{equation}{0} In this section, we will present the global existence of the strong solution for the periodic problem \eqref{NSAC}-\eqref{periodic boundary for Euler}. Firstly, for any $m>0$, $M>0$, $T>0$, we define the periodic solution space: \begin{eqnarray}\label{periodic function space}
&&X_{\mathrm{per},m,M}([0,T])\equiv\Big\{(\rho,u,\chi)\Big|(\rho,u)\in C^0([0,T];H_{\mathrm{per}}^1),\chi\in C^0([0,T];H_{\mathrm{per}}^2),\qquad\qquad\qquad\notag\\
&&\qquad\quad\qquad\qquad\qquad\rho\in L^2([0,T];H_{\mathrm{per}}^1), u\in L^2([0,T];H_{\mathrm{per}}^2),\chi\in L^2([0,T];H_{\mathrm{per}}^3),\\
&&\qquad\quad\qquad\qquad\qquad\qquad\inf_{x\in\mathbb{R},t\in [0,T]}\rho(x,t)\geq m,\sup_{t\in [0,T]}\{\|(\rho,u)\|_1^2,\|\chi\|_2^2\}\leq M
\Big\}.\notag \end{eqnarray}
\begin{prop}[Local existence]\label{local existence and uniqueness for approximate periodic solution}
For any $m>0$, $M>0$, if $\inf_{x\in\mathbb{R}}\rho_0(x)\geq m$, $\|(\rho_0,u_0)\|_1^2$, $\|\chi_0\|_2^2\leq M$, then there exists a small time $T_*=T_*(\rho_0,u_0,\chi_0)>0$ such that the periodic boundary problem \eqref{NSAC}-\eqref{periodic boundary for Euler} admits a unique solution $(\rho,u,\chi)$ satisfying that $(\rho,u,\chi)\in X_{\mathrm{per},\frac{m}{2},2M}([0,T_*])$. \end{prop} \begin{proof} Taking $0<T<+\infty$, for any $m>0$, $M>0$, we construct an iterative sequence $(\rho^{(n)},u^{(n)},\chi^{(n)})$, $n=1,2,\cdots$, satisfying $(\rho^{(0)},u^{(0)},\chi^{(0)})=(\rho_0,u_0,\chi_0)$, and the following iterative scheme \begin{equation}\label{iterative-NSAH} \left\{\begin{array}{llll} \displaystyle \rho^{(n)}_t+(\rho^{(n)} u^{(n-1)})_x=0, \\ \displaystyle \rho^{(n)} u^{(n)}_t+\rho^{(n)} u^{(n-1)}u^{(n)}_x+(p(\rho^{(n)}))_x=\nu u^{(n)}_{xx}-\frac{\epsilon}{2}\big((\chi^{(n)})_x^2\big)_x,\\ \displaystyle\rho^{(n)}\chi^{(n)}_t+\rho^{(n)} u^{(n-1)}\chi^{(n)}_x=-\frac{1}{\epsilon}((\chi^{(n-1)})^3-\chi^{(n-1)})+\frac{\epsilon}{\rho^{(n)}}\chi^{(n)}_{xx},\\ \displaystyle(\rho^{(n)},u^{(n)},\chi^{(n)})(x,t)=(\rho^{(n)},u^{(n)},\chi^{(n)})(x+L,t),\\ (\rho^{(n)},u^{(n)},\chi^{(n)})(x,0)=\big(\rho_0,u_0,\chi_0\big)(x). \end{array}\right. \end{equation} By using the usual iterative approach (cf. \cite{CHMS-2018}), we can obtain the local existence of the solution for the periodic boundary problem \eqref{NSAC}-\eqref{periodic boundary for Euler}; the details are omitted. \end{proof} Now we will prove the global existence and uniqueness of the solution for the periodic boundary problem \eqref{NSAC}-\eqref{periodic boundary for Euler}.
Setting \begin{equation}\label{mu}
\mu=\frac{1}{\epsilon}(\chi^3-\chi)-\frac{\epsilon}{\rho}\chi_{xx}. \end{equation} From the physical point of view, the functional $\mu$ in \eqref{mu} can be understood as the chemical potential. The basic energy equality is presented below. From the definition of the pressure $p$ in \eqref{the formula of pressure}, we fix a positive reference density $\tilde \rho$ satisfying (see the properties of $p$) \begin{equation}\label{reference density} 0<\tilde\rho<\gamma<3, \end{equation} and define \begin{equation}\label{Phi} \Phi(\rho)=\rho\int_{\tilde{\rho}}^{\rho}\frac{p(s)-p(\tilde{\rho})}{s^2}ds. \end{equation} Noting that \begin{equation*}
\Phi'(\rho)=\frac{\Phi(\rho)+p(\rho)-p(\tilde\rho)}{\rho},\qquad \mathrm{and}\qquad \Phi''(\rho)=\frac{p'(\rho)}{\rho}, \end{equation*} then $\Phi(\tilde \rho)=\Phi'(\tilde\rho)=0$, and so that, there exist positive constants $c_1,c_2>0$ such that \begin{equation}\label{positive definite}
c_1(\rho-\tilde\rho)^2\leq\Phi(\rho)\leq c_2(\rho-\tilde\rho)^2. \end{equation} Moreover, combining with the mass conservation equation \eqref{NSAC}$_1$, one gets \begin{equation}\label{renormalization mass conservation equation} \Phi(\rho)_t+\big(\Phi(\rho)u\big)_x+\big(p(\rho)-p(\tilde{\rho})\big)u_x=0. \end{equation} Taking advantage of the local existence result Proposition \ref{local existence and uniqueness for approximate periodic solution}, we know that there exists a unique strong solution of the system \eqref{NSAC}-\eqref{periodic boundary for Euler} for $T$ small enough. By using the well-known alternative result, and the maximum principle for parabolic equation (see \cite{P2005}), it suffices to show the following a priori estimate. \begin{prop}[A priori estimate]\label{a priori estimate proposition for periodic boundary problem}
Assume that $(\rho_0,u_0,\chi_0)$ satisfies \eqref{initial data of v}-\eqref{Compatibility condition of chi}, and let $(\rho,u,\chi)\in X_{\mathrm{per},m,M}([0,T])$ be a local solution for a given $T>0$. Then there exists a positive constant $C$, such that
\begin{eqnarray}\label{a priori estimate} \left.\begin{array}{llll}
\displaystyle\sup_{t\in [0,T]}\big\{\|(\rho,u)(t)\|^2_1+\|\chi\|_2^2\big\}+\int_0^T\big(\|\rho\|_1^2+\|u\|_2^2+\|\chi\|_3^2\big)dt \leq C. \end{array}\right.
\end{eqnarray} \end{prop} Proposition 2.2 can be obtained by the following series of lemmas.
\begin{lem}\label{lem of lower estimate} Under the assumption of Proposition \ref{a priori estimate proposition for periodic boundary problem}, for any $T>0$,
it holds that \begin{eqnarray}\label{the first energy inequality} &&\int_0^L\Big(\rho u^2+\Phi(\rho)+\chi_x^2+\rho(\chi^2-1)^2\Big)dx+\int_0^T\int_0^L\Big(\mu^2+u_x^2\Big)dxdt\leq C, \end{eqnarray} where $\mu$ is defined in \eqref{mu}. \end{lem} \begin{proof} Multiplying Eq.\eqref{NSAC}$_2$ by $u$ and Eq.\eqref{NSAC}$_3$ by $\mu$, integrating the resultant equations over $[0,L]$ and adding them up, one has \begin{equation}\label{basic energy equality} \frac{d}{dt}\int_0^L\big(\frac{\rho u^2}{2}+\frac{\epsilon\chi_x^2}{2}+\frac{\rho(\chi^2-1)^2}{4\epsilon}\big)dx+\int_0^L\Big(\mu^2+\nu u_x^2+up_x(\rho)\Big)dxdt=0. \end{equation} Integrating \eqref{renormalization mass conservation equation} and adding the result to \eqref{basic energy equality}, one then gets \begin{equation}\label{the basic energy inequality} \frac{d}{dt}\int_0^L\Big(\frac{\rho u^2}{2}+\frac{\epsilon}{2}\chi_x^2+\Phi(\rho)+\frac{\rho(\chi^2-1)^2}{4\epsilon}\Big)dx+\int_0^L\Big(\mu^2+\nu u^2_x\Big)dxd\tau=0. \end{equation} Integrating \eqref{the basic energy inequality} over $[0,T] $, one has \begin{equation}\label{the basic energy inequality for density velocity and concentration difference} \sup_{t\in[0,T]}\int_0^L\Big(\frac{\rho u^2}{2}+\frac{\epsilon}{2}\chi_x^2+\Phi(\rho)+\frac{\rho(\chi^2-1)^2}{4\epsilon}\Big)dx+\int_0^T\int_0^L\Big(\mu^2+\nu u^2_x\Big)dxd\tau=E_0, \end{equation} where $E_0=\int_0^L\big(\frac{1}{2}\rho_0 u_0^2+\frac{\epsilon}{2}\chi_{0x}^2+\Phi(\rho_0)+\frac{\rho_0}{4\epsilon}(\chi_0^2-1)^2\big)dx$. The proof is obtained. \end{proof}
\begin{lem}\label{lem of lower estimate for chi} Under the assumption of Proposition \ref{a priori estimate proposition for periodic boundary problem}, for any $T>0$,
it holds that \begin{eqnarray}\label{sup of concentration}
\|\chi\|_{L_{\mathrm{per}}^{\infty}}\leq C. \end{eqnarray} \end{lem}
\begin{proof} Integrating the mass equation \eqref{NSAC}$_1$ over $[0,L]\times[0,t]$, one has \begin{equation}\label{mass conservation}
\int_0^L\rho(x,t)dx=\int_0^L\rho_0(x)dx. \end{equation} By Lemma 2.1, we then have \begin{equation}\label{inequality of chi}
\int_0^L\rho\chi^4dx\leq2\int_0^L\rho\chi^2dx-\int_0^L\rho dx+C_1\leq\frac{1}{2}\int_0^L\rho\chi^4dx+C. \end{equation} Therefore \begin{equation}\label{L4 L1 of chi}
\int_0^L\rho\chi^4dx\leq C,\ \ \ \int_0^L\rho\chi dx \leq \int_0^L\rho\chi^4dx+\int_0^L\rho dx\leq C. \end{equation} From \eqref{the first energy inequality}, one has \begin{eqnarray}\label{sup of chi}
|\chi(x,t)|&=&\frac{1}{\int_0^L\rho_0dx}\Big|\chi(x,t)\int_0^L\rho(y,t)dy\Big|\notag\\
&\leq&\frac{1}{\int_0^L\rho_0dx}\Big(\big|\int_0^L\big(\chi(x,t)-\chi(y,t)\big)\rho(y,t)dy\big|+\big|\int_0^L\chi(y,t)\rho(y,t)dy\big|\Big)\notag\\
&\leq&\frac{1}{\int_0^L\rho_0dx}\Big(\big|\int_0^L\rho(y,t)\big(\int_y^x\chi_s(s,t)ds\big)dy\big|+\big|\int_0^L\chi(y,t)\rho(y,t)dy\big|\Big)\notag\\
&\leq&\frac{1}{\int_0^L\rho_0dx}\int_0^L|\chi_x|dx\int_0^L\rho(y,t)dy+C_1\leq C. \end{eqnarray} The proof is completed. \end{proof}
\begin{lem}\label{lem of sup estimate for rho} Under the assumption of Proposition \ref{a priori estimate proposition for periodic boundary problem}, for any $T>0$,
it holds that \begin{eqnarray}\label{sup of density}
\|\rho\|_{L_{\mathrm{per}}^{\infty}([0,L]\times[0,T])}<3,\ \ \ \int_0^T\int_0^L\chi_{xx}^2dxdt\leq C. \end{eqnarray} \end{lem} \begin{proof}
Observing Lemma 2.1, one has \begin{eqnarray}\label{first energy inequality} \sup_{t\in[0,T]}\int_0^L\Phi(\rho)dx\leq E_0=\int_0^L\big(\frac{1}{2}\rho_0 u_0^2+\frac{\epsilon}{2}\chi_{0x}^2+\Phi(\rho_0)+\frac{\rho_0}{4\epsilon}(\chi_0^2-1)^2\big)dx. \end{eqnarray} From the definitions of \eqref{Phi} and \eqref{the formula of pressure}, one gets \begin{equation}\label{delta limit}
\lim_{\delta\rightarrow0}\mathrm{mes}\big\{(x,t)\in[0,L]\times[0,T]\big|\rho(x,t)\geq3-\delta\big\}=0, \end{equation} thus \begin{equation}\label{upper bound of density}
\|\rho(x,t)\|_{L^{\infty}([0,L]\times[0,T])}<3. \end{equation} Moreover, from the equation \eqref{mu} and the energy inequalities \eqref{L4 L1 of chi}, \eqref{sup of chi}, one obtains $$\int_0^T\int_0^L\chi_{xx}^2dxdt=\int_0^T\int_0^L\Big(\rho(\chi^3-\chi)-\rho\mu\Big)^2dxdt\leq C.$$ The proof is completed. \end{proof}
\begin{lem}\label{lem of inf estimate for rho} Under the assumption of Proposition \ref{a priori estimate proposition for periodic boundary problem}, for any $T>0$,
it holds that \begin{eqnarray}\label{inf of density}
\sup_{t\in[0,T]}\|\rho_x\|_{L^2_{\mathrm{per}}}\leq C, \ \ \ \|\frac{1}{\rho}\|_{L_{\mathrm{per}}^{\infty}([0,L]\times[0,T])}\leq C. \end{eqnarray} \end{lem} \begin{proof} From the mass conservation equation \eqref{NSAC}$_1$, one has \begin{eqnarray}\label{The relation between density and velocity}
u_{xx}&=&-\big[\frac{1}{\rho}\big(\rho_t+\rho_x u\big)\big]_x=
\big[(-\ln\rho)_t+\rho u(\frac{1}{\rho})_x\big]_x=[-(\ln\rho)_x]_t+[\rho u(\frac{1}{\rho})_x\big]_x\notag\\
&=&\big[\rho(\frac{1}{\rho})_x]_t+[\rho u(\frac{1}{\rho})_x\big]_x=
\rho(\frac{1}{\rho})_{xt}+\rho u(\frac{1}{\rho})_{xx}+[\rho_x(\frac1\rho)_t+(\rho u)_x(\frac1\rho)_x]\notag\\
&=&\rho(\frac{1}{\rho})_{xt}+\rho u(\frac{1}{\rho})_{xx}-\frac{\rho_x}{\rho^2}\big[\rho_t+(\rho u)_x\big]=\rho(\frac{1}{\rho})_{xt}+\rho u(\frac{1}{\rho})_{xx}. \end{eqnarray}
Substituting \eqref{The relation between density and velocity} into the momentum equation \eqref{NSAC}$_2$, one gets \begin{equation}\label{the other form for NSAC-2}
(\rho u)_t+(\rho u^2)_x+p'(\rho)\rho_x=\nu\big[\rho\frac{d}{dt}(\frac{1}{\rho})_x+\rho u(\frac{1}{\rho})_{xx}\big]-\frac{\epsilon}{2}\big(\chi_x^2\big)_x, \end{equation} Multiplying \eqref{the other form for NSAC-2} by $(\frac{1}{\rho})_x$, and integrating over $[0,L]$, further \begin{eqnarray}\label{the basic energy equality-2 for density}
&&\frac{d}{dt}\int_0^L\big(\frac\nu2\rho\big|\big(\frac{1}{\rho}\big)_x\big|^2-\rho u(\frac{1}{\rho})_x \big)dx+\int_0^L \frac{p'(\rho)}{\rho^2}\rho_x^2dx\notag\\ &&=-\int_0^L\rho u(\frac1\rho)_{xt}dx+\int_0^L(\rho u^2)_x(\frac1\rho)_xdx+\frac{\epsilon}{2}\int_0^L\big(\chi_x^2\big)_x(\frac{1}{\rho})_xdx\notag\\ &&=\int_0^L\Big((\rho u)_x(-\frac{\rho_t}{\rho^2})+(\rho u^2)_x(-\frac{\rho_x}{\rho^2})\Big)dx+\epsilon\int_0^L\chi_x\chi_{xx}(\frac1\rho)_xdx\\ &&=\int_0^L u_x^2dx+\epsilon\int_0^L\chi_x\chi_{xx}(\frac1\rho)_xdx\notag\\
&&\leq\int_0^L u_x^2dx+\epsilon\Big(\|\frac1\rho\|_{L_{\mathrm{per}}^{\infty}}+\int_0^L\rho\big|(\frac1\rho)_x\big|^2dx\Big)\|\chi_{xx}\|_{L_{\mathrm{per}}^2}^2.\notag \end{eqnarray} In view of the mean value theorem, there exists $a(t)\in [0,L]$ satisfying $\rho(a(t),t)=\frac{1}{L}\int_0^L\rho_0dx$, so that \begin{eqnarray} \frac{1}{\rho(x,t)}&=&\frac{1}{\rho(x,t)}-\frac{1}{\rho(a(t),t)}+\frac{1}{\rho(a(t),t)}\notag\\ &=&\int_{a(t)}^x\big(\frac{1}{\rho(y,t)}\big)_ydy+\frac{L}{\int_0^L\rho_0dx}\notag\\
&\leq&\int_0^L\big|\frac{\rho_x(x,t)}{\rho^2(x,t)}\big|dx+\frac{L}{\int_0^L\rho_0dx}\\ &\leq&\big(\int_0^L\frac1\rho dx\big)^{\frac12}\Big(\int_0^L\frac{\rho^2_x(x,t)}{\rho^3(x,t)}dx\Big)^{\frac12}+\frac{L}{\int_0^L\rho_0dx}\notag\\
&\leq&\frac{1}{2}\big\|\frac{1}{\rho}\big\|_{L_{\mathrm{per}}^{\infty}}+
\frac{L}{2}\int_{0}^L\rho\big|\big(\frac{1}{\rho}\big)_x\big|^2dx+\frac{L}{\int_0^L\rho_0dx},\notag \end{eqnarray} then one has the Sobolev inequality about $\frac{1}{\rho}$, \begin{equation}\label{the sobolev inequality of 1/rho}
\big\|\frac{1}{\rho}\big\|_{L_{\mathrm{per}}^{\infty}}\leq L\int_{0}^L\rho\big|\big(\frac{1}{\rho}\big)_x\big|^2dx+\frac{2 L}{\int_0^L\rho_0dx}.
\end{equation} Substituting the above expression into the inequality \eqref{the basic energy equality-2 for density}, one gets \begin{eqnarray}\label{the energy equality for derivative of density}
&&\frac{d}{dt}\int_0^L\big(\frac\nu2\rho\big|\big(\frac{1}{\rho}\big)_x\big|^2-\rho u(\frac{1}{\rho})_x \big)dx+\int_0^L \frac{p'(\rho)}{\rho^2}\rho_x^2dx\notag\\
&&\leq\int_0^L u_x^2dx+\epsilon\Big((L+1)\int_{0}^L\rho\big|\big(\frac{1}{\rho}\big)_x\big|^2dx+\frac{2 L}{\int_0^L\rho_0dx}\Big)\|\chi_{xx}\|_{L_{\mathrm{per}}^2}^2. \end{eqnarray} Setting \begin{eqnarray}\label{the set of derivative for p}
&&A_{\mathrm{increase}}(t)=\big\{x\in[0,L]\big|0\leq\rho(x,t)<\alpha\big\}\cup\big\{x\in[0,L]\big|\beta<\rho\leq M\big\},\\
&&A_{\mathrm{decrease}}(t)=\big\{x\in[0,L]\big|\alpha\leq\rho(x,t)\leq\beta\big\}, \end{eqnarray} then multiplying \eqref{the energy equality for derivative of density} by $\frac{\nu}{2}$, and adding up \eqref{the basic energy inequality}, one gets \begin{eqnarray}\label{key inequality-2 for density}
&&\frac{d}{dt}\int_0^L\big(\frac{\mu^2\rho}{4}\big|\big(\frac{1}{\rho}\big)_x\big|^2-\frac{\mu\rho u}{2}(\frac{1}{\rho})_x +\frac{\rho u^2}{2}+\Phi(\rho)+\frac{\rho(\chi^2-1)^2}{4\epsilon}+\frac{\epsilon\chi_x^2}{2}\big)dx\notag\notag\\ && \ +\int_{A_{\mathrm{increase}}(t)}\rho p'(\rho)(\frac1\rho)_x^2dx+\int_0^L\big(\mu^2+\frac{\nu}{2}u_x^2\big)dx\\
&&\leq\frac{\epsilon\nu}{2}\Big((L+1)\int_{0}^L\rho\big|\big(\frac{1}{\rho}\big)_x\big|^2dx+\frac{2 L}{\int_0^L\rho_0dx}\Big)\|\chi_{xx}\|_{L_{\mathrm{per}}^2}^2-\int_{A_{\mathrm{decrease}}(t)}\rho p'(\rho)(\frac1\rho)_x^2dx\notag\\
&&\leq\frac{\epsilon\nu}{2}\Big((L+1)\int_{0}^L\rho\big|\big(\frac{1}{\rho}\big)_x\big|^2dx+\frac{2 L}{\int_0^L\rho_0dx}\Big)\|\chi_{xx}\|_{L_{\mathrm{per}}^2}^2+\frac{6(27-4\Theta)}{(3-\beta)^2}\int_0^L\rho\big|(\frac1\rho)_x\big|^2dx.\notag \end{eqnarray} Integrating the inequality \eqref{key inequality-2 for density} over $[0,T]$, applying Lemma 2.2-2.3 and combining with Gronwall's inequality, one obtains \begin{eqnarray}
&&\int_0^L\big(\rho\big|\big(\frac{1}{\rho}\big)_x\big|^2+\rho u^2+(\rho-\tilde\rho)^2+\rho(\chi^2-1)^2+\chi_x^2\big)dx\notag+\int_0^T\int_0^L\big(\mu^2+u_x^2\big)dxdt\leq C. \end{eqnarray}
In view of \eqref{the sobolev inequality of 1/rho}, combining with $\int_0^L\rho\big|\big(\frac{1}{\rho}\big)_x\big|^2dx\geq\frac{1}{\|\rho\|_{L_{\mathrm{per}}^\infty}^3}\int_0^L\rho_x^2dx$, the proof of Lemma 2.4 is completed. \end{proof}
The estimate of the higher order derivatives for the phase parameter $\chi$ and the velocity $u$ can be obtained in a simpler way than in Lemma 2.1-Lemma 2.4. \begin{lem}\label{higher order derivatives for chi and u} Under the assumption of Proposition \ref{a priori estimate proposition for periodic boundary problem}, for any $T>0$,
it holds that \begin{equation}\label{twice order derivatives for chi}
\sup_{t\in[0,T]}\big(\|\chi_{t}\|^2_{L^2_{\mathrm{per}}}+\|\chi_{xx}\|^2_{L^2_{\mathrm{per}}}\big)+ \int_0^T\int_0^L\big(\chi_{xt}^2+\chi^2_{t}+\chi_{xxx}^2\big)dxdt\leq C, \end{equation} \begin{equation}\label{twice order derivatives for u}
\sup_{t\in[0,T]}\|u_{x}\|^2_{L^2_{\mathrm{per}}}+\int_0^T\int_0^L\big(u_t^2+u_{xx}^2\big)dxdt\leq C. \end{equation} \end{lem} \begin{proof} For the sake of convenience, we introduce the Lagrange coordinate system below:
\begin{equation}\label{lagrange coordinate} y=\int_0^x\rho(s,t) ds,\ \ t=t;\qquad v=\frac{1}{\rho}. \end{equation} Integrating \eqref{NSAC} over $[0,L]\times[0,t]$ and using the boundary condition \eqref{periodic boundary for Euler}, we have \begin{equation}\label{conservation}
\frac{1}{L}\int_0^L\rho dx=\frac1L\int_0^L\rho_0dx:=\bar\rho. \end{equation} Setting \begin{equation}\label{length of period} \tilde{L}:=\bar\rho L, \end{equation} then the system \eqref{NSAC} can be reduced into
\begin{equation}\label{Lagrange-NSAC-modified} \left\{\begin{array}{llll} \displaystyle v_t-u_y=0, \ \ & y\in\mathbb{R},t>0,\\ \displaystyle u_t+p_y=\nu \big(\frac{u_{y}}{v}\big)_y-\epsilon\big(\frac{\chi_y^2}{v^2}\big)_y,\ \ & y\in\mathbb{R},t>0,\\ \displaystyle \chi_t=-\frac{v}{\epsilon}(\chi^3-\chi)+\epsilon v\big(\frac{\chi_y}{v}\big)_y,\ \ & y\in\mathbb{R},t>0,\\ (v,u,\chi)(y,t)=(v,u,\chi)(y+\tilde{L},t),\ \ & y\in\mathbb{R},t>0,\\
(v,u,\chi)\big|_{t=0}=(v_0,u_0,\chi_0),\ \ & y\in\mathbb{R}. \end{array}\right. \end{equation} From \eqref{Lagrange-NSAC-modified}$_3$, one has \begin{equation}\label{the time dereviative of chi}
\chi_t=-\frac{1}{\epsilon}v(\chi^3-\chi)+\frac{\chi_{yy}}{v}-\frac{2\chi_yv_y}{v^2}, \end{equation} then \eqref{the time dereviative of chi} and Lemma 2.1-2.4 implies that \begin{equation}\label{the first energy inequality in Lagrange coordinate} \displaystyle\int_0^{\tilde{L}}\Big(u^2+v^2_y+v^2+\chi_y^2+(\chi^2-1)^2\Big)dy+\int_0^T\int_0^{\tilde{L}}\Big(\mu^2+u_y^2+\chi_t^2+\chi^2_{yy}\Big)dy\leq C,\end{equation} \begin{equation}\label{upper and lower bound in Lagrange coordinate} \displaystyle 0<c\leq v\leq C<+\infty,\ \ and \ \ 0\leq\chi\leq C, \end{equation} and \begin{equation}\label{the relationship between chi-yy with chi-t}
\int_0^{\tilde{L}}\chi_{yy}^2dy\leq C\Big(\int_0^{\tilde{L}}\chi_t^2dy+1\Big).\end{equation} Differentiating \eqref{the time dereviative of chi} with respect to $t$, one gets \begin{equation}\label{time derivative of equation 3}
\chi_{tt}=-\frac{v_t}{\epsilon}(\chi^3-\chi)-\frac{v}{\epsilon}(3\chi^2-1)\chi_t+\epsilon v_t\Big(\frac{\chi_y}{v^2}\Big)_y+\epsilon v\Big(\frac{\chi_y}{v^2}\Big)_{yt}. \end{equation} Multiplying \eqref{time derivative of equation 3} by $\chi_t$, and integrating it over $[0,\tilde{L}]$ with respect of $y$, one obtains \begin{eqnarray}\label{bound of chi-t} &&\frac{1}{2}\frac{d}{dt}\int_0^{\tilde{L}}\chi_t^2dy+ \epsilon\int_0^{\tilde{L}}\frac{\chi_{yt}^2}{v}dy\notag\\ &&=-\frac{1}{\epsilon}\int_0^{\tilde{L}}\big((\chi^3-\chi)u_y\chi_t+v(3\chi^2-1)\chi_t^2\big)dy+\epsilon\int_0^{\tilde{L}}u_y\Big(\frac{\chi_y}{v^2}\Big)_y\chi_tdy\notag\\ &&\ \ \ \ \ -\epsilon\int_0^{\tilde{L}}v_y\Big(\frac{\chi_y}{v^2}\Big)_{t}\chi_tdy+\epsilon\int_0^{\tilde{L}}\frac{2}{v^2}\chi_yu_y\chi_{yt}dy\notag\\ &&=I_1+I_2+I_3+I_4. \end{eqnarray} Following from Sobolev inequality and Lemma 2.1-2.4 and \eqref{the first energy inequality in Lagrange coordinate}-\eqref{upper and lower bound in Lagrange coordinate}, one deduces \begin{equation}\label{I1}
\big|I_1\big|\leq C_1\big(\|u_y\|^2+\|\chi_t\|^2\big), \end{equation} \begin{eqnarray}\label{I2}
\big|I_2\big|&\leq& C\Big(\int_0^{\tilde{L}}\big|u_y\chi_{yy}\chi_t\big|dy+\int_0^{\tilde{L}}\big|u_y\chi_yv_y\chi_t\big|dy\Big)\notag\\
&\leq& C\Big(\|\chi_t\|_{L^{\infty}}\|u_y\|\|\chi_{yy}\|+\|\chi_t\|_{L^{\infty}}\|\chi_y\|_{L^{\infty}}\|u_y\|\|v_y\|\Big)\notag\\
&\leq& C\Big(\|\chi_t\|^2\|u_y\|+\|\chi_t\|^2\|u_y\|^{\frac43}+\|\chi_t\|^{\frac32}\|u_y\|+\|\chi_t\|^{\frac43}\|u_y\|^{\frac43}+\|\chi_t\|\|u_y\|+\|\chi_t\|^{\frac23}\|u_y\|^{\frac43}\Big)\notag\\
& &+\frac{\epsilon}{4}\|\chi_{yt}\|^2, \end{eqnarray} and \begin{eqnarray}\label{I3}
\big|I_3\big|+\big|I_4\big|&\leq& C\int_0^{\tilde{L}}\big(|v_y\chi_{yt}\chi_t|+|v_y\chi_yu_y\chi_t|+|\chi_yu_y\chi_{yt}|\Big)dy\notag\\
&\leq&C\Big(\|\chi_t\|\|u_y\|^2+\|\chi_t\|^2\Big)+\frac{\epsilon}{4}\|\chi_{yt}\|^2. \end{eqnarray} Substituting \eqref{I1}--\eqref{I3} into \eqref{bound of chi-t}, applying the Gronwall's inequality, one drives \begin{equation}\label{bound of chi-t and chi-yt} \int_0^{\tilde{L}}\chi_t^2dy+\int_0^T\int_0^{\tilde{L}}\chi_{yt}^2dy\leq C. \end{equation} Combining with \eqref{the relationship between chi-yy with chi-t}, one gets \begin{equation}\label{the bound of the second derivative of concentration} \int_0^{\tilde{L}}\chi_{yy}^2dy\leq C. \end{equation} It holds that
\begin{equation}\label{twice order derivatives for chi in lagrange coordinates} \int_0^{\tilde{L}}(\chi_t^2+\chi_{yy}^2)dy+\int_0^T\int_0^{\tilde{L}}\big(\chi_{yt}^2+\chi^2_{t}+\chi_{yy}^2\big)dy\leq C. \end{equation} Multiplying \eqref{Lagrange-NSAC-modified}$_2$ by $-u_{yy}$, integrating over $[0,{\tilde{L}}]$ by parts, by using Sobolev inequality, Lemma 2.1-2.4 and \eqref{twice order derivatives for chi}, one obtains \begin{eqnarray}\label{the second derivative of velocity}
&&\big(\frac{1}{2}\int_0^{\tilde{L}}u^2_ydy\big)_t+\nu\int_0^{\tilde{L}}\frac{u_{yy}^2}{v}dy\notag\\
&&=\int_0^{\tilde{L}} u_{yy}(p_\delta)'_vv_ydy+\int_0^{\tilde{L}}\frac{ u_{yy}u_yv_y}{v^{2}}dy+\int_0^{\tilde{L}}\frac{2\epsilon\chi_y\chi_{yy}u_{yy}}{v^3}dy-\int_0^{\tilde{L}}\frac{3\epsilon\chi_y^2v_y u_{yy}}{v^4}dy\notag\\
&&\leq C\big(\|u_y\|^2+1\big)+\frac{\nu}{2}\int_0^{\tilde{L}}\frac{u_{yy}^2}{v}dy. \end{eqnarray} Thus it holds that
\begin{equation}\label{twice order derivatives for u in lagrange coordinates}
\int_0^{\tilde{L}}u^2_ydy+\int_0^T\int_0^{\tilde{L}}u_{yy}^2dy\leq C. \end{equation} Let's go back to the Euler coordinates, by using \eqref{twice order derivatives for chi in lagrange coordinates}, \eqref{twice order derivatives for u in lagrange coordinates}, combining with $\chi_{xxx}=2\rho\rho_x\chi_t+\rho^2\chi_{xt}+2\rho\rho_xu\chi_x+\rho^2u_x\chi_x+\rho^2u\chi_{xx}+\rho_x(\chi^3-\chi)+\rho(3\chi^2-1)\chi_x$, one has \begin{equation}
\sup_{t\in[0,T]}\big(\|\chi_{t}\|^2_{L^2_{\mathrm{per}}}+\|\chi_{xx}\|^2_{L^2_{\mathrm{per}}}\big)+ \int_0^T\int_0^L\big(\chi_{xt}^2+\chi^2_{t}\big)dxdt\leq C, \end{equation} \begin{equation}
\sup_{t\in[0,T]}\|u_{x}\|^2_{L^2_{\mathrm{per}}}+\int_0^T\int_0^Lu_{xx}^2dxdt\leq C, \end{equation} and \begin{equation}\label{third derivative for chi}
\int_0^T\int_0^L\chi_{xxx}^2dxdt\leq C. \end{equation} Furthermore, by using $u_t=-(p_\delta)_y+\nu \big(\frac{u_{y}}{v}\big)_y-\epsilon\big(\frac{\chi_y^2}{v^2}\big)_y$, one obtains \begin{equation}\label{t derivative for u}
\int_0^T\int_0^Lu_t^2dxdt\leq C. \end{equation} Then the proof of Lemma 2.5 is completed. \end{proof}
From Lemma 2.1-Lemma 2.5, Proposition 2.2 is obtained, and the proof of Theorem 1.1 is completed.
\noindent {\textbf{Acknowledgments:} The research of M.Mei was supported in part by NSERC 354724-2016 and FRQNT grant 256440. The research of X. Shi was partially supported by National Natural Sciences Foundation of China No. 11671027 and 11471321. The work of X.P. Wang was supported in part by the Hong Kong Research Grants Council (GRF grants 16324416, 16303318 and NSFC-RGC joint research grant N-HKUST620/15).}
\end{document} |
\begin{document}
\title{ Geometric partition categories: \\
On short Brauer algebras and their blob subalgebras} \author{Z. K\'AD\'AR and P. P. MARTIN \\ School of Mathematics, University of Leeds} \date{} \maketitle
\noindent {\small Abstract: The main result here gives an algebra(/linear category) isomorphism between a geometrically defined subcategory ${\mathcal J}^1_0$ of a short Brauer category ${\mathcal J}_0$ and a certain one-parameter specialisation of the blob category ${\mathfrak b}$. That is, we prove the Conjecture in Remark 6.7 of \cite{KadarMartinYu}.
We also define a sequence of generalisations ${\mathcal J}^i_{i-1}$ of the category ${\mathcal J}^1_0$. The connection of $ {\mathcal J}_0$ with the blob category inspires a search for connections also with its beautiful representation theory. Here we obtain formulae determining the non-semisimplicity condition (generalising the classical `root-of-unity' condition). }
\noindent {\small {\em Keywords}: diagram algebra, topological spin chain.}
\section{Introduction}
A motivating aim here is to study the structure of the $k$-linear categories $\BBl{l}$ from \cite{KadarMartinYu},
and in particular the representation theory of the corresponding $k$-algebras (with $k$ a field) ${\mathcal J}_{l,n}$ in the non-semisimple cases. These structures are of intrinsic interest (cf. \cite{jk,james,Martin0915,CoxDeVisscher}); and see also \cite{KadarMartinYu} for a discussion of some of the extrinsic motivations for this study --- in short one seeks generalisations of the intriguing examples of Kazhdan--Lusztig theory \cite{KazhdanLusztig79,Soergel97a,AndersenJantzenSoergel94} observed \cite{Martin0915} in the representation theory of the Brauer category ${\mathcal B } = \BBl{\infty}$ \cite{Brauer37}. Another motivating aim is to study module categories over monoidal categories (see e.g. \cite{Ostrik01} for a review) beyond the usual `semisimple' setting.
The study strategy in Part~1 (\S\ref{ss:pre1}-\ref{ss:main01})
can be seen as trying to relate the problem to the representation theory of the blob category ${\mathfrak b}$ and the blob algebra ${\mathfrak b}_n$ \cite{MartinSaleur94a}, which is contrastingly very well understood (see e.g. \cite{CoxGrahamMartin03}), itself with deep and tantalising connections to Kazhdan--Lusztig theory \cite{MartinWoodcock03}. (More recently see e.g. \cite{BowmanCoxSpeyer}.) This also allows us to make contact with the original physical motivations for these algebras, as the algebras of physical systems with boundaries and interfaces \cite{MartinSaleur94a}. Indeed the blob algebras have been of renewed interest recently in several areas, not only of physics but also for example the study of KLR algebras \cite{KLRI,KLRII}, Soergel bimodules \cite{Soergel07} and monoidal categories \cite{JoyalStreet}.
As we shall see, in the simplest non-trivial case the algebras are (at least) related by inclusions of the form ${\mathfrak b}_m \hookrightarrow {\mathcal J}_{0,n}$. Inclusion is not in general a directly helpful relationship in representation theory. (For example the Temperley--Lieb algebra $T_n$ \cite{tl} is a subalgebra of ${\mathfrak b}_n$, but the representation theories of these algebras are radically different: cf. \cite{Martin91} and \cite{CoxGrahamMartin03}.) However the inclusion here is of `high index', so there is hope that it will indeed shed light on the open problem.
In Part~2 (\S\ref{ss:other1}) we include some indicative results on ${\mathcal J}_{0,n}$ representation theory. These are obtained by working directly with ${\mathcal J}_{0,n}$, but serve as a first step in this direction (full analysis of these results is demoted to a separate paper).
In Section~\ref{ss:pre1} we introduce concepts and notations.
In \S\ref{ss:s4} we define for each category $\BBl{l}$ a new subcategory. In \S\ref{ss:main01} we examine the relationship to the blob category.
In particular in Section~\ref{ss:main01} we state and prove the main theorem.
In section~\ref{ss:other1} we consider consequences for the algebras ${\mathcal B }^l_n ={\mathcal J}_{l,n}$ themselves.
In Section~\ref{ss:discusstar} we discuss related open problems.
\input 4point6sec2 \input 4point6sec3
\section{The blob isomorphism Theorem} \label{ss:main01}
\input paul46/p36s4a \input 4point6sec4
\section{On representation theory consequences for short Brauer algebras} \label{ss:other1}
\newcommand{\Specht}[2]{{\mathcal S}^{#1}_{#2}} \newcommand{\Deltam}[2]{\Delta^{#1}_{#2}} \newcommand{\Deltamd}[2]{D^{#1}_{#2}} \newcommand{\Chebyshev}{Chebyshev}
\subsection{Summary of relevant results for ${\mathfrak b}_n$}
Let us restrict to the case $k=\C$. From a representation theory perspective the natural parameterisation of ${\mathfrak b}_n$ is $\delta=[2]$ (recall $[n]=(q^n - q^{-n})/(q-q^{-1})$) and $\delta' = \frac{[m+1]}{[m]}$. Then if $m \not\in \Z$ we know that ${\mathfrak b}_n$ is semisimple, with a well-known structure \cite{MartinSaleur94a}. If $m \in \Z $ but $q$ not a root of unity then the algebras are no longer semisimple (for sufficiently large $n$), but the structure is still relatively simple to describe. The most interesting case is $m \in \Z$ and $q$ a root of unity. The structure in this case is quite complicated. See e.g. \cite{CoxGrahamMartin03} for a full description. With this summary in mind, note that due to (\ref{th:Theta}) we are interested in the cases when \[ \frac{[m+1]}{[m]} \; = \; \frac{[2] + 1}{2} \] This is solved for example by $m=1$ when $[2]=1$.
For our present purposes the key point here has a precursor already even from the arithmetically simpler Temperley--Lieb case, as follows.
\mdef \label{de:cheby} Recall (see e.g. \cite{Martin91}) that the Chebyshev\ polynomials are the polynomials $d_n$ determined by the recurrence $ d_{n+2}=x d_{n+1}-d_n$,
with initial conditions $d_0=1, d_1=x$. (We write $x$ for $\delta$ here, simply for reasons of familiarity.) The first few are $d_n = 1,x , x^2 -1, x^3 -2x, x^4 -3x^2 +1, ...$ ($n=0,1,2,3,...$).
These arise for example as the determinants of gram matrices such as: \[ \Deltam{n}{n-2} = \mat{cccccccc} \delta & 1 & 0 & 0 & 0\\ 1 & \delta & 1 & 0 &0\\ 0 & 1 & \delta & 1 &0\\ 0 & 0 & 1 & \delta & 1 \\ 0 & 0 & 0 & 1 & \delta \tam \] The obvious translational symmetry of this structure (arising from the local geometrical translational symmetry - the monoidal structure - of the TL diagram `particles') gives rise to the natural fourier parameterisation $d_{n-1} = [n]$. Loosely speaking, the geometrical boundary conditions here pick out a pure fourier sine series (fixing one end); and then the $n$ value (fixing the other end --- hence the special behaviour at roots of unity of $q$). The blob algebra generalises this essentially by changing the boundary conditions. Next we look for evidence of similar phenomena in the short Brauer case.
\ignore{{
\subsection{Summary of results for $\BBl{l,n}$} ... \subsection{Standard Bratteli diagrams} The difference between $\BBl{l,n}$ and $\BBBl{l,n}$ is that ...
}}
\subsection{Gram matrices, towers of recollement}
\begin{figure}
\caption{(a) Indicative labelling scheme for standard modules for
height $l=0$ Brauer algebras.
\quad
(b) Bratteli diagram with dimensions of standard modules up to
$n=5$.
}
\label{fig:bratt1}
\end{figure}
We assume familiarity with the representation theory as treated in \cite{KadarMartinYu}, including the construction of standard modules.
Here we restrict consideration to height 0.
Our labeling scheme for Gram matrices $\Deltam{n}{\lambda}$ of the standard modules $\Specht{n}{\lambda}$
is $\Deltam{n}{\lambda} = \Deltam{n}{m,\pm}$ (superscript: algebra rank $n$; subscript: number $m$ of propagating lines and (for $m>1$) $\pm$ is the symmetric / antisymmetric label from $S_2$). See Fig.\ref{fig:bratt1}.
For example, the diagram basis for the $n=6$ standard module corresponding to $\lambda = (4,+)$ can be drawn as: \[ \includegraphics[width=5.4in]{xfig/base64.eps} \] where we omit to draw the $(2)$-symmetrizer sitting on the first two propagating lines (thus we can draw the $\lambda=(4,-)$ case similarly, provided we keep in mind the omission, which affects calculations). Note that the basis (so drawn) contains one extra diagram compared to the $l=-1$/Temperley--Lieb case.
The extra diagram has an interesting effect on the gram matrix of the natural contravariant form (see \cite{KadarMartinYu}). As for the TL case this can be computed in terms of Chebyshev\ polynomials (or equivalently fourier transforms). But here the initial conditions are different. We have \[ \Deltam{3}{1} = \mat{cccccccc} \delta & 1 & 1 \\ 1 & \delta & 1 \\ 1 & 1 & \delta \tam, \hspace{.351in} \Deltam{4}{2,\pm} = \mat{cccccccc} \delta & 1 & 1 & 0 \\ 1 & \delta & 1 & \pm 1 \\ 1 & 1 & \delta & 1 \\ 0 & \pm 1 & 1 & \delta \tam, \hspace{.351in} \Deltam{n}{n-2,+} = \mat{cccccccc} \delta & 1 & 1 & 0 & 0 & 0 \\ 1 & \delta & 1 & 1 & 0 & 0 \\ 1 & 1 & \delta & 1 & 0 & 0 \\ 0 & 1 & 1 & \delta & 1 & 0 \\ 0 & 0 & 0 & 1 & \delta & 1 \\ 0 & 0 & 0 & 0 & 1 & \delta \tam \]
(we give the $n=6$ example, but the general pattern will be clear). Laplace expanding $\Deltamd{n}{\lambda} = |\Deltam{n}{\lambda}|$ with respect to the bottom row we get a Chebyshev\ recurrence \[ \Deltamd{n}{n-2,\pm} = \delta \Deltamd{n-1}{n-3,\pm} - \Deltamd{n-2}{n-4,\pm} \] where the initial conditions are $\Deltamd{3}{1} = (\delta-1)^2 (\delta+2) $ and $\Deltamd{4}{2,+}= \delta(\delta-1)(\delta^2+\delta-4)$ and $\Deltamd{4}{2,-}= (\delta-1)(\delta+1)(\delta-2)(\delta+2)$.
Note from Theorem~1.1(ii) of \cite{CoxMartinParkerXi06} (the tower-of-recollement method) and Proposition~5.3 of \cite{KadarMartinYu} (standard restriction rules) that the other gram determinants and indeed the `reductive' representation theory can be determined from this subset of gram determinants. We will address this task in a separate paper. Here we restrict to some of the
key preliminary observations.
The Chebyshev\ polynomials $d_n$ from (\ref{de:cheby}) are a basis for the space of polynomials; and the recurrence is linear, so we can express our recurrence in terms of them, and hence make use of their more `fourier-like' formulations: $d_{n-1} = [n] = \frac{q^n -q^{-n}}{q-q^{-1}}$, where $\delta = x = q+q^{-1}$.
The determinants $D_n^\pm$ of the key subset of Gram matrices of form $\Deltam{n}{n-2,\pm}$
can be expressed as
\begin{eqnarray}
D_n^+&=&(x-1) \left[ (x+2) (x-1) d_{n-3}-2x d_{n-4}\right]\\ D_n^-&=&(x-1) (x+2) \left[ (x-1) d_{n-3} -2 d_{n-4} \right] \end{eqnarray}
Explicitly, the low rank cases of all the Gram matrices are as follows: \begin{eqnarray*} D_1^3&=&(x-1)^2(x+2)\\ D_0^4&=&(x-1)^2x^3(x+2)\\ D_2^{4+}&=&(x-1)x(x^2+x-4)\\ D_2^{4-}&=&(x-1)(x+1)(x-2)(x+2)\\ D^5_1&=&(x-1)^{12}(x+1)(x-2)(x+2)^6(x^2 +x-4)\\ D^{5+}_3&=&(x-1)(x^4+x^3-5x^2-x+2)\\ D^{5-}_3&=&(x-1)(x+2)(x^3-x^2-3x+1)\\ D^6_0&=&(x-1)^{12}x^{11}(x+1)(x-2)(x+2)^6(x^2+x-4)\\ D^{6+}_2&=&(x-1)^8x^5(x+1)(x-2)(x+2)(x^2+x-4)^6(x^4+x^3-5x^2-x+2)\\ D^{6-}_2&=&(x-1)^8(x+1)^6(x-2)^6(x+2)^7(x^2+x-4)(x^3-x^2-3x+1)\\ D^{6+}_4&=&(x-1)^2 x(x^3+2x^2-4x-6)\\ D^{6-}_4&=&(x-1)^2(x+2)(x^3-4x-2) \end{eqnarray*}
(the cases not computed by recursion may be computed by brute force,
see below).
A key point to take from this is that the short Brauer algebras manifest some similarities with the root-of-unity paradigm for non-semisimplicity, but move beyond it.
As noted, taken in combination with tower of recollement methods these results `seed' the reductive representation theory (the determination of decomposition matrices). We address this analysis fully in a separate paper, but the programme may be illustrated as follows.
\newcommand{\ing}[1]{\includegraphics[width=.31in]{xfig/#1.eps}}
This form corresponds to the map from the standard module $\Specht{n}{\lambda}$ to its contravariant dual which, on general grounds, maps the simple head to the socle \cite{KadarMartinYu}. Thus when the form is non-singular we deduce that the standard module is simple. And on the other hand when it is singular the standard module will have a corresponding submodule. It is not generally easy to determine the rank of the form and hence the dimension of the simple head from the gram determinant. For example the rank of $\Deltam{5}{1}$ is easily seen to be 1, while the dimension of $\Specht{5}{1}$ is 11 (see Fig.\ref{fig:bratt1} or below) and the determinant factor is $(x-1)^{12}$.
To illustrate first consider $D^3_1$. The basis here is $\{ \ing{S31a} , \ing{S31bb} , \ing{S31c} \}$. For example the action of generators on the element $ \ing{S31bb} - \ing{S31a} $ at the singular point $\delta = x =1$ is: \[ \stackrel{\ing{S31d} }{ \ing{S31bb}} - \stackrel{\ing{S31d} }{ \ing{S31a}} \quad = \; 0, \mbox{ }\quad\quad \stackrel{\ing{S31e} }{ \ing{S31bb}} - \stackrel{\ing{S31e} }{ \ing{S31a}} \quad = \; 0, \mbox{ and }\quad \stackrel{\ing{S31ff} }{ \ing{S31bb}} - \stackrel{\ing{S31ff} }{ \ing{S31a}} \quad = \; - (\ing{S31bb} - \ing{S31a} ) \]
That is, when $\delta=x=1$ this element spans a submodule isomorphic to $\Specht{3}{3,-}$.
Meanwhile for the element
$\ing{S31a} + \ing{S31bb} -2 \ing{S31c}$: \[
\stackrel{\ing{S31d} }{ \ing{S31a}} + \stackrel{\ing{S31d} }{ \ing{S31bb}} -2\stackrel{\ing{S31d} }{ \ing{S31c}} \quad =\quad
\stackrel{\ing{S31e} }{ \ing{S31a}} + \stackrel{\ing{S31e} }{ \ing{S31bb}} -2\stackrel{\ing{S31e} }{ \ing{S31c}} \quad = \; 0, \] \[
\stackrel{\ing{S31ff} }{ \ing{S31a}} + \stackrel{\ing{S31ff} }{ \ing{S31bb}} -2\stackrel{\ing{S31ff} }{ \ing{S31c}} \quad = \; \ing{S31a} + \ing{S31bb} -2 \ing{S31c} \] So this element spans a submodule isomorphic to $\Specht{3}{3,+}$. We deduce that the simple head is one-dimensional.
On the other hand consider $\ing{S31a} + \ing{S31bb} + \ing{S31c}$ in case $\delta=x=-2$. This spans a submodule isomorphic to $\Specht{3}{3,+}$. Here the simple head is two-dimensional.
By the module-category embedding property \cite[(4.26)]{KadarMartinYu} these standard module morphisms have images in higher ranks, thus when $x=1$ our map $\Specht{3}{3,-} \rightarrow \Specht{3}{1}$ gives a map $\Specht{5}{3,-} \rightarrow \Specht{5}{1}$ and so on. The embedding functor is not exact so we cannot tell {\em directly} from the gram matrix if an image map has a kernel. So (comparing also with the dimensions from Fig.\ref{fig:bratt1}),
a naive lower bound on the exponent in the factor $(x-1)^{12}$ in $D^5_1$ is 4+4, corresponding to the dimensions of the simple heads of $\Specht{5}{3,+}$ and $\Specht{5}{3,-}$ when $x=1$. It is intriguing to compare with the blob case \cite{CoxGrahamMartin03}. There the embedded standard module morphisms are injective, but if that is the case here the naive bound is still only lifted to 5+5, so we see that there will be some nice subtleties here.
As a further illustration, the basis for $n=6$ and $\lambda=0$ is: \[ \includegraphics[width=5.94in]{xfig/base60.eps} \] (N.B. the basis for $n=5$, $\lambda=1$ is combinatorially identical). (As noted, we do not strictly need such cases for the `Cox criterion'. It is enough to use $\lambda=n-2$. We include it for curiosity's sake.)
The corresponding gram matrix then comes from the
array in Fig.\ref{fig:arrayz}. \begin{figure}
\caption{Gram matrix calculation for $n=6$ and $\lambda=0$. }
\label{fig:arrayz}
\end{figure} Thus, writing $j$ for $\delta^j$ (with $j$ the number of connected components in a diagram), the gram matrix is given by \[
\mat{ccccc|ccccc|cc} 3 & 2 & 2 & 1 & 2 & 2 & 1 & 2 & 1 & 2 & 1 \\ 2 & 3 & 1 & 2 & 1 & 1 & 2 & 1 & 1 & 2 & 1 \\ 2 & 1 & 3 & 2 & 1 & 2 & 1 & 1 & 2 & 1 & 1 \\ 1 & 2 & 2 & 3 & 2 & 1 & 2 & 1 & 2 & 1 & 1 \\ 2 & 1 & 1 & 2 & 3 & 1 & 1 & 2 & 1 & 1 & 2 \\ \hline 2 & 1 & 2 & 1 & 1 & 3 & 2 & 1 & 1 & 1 & 2 \\ 1 & 2 & 1 & 2 & 1 & 2 & 3 & 2 & 1 & 1 & 2 \\ 2 & 1 & 1 & 1 & 2 & 1 & 2 & 3 & 2 & 1 & 1 \\ 1 & 1 & 2 & 2 & 1 & 1 & 1 & 2 & 3 & 2 & 2 \\ 2 & 2 & 1 & 1 & 1 & 1 & 1 & 1 & 2 & 3 & 2 \\ \hline 1 & 1 & 1 & 1 & 2 & 2 & 2 & 1 & 2 & 2 & 3 \tam \] The determinant here can still be computed by brute force.
\section{Discussion} \label{ss:discusstar}
Some notable open questions follow.
\noindent Q1. How to generalise the `short Brauer' construction to the BMW algebra \cite{BirmanWenzl89,Murakami87}?
\noindent Q2.
How to relate the usual two-parameter version of the blob algebra to the short Brauer algebras --- which by the original construction have only a single parameter.
Recall that there is, essentially trivially, a two-parameter version of $T_n$. First recall that $T_n$ has a basis of non-crossing Brauer diagrams \cite{Weyl46o,brown} up to ambient isotopy (see \S\ref{ss:pre1} for a summary of Brauer diagram concepts --- ambient isotopy does not include, for example, the Reidemeister moves included in general Brauer diagram equivalence, but it is sufficient in the non-crossing case, and this is key here). The elements of the basis can be seen as partitioning the interval into alcoves. These alcoves can be shaded black or white with the property that \\ (A1) the colour changes across each boundary; and \\ (A2) the leftmost alcove is white, say. \\ (NB Another way of saying this is that arcs have a well-defined `height' in the sense of this paper, which is either odd or even.) \\ Thus in composition both black and white loops may form. The number of each separately is an invariant of ambient isotopy. It follows that we may associate a different parameter to each.
Thus we have an algebra $T_n(\delta_b, \delta_w)$, say. It is easy to see that $T_n(\delta_b, \delta_w) \cong T_n(\alpha\delta_b, \delta_w /\alpha)$ for any unit $\alpha$, so the difference can usually be scaled away.
For example recall the following.
{\theo{\label{th:TLgen1} {\rm \cite{Martin91}} Consider the algebra defined by generators $U = \{ U_1,U_2,...,U_{n-1} \}$ and relations $\tau = \{ \mbox{ $U_i^2 = \delta U_i$, $U_i U_{i\pm 1} U_i = U_i$, $U_i U_j = U_j U_i$, $j\neq i\pm 1$ } \}$. The map $$ U_i \mapsto u_i = \{\{1,1' \} \{2,2' \}, ... , \{i , i+1 \},\{ i',i+1' \}, ...,
\{n,n' \}\} \qquad (i=1,2,...,n-1) $$ extends to an algebra isomorphism
$ k\langle U \rangle/\tau \cong T_n$. \qed }}
To see the isomorphic two-parameter version consider the effect on the relations of the map $U_i \mapsto \alpha U_i$ ($i$ odd), $U_i \mapsto \alpha^{-1} U_i$ ($i$ even).
The blob algebra ${\mathfrak b}_n$ can be seen as the subalgebra of $T_{2n}(\delta_b, \delta_w)$ generated by diagrams with a lateral-flip symmetry. In this subalgebra, however, it is {\em not } possible to scale away the second parameter.
The short Brauer algebras are, from one perspective, generalisations of $T_n$. It is interesting to consider if there are analogous generalisations of the two-parameter version that (like the blob) have the property that the second parameter becomes material.
This is not obvious. The generalisation destroys the two-tone alcove construction.
How does the two-tone construction look in the categorical setting? Here we write $T(n,m)$ for the subset $J_{-1}(n,m)$ of $J(n,m)$ of non-crossing pair-partitions. We fix $\delta \in k$ and note that ${\mathcal T } = (\N_0, k T(n,m),*)$ is a subcategory of ${\mathcal B }$. Indeed ${\mathcal T } = {\mathcal B }^{-1}$. The inclusion is of $k$-linear categories, and also of monoidal $k$-linear categories.
As in the algebra case we note that in the non-crossing setting we can count the number of black and white loops separately (i.e. these numbers are separately well-defined). Note however that the monoidal structure on ${\mathcal T }$ does not preserve this property. It is the axiom (A2) that is the problem.
\noindent {\bf Acknowledgements.} We thank EPSRC for funding under grant EP/I038683/1. We thank Shona Yu, Azat Gaynutdinov and Peter Finch for useful conversations.
\appendix \section*{Appendix} \section{Colour pictures for Lemma~\ref{mj}} \label{ss:colour} \input 4point6sec3Ap
\end{document} |
\begin{document}
\title{Maps for general open quantum systems and a theory of linear quantum error correction} \author{Alireza Shabani} \affiliation{Department of Electrical Engineering-Systems, University of Southern California, Los Angeles, CA 90089, USA} \author{Daniel A. Lidar} \affiliation{Department of Electrical Engineering-Systems, University of Southern California, Los Angeles, CA 90089, USA} \affiliation{Departments of Chemistry and Physics, University of Southern California, Los Angeles, CA 90089, USA}
\begin{abstract} We show that quantum subdynamics of an open quantum system can always be described by a Hermitian map, irrespective of the form of the initial total system state. Since the theory of quantum error correction was developed based on the assumption of completely positive (CP)\ maps, we present a generalized theory of linear quantum error correction, which applies to any linear map describing the open system evolution. In the physically relevant setting of Hermitian maps, we show that the CP-map based version of quantum error correction theory applies without modifications. However, we show that a more general scenario is also possible, where the recovery map is Hermitian but not CP. Since non-CP maps have non-positive matrices in their range, we provide a geometric characterization of the positivity domain of general linear maps. In particular, we show that this domain is convex, and that this implies a simple algorithm for finding its boundary. \end{abstract}
\pacs{03.67.Pp, 03.67.Hk, 03.67.Lx} \maketitle
\section{Introduction}
The problem of the formulation and characterization of the dynamics of quantum open systems has a long and extensive history \cite {Davies:76,Alicki:87,Breuer:Book}. This problem has become particularly relevant in the context of quantum information processing \cite{Nielsen:book} , where a remarkable theory of quantum error correction (QEC) was developed in recent years to address the problem of how to process quantum information in the presence of decoherence and imperfect control \cite{Gaitan:book}. A\ key assumption common to many previous QEC studies is that the evolution of the quantum information processor can be described by a succession of \emph{ completely positive}\ (CP) maps \cite{Kraus:83}, interrupted by unitary gates or measurements \cite{Knill:97b}. However, it is well known that if the initial total system state is entangled, quantum dynamics is not described by a CP\ map \cite {Pechukas+Alicki:95,Stelmachovic:01,Jordan:04,Carteret:05,Rodriguez:07}. In fact, we showed very recently in Ref. \cite{ShabaniLidar:08} that a CP\ map arises if and only if the initial total system state has vanishing quantum discord \cite{Ollivier:01}, i.e., is purely classically correlated. One is thus naturally led to ask whether this impacts the applicability of QEC\ theory under circumstances where non-classical initial state correlations play a role. Here \textquotedblleft initial state\textquotedblright\ does not refer exclusively to the \textquotedblleft $t=0$\textquotedblright\ point, but also to intermediate times where the recovery map is applied, since this map was also assumed to be CP in standard quantum error correction theory \cite{Knill:97b}. 
Motivated by this fact we here critically revisit the CP maps assumption in QEC, and show that it can be relaxed \footnote{ Note that this issue is entirely distinct from the critique of Markovian fault tolerant QEC\ expressed in \cite{AlickiLidarZanardi:05}, which was concerned with the compatibility of other assumptions of fault-tolerant QEC (specifically, fast gates and pure ancillas) with rigorous derivations of the Markovian limit.}. To do so, we first consider the problem of characterizing the type of map that describes open system evolution given an \emph{arbitrary} initial total system state (Section \ref{sec:QDP}). We show that this map is always a linear, \emph{Hermitian} map (of which CP\ maps are a special case). We then argue that the generic noise map describing the evolution of a quantum computer as it undergoes fault tolerant quantum error correction (FT-QEC)\ is indeed not a CP map, but rather such a Hermitian, linear map (Section \ref{sec:FTQEC}). The reason is, essentially, that imperfect error correction results in residual non-classical correlations between the system and the bath, as the next QEC\ cycle is applied. To deal with this, we develop a generalized theory of QEC which we call \textquotedblleft linear quantum error correction\textquotedblright\ (LQEC), which applies to arbitrary linear maps on the system (Section \ref{sec:LQEC} ). Then we show that, fortunately, the CP-map based version of QEC theory applies without modifications in the physically relevant setting of Hermitian maps. However, we show that a more general scenario is also possible, where the recovery map is Hermitian but not CP. This is useful since it obviates the unrealistic assumption that the recovery ancillas enter the QEC cycle as classically correlated with the other system qubits. Our results significantly extend the realm of applicability of QEC, in particular to arbitrarily correlated system-environment states. We conclude in Section \ref{sec:conc}.
\section{Quantum dynamical processes and maps}
\label{sec:QDP}
In this section we prove a basic new result, that a quantum dynamical process can always be represented as a linear, Hermitian map from the initial to the final system-only state. In doing so we rely heavily on our previous work \cite{ShabaniLidar:08}.
The dynamics of open quantum systems can be described as follows. Consider a quantum system $S$ coupled to another system $B$, with respective Hilbert spaces $\mathcal{H}_{S}$ and $\mathcal{H}_{B}$, such that together they form one isolated system, described by the joint initial state (density matrix) $ \rho _{SB}(0)$. Their joint time-evolved state is then \begin{equation} \rho _{SB}(t)=U(t)\rho _{SB}(0)U^{\dag }(t), \end{equation} where $U(t)$ is the unitary propagator of the joint system-bath dynamics from the initial time $t=0$ to the final time $t$, i.e., the solution to the Schrodinger equation $\dot{U}=-(i/\hbar )[H,U]$, where $H$ is the joint system-bath Hamiltonian. The object of interest is the system $S$, whose state at all times $t$ is governed according to the standard quantum-mechanical prescription by the following quantum dynamical process (QDP): \begin{equation} \rho _{S}(t)=\mathrm{Tr}_{B}[\rho _{SB}(t)]=\mathrm{Tr}_{B}[U_{SB}(t)\rho _{SB}(0)U_{SB}(t)^{\dag }]. \label{dynamics1} \end{equation} $\mathrm{Tr}_{B}$ represents the partial trace operation, corresponding to an averaging over the bath degrees of freedom \cite{Breuer:Book}.
The QDP (\ref{dynamics1}) is a transformation from $\rho _{SB}(0)$ to $\rho _{S}(t)$. However, since we are not interested in the state of the bath, it is natural to ask:
\begin{quotation} Under which conditions on $\rho _{SB}(0)$ is the QDP a map $\Phi _{\mathrm{Q} }(t)$, \begin{equation} \rho _{S}(t)=\Phi _{\mathrm{Q}}(t)[\rho _{S}(0)], \label{eq:Qmap} \end{equation} and what are the properties of this map? \end{quotation}
In general, a\ map is an association of elements in the range with elements in the domain. Here we use the term \textquotedblleft map\textquotedblright\ solely to indicate a \emph{state-independent} transformation between two copies of the \emph{same} Hilbert space, in particular $\mathcal{H}_{S}\mapsto \mathcal{H}_{S}$.\footnote{ This is meant to exclude claims that system state-dependent transformations qualify as CP\ maps, as in Ref. \cite{Tong:04}. In such cases the elements of the transformation (the \textquotedblleft Kraus operators\textquotedblright ) depend on the system input state, which contradicts our notion of a map.} Then, a well-known partial answer is that if $\rho _{SB}(0)$ is a tensor product state, i.e., $\rho _{SB}(0)=\rho _{S}(0)\otimes \rho _{B}(0)$, then the QDP (\ref{dynamics1}) is a CP\ map. A more general answer was provided in \cite{ShabaniLidar:08}. To explain this answer we must first introduce some terminology.
\subsection{Various linear maps}
A map $\Phi :\mathcal{B}(\mathcal{H})\mapsto \mathcal{B}(\mathcal{H})$ [space of bounded operators on $\mathcal{H}$] is linear if $\Phi \lbrack a\rho _{1}+b\rho _{2}]=a\Phi \lbrack \rho _{1}]+b\Phi \lbrack \rho _{2}]$ for any pair of states $\rho _{1},\rho _{2}:\mathcal{H}\mapsto \mathcal{H}$, and constants $a,b\in \mathbb{C}$. A linear map is called Hermitian if it maps all Hermitian operators in its domain to Hermitian operators. We first present an operator sum representation for arbitrary and Hermitian linear maps, that generalizes the standard Kraus representation for CP maps \cite {Kraus:83}. The proof is presented in Appendix \ref{app:th1}.
\begin{mytheorem} \label{th1}A map $\Phi _{\mathrm{L}}:\mathfrak{M}_{n}\mapsto \mathfrak{M} _{m} $ (where $\mathfrak{M}_{n}$ is the space of $n\times n$ matrices) is linear iff it can be represented as \begin{equation} \Phi _{\mathrm{L}}(\rho )\text{ }=\sum\nolimits_{\alpha }E_{\alpha }\rho E_{\alpha }^{\prime \dagger } \label{eq:LM} \end{equation} where the \textquotedblleft left and right operation elements\textquotedblright\ $\{E_{\alpha }\}$ and $\{E_{\alpha }^{\prime }\}$ are, respectively, $m\times n$ and $n\times m$ matrices.\newline $\Phi _{\mathrm{H}}$ is a Hermitian map iff \begin{equation} \Phi _{\mathrm{H}}(\rho )=\sum\nolimits_{\alpha }c_{\alpha }E_{\alpha }\rho E_{\alpha }^{\dagger },\quad c_{\alpha }\in \mathbb{R}. \label{eq:QM} \end{equation} \end{mytheorem}
We will sometimes denote a linear map by listing its elements, as in $\Phi _{ \mathrm{L}}=\{E_{\alpha },E_{\alpha }^{\prime }\}_{\alpha =1}^{r}$. Note that a linear map $\Phi _{\mathrm{L}}=\{E_{\alpha },E_{\alpha }^{\prime }\}_{\alpha =1}^{r}$ is trace preserving if ${\sum_{\alpha =1}^{r}}E_{\alpha }^{\prime \dagger }E_{\alpha }=I$. Also note that the two sets of operation elements $\{E_{\alpha },E_{\alpha }^{\prime }\}_{\alpha =1}^{r}$ and $\{F_{\beta },F_{\beta }^{\prime }\}_{\beta =1}^{r}$, where $F_{\beta }=\sum_{\alpha =1}^{r}u_{\alpha \beta }E_{\alpha }$ and $F_{\beta }^{\prime }=\sum_{\alpha =1}^{r}v_{\alpha \beta }E_{\alpha }^{\prime }$, represent the same linear map $\Phi _{\mathrm{L}}$ if the matrices $u$ and $v$ satisfy $uv^{\dag }=I$.
As a simple example of a non-CP, Hermitian\ map, consider the \emph{inverse-phase-flip map}. The well-known CP phase-flip map is \cite {Nielsen:book}: $\Phi _{\mathrm{PF}}(\rho )=(1-p)\rho +p\sigma _{z}\rho \sigma _{z}$, where $0\leq p\leq 1$ and $\sigma _{z}$ is a Pauli matrix. Solving for $\Phi _{\mathrm{PF}}^{-1}$ from $\Phi _{\mathrm{PF} }^{-1}[\Phi _{\mathrm{PF}}(\rho )]=\rho $, we find that $\Phi _{\mathrm{PF} }^{-1}(\rho )=c_{0}\rho +c_{1}\sigma _{z}\rho \sigma _{z}$, where $ c_{1}=p/(2p-1)$ and $c_{0}=1-c_{1}$, and $c_{0},c_{1}$ have \emph{opposite} sign for $0<p<1$. Moreover, $\mathrm{Tr}[\Phi _{\mathrm{PF}}^{-1}(\rho )]= \mathrm{Tr}(\rho )$. Therefore $\Phi _{\mathrm{PF}}^{-1}$ is a trace-preserving, Hermitian, non-CP map.
A linear map is called \textquotedblleft completely positive\textquotedblright\ (CP) if it is a Hermitian map with $c_{\alpha }\geq 0$ $\forall \alpha $. CP maps play a key role in quantum information and quantum error correction \cite{Nielsen:book}, though they have a much earlier origin \cite{Stinespring:55,Kraus:83}. There are other useful characterizations of CP maps -- see, e.g., Refs.~\cite {Breuer:Book,Nielsen:book}. It turns out that there is a tight connection between CP and Hermitian maps \cite{Jordan:04,Carteret:05}: a map is Hermitian iff it can be written as the difference of two CP maps.
The definition of a CP map $\Phi _{\mathrm{CP}}$ implies that it can be expressed in the Kraus operator sum representation \cite{Kraus:83}: \begin{equation} \rho _{S}(t)=\sum_{\alpha }E_{\alpha }(t)\rho _{S}(0)E_{\alpha }^{\dagger }(t)=\Phi _{\mathrm{CP}}(t)[\rho _{S}(0)]. \end{equation} If the operation elements $E_{\alpha }$ satisfy ${\sum_{\alpha }}E_{\alpha }^{\dagger }E_{\alpha }=I$ then $\mathrm{Tr}[\rho _{S}(t)]=1$.
\subsection{Special linear states}
Following Ref. \cite{ShabaniLidar:08}, we define the class of \textquotedblleft special-linear\textquotedblright\ (SL) states for which the QDP (\ref{dynamics1}) always results in a linear, Hermitian map. An arbitrary bipartite state on $\mathcal{H}_{S}\otimes \mathcal{H}_{B}$ can be written as \begin{equation}
\rho _{SB}=\sum\nolimits_{ij}\varrho _{ij}|i\rangle \langle j|\otimes \phi _{ij}, \label{eq:rho_SB} \end{equation}
where $\{|i\rangle \}_{i=1}^{\dim \mathcal{H}_{S}}$ is an orthonormal basis for $\mathcal{H}_{S}$, and $\{\phi _{ij}\}_{i,j=1}^{\dim \mathcal{H}_{S}}: \mathcal{H}_{B}\mapsto \mathcal{H}_{B}$ are normalized such that if $\mathrm{ Tr}[\phi _{ij}]\neq 0$ then $\mathrm{Tr}[\phi _{ij}]=1$. The corresponding reduced system and bath states are then $\rho _{S}=\sum_{(i,j)\in \mathcal{C}
}\varrho _{ij}|i\rangle \langle j|$, where $\mathcal{C}\equiv \{(i,j)| \mathrm{Tr}[\phi _{ij}]=1\}$, and $\rho _{B}(0)=\sum_{i}\varrho _{ii}\phi _{ii}$. Hermiticity and normalization of $\rho _{SB}$, $\rho _{S}$, and $ \rho _{B}$ imply $\varrho _{ij}=\varrho _{ji}^{\ast }$, $\phi _{ij}=\phi _{ji}^{\dag }$, and $\sum_{i}\varrho _{ii}=1$.
\begin{mydefinition} \label{def:SL}A bipartite state $\rho _{SB}$, parametrized as in Eq. (\ref {eq:rho_SB}), is in the SL-class\ if either $\mathrm{Tr}[\phi _{ij}]=1$ or $ \phi _{ij}=0,$ $\forall i,j$. \end{mydefinition}
Thus a non-SL\ state is a state for which there exist indexes $i$ and $j$ such that $\mathrm{Tr}[\phi _{ij}]=0$ but $\phi _{ij}\neq 0$. The following result proven in Ref. \cite{ShabaniLidar:08} (generalizing an earlier result in Ref. \cite{Rodriguez:07}) provides an almost complete answer to the question posed above:
\begin{mytheorem}[Theorem 2 of \protect\cite{ShabaniLidar:08}] \label{H-States}If $\rho _{SB}(0)$ is an SL-class state then the QDP (\ref {dynamics1}) is a linear, Hermitian map $\Phi _{\mathrm{H}}:\rho _{S}(0)\mapsto \rho _{S}(t)$. \end{mytheorem}
A further result proven in Ref. \cite{ShabaniLidar:08} (Theorem 3 there) provides necessary and sufficient conditions on $\rho _{SB}(0)$ for the QDP ( \ref{dynamics1}) to be a CP\ map, namely, $\rho _{SB}(0)$ should be a state with vanishing quantum discord \cite{Ollivier:01}. Such a state cannot contain any quantum correlations. This clearly illustrates the limitations of CP\ maps in describing quantum dynamics. At the same time one may wonder as to the generality of the SL-class employed in Theorem \ref{H-States}. Non-SL\ states are sparse \cite{ShabaniLidar:08}, so it is in this regard that we stated that Theorem \ref{H-States} provides an almost complete answer to the question posed above. However, we can go further. As mentioned without proof in Ref. \cite{ShabaniLidar:08}, in fact the QDP\ (\ref {dynamics1}) is a linear, Hermitian map from $\rho _{S}(0)\mapsto \rho _{S}(t)$ for \emph{any} initial state $\rho _{SB}(0)$. We next prove this key fact.
\subsection{Hermitian maps for arbitrary initial states}
We split the general initial state representation (\ref{eq:rho_SB}) into a sum over SL\ and non-SL\ terms (thus splitting $\{\varrho _{ij}\}$ and $ \{\phi _{ij}\}$ into two sets): \begin{equation}
\rho _{SB}(0)=\sum_{ij\in (\mathrm{SL})}\alpha _{ij}|i\rangle \langle j|\otimes \varphi _{ij}+\sum_{ij\in (\mathrm{nSL})}\beta _{ij}|i\rangle
\langle j|\otimes \psi _{ij}. \label{eq:SL} \end{equation}
In accordance with the definition of SL\ states, in the first sum we include only terms $\alpha _{ij}|i\rangle \langle j|\otimes \varphi _{ij}$ for which
$\mathrm{Tr}[\varphi _{ij}]\neq 0$ or $\varphi _{ij}=0$, in the second only terms $\beta _{ij}|i\rangle \langle j|\otimes \psi _{ij}$ with bath operators $\{\psi _{ij}\}$ satisfying $\psi _{ij}\neq 0$ and $\mathrm{Tr}
[\psi _{ij}]=0$. By virtue of this decomposition only the first term contributes to the initial system state: $\rho _{S}(0)=\mathrm{Tr}_{B}[\rho _{SB}(0)]=\sum_{ij\in (\mathrm{SL})}\alpha _{ij}|i\rangle \langle j|$. This is because the condition $\mathrm{Tr}[\psi _{ij}]=0$ eliminates any contribution from the second term in the decomposition (\ref{eq:SL}) to the initial system state. Consequently Eq. (\ref{eq:Qmap}) assumes an affine form: \begin{equation} \Phi _{\mathrm{Q}}(t)[\rho _{S}(0)]=\Phi _{\mathrm{SL}}(t)[\rho _{S}(0)]+K_{ \mathrm{nSL}}(t), \label{eq:affine} \end{equation} with the term $K_{\mathrm{nSL}}(t)$ being a shift that is \emph{independent of }$\rho _{S}(0)$.
As shown in Ref. \cite{ShabaniLidar:08}, the linear map $\Phi _{\mathrm{SL}}$ is constructed as a function of the bath operators $\{\varphi _{ij}\}$: \begin{equation} \Phi _{\mathrm{SL}}(t)[\rho _{S}(0)]\equiv \sum_{(i,j)\in (\mathrm{SL} );k,\alpha }\lambda _{\alpha }^{ij}V_{kij}^{\alpha }P_{i}\rho _{S}(0)P_{j}(W_{kij}^{\alpha })^{\dag }, \label{Linear} \end{equation}
where $P_{i}\equiv |i\rangle \langle i|$ are projectors, $\lambda _{\alpha
}^{ij}$ are the singular values in the singular value decomposition $\phi _{ij}=\sum_{\alpha }\lambda _{\alpha }^{ij}|x_{ij}^{\alpha }\rangle \langle y_{ij}^{\alpha }|$, and the operators $V_{kij}^{\alpha }\equiv \langle \psi _{k}|U|x_{ij}^{\alpha }\rangle $ and $W_{kij}^{\alpha }\equiv \langle \psi _{k}|U|y_{ij}^{\alpha }\rangle $ act on the system only, with $\{|\psi _{k}\rangle \}$ being an orthonormal basis for the bath Hilbert space ${ \mathcal{H}}_{B}$.
In addition, the non-SL terms in Eq.~(\ref{eq:SL}) generate the shift term \begin{equation} K_{\mathrm{nSL}}(t)=\sum_{ij\in (\mathrm{nSL})}\beta _{ij}\mathrm{Tr}
_{B}[U_{SB}(t)|i\rangle \langle j|\otimes \psi _{ij}U_{SB}^{\dag }(t)]. \end{equation} This shows explicitly that $K_{\mathrm{nSL}}(t)$ does not depend on the initial system state, since the latter is fully parametrized by the coefficients $\{\alpha _{ij}\}_{ij\in (\mathrm{SL})}$, while $K_{\mathrm{nSL} }(t)$ depends only upon the coefficients $\{\beta _{ij}\}_{ij\in (\mathrm{nSL })}$.
Now we take a further step to argue that the affine map (\ref{eq:affine}) is actually a linear, Hermitian map if the map acts only on the space of density matrices. This is a direct application of the result in Ref. \cite {Jordan:04}.
\begin{mytheorem} \label{linear Rep}The QDP (\ref{dynamics1}) is representable as a linear, Hermitian map $\Phi _{\mathrm{H}}(t):\rho _{S}(0)\mapsto \rho _{S}(t)$ for any initial system-bath state. \end{mytheorem}
\begin{proof} Let $N\equiv \dim \mathcal{H}_{S}$. Let $F_{0}\equiv I$ and let $\{F_{\mu }: \mathrm{Tr}(F_{\mu })=0\}_{\mu =1}^{N^{2}-1}$ be a basis for the set of traceless Hermitian matrices which are mutually orthogonal with respect to the Hilbert-Schmidt inner product, i.e., $\mathrm{Tr}(F_{\mu }F_{\nu })=N\delta _{\mu \nu }$. Hence the initial system state $\rho _{S}(0)$ can be expanded as \begin{equation} \rho _{S}(0)=\frac{1}{N}(I+\sum_{\mu =1}^{N^{2}-1}b_{\mu }F_{\mu });\quad b_{\mu }=\mathrm{Tr}[\rho _{S}(0)F_{\mu }]\equiv \langle F_{\mu }\rangle _{\rho _{S}(0)}, \label{vector} \end{equation} and the final system state is found to be \begin{eqnarray} \rho _{S}(t) &=&\frac{1}{N}[\Phi _{\mathrm{SL}}(I)+\sum_{\mu =1}^{N^{2}-1}b_{\mu }\Phi _{\mathrm{SL}}(F_{\mu })]+K_{\mathrm{nSL}} \notag \\ &=&\Phi _{\mathrm{H}}(t)[\rho _{S}(0)], \end{eqnarray} where the equivalent Hermitian map $\Phi _{\mathrm{H}}$ is constructed by setting $\Phi _{\mathrm{H}}(I)=\Phi _{\mathrm{SL}}(I)+NK_{\mathrm{nSL}}$ and $\Phi _{\mathrm{H}}(F_{\mu })=\Phi _{\mathrm{SL}}(F_{\mu })$ for all $1\leq \mu \leq N^{2}-1$. That this map is Hermitian is simple to verify, for all the components are Hermitian. \end{proof}
Theorem \ref{linear Rep} provides a complete, and perhaps surprising answer to the question posed at the beginning of this section. Namely, the most general form of a quantum dynamical process, irrespective of the initial system-bath state (in particular arbitrarily entangled initial states are possible) is always reducible to a Hermitian map from the initial system to the final system state. The surprising aspect of this result is that it was not known previously whether QDP\ could always even be reduced to a \emph{map } between system states.
Of course, this result does not resolve the more difficult question of ensuring the positivity of the final system state. That is, a Hermitian map may transform an initially positive system state to a non-positive one, violating the postulate of positivity of quantum states. To resolve this one must identify the \textquotedblleft positivity domain\textquotedblright\ of $ \Phi _{\mathrm{H}}$, i.e., the set of initial system states (positive by definition) which are mapped to positive states by $\Phi _{\mathrm{H}}$ \cite {Jordan:04}. We address this in the next subsection.
\subsection{Geometric characterization of the Positivity Domain}
In this subsection we prove the convexity of the positivity domain and propose a geometric method for characterizing it. Let $S(\mathcal{H})\equiv \{\rho \in \mathcal{L}(\mathcal{H}):\rho >0,\mathrm{Tr}\rho =1\}$, where $ \mathcal{L}(\mathcal{H})$ is the set of all linear operators on $\mathcal{H}$ . The positivity domain of a linear map $\Phi _{\mathrm{L}}:S(\mathcal{H} )\mapsto \mathcal{B}(\mathcal{H})$ is: $P_{\Phi }\equiv \{\rho \in S( \mathcal{H}_{S}):\Phi _{\mathrm{L}}(\rho )>0\}$.
Following earlier work \cite{Jakobczyk:01,Kimura:03,byrd:062322}, in Ref.~ \cite{Kimura:05}, a complete geometric characterization of density matrices was given by using the Bloch vector representation for an arbitrary $N$ -dimensional Hilbert space $\mathcal{H}$. This works as follows:\ let $ \{F_{\mu }\}_{\mu =1}^{N^{2}-1}$ be a basis set as in the proof of Theorem \ref{linear Rep}, whence the expansion (\ref{vector}) applies again. The vector $\mathbf{b}=(b_{1},...,b_{N^{2}-1})\in \mathbb{R}^{N^{2}-1}$ of expectation values is known as the Bloch vector, and knowing its components is equivalent to complete knowledge of the corresponding density matrix, via the map $\mathbf{b}\mapsto \rho \mathbf{=}\frac{1}{N}(I+\sum_{\mu =1}^{N^{2}-1}b_{\mu }F_{\mu })$. Let $\mathbf{n}$ denote a unit vector, i.e., $\mathbf{n}\in \mathbb{R}^{N^{2}-1}$ and $ \sum_{i=1}^{N^{2}-1}n_{i}^{2}=1$, and define $F_{\mathbf{n}}\equiv \sum_{\mu =1}^{N^{2}-1}n_{\mu }F_{\mu }$. Let the minimum eigenvalue of each $F_{ \mathbf{n}}$ be denoted $m(F_{\mathbf{n}})$. The \textquotedblleft Bloch space\textquotedblright\ $\boldsymbol{B}(\mathbb{R}^{N^{2}-1})$ is the set of all Bloch vectors and is a closed convex set, since the set $S(\mathcal{H} )$ is closed and convex, and the map $\mathbf{b}\mapsto \rho $ is linear homeomorphic. As shown in Theorem 1 of Ref.~\cite{Kimura:05}, the Bloch space is characterized in the \textquotedblleft spherical coordinates\textquotedblright\ determined by $\{F_{\mathbf{n}}\}$ as: \begin{equation} \boldsymbol{B}(\mathbb{R}^{N^{2}-1})=\left\{ \mathbf{b}=r\mathbf{n}\in
\mathbb{R}^{N^{2}-1}:r\leq \frac{1}{|m(F_{\mathbf{n}})|}\right\} . \label{eq:Bloch-space} \end{equation} It is hard to imagine a more intuitive or simpler geometric picture.
Next we show that the positivity domain is a convex set as well.
\begin{myproposition} The positivity domain $P_{\Phi }$ of a linear map $\Phi _{\mathrm{L}}$ is a convex set. \end{myproposition}
\begin{proof} Consider two density matrices $\rho $ and $\rho ^{\prime }$ as interior points of $P_{\Phi }$ with corresponding Bloch vectors $\mathbf{b} =(b_{1},...,b_{N^{2}-1})$ and $\mathbf{b^{\prime }}=(b_{1}^{\prime },...,b_{N^{2}-1}^{\prime })$. The claim is that a third density matrix $ \rho ^{\prime \prime }$ with corresponding Bloch vector $\mathbf{b^{\prime \prime }}(\alpha )=\alpha \mathbf{b}+(1-\alpha )\mathbf{b^{\prime }}$, with $ 0\leq \alpha \leq 1$, is then also interior to $P_{\Phi }$. This follows directly by linearity of the map $\Phi _{\mathrm{L}}$. First, by assumption $ \Phi _{\mathrm{L}}[\rho ]=\Phi _{\mathrm{L}}[\frac{1}{N}(I+\sum_{\mu =1}^{N^{2}-1}b_{\mu }F_{\mu })]>0$ and $\Phi _{\mathrm{L}}[\rho ^{\prime }]=\Phi _{\mathrm{L}}[\frac{1}{N}(I+\sum_{\mu =1}^{N^{2}-1}b_{\mu }^{\prime }F_{\mu })]>0$, so that $\alpha \Phi _{\mathrm{L}}[\rho ]+(1-\alpha )\Phi _{ \mathrm{L}}[\rho ^{\prime }]>0$. Second, $\alpha \Phi _{\mathrm{L}}[\rho ]+(1-\alpha )\Phi _{\mathrm{L}}[\rho ^{\prime }]=\Phi _{\mathrm{L}}[\frac{1}{ N}I]+\alpha \sum_{\mu =1}^{N^{2}-1}b_{\mu }\Phi _{\mathrm{L}}[F_{\mu }]+(1-\alpha )\sum_{\mu =1}^{N^{2}-1}b_{\mu }^{\prime }\Phi _{\mathrm{L} }[F_{\mu }]=\Phi _{\mathrm{L}}[\frac{1}{N}(I+\sum_{\mu =1}^{N^{2}-1}b_{\mu }^{\prime \prime }F_{\mu })]=\Phi _{\mathrm{L}}[\rho ^{\prime \prime }]$. Therefore indeed $\Phi _{\mathrm{L}}[\rho ^{\prime \prime }]>0$. \end{proof}
We are now ready to describe an algorithm for finding the boundary of the positivity domain $P_{\Phi }$. We know at this point that $P_{\Phi }$ is convex and that $P_{\Phi }$ is a subset of the Bloch space, itself a closed convex set. Pick a unit vector $\mathbf{n}$ and draw a line through the origin of the Bloch space along $\mathbf{n}$. If $P_{\Phi }$ includes the origin, i.e., the maximally mixed state, then convexity implies that this line intersects the boundary of $P_{\Phi }$ once. If $P_{\Phi }$ does not include the origin then convexity implies that this line either intersects the boundary of $P_{\Phi }$ twice or not at all. I.e., it follows from convexity that the line may not re-enter the positivity domain once it exited. In order to determine this boundary we may thus compute the eigenvalues of $\Phi _{ \mathrm{L}}[\rho _{\mathbf{n}}(r)]$ as a function of $r$, where $r$ is the parameter in Eq. (\ref{eq:Bloch-space}), and where $\rho _{\mathbf{n}}(r)$ is the density matrix determined via the mapping $\mathbf{b}=r\mathbf{n} \mapsto \rho $. The computation should start from $r=0$ and go up to at most
$r=1/|m(F_{\mathbf{n}})|$. The boundary is identified as soon as the eigenvalues of $\Phi _{\mathrm{L}}[\rho _{\mathbf{n}}(r)]$ go from all nonnegative to at least one negative, or vice versa. For each unit vector $\mathbf{n}$, the corresponding point on the border of the positivity domain can be found in this way. Then the algorithm constructs the boundary of the positivity domain by finding the boundary points in all directions $\mathbf{n}$. Of course, in practice one can only sample the space of unit vectors $\mathbf{n}$ and factors $r$. In principle this yields a complete geometrical description of the positivity domain of a given linear map.
\section{CP\ maps and fault tolerant quantum error correction}
\label{sec:FTQEC}
\subsection{CP\ maps: pro and con}
We have already mentioned that a QDP (\ref{dynamics1}) becomes a CP map iff the initial system-bath state has vanishing quantum discord, i.e., is purely classically correlated \cite{ShabaniLidar:08}. The standard argument in favor of CP\ maps is that since the system $S$ may be coupled with the bath $ B$, the maps describing physical processes on $S$ should be such that all their extensions into higher dimensional spaces should remain positive, i.e., $\Phi _{\mathrm{CP}}\otimes I_{n}\geq 0$ $\forall n\in \mathbb{Z}^{+}$ , where $I_{n}$ is the $n$-dimensional identity operator. However, one may question whether this is the right criterion for describing quantum dynamics \cite{Pechukas+Alicki:95}. An alternative viewpoint is to seek a description that applies to \emph{arbitrary} $\rho _{SB}(0)$, as we have done above. We now argue that this viewpoint is the correct one for fault-tolerant quantum error correction (FT-QEC).
\subsection{(In)validity of the CP\ map model in FT-QEC}
Let us show that system-environment correlations impose a severe restriction on the applicability of CP maps in FT-QEC. The CP map model used in FT-QEC \cite {Shor:96,Aharonov:96,Knill:98,Steane:03,Knill:05,Reichardt:05,Aharonov:08,Aliferis:08} can be described as follows (see, e.g., Eq. (8.1) in \cite{Aharonov:08}): $ \rho _{S}(T)=\Phi _{\mathrm{CP}}^{\mathrm{tot}}(T,t_{0})[\rho _{S}(t_{0})]$ where \begin{equation} \Phi _{\mathrm{CP}}^{\mathrm{tot}}(T,t_{0})=\bigotimes\nolimits_{i=1}^{N} \Phi _{U}(t_{i})\Phi _{\mathrm{CP}}(t_{i},t_{i-1}), \label{eq:mapmodel} \end{equation} where $T\equiv t_{N}$ is the total circuit time, and where $\Phi _{U}[\rho _{S}]=U_{S}\rho _{S}U_{S}^{\dagger }$ is a unitary map (automatically CP) that describes an ideal quantum logic gate.\footnote{ In this subsection we denote noise maps by their initial and final times, to distinguish them from the instantaneous unitary maps.} This represents the idea used repeatedly in FT-QEC, that the noisy evolution at every time step can be decomposed into \textquotedblleft pure noise\textquotedblright\ $\Phi _{\mathrm{CP}}(t_{i},t_{i-1})$ followed by an instantaneous and perfect unitary gate $\Phi _{U}(t_{i})$. More precisely, in FT-QEC one assumes that the evolution starts ($t=t_{0}=0$) from a product state, then undergoes a CP\ map $\Phi _{\mathrm{CP}}(t_{1},t_{0})$ due to coupling to the environment, followed by an instantaneous error correction step $\Phi _{U}(t_{1})$. If the latter were perfect then the post-error-correction state would again be a product state $\rho _{S}(t_{1})\otimes \rho _{B}(t_{1})$. However, FT-QEC\ allows for the fact that the error correction step is almost never perfect, which means that there is a residual correlation between system and bath at $t_{1}$. Hence, according to Ref.~ \cite{ShabaniLidar:08}, the map that describes the evolution of the system is a CP\ map if and only if the residual correlation is purely classical. Otherwise it is a Hermitian map. 
To make this point more explicit, consider a sequence of two noise time-steps, interrupted by one error correction step. In the ideal scenario, where the error correction step $\Phi _{U}(t_{1})$ works perfectly (i.e., reduces the system-bath correlations to purely classical), we would have \begin{equation} \Phi _{\mathrm{CP}}^{\mathrm{(2)}}(t_{2},t_{0})=\Phi _{\mathrm{CP} }(t_{2},t_{1})\Phi _{U}(t_{1})\Phi _{\mathrm{CP}}(t_{1},t_{0}), \end{equation} where $\Phi _{\mathrm{CP}}(t_{2},t_{1})$ is again a CP\ noise map. However, in reality $\Phi _{U}(t_{1})$ works imperfectly [system-bath correlations are not purely classical after the action of $\Phi _{U}(t_{1})$], and the actual map obtained is \begin{equation} \Phi _{\mathrm{H}}^{\mathrm{(2)}}(t_{2},t_{0})=\Phi _{\mathrm{H} }(t_{2},t_{1})\Phi _{U}(t_{1})\Phi _{\mathrm{CP}}(t_{1},t_{0}), \end{equation} where $\Phi _{\mathrm{H}}(t_{2},t_{1})$ is now a Hermitian map. Note that, in fact, even the assumption that the first noise map is CP will not be true in general, due to errors in the preparation of the initial state, leading to non-classical correlations between system and bath. We conclude that in general the CP\ map model (\ref{eq:mapmodel}) should be replaced by \begin{equation} \Phi _{\mathrm{H}}^{\mathrm{tot}}(T,t_{0})=\bigotimes\nolimits_{i=1}^{N}\Phi _{U}(t_{i})\Phi _{\mathrm{H}}(t_{i},t_{i-1}), \label{eq:linmap} \end{equation} where $\Phi _{\mathrm{H}}(t_{i},t_{i-1})$ are \emph{Hermitian maps}, not necessarily CP.\footnote{ Note that Eq.~(\ref{eq:linmap}) applies also to non-Markovian noise, and is hence complementary to Hamiltonian FT-QEC \cite {Terhal:04,Aliferis:05,Aharonov:05}.}
It is worth emphasizing that this distinction between purely classical and other correlations, and the resulting difference between CP\ and Hermitian evolution, is not a distinction that has thus far been made in FT-QEC\ theory. Rather, in FT-QEC one distinguishes between \textquotedblleft good\textquotedblright\ and \textquotedblleft bad\textquotedblright\ fault paths, where the former (latter) contain only a few (too many) errors. Quoting from \cite{Terhal:04}: \textquotedblleft There are good fault paths with so-called sparse numbers of faults which keep being corrected during the computation and which lead to (approximately) correct answers of the computation; and there are bad fault-paths which contain too many faults to be corrected and imply a crash of the quantum computer.\textquotedblright\ This leads to a splitting of the total map (\ref{eq:mapmodel}) into a sum over good and bad paths. One then shows that the computation can proceed robustly via the use of concatenated codes, provided the \textquotedblleft bad\textquotedblright\ paths are appropriately bounded. In \cite{Aharonov:08} (p.1272) it was pointed out that the sum over \textquotedblleft good\textquotedblright\ paths need not be a CP\ map, but can be decomposed into a new sum over CP\ maps [Eq. (8.13) there]. This new decomposition can then be treated using standard FT-QEC techniques. However, this assumes again that the total evolution is a CP\ map, which in fact it is not [Eq. ( \ref{eq:linmap})].
These observations motivate a generalized theory of QEC, which can handle non-CP\ noise maps. This is the subject of the next section. \emph{The main result of this theory is reassuring:\ in spite of the invalidity of the CP\ map model in FT-QEC, the CP-map based results apply because the same encoding and recovery that corrects a Hermitian map can be used to correct a closely related CP\ map, whose coefficients are the absolute values of the Hermitian map}. This is formalized in Corollary \ref{cor:HM}.
\section{ Linear Quantum Error Correction}
\label{sec:LQEC}
Having argued that non-CP Hermitian maps arise naturally in the study of open systems, and in particular FT-QEC, we now proceed to develop the theory of Linear QEC. For generality we do this for arbitrary linear maps, i.e., maps of the form (\ref{eq:LM}). We then specialize to the physically relevant case of Hermitian maps.
Let us first recall the fundamental theorem of \textquotedblleft standard\textquotedblright\ QEC (for CP noise and CP recovery maps) \cite {Knill:97b}: Let $P$ be a projection operator onto the code space. Necessary and sufficient conditions for quantum error correction of a CP map, $\Phi _{ \mathrm{CP}}(\rho )={\sum_{i}}F_{i}\rho F_{i}^{\dagger }$ are \begin{equation} PF_{i}^{\dag }F_{j}P=\lambda _{ij}P\quad \forall i,j. \label{eq:QEC-CP} \end{equation} An elegant proof of this theorem and a construction of the corresponding CP recovery map was given in Refs. \cite{Nielsen:98,Nielsen:book}; we use some of their methods in the proofs of Theorems~\ref{th:CP-rec},\ref{th:suff}.
\subsection{CP-recoverable linear noise maps}
While general (non-Hermitian) linear maps of the form (\ref{eq:LM}) do not arise from quantum dynamical processes [Eq. (\ref{dynamics1})], it is still interesting from a purely mathematical standpoint to consider QEC for such maps. Moreover, we easily recover the physical setting from these general considerations.
Theorem~\ref{th:CP-rec} shows that there is a class of linear noise maps which are equivalent to certain non-trace-preserving CP noise maps when it comes to error correction using CP recovery maps.
\begin{mytheorem} \label{th:CP-rec}Consider a general linear noise map $\Phi _{\mathrm{L} }(\rho )$ $={\sum_{i=1}^{N}}E_{i}\rho E_{i}^{\prime \dagger }$ and associate to it an \textquotedblleft expanded\textquotedblright\ CP map $\tilde{\Phi}_{ \mathrm{CP}}(\rho )=\frac{1}{2}{\sum_{i=1}^{N}}E_{i}\rho E_{i}^{\dagger }+ \frac{1}{2}{\sum_{i=1}^{N}}E_{i}^{\prime }\rho E_{i}^{\prime \dagger }$. Then any QEC code $\mathcal{C}$ and corresponding CP recovery map $\mathcal{R }$ for $\tilde{\Phi}_{\mathrm{CP}}$ are also a QEC code and CP recovery map for $\Phi _{\mathrm{L}}$. \end{mytheorem}
\begin{proof} The operation elements of $\tilde{\Phi}_{\mathrm{CP}}$ are $ \{F_{i}\}_{i=1}^{N}=\{\frac{1}{\sqrt{2}}E_{i}\}_{i=1}^{N}$ and $ \{F_{N+i}\}_{i=1}^{N}=\{\frac{1}{\sqrt{2}}E_{i}^{\prime }\}_{i=1}^{N}$, whence $\tilde{\Phi}_{\mathrm{CP}}(\rho )={\sum_{i=1}^{2N}}F_{i}\rho F_{i}^{\dagger }$. The standard quantum error correction conditions (\ref {eq:QEC-CP}) for $\tilde{\Phi}_{\mathrm{CP}}$, where \begin{equation} \lambda \equiv 2\left( \begin{array}{cc} \alpha & \gamma \\ \gamma ^{\dag } & \alpha ^{\prime } \end{array} \right) =\lambda ^{\dag }, \label{eq:lambda} \end{equation} become three sets of conditions in terms of the $E_{i}$ and $E_{i}^{\prime }$ : \begin{eqnarray} \text{(i)}~PE_{i}^{\dag }E_{j}P &=&2\alpha _{ij}P,~\text{(ii)} ~PE_{i}^{\prime \dag }E_{j}^{\prime }P=2\alpha _{ij}^{\prime }P, \notag \\ \text{(iii)}~PE_{i}^{\dag }E_{j}^{\prime }P &=&2\gamma _{ij}P, \label{eq:lambda(i)} \end{eqnarray} where $i,j\in \{1,...,N\}$ and $\alpha _{ij}=\lambda _{ij}$, $\gamma _{ij}=\lambda _{i,N+j}$, $\alpha _{ij}^{^{\prime }}=\lambda _{N+i,N+j}$. The existence of a projector $P$ which satisfies Eqs.~(\ref{eq:lambda(i)} )(i)-(iii) is equivalent to the existence of a QEC code for $\tilde{\Phi}_{ \mathrm{CP}}$. Assuming that a code $\mathcal{C}$ has been found (i.e., $P \mathcal{C}=\mathcal{C}$) for $\tilde{\Phi}_{\mathrm{CP}}$, we use this as a code for $\Phi _{\mathrm{L}}$ and show that the corresponding CP recovery map $\mathcal{R}_{\mathrm{CP}}$ is also a recovery map for $\Phi _{\mathrm{L} }$. Indeed, let $G_{j}\equiv \sum_{i=1}^{2N}u_{ij}F_{i}$ be new operation elements for $\tilde{\Phi}_{\mathrm{CP}}$, where $u$ is the unitary matrix that diagonalizes $\lambda $, i.e., $u^{\dag }\lambda u=d$. Then $\tilde{\Phi }_{\mathrm{CP}}={\sum_{j=1}^{2N}}G_{j}\rho G_{j}^{\dagger }$. Let $\mathcal{R }_{\mathrm{CP}}=\{R_{k}\}$ be the CP recovery map for $\tilde{\Phi}_{\mathrm{ CP}}$. Assume that $\rho $ is in the code space, i.e., $P\rho P=\rho $. 
We now show that $\mathcal{R}_{\mathrm{CP}}[\Phi _{\mathrm{L}}(\rho )]=\rho $, i.e., we have CP recovery. First, \begin{eqnarray} \mathcal{R}_{\mathrm{CP}}[\Phi _{\mathrm{L}}(\rho )] &=&\sum_{k}R_{k}\left( \sum_{i=1}^{N}F_{i}\rho F_{N+i}^{\dag }\right) R_{k}^{\dag } \notag \\ &=&\sum_{i=1}^{N}\sum_{j,j^{\prime }=1}^{2N}u_{ij}^{\ast }u_{N+i,j^{\prime }}\times \notag \\ &&\sum_{k}\left( R_{k}G_{j}P\right) \rho \left( PG_{j^{\prime }}^{\dag }R_{k}^{\dag }\right) . \label{eq:R-CP0} \end{eqnarray} Now, note that \begin{eqnarray} PG_{k}^{\dag }G_{l}P &=&\sum_{ij}u_{ik}^{\ast }u_{jl}PF_{i}^{\dag }F_{j}P=\sum_{ij}u_{ik}^{\ast }\lambda _{ij}u_{jl}P \notag \\ &=&d_{k}\delta _{kl}P. \end{eqnarray} Then the polar decomposition yields \begin{equation} G_{k}P=U_{k}(PG_{k}^{\dag }G_{k}P)^{1/2}=\sqrt{d_{k}}U_{k}P. \end{equation} The recovery operation elements are given by \begin{equation} R_{k}=U_{k}^{\dag }P_{k}, \label{eq:recovery} \end{equation} where $P_{k}=U_{k}PU_{k}^{\dag }$. Therefore $P_{k}=G_{k}PU_{k}^{\dag }/ \sqrt{d_{k}}$. This allows us to calculate the action of the $k$th recovery operator on the $l$th error \cite{Nielsen:98,Nielsen:book}: \begin{eqnarray} R_{k}G_{l}P &=&U_{k}^{\dag }P_{k}^{\dag }G_{l}P=U_{k}^{\dag }(U_{k}PG_{k}^{\dag }/\sqrt{d_{k}})G_{l}P \notag \\ &=&\delta _{kl}\sqrt{d_{k}}P. \end{eqnarray} Therefore, \begin{eqnarray} \mathcal{R}_{\mathrm{CP}}[\Phi _{\mathrm{L}}(\rho )] &=&\sum_{i=1}^{N}\sum_{j,j^{\prime }=1}^{2N}u_{ij}^{\ast }u_{N+i,j^{\prime }} \notag \\ &&\times \sum_{k}\left( \delta _{kj}\sqrt{d_{k}}P\right) \rho \left( P\sqrt{ d_{k}}\delta _{kj^{\prime }}\right) \notag \\ &=&\rho \sum_{i=1}^{N}\left( udu^{\dag }\right) _{N+i,i}=\rho \sum_{i=1}^{N}\lambda _{N+i,i} \notag \\ &=&2\rho \mathrm{Tr}\gamma ^{\dag }. 
\label{eq:R-CP} \end{eqnarray} Next note that, using condition (\ref{eq:lambda(i)})(iii) and trace preservation by $\Phi _{\mathrm{L}}$: \begin{eqnarray} PE_{i}^{\prime \dag }E_{i}P &=&2\gamma _{ii}^{\dag }P\Longrightarrow 2 \mathrm{Tr}\gamma ^{\dag }P=P\sum_{i}E_{i}^{\prime \dag }E_{i}P=P \notag \\ &\Longrightarrow &\mathrm{Tr}\gamma ^{\dag }=\frac{1}{2}. \end{eqnarray} Hence, finally: \begin{equation} \mathcal{R}_{\mathrm{CP}}[\Phi _{\mathrm{L}}(\rho )]=\rho \label{eq:RL_CP} \end{equation} for any $\rho $ in the codespace. \end{proof}
Note that $\tilde{\Phi}_{\mathrm{CP}}(\rho )$ need not be trace preserving:$ \ \mathrm{Tr}[\tilde{\Phi}_{\mathrm{CP}}(\rho )]=\frac{1}{2}\mathrm{Tr}[({ \sum_{i=1}^{N}}E_{i}^{\dagger }E_{i}+{\sum_{i=1}^{N}}E_{i}^{\prime \dagger }E_{i}^{\prime })\rho ]$, and while ${\sum_{i=1}^{N}}E_{i}^{\prime \dagger }E_{i}=I$ if $\Phi _{\mathrm{L}}$ is trace preserving, we do not have conditions on ${\sum_{i=1}^{N}}E_{i}^{\dagger }E_{i}$ and ${\sum_{i=1}^{N}} E_{i}^{\prime \dagger }E_{i}^{\prime }$.
We define the class of \textquotedblleft CP-recoverable linear noise maps\textquotedblright\ $\{\Phi _{\mathrm{CPR}}\}$ as those $\Phi _{\mathrm{L }}$ for which CP\ recovery is always possible. By Theorem~\ref{th:CP-rec} this includes all $\Phi _{\mathrm{L}}$ for which $P$ can be found satisfying conditions~(\ref{eq:lambda(i)})(i)-(iii). However, these conditions are not necessary.
\subsection{Non-CP-recoverable linear noise maps}
We now define \textquotedblleft non-CP-recoverable linear noise maps\textquotedblright\ $\{\Phi _{\mathrm{nCPR}}\}$ as those $\Phi _{\mathrm{ L}}$ for which non-CP-recovery is always possible. Theorem~\ref{th:suff} shows constructively that $\{\Phi _{\mathrm{nCPR}}\}$ includes all linear noise maps $\Phi _{\mathrm{L}}$ for which $P$ can be found satisfying only conditions~(\ref{eq:lambda(i)})(i) and (ii). Clearly, $\{\Phi _{\mathrm{CP} }\}\subset \{\Phi _{\mathrm{CPR}}\}\subset \{\Phi _{\mathrm{nCPR}}\}\subset \{\Phi _{\mathrm{L}}\}$.
\begin{mytheorem} \label{th:suff} Let $\Phi _{\mathrm{L}}=\{E_{i},E_{i}^{\prime }\}_{i}$ be a linear noise map. Then every state $\rho =P\rho P$ encoded using a QEC code defined by a\ projector $P$\ satisfying only Eqs.~(\ref{eq:lambda(i)})(i) and (ii) can be recovered using a non-CP recovery map. \end{mytheorem}
\begin{proof} Let $G_{k}=\sum_{i}u_{ik}E_{i}$ and $G_{k}^{\prime }=\sum_{i}u_{ik}^{\prime }E_{i}^{\prime }$, where the unitaries $u$ and $u^{\prime }$ respectively diagonalize the Hermitian matrices $\alpha $ and $\alpha ^{\prime }$: $ d=u^{\dag }\alpha u$ and $d^{\prime }=u^{\prime \dag }\alpha ^{\prime }u^{\prime }$. Define a recovery map $\mathcal{R}=\{R_{k},R_{k}^{\prime }\}$ (not necessarily CP) with operation elements \begin{equation} R_{k}=U_{k}^{\dag }P_{k},\quad R_{k}^{\prime }=U_{k}^{\prime \dag }P_{k}^{\prime }. \label{eq:R} \end{equation} Here $P_{k}=U_{k}PU_{k}^{\dag }$, $P_{k}^{\prime }=U_{k}^{\prime }PU_{k}^{\prime \dag }$ are projection operators, and $U_{k}$ and $ U_{k}^{\prime }$ arise from the polar decomposition of $G_{k}P$ and $ G_{k}^{\prime }P$, i.e., $G_{k}P=U_{k}(PG_{k}^{\dag }G_{k}P)^{1/2}$ and $ G_{k}^{\prime }P=U_{k}^{\prime }(PG_{k}^{\prime \dag }G_{k}^{\prime }P)^{1/2}$. The proof is entirely analogous to the proof of Theorem~\ref{th:CP-rec}, except that we must keep track of both the primed and unprimed operators. Following through the same calculations we thus obtain $R_{k}G_{l}\sqrt{\rho }=\sqrt{ d_{k}}\delta _{kl}\sqrt{\rho }$ and $R_{k}^{\prime }G_{l}^{\prime }\sqrt{ \rho }=\sqrt{d_{k}^{\prime }}\delta _{kl}\sqrt{\rho }$. 
Using this in the recovery map applied to the linear noise map, we find: \begin{eqnarray} \mathcal{R}[\Phi (P\rho P)] &=&\sum_{kl}R_{k}E_{l}P\rho PE_{l}^{\prime \dagger }R_{k}^{\prime \dag } \notag \\ &=&\sum_{kl}R_{k}(\sum_{j}u_{lj}^{\ast }G_{j})P\rho P(\sum_{i}u_{li}^{\prime }G_{i}^{\prime \dag })R_{k}^{\prime \dag } \notag \\ &=&F_{\mathrm{L}}P\rho P\propto \rho , \end{eqnarray} where \begin{eqnarray} F_{\mathrm{L}} &\equiv &\sum_{ijkl}u_{lj}^{\ast }u_{li}^{\prime }\sqrt{ d_{k}d_{k}^{\prime \ast }}\delta _{kj}\delta _{ki}=\sum_{kl}u_{lk}^{\ast }u_{lk}^{\prime }\sqrt{d_{k}d_{k}^{\prime \ast }} \notag \\ &=&\mathrm{Tr}[u^{\prime }d^{\prime \dag }du^{\dag }]=\mathrm{Tr}[u^{\prime }u^{\dag }\alpha \alpha ^{\prime \dag }] \label{eq:F_L} \end{eqnarray} is a \textquotedblleft correction factor\textquotedblright\ for non-CP\ recovery of linear noise maps, which was $1$ in the case of CP\ recovery, above. \end{proof}
Gathering the expressions derived in the last proof, we have the following explicit expressions for the left and right recovery operations: \begin{equation} R_{k}=U_{k}^{\dag }P_{k}^{\dag }=\frac{1}{\sqrt{d_{k}}}P\sum_{i}u_{ik}^{\ast }E_{i}^{\dag },~R_{k}^{\prime }=\frac{1}{\sqrt{d_{k}^{\prime }}} P\sum_{i}u_{ik}^{\prime \ast }E_{i}^{\prime \dag }. \end{equation} This also shows that, in general, $R_{k}$ need not equal $R_{k}^{\prime }$, i.e., the recovery map is linear but not necessarily CP.
Note that standard QEC can also be interpreted as \textquotedblleft error correction by inversion\textquotedblright , in the following sense: when the noise map is CP and recovery is also CP, recovery is the inverse of the noise map \emph{restricted to the code space} (Theorem III.3 in Ref.~\cite {Knill:97b}). The same is true for our LQEC results above, which relax the restriction to CP noise maps.
\subsection{The physical case: Hermitian maps}
The general physical case is the case of Hermitian noise maps, to which any quantum dynamical process can be reduced, as follows from Theorem \ref {linear Rep}. We can specialize Theorems \ref{th:CP-rec} and \ref{th:suff} to this case.
\begin{mycorollary} \label{cor:HM}Consider a Hermitian noise map $\Phi _{\mathrm{H}}(\rho )$ $={ \sum_{i=1}^{N}}c_{i}K_{i}\rho K_{i}^{\dagger }$ and associate to it a CP map
$\tilde{\Phi}_{\mathrm{CP}}(\rho )={\sum_{i=1}^{N}|}c_{i}|K_{i}\rho K_{i}^{\dagger }$. Then any QEC code $\mathcal{C}$ and corresponding CP recovery map $\mathcal{R}_{\mathrm{CP}}$ for $\tilde{\Phi}_{\mathrm{CP}}$ are also a QEC code and CP recovery map for $\Phi _{\mathrm{H}}$. \end{mycorollary}
The important conclusion we can draw from Corollary \ref{cor:HM} is that standard QEC\ techniques apply whether the noise map is CP\ or, as it will almost always be due to non-classical correlations, Hermitian. This is because Corollary \ref{cor:HM} tells us that it is safe to replace all negative $c_{i}$ coefficients by their absolute values, and thus replace the actual noise map by its CP\ counterpart.
\begin{proof} We have $\Phi _{\mathrm{H}}(\rho )={\sum_{i=1}^{N}}E_{i}\rho E_{i}^{\prime \dagger }$ with $\{{E}_{i}=\sqrt{c_{i}}K_{i}\}_{i=1}^{N}$ and $\{{E} _{i}^{\prime }=(\sqrt{c_{i}})^{\ast }K_{i}\}_{i=1}^{N}$, whence we can apply the construction of Theorem \ref{th:CP-rec}. Indeed, the \textquotedblleft expanded\textquotedblright\ CP map becomes $\tilde{\Phi}_{\mathrm{CP}}(\rho )=\frac{1}{2}{\sum_{i=1}^{N}}E_{i}\rho E_{i}^{\dagger }+\frac{1}{2}{
\sum_{i=1}^{N}}E_{i}^{\prime }\rho E_{i}^{\prime \dagger }={\sum_{i=1}^{N}|}
c_{i}|K_{i}\rho K_{i}^{\dagger }$, as claimed, and hence a QEC\ code and CP\ recovery for $\tilde{\Phi}_{\mathrm{CP}}$ is also a QEC\ code and CP\ recovery for $\Phi _{\mathrm{H}}$. In particular, $\mathcal{R}_{\mathrm{CP} }[\Phi _{\mathrm{H}}(\rho )]=\rho $. \end{proof}
Note that $\tilde{\Phi}_{\mathrm{CP}}$ need not be trace preserving even in the Hermitian map case: $\mathrm{Tr}[\tilde{\Phi}_{\mathrm{CP}}(\rho )]=
\mathrm{Tr}[{\sum_{i=1}^{N}|}c_{i}|K_{i}^{\dagger }K_{i}\rho ]$, but if $ \Phi _{\mathrm{H}}$ is trace preserving then we only have ${\sum_{i=1}^{N}} c_{i}K_{i}^{\dagger }K_{i}=I$, hence cannot conclude more about $\mathrm{Tr}[ \tilde{\Phi}_{\mathrm{CP}}(\rho )]$. Also note that substitution of ${E}_{i}= \sqrt{c_{i}}K_{i}$ and ${E}_{i}^{\prime }=(\sqrt{c_{i}})^{\ast }K_{i}$ into the QEC conditions (\ref{eq:lambda(i)})(i)-(iii) yields $\alpha _{ij}^{\prime }=\sqrt{\frac{c_{i}}{c_{j}}}\left( \sqrt{\frac{c_{j}}{c_{i}}} \right) ^{\ast }\alpha _{ij}$ and $\gamma _{ij}=\frac{(\sqrt{c_{j}})^{\ast } }{\sqrt{c_{j}}}\alpha _{ij}$, i.e., unlike in the general linear maps case, the matrices $\alpha ^{\prime }$ and $\gamma $ in Eq. (\ref{eq:lambda}) are not independent from $\alpha $. In fact, as shown in Appendix~\ref {app:direct} we can give a direct proof of Corollary \ref{cor:HM} which only invokes a single block of the $\lambda $ matrix.
\subsubsection{Example of CP recovery: Inverse bit-flip map}
Consider \textquotedblleft diagonalizable maps\textquotedblright , i.e., $ \Phi _{\mathrm{D}}(\rho )\equiv \sum_{i}c_{i}K_{i}\rho K_{i}^{\dagger }$, where $c_{i}\in \mathbb{C}$. The expanded CP map is $\tilde{\Phi}_{\mathrm{CP
}}=\sum_{i}|c_{i}|K_{i}\rho K_{i}^{\dagger }$. Now consider as a specific instance an independent-errors inverse bit-flip map on three qubits: $\Phi _{ \mathrm{IPF}}(\rho )=c_{0}\rho +c_{1}\sum_{n=1}^{3}X_{n}\rho X_{n}$, where $ X_{n}$ is the Pauli $\sigma _{x} $ matrix applied to qubit $n$, where $c_{0}$
and $c_{1}$ are real, have opposite sign, and $c_{0}+3c_{1}=1$ (a Hermitian map). Then $\tilde{\Phi}_{\mathrm{CP}}=|c_{0}|\rho
+|c_{1}|\sum_{n=1}^{3}X_{n}\rho X_{n}$, which is a non-trace preserving version of the well known independent-errors CP bit-flip map. The code is $
\mathcal{C}=\mathrm{span}\{|0_{L}\rangle \equiv |000\rangle ,|1_{L}\rangle
\equiv |111\rangle \}$, and $P=|0_{L}\rangle \langle 0_{L}|+|1_{L}\rangle
\langle 1_{L}|$, which satisfies Eq.~(\ref{eq:QEC-H}) with $F_{1}=\sqrt{
|c_{0}|}I$ and $F_{2,3,4}=\sqrt{|c_{1}|}X_{1,2,3}$. Then by Corollary \ref {cor:HM} the same code (and corresponding CP recovery map) also corrects $ \Phi _{\mathrm{IPF}}$. The CP recovery map $\mathcal{R}_{\mathrm{CP}}$ has operation elements $R_{0}=P$ and $\{R_{n}=\frac{1}{\sqrt{3}} PX_{n}\}_{n=1}^{3}$; indeed, it is easily checked that $\mathcal{R}_{\mathrm{ CP}}[\Phi _{\mathrm{IPF}}(P\rho P)]=P\rho P$ for any state $\rho \in \mathcal{C}$.
\subsubsection{Hermitian recovery maps}
Since Hermitian maps are the most general physical maps, it is natural to consider Hermitian recovery of Hermitian noise maps. We thus define \textquotedblleft Hermitian recovery maps\textquotedblright\ $\{\mathcal{R}_{ \mathrm{H}}\}$ as those Hermitian maps that correct a Hermitian noise map $ \Phi _{\mathrm{H}}$, i.e., $\mathcal{R}_{\mathrm{H}}\circ \Phi _{\mathrm{H} }(\rho )\varpropto \rho $. The following result presents a possible set of Hermitian recovery maps.
\begin{mycorollary} \label{cor:Hrec} Consider a Hermitian noise map $\Phi _{\mathrm{H}}(\rho )$ $ ={\sum_{i=1}^{N}}c_{i}K_{i}\rho K_{i}^{\dagger }$ with error operators $ \{K_{i}\}$ satisfying the relations $PK_{i}^{\dag }K_{j}P=\alpha _{ij}P$. Any Hermitian map $\mathcal{R}_{\mathrm{H}}(\rho)=\sum_{k}h_{k}R_{k}\rho R_{k}^{\dag }$ with recovery operators $\{R_{k}\}$ as in Eq. (\ref {eq:recovery}) and $\{h_{k}\}\in \mathbb{R}$ corrects the noise map $\Phi _{ \mathrm{H}}$. \end{mycorollary}
The proof is given in Appendix~\ref{app:Hrec}, and employs a method similar to that of the proof of Theorem~\ref{th:suff}.
\begin{figure}\label{fig}
\end{figure}
\subsubsection{How does non-CP, Hermitian recovery arise?}
In standard QEC theory the recovery map is considered CP. The reason for this is that the recovery ancillas are introduced after the action of the noise channel so that they enter in a tensor product state with the encoded qubits that underwent the noise channel. The recovery map is obtained in the standard setting by first applying a unitary over the encoded qubits plus recovery ancillas, then tracing out the recovery ancillas. This is manifestly a CP\ map over the encoded qubits.
Since we know that the recovery map experienced by the encoded qubits is CP if and only if the initial state of the encoded and recovery ancilla qubits has vanishing quantum discord \cite{ShabaniLidar:08}, it is clear how a non-CP recovery map can be implemented: the recovery ancillas should have non-vanishing quantum discord with the encoded qubits. Since this will still be a QDP, the resulting recovery map will be Hermitian according to Theorem \ref{linear Rep}.
Such a situation can come about in various ways. For example, a scenario which is particularly relevant for quantum computation and communication, is one where the environment causes the recovery ancillas to become non-classically correlated with the encoded qubits before the recovery operation can be applied. This is a reasonable scenario since, while the recovery ancillas are presumably kept pure and isolated from the environment for as long as possible, at some point they must be brought into contact with the encoded qubits, and at this point all qubits (encoded and recovery ancillas) are susceptible to correlations mediated by the environment. This is shown in Fig. \ref{fig}.
\section{Conclusions}
\label{sec:conc}
This work aimed to fill two gaps: one in the theory of open quantum systems, and a resulting gap in the theory of quantum error correction. The first gap had to do with the type of maps that describe open systems given \emph{ arbitrary} initial states of the total system. In fact, it was not a priori clear that there should even be a linear map connecting the initial to the final open system state for arbitrary initial total system states. Building upon the class of \textquotedblleft special linear states\textquotedblright\ we introduced in \cite{ShabaniLidar:08} we showed here that in fact such a linear map description does always exist, and moreover, for quantum dynamics the map is always Hermitian. The map reduces to the completely positive type if and only if the initial total system state has vanishing quantum discord \cite{ShabaniLidar:08}; in all other cases it is Hermitian but not CP. This result, we argued, impacts the theory of quantum error correction, where previously the assumption of CP\ maps was taken for granted. In the second part of this work we filled this gap in QEC theory, by developing a theory of Linear Quantum Error Correction (LQEC), which generalizes the CP-map-based standard theory of QEC. We showed that to every linear map $ \Phi _{\mathrm{L}}$ is associated a CP map which, if correctable, also provides an encoding with corresponding CP recovery map for $\Phi _{\mathrm{L }}$ (Theorem \ref{th:CP-rec}). Moreover, it is possible to find a non-CP recovery for $\Phi _{\mathrm{L}}$ within a larger class of codes (Theorem \ref{th:suff}). From a physical standpoint this result is actually too general, since only Hermitian maps ever arise from quantum dynamics [to the extent that the standard quantum dynamical process (\ref{dynamics1}) is valid]. 
Hence we specialized LQEC\ to the Hermitian maps case, and showed that in this case standard QEC theory for CP\ maps already suffices, in the sense that it is legitimate to replace a given Hermitian noise map by a corresponding CP\ map obtained simply by taking the absolute values of all the Hermitian map coefficients. Any QEC code which corrects this CP\ map will also correct the original Hermitian map (Corollary \ref{cor:HM}). Nevertheless, there is room for a genuine generalization when one considers Hermitian maps, since it is also possible to perform QEC using Hermitian recovery maps (Corollary \ref{cor:Hrec}). We argued that, in fact, recovery maps will generically be non-CP Hermitian maps, since recovery ancillas that are introduced into a quantum circuit prior to the recovery step will become non-classically correlated with the environment and consequently with the rest of the system.
An interesting open question for future studies is whether the results presented here have an impact on the threshold for fault tolerant quantum error correction. For example, note that while CP\ recovery perfectly returns the encoded state [Eqs. (\ref{eq:RL_CP}) and (\ref{eq:RH_CP})], non-CP\ recovery only does so up to a proportionality factor which depends on the details of the noise and recovery maps [$F_{\mathrm{L}}$ in Eq. (\ref {eq:F_L}) and $F_{\mathrm{H}}$ in Eq. (\ref{eq:F_H})]. This proportionality factor -- assuming non-CP\ recovery is applied -- may differ for different terms in the fault path decomposition \cite{Aharonov:08}, an effect which may propagate into the value of the fault tolerance threshold. This requires careful analysis, which is beyond the scope of this paper.
\begin{acknowledgments} Funded by the National Science Foundation under Grants No. CCF-0726439, PHY-0802678, and PHY-0803304, and by the United States Department of Defense (to D.A.L.). Part of this work was done while D.A.L. enjoyed the generous hospitality of the Institute for Quantum Information at the California Institute of Technology. \end{acknowledgments}
\appendix
\section{Proof of Theorem 1}
\label{app:th1}
We use a method similar to Choi's proof for a CP map representation \cite {Choi:75}, recently clearly reviewed in Ref. \cite{Leung:03}. The main difference between the proofs in Refs. \cite{Choi:75,Leung:03} and our proof is that in the previous proofs positivity allowed for the use of standard diagonalization, whereas in the absence of positivity we use the singular value decomposition \cite{Horn:book}.
\begin{proof}
Eq.~(\ref{eq:LM}) immediately implies that $\Phi _{\mathrm{L}}$ is a linear map. For the other direction, let $\widetilde{M}=\sum_{i,j=1}^{n}|i\rangle
\langle j|\otimes |i\rangle \langle j|=n|\phi \rangle \langle \phi |$, where
$|i\rangle $ is a column vector with $1$ at position $i$ and $0$'s elsewhere, and $|\phi \rangle =n^{-1/2}\sum_{i}|i\rangle \otimes |i\rangle $ is a maximally entangled state over $\mathcal{H}\otimes \mathcal{H}$, where $
\mathcal{H}$ is the Hilbert space spanned by $\{|i\rangle \}_{i=1}^{n}$. $ \widetilde{M}$ is also an $n\times n$ array of $n\times n$ matrices, whose $
(i,j)$th block is $|i\rangle \langle j|$. Construct two equivalent expressions for $(\mathcal{I}\otimes \Phi _{\mathrm{L}})[\widetilde{M}]$, where $\mathcal{I}$ is the $(n\times n)\times (n\times n)$ identity matrix. (i) $(\mathcal{I}\otimes \Phi _{\mathrm{L}})[\widetilde{M}]$ is an $n\times n $ array of $m\times m$ matrices, whose $(i,j)$th block is $\Phi _{\mathrm{L
}}[|i\rangle \langle j|]$. (ii) Consider a singular value decomposition: $( \mathcal{I}\otimes \Phi )[\widetilde{M}]=UDV=\sum_{\alpha }\lambda _{\alpha
}U|\alpha \rangle \langle \alpha |V=\sum_{\alpha }\lambda _{\alpha
}|u_{\alpha }\rangle \langle v_{\alpha }|$. Here $U$ and $V$ are unitary, $D= \mathrm{diag}(\{\lambda _{\alpha }\})$ is diagonal and $\lambda _{\alpha }\geq 0$ are the singular values of $(\mathcal{I}\otimes \Phi _{\mathrm{L}})[
\widetilde{M}]$. Divide the column (row) vector $|u_{\alpha }\rangle $ ($
\langle v_{\alpha }|$) into $n$\ segments each of length $m$ and define an $ m\times n$ ($n\times m$) matrix $E_{\alpha }$ ($E_{\alpha }^{\prime }$)
whose $i$th column (row) is the $i$th segment; then $E_{\alpha }|i\rangle $ (
$\langle i|E_{\alpha }^{\prime \dagger }$) is the $i$th segment of $
|u_{\alpha }\rangle $ ($\langle v_{\alpha }|$). Therefore the $(i,j)$th block of $|u_{\alpha }\rangle \langle v_{\alpha }|$ becomes $E_{\alpha
}|i\rangle \langle j|E_{\alpha }^{\prime \dagger }$.
Equating the two expressions in (i) and (ii) for the $(i,j)$th block of $( \mathcal{I}\otimes \Phi _{\mathrm{L}})[\widetilde{M}]$, we find $\Phi _{
\mathrm{L}}[|i\rangle \langle j|]={\sum_{\alpha }}\lambda _{\alpha
}E_{\alpha }|i\rangle \langle j|E_{\alpha }^{\prime \dagger }$. Since $
\lambda _{\alpha }\geq 0$ we can redefine $E_{\alpha }$ as $\sqrt{\lambda _{\alpha }}E_{\alpha }$ and $E_{\alpha }^{\prime }$ as $\sqrt{\lambda _{\alpha }}E_{\alpha }^{\prime }$, which we do from now on. Finally, the linearity assumption on $\Phi _{\mathrm{L}}$, together with the fact that the set $\{|i\rangle \langle j|\}_{i,j=1}^{n}$ spans $\mathfrak{M}_{n}$, implies Eq.~(\ref{eq:LM}).
Next let us prove Eq. (\ref{eq:QM}) for Hermitian maps. For an old proof that uses very different techniques see Ref. \cite{Hill:73}. Eq.~(\ref{eq:QM} ) immediately implies that $\Phi _{\mathrm{H}}$ is a Hermitian map. For the other direction, associate a matrix $L_{\Phi _{\mathrm{H}}}$ with the Hermitian map $\Phi _{\mathrm{H}}$:\ $\rho ^{\prime }=\Phi _{\mathrm{H} }(\rho )$ $\Longleftrightarrow $ $\rho _{m\mu }^{\prime }=L_{n\nu }^{m\mu }\rho _{n\nu }$ (summation over repeated indices is implied). Hermiticity of $\rho $ and its image $\rho ^{\prime }$ implies $\rho _{\mu m}^{\prime }=\rho _{m\mu }^{\prime \ast }=L_{n\nu }^{m\mu \ast }\rho _{n\nu }^{\ast }=L_{n\nu }^{m\mu \ast }\rho _{\nu n}$, i.e., $L_{n\nu }^{m\mu \ast }=L_{\nu n}^{\mu m}$ \cite{Zyczkowski:04}. We can use this property of $L_{\Phi _{ \mathrm{H}}}$ to show that if $\Phi _{\mathrm{H}}$ is a Hermitian map, then $ \mathcal{I}\otimes \Phi _{\mathrm{H}}$ is Hermiticity preserving. Consider $
\mathcal{M}=\mathcal{M}_{k\xi }^{n\nu }|k\rangle \langle \xi |\otimes
|n\rangle \langle \nu |$. Then $\mathcal{M}^{\prime }=(\mathcal{I}\otimes
\Phi _{\mathrm{H}})[\mathcal{M}]=\mathcal{M}_{k\xi }^{n\nu }|k\rangle
\langle \xi |\otimes \Phi _{\mathrm{H}}(|n\rangle \langle \nu |)=\mathcal{M}
_{k\xi }^{m\mu }|k\rangle \langle \xi |\otimes L_{n\nu }^{m\mu }|n\rangle
\langle \nu |$. Assume that $\mathcal{M}_{k\xi }^{m\mu \ast }=\mathcal{M}
_{\xi k}^{\mu m}$. This property holds for $\mathcal{M}=\widetilde{M}=|\phi
\rangle \langle \phi |$ where $|\phi \rangle =\dim (\mathcal{H}
)^{-1/2}\sum_{i}|i\rangle \otimes |i\rangle $ is a maximally entangled state over $\mathcal{H}\otimes \mathcal{H}$ ($\mathcal{M}_{\xi k}^{\mu m}\equiv 1$
). Then $\mathcal{M}^{\prime \dag }=\mathcal{M}_{k\xi }^{m\mu \ast }|\xi
\rangle \langle k|\otimes L_{n\nu }^{m\mu \ast }|\nu \rangle \langle n|=
\mathcal{M}_{\xi k}^{\mu m}|\xi \rangle \langle k|\otimes L_{\nu n}^{\mu m}|\nu \rangle \langle n|=\mathcal{M}^{\prime }$. Therefore $(\mathcal{I}
\otimes \Phi _{\mathrm{H}})[|\phi \rangle \langle \phi |]$ is Hermitian, and in particular unitarily diagonalizable. It follows that the SVD\ used in the proof of Theorem~\ref{th1} can be replaced by standard diagonalization ($U=V^{\dag }$
). In this case the left and right singular vectors $|u_{\alpha }\rangle
=\langle v_{\alpha }|^{\dag }$ are the eigenvectors of $(\mathcal{I}\otimes
\Phi _{\mathrm{H}})[|\phi \rangle \langle \phi |]$ and $c_{\alpha }=\lambda _{\alpha }$ are its eigenvalues. Then $E_{\alpha }=E_{\alpha }^{\prime }$ in Eq.~(\ref{eq:LM}) and $c_{\alpha }\in \mathbb{R}$. \end{proof}
We note that by splitting the spectrum of $(\mathcal{I}\otimes \Phi _{
\mathrm{H}})[|\phi \rangle \langle \phi |]$ into positive and negative eigenvalues, $\{c_{\alpha }^{+}\geq 0\}$ and $\{c_{\alpha }^{-}\leq 0\}$, we have as an immediate corollary a fact that was also noted in \cite{Jordan:04} : Any Hermitian map can be represented as the difference of two CP\ maps: $ \Phi (\rho )$ $={\sum_{\alpha }}c_{\alpha }^{+}E_{\alpha }^{+}\rho E_{\alpha
}^{+\dagger }-{\sum_{\alpha }}|c_{\alpha }^{-}|E_{\alpha }^{-}\rho E_{\alpha }^{-\dagger }$.
\section{Direct Proof of Corollary 1}
\label{app:direct}
\begin{proof} The operation elements of $\tilde{\Phi}_{\mathrm{CP}}$ are $\{{F}_{i}=\sqrt{{
|}c_{i}|}K_{i}\}_{i=1}^{N}$, whence $\tilde{\Phi}_{\mathrm{CP}}(\rho )={ \sum_{i=1}^{N}}F_{i}\rho F_{i}^{\dagger }$. The standard quantum error conditions (\ref{eq:QEC-CP}) for $\tilde{\Phi}_{\mathrm{CP}}$ is a set of conditions in terms of the $F_{i}$: \begin{equation} PF_{i}^{\dag }F_{j}P=\beta _{ij}P,\quad i,j\in \{1,\ldots ,N\}. \label{eq:QEC-H} \end{equation} The existence of a projector $P$ which satisfies Eq.~(\ref{eq:QEC-H}) is equivalent to the existence of a QEC code for $\tilde{\Phi}_{\mathrm{CP}}$. Assuming that a code $\mathcal{C}$ has been found (i.e., $P\mathcal{C}= \mathcal{C}$) for $\tilde{\Phi}_{\mathrm{CP}}$, we use this as a code for $ \Phi _{\mathrm{H}}$ and show that the corresponding CP recovery map $ \mathcal{R}_{\mathrm{CP}}$ is also a recovery map for $\Phi _{\mathrm{H}}$. Indeed, let $G_{j}\equiv \sum_{i=1}^{N}u_{ij}F_{i}$ be new operation elements for $\tilde{\Phi}_{\mathrm{CP}}$, i.e., $\tilde{\Phi}_{\mathrm{CP}}= {\sum_{j=1}^{N}}G_{j}\rho G_{j}^{\dagger }$, where $u$ is the unitary matrix that diagonalizes the Hermitian matrix $\beta =[\beta _{ij}]$, i.e., $ u^{\dag }\beta u=d$. Let $\mathcal{R}_{\mathrm{CP}}=\{R_{k}\}$ be the CP recovery map for $\tilde{\Phi}_{\mathrm{CP}}$. Assume that $\rho $ is in the code space, i.e., $P\rho P=\rho $. We now show that $\mathcal{R}_{\mathrm{CP} }[\Phi _{\mathrm{H}}(\rho )]=\rho $, i.e., we have CP recovery. First, \begin{eqnarray} \mathcal{R}_{\mathrm{CP}}[\Phi _{\mathrm{H}}(\rho )] &=&\sum_{k}R_{k}\left(
\sum_{i=1}^{N}\frac{c_{i}}{|c_{i}|}F_{i}\rho F_{i}^{\dag }\right) R_{k}^{\dag } \notag \\
&=&\sum_{i=1}^{N}\frac{c_{i}}{|c_{i}|}\sum_{j,j^{\prime }=1}^{N}u_{ij}^{\ast }u_{ij^{\prime }} \notag \\ &&\times \sum_{k}\left( R_{k}G_{j}P\right) \rho \left( PG_{j^{\prime }}^{\dag }R_{k}^{\dag }\right) . \end{eqnarray} Now, note that, using Eq. (\ref{eq:QEC-H}): \begin{eqnarray} PG_{k}^{\dag }G_{l}P &=&\sum_{ij}u_{ik}^{\ast }u_{jl}PF_{i}^{\dag }F_{j}P=\sum_{ij}u_{ik}^{\ast }\beta _{ij}u_{jl}P \notag \\ &=&d_{k}\delta _{kl}P. \end{eqnarray} Then the polar decomposition yields $G_{k}P=U_{k}(PG_{k}^{\dag }G_{k}P)^{1/2}=\sqrt{d_{k}}U_{k}P$. The recovery operation elements are given by \begin{equation} R_{k}=U_{k}^{\dag }P_{k};\quad P_{k}=U_{k}PU_{k}^{\dag }. \end{equation} Therefore $P_{k}=G_{k}PU_{k}^{\dag }/\sqrt{d_{k}}$. This allows us to calculate the action of the $k$th recovery operator on the $l$th error: \begin{eqnarray} R_{k}G_{l}P &=&U_{k}^{\dag }P_{k}^{\dag }G_{l}P=U_{k}^{\dag }(U_{k}PG_{k}^{\dag }/\sqrt{d_{k}})G_{l}P \notag \\ &=&\delta _{kl}\sqrt{d_{k}}P. \end{eqnarray} Therefore, \begin{eqnarray} \mathcal{R}_{\mathrm{CP}}[\Phi _{\mathrm{H}}(\rho )] &=&\sum_{i=1}^{N}\frac{
c_{i}}{|c_{i}|}\sum_{j,j^{\prime }=1}^{N}u_{ij}^{\ast }u_{ij^{\prime }} \notag \\ &&\times \sum_{k}\left( \delta _{kj}\sqrt{d_{k}}P\right) \rho \left( P\sqrt{ d_{k}}\delta _{kj^{\prime }}\right) \notag \\
&=&\rho \sum_{i=1}^{N}\frac{c_{i}}{|c_{i}|}\left( udu^{\dag }\right) _{ii} \notag \\
&=&(\sum_{i=1}^{N}\frac{c_{i}}{|c_{i}|}\beta _{ii})\rho . \label{eq:R_CP} \end{eqnarray} Next note that, using condition (\ref{eq:QEC-H}) and trace preservation by $ \Phi _{\mathrm{H}}$: \begin{eqnarray} PF_{i}^{\dag }F_{i}P &=&\beta _{ii}P\Longrightarrow \sum_{i=1}^{N}\frac{c_{i}
}{|c_{i}|}\beta _{ii}P \notag \\
&=&P\sum_{i=1}^{N}\frac{c_{i}}{|c_{i}|}F_{i}^{\dag }F_{i}P=P\sum_{i=1}^{N}c_{i}K_{i}^{\dag }K_{i}P=P \notag \\
&\Longrightarrow &\sum_{i=1}^{N}\frac{c_{i}}{|c_{i}|}\beta _{ii}=1. \end{eqnarray} Hence, finally: \begin{equation} \mathcal{R}_{\mathrm{CP}}[\Phi _{\mathrm{H}}(\rho )]=\rho \label{eq:RH_CP} \end{equation} for any $\rho $ in the codespace. \end{proof}
\section{Proof of Corollary 2}
\label{app:Hrec}
\begin{proof}
Let $\{{F}_{i}=\sqrt{{|}c_{i}|}K_{i}\}_{i=1}^{N}$; we simply use the identities given in the proof of the previous theorem -- specifically Eq.~(\ref{eq:R_CP}) -- to calculate $\mathcal{R}_{\mathrm{H}}\circ \Phi _{\mathrm{ H}}(\rho )$: \begin{eqnarray} \mathcal{R}_{\mathrm{H}}[\Phi _{\mathrm{H}}(\rho )]
&=&\sum_{k}h_{k}R_{k}\left( \sum_{i=1}^{N}\frac{c_{i}}{|c_{i}|}F_{i}\rho F_{i}^{\dag }\right) R_{k}^{\dag } \notag \\
&=&\sum_{i=1}^{N}\frac{c_{i}}{|c_{i}|}\sum_{j,j^{\prime }=1}^{N}u_{ij}^{\ast }u_{ij^{\prime }}\times \notag \\ &&\sum_{k}h_{k}\left( \delta _{kj}\sqrt{d_{k}}P\right) \rho \left( P\sqrt{d_{k}} \delta _{kj^{\prime }}\right) \notag \\
&=&P\rho P\sum_{i=1}^{N}\frac{c_{i}}{|c_{i}|}\sum_{k}h_{k}u_{ik}^{\ast }u_{ik}d_{k} \notag \\ &=&F_{\mathrm{H}}P\rho P\propto \rho , \end{eqnarray} where \begin{equation}
F_{\mathrm{H}}\equiv \sum_{i=1}^{N}\frac{c_{i}}{|c_{i}|}\left( udhu^{\dag }\right) _{ii}, \label{eq:F_H} \end{equation} where $h\equiv \mathrm{diag}(\{h_{k}\})$, and $F_{\mathrm{H}}$ is a \textquotedblleft correction factor\textquotedblright\ for Hermitian\ recovery of Hermitian noise maps, which was $1$ in the case of CP\ recovery, above. \end{proof}
\end{document} |
\begin{document}
\begin{center}
{\LARGE Large cycles in generalized Johnson graphs\\ }
{\Large Vladislav~Kozhevnikov\footnote{Moscow Institute of Physics and Technology (National Research University), Dolgoprudny, Moscow Region, Russia. Supported by Grant N NSh-775.2022.1.1 to support leading scientific schools of Russia}, Maksim~Zhukovskii\footnote{Moscow Institute of Physics and Technology (National Research University), Dolgoprudny, Moscow Region, Russia; The Russian Presidential Academy of National Economy and Public Administration, Moscow, Russia; Moscow Center for Fundamental and Applied Mathematics, Moscow, Russia; Adyghe State University, Caucasus mathematical center, Maykop, Republic of Adygea, Russia. Supported by the Ministry of Science and Higher Education of the Russian Federation (Goszadaniye No. 075-00337-20-03), project No. 0714-2020-0005. } }
Abstract\\
\end{center}
We count cycles of an unbounded length in generalized Johnson graphs. Asymptotics of the number of such cycles is obtained for certain growth rates of the cycle length.
\section{Introduction and new results} \label{intro}
For integers $i\leq j$, everywhere below we denote $\irange{i}{j}:=\iRange{i,i+1}{j}$ and $[i]:=\irange{1}{i}$. For integers $n,r,s$ such that $0\le{s}<r<n$, a simple graph $G(n,r,s)$ with the set of vertices $$
V:=V(G(n,r,s)) = \setdef{x\subset[n]}{|x|=r} $$ and the set of edges $$ E:= E(G(n,r,s)) = \setdef{\br{x,y}}{\icard{x}{y} = s} $$ is called a \textit{generalized Johnson graph}.
Unfortunately, there is no established term for graphs $G(n,r,s)$. In the literature they appear as \textit{generalized Johnson graphs} \cite{Agong2018, Cannon2012, Molitierno2017}; \textit{uniform subset graphs} \cite{Chen1987, Chen2008, Simpson1994} and \textit{distance graphs} \cite{Burkin2016, Burkin2018, Pyaderkin2016, Zhukovskii2012_sub}. The family of $G(n,r,s)$ graphs was initially (to the best of our knowledge) considered in \cite{Chen1987}, where they are called ``\textit{uniform subset graphs}''. However, this name did not become widespread. In our opinion, the term ``\textit{generalized Johnson graph}'' is preferable as the most comprehensible, since, if we set $s=r-1$, then the definition of $G(n,r,s)$ turns into the definition of the well-known Johnson graph. Note that the Kneser graph is also a special case of $G(n,r,s)$ with $s=0$. However, the term ``\textit{generalized Kneser graph}'' is already used for another class of graphs \cite{Chen2008_generalized_kneser, Denley1997, Frankl1985_generalized_kneser, Jafari2020}.
On the one hand, as we mentioned above, graphs $G(n,r,s)$ generalize Johnson graphs $G(n,r,r-1)$ \cite{Alspach2012, Daven1999, Etzion1996_codes, Etzion1996_chromatic, Etzion2011} and Kneser graphs $G(n,r,0)$ \cite{Brouwer1979, Chen2003, Lovasz1978, Matousek2004, Mutze2020, Poljak1987, Valencia2005}, which are themselves of interest in the graph theory. On the other hand, they are a special case of distance graphs in $\mathbb{R}^n$ with the Euclidean metric, which are used to study problems of combinatorial geometry (Hadwiger--Nelson problem about the chromatic number $\chi(\mathbb{R}^n)$ \cite{Cantwell1996, Chilakamarri1990, Exoo2014, Frankl1980, Frankl1981, Kupavskii2009, Larman1972}, Borsuk problem about partitioning of a set in $\mathbb{R}^n$ into subsets of a smaller diameter \cite{Kahn1993}, and various generalizations of these problems \cite{Raigorodskii2001, Raigorodskii2016, Raigorodskii2013}).\\
Throughout the paper we assume that $r$ and $s$ are constant and $n$ approaches infinity. The total number of vertices in this graph is denoted by $N$: \[N = \abs{V} = \binom{n}{r} \sim \frac{n^r}{r!}.\] From the definition of $G(n,r,s)$ it is evident that this graph is \textit{vertex-transitive}, i.e. for any two vertices there exists an automorphism of the graph mapping the first vertex to the second one. In particular, $G(n,r,s)$ is regular. Let $N_1$ denote the common degree of its vertices: \[N_1 = \binom{r}{s} \binom{n-r}{r-s} \sim \binom{r}{s} \frac{n^{r-s}}{(r-s)!}.\]
In \cite{Chen1987} it is proved that the graph $G(n,r,s)$ is Hamiltonian for $s\in\br{r-1,r-2,r-3}$, arbitrary $r$ and $n$ as well as for $s\in\br{0,1}$, arbitrary $r$, and sufficiently large $n$. Hamiltonian cycles have been extensively studied in Kneser graphs $G(n,r,0)$. It is known that they are Hamiltonian for $n\ge2.62r$ \cite{Chen2003} and for all $r$ when $n\le27$ (except for the Petersen graph $G(5,2,0)$) \cite{Shields2002}. Graphs $G(2r+1,r,0)$ are also known to be Hamiltonian for all $r\ge3$ \cite{Mutze2020}. As for cycles of a constant length, the asymptotics of the number of their appearances in $G(n,r,s)$ is known for all constant $r$ and $s$ and given below in Theorem~\ref{th_fixed_t_mon_asymp}.
Let $H$ and $G$ be graphs. A map $\varphi:V(H)\to{V(G)}$ is called a \textit{homomorphism} from $H$ to $G$ if, for any pair of vertices $x,y$ of $H$, $\br{x,y}\in{E(H)}\Rightarrow\br{\varphi(x),\varphi(y)}\in{E(G)}$. If a homomorphism is injective, then it is called a \textit{monomorphism}. Let $\homo{H,G}$ and $\mono{H,G}$ denote respectively the number of homomorphisms and monomorphisms from $H$ to $G$. Throughout this paper we write simply $\homo{H}$ and $\mono{H}$ when $G=G(n,r,s)$.
Let $C_t$ be a cycle on $t$ vertices.
The purpose of this paper is to find the asymptotic value of $\mono{C_t}$ for different $t=t(n)$.
Burkin \cite{Burkin2016} found the asymptotics of $\mono{C_t}$ for all $t=\operatorname{const}$.
\begin{theorem}[Burkin, 2016, \cite{Burkin2016}] \label{th_fixed_t_mon_asymp} Let $t$ be a fixed integer. Then \begin{equation} \label{eq_fixed_t_mon_asymp} \mono{C_t} \sim N N_1 \pr{\frac{N_1}{\binom{r}{s}}}^{t-2}. \end{equation} \end{theorem} We generalize this result to cycles of variable length, i.e. $t=t(n)$. It turns out that for slow enough (sublogarithmic) growth of $t(n)$ the asymptotics of $\mono{C_t}$ remains the same as in \eqref{eq_fixed_t_mon_asymp}. In contrast, for superlogarithmic $t(n)=o\pr{\min\{\sqrt{N},N_1\}}$ the asymptotics is different, namely, $\mono{C_t}\sim N_1^t$. These results can be summarized in the following two theorems.
\begin{theorem} \label{th_mon_asymp_eq_hom} As $n\to+\infty$, $\mono{C_t}\sim\homo{C_t}$ iff $t=o\pr{\min\{\sqrt{N},N_1\}}$. \end{theorem}
Theorem~\ref{th_mon_asymp_eq_hom} is the trickiest result of our paper. Asymptotics of $\homo{C_t}$ (stated below in Theorem~\ref{th_hom}) is a more or less direct corollary (modulo technical asymptotical computations) of the well-known representation of $\homo{C_t}$ in terms of eigenvalues of $G(n,r,s)$. Let us fix an arbitrarily small $\varepsilon>0$ and consider the partition of $\mathbb{N}$ obtained by excluding $\varepsilon$-neighborhoods of $\frac{\ln n}{\ln{r-j\choose s-j}-\ln{r-j-1\choose s-j-1}}$, $j\in[0,s]$, i.e. the intervals $$ I_j=\left[\left\lfloor\frac{(1+\varepsilon)\ln{n}}{\ln{r-j\choose s-j}-\ln{r-j-1\choose s-j-1}}\right\rfloor\right]\setminus\left[\left\lfloor\frac{(1-\varepsilon)\ln{n}}{\ln{r-j+1\choose s-j+1}-\ln{r-j\choose s-j}}\right\rfloor\right],\quad j\in[s-1], $$ $$ I_s=\left[\left\lfloor\frac{(1-\varepsilon)\ln{n}}{\ln(r-s+1)}\right\rfloor\right],\quad I_0=\left[\left\lfloor\frac{(1+\varepsilon)\ln{n}}{\ln\frac{r}{s}}\right\rfloor,\infty\right). $$
\begin{theorem} \label{th_hom} For arbitrary $t=t(n)\in\mathbb{N}$, \begin{equation} \label{eq_hom} \homo{C_t}={N_1^t}\pr{1+O\left(\frac{1}{n}\right)+\sum\limits_{j=1}^{s}\frac{n^j}{j!}\pr{\frac{\binom{r-j}{s-j}}{\binom{r}{s}}+O\left(\frac{1}{n}\right)}^t}. \end{equation} Moreover, for $j\in[0,s]$ and $t\in I_j$, $$ \homo{C_t}\sim N_1^t\frac{n^j}{j!}\left({r-j\choose s-j}/{r\choose s}\right)^t. $$
\end{theorem}
Note that for $t\in I_0$, $\homo{C_t}\sim N_1^t$, while, for $t\in I_s$, $\homo{C_t}\sim N_1^t\frac{n^s}{s!}{r\choose s}^{-t}$, i.e., \eqref{eq_fixed_t_mon_asymp} holds. Theorem~\ref{th_mon_asymp_eq_hom} and Theorem~\ref{th_hom} immediately yield asymptotics of the number of copies of $C_t$ in $G(n,r,s)$ for all $t=o\pr{\min\{\sqrt{N},N_1\}}$ since it equals $\frac{1}{2t}\mono{C_t}$.\\
The rest of the paper is organized as follows. First, in Section~\ref{random_walk}, we discuss general properties of random walks on graphs (Section~\ref{sec:trans_matrix_and_mixing_time}) and more specific properties of random walks on $G(n,r,s)$ (Sections~\ref{sec:eigen_Johnson} and \ref{sec:rw_Janson}). Secondly, in Section~\ref{proof_hom}, we prove Theorem~\ref{th_hom}. Finally, in Sections~\ref{proof_mon_asymp_eq_hom} and \ref{sec:th3_proof} we prove that the condition in Theorem~\ref{th_mon_asymp_eq_hom} is, respectively, sufficient and necessary.
The proof of the sufficiency provided in Section~\ref{proof_mon_asymp_eq_hom} uses exact expressions for the spectrum of $G(n,r,s)$. It should be noted that the proof in the case $r>2s$ (in which $\sqrt{N}=o(N_1)$) as well as in the case $t=\omega(\ln{N})$ can be considerably simplified by using a more general argument (which we omit in this paper) applicable to a wide subclass of spectral expanders (see Section~\ref{sec:th3_proof}). However, for an arbitrary $N_1$-regular graph $G$ on $N$ vertices, the property $\mono{C_t,G}\sim\homo{C_t,G}$ does not necessarily hold when
$t=O(\ln{N})$ and $N_1=O(\sqrt{N})$, even if $G$ is a spectral expander. This fact can be demonstrated, for example, by considering the random regular graph $G(N,N_1)$ with $N_1=\lfloor\ln^8{N}\rfloor$, in which, for any $\varepsilon>0$, the inequality $\mono{C_t,G(N,N_1)}/\homo{C_t,G(N,N_1)}<\varepsilon$ holds with probability approaching $1$ as soon as $t=o(\ln{N}/\ln\ln{N})$. This can be shown by translating the same property from the binomial random graph $G(N,(1+o(1))N_1/N)$ to $G(N,N_1)$ using the sandwich conjecture, which is true for $N_1=\omega(\ln^7{N})$~\cite{Gao2021}. Note that $G(N,N_1)$ is a spectral expander \cite{Zhao2012}. For the definition and properties of binomial random graphs and regular random graphs see \cite{Janson2000}.
The proof of the necessity in Theorem~\ref{th_mon_asymp_eq_hom} provided in Section~\ref{sec:th3_proof} does not rely upon the whole spectrum of $G(n,r,s)$ but rather uses its spectral expansion property. The necessity of the condition $t=o(N_1)$ follows from the fact that a random walk starts backtracking with positive probability if $t>cN_1$ for a constant $c$, which is proved in Section~\ref{sec:th3_proof_2} using almost solely the regularity of $G(n,r,s)$.
The necessity of $t=o(\sqrt{N})$ is proved in Section~\ref{sec:th3_proof_1} using a high convergence rate of a random walk on an expander, which is discussed in Section~\ref{sec:trans_matrix_and_mixing_time}. Therefore, in Section~\ref{sec:th3_proof} we formulate a generalization of Theorem~\ref{th_mon_asymp_eq_hom} to a class of spectral expanders.
\section{Random walks} \label{random_walk}
Counting cycles in $G(n,r,s)$ can be reduced to analysing the distribution of a random walk on $G(n,r,s)$.
\subsection{Distribution and adjacency matrix} \label{sec:trans_matrix_and_mixing_time}
Let $G$ be an arbitrary regular connected graph on the vertex set $[N]$ with every vertex having degree $N_1$. Let $A=(A_{i,j},\,i,j\in[N])$ be its adjacency matrix ($A_{ij}=1$ if and only if $i$ and $j$ are adjacent in $G$). Moreover, let $\lambda_j$, $j\in[0,r]$, be all distinct eigenvalues of $A$, and let $m_j$ be the multiplicity of $\lambda_j$.\\
Recall that {\it a random walk on $G$} is a discrete-time random process $(X_n,\,n\in\mathbb{Z}_+)$, where $X_0$ is a vertex chosen uniformly at random from $[N]$, and, for every $n\in\mathbb{Z}_+$, $X_{n+1}$ is chosen uniformly at random from the neighbors of $X_n$ in $G$. For $x,y\in[N]$, let $$
P^t(x,y):=\Pb{X_t=y|X_0=x} $$ and $P^t=(P^t(x,y),\,x,y\in[N])$ be the $t$-step transition probability matrix.
For a positive integer $t$, $\homo{C_t,G}$ is exactly the trace of $A^t$. Since the trace of $A^t$ equals the sum of its eigenvalues and the eigenvalues of $A^t$ can be computed as the $t$th power of the eigenvalues of $A$~(see, e.g., \cite{Meyer2000}), we get \begin{equation} \homo{C_t,G}=\sum\limits_{j=0}^{r}m_j\lambda_j^t. \label{eq:homo_eigen} \end{equation}
If $G$ is vertex-transitive, then, clearly, all $P^t(z,z)$, $z\in[N]$, are equal to each other. Then, for every $x\in[N]$, \begin{equation} NP^t(x,x)=\sum_{z\in[N]} P^t(z,z)=\frac{\homo{C_t,G}}{N_1^t}. \label{eq:homo_prob} \end{equation} Therefore,~(\ref{eq:homo_eigen}) implies that \begin{equation} \label{eq_t_step_trans_prob} P^t(x,x)=\frac{1}{N}\sum\limits_{j=0}^{r}m_j\pr{\frac{\lambda_j}{N_1}}^t. \end{equation}
Notice that (due to the regularity of $G$) the distribution $\pi=(1/N,\ldots,1/N)\in\mathbb{R}^N$ is {\it stationary} meaning that $\pi P^1=\pi$. Let us here assume that $\lambda_0$ is the largest eigenvalue and $\lambda_1$ is the largest in absolute value eigenvalue distinct from $\lambda_0$. From the regularity of $G$ it follows that $\lambda_0=N_1$ and, from its connectedness, that $m_0=1$~\cite{Bapat2014}. Let us also assume that $\abs{\lambda_1}<\lambda_0$ (which is equivalent, for a connected graph, to the graph being non-bipartite~\cite{Bapat2014}).
Fix $v\in [N]$ and $\varepsilon>0$. Let us recall that {\it the variation distance} at time $t\in\mathbb{Z}_+$ with initial state $v$ is $$
\Delta_v(t)=\frac{1}{2}\sum_{u\in[N]}\left|P^t(v,u)-\frac{1}{N}\right|. $$ It is very well known~\cite{Sinclair1992} that {\it the mixing time} $\tau_v(\varepsilon):=\min\{t:\,\Delta_v(t')\leq\varepsilon\text{ for all }t'\geq t\}$ satisfies \begin{equation}
\tau_v(\varepsilon)\leq\left(1-\frac{|\lambda_1|}{N_1}\right)^{-1}\ln\frac{N}{\varepsilon}. \label{eq:mix_time} \end{equation}
\subsection{Eigenvalues of $G(n,r,s)$} \label{sec:eigen_Johnson}
Let $A$ be the adjacency matrix of $G(n,r,s)$. The eigenvalues of $A$ are known~\cite{Delsarte1973}. They are equal to \begin{equation}
\lambda_j=\sum\limits_{\ell=\max\{0,j-s\}}^{\min\{j,r-s\}}(-1)^{\ell}\binom{j}{\ell}\binom{r-j}{r-s-\ell}\binom{n-r-j}{r-s-\ell}, \quad j\in[0,r],
\label{eq:eigen_dist_exact} \end{equation} and the multiplicity of the eigenvalue $\lambda_j$ equals (we let ${n\choose -1}=0$) $$
m_j=\binom{n}{j}-\binom{n}{j-1}. $$
In order to prove Theorem~\ref{th_hom}, we need to analyze the asymptotic behaviour of the right-hand side of (\ref{eq_t_step_trans_prob}).
Notice that $\lambda_0=N_1$ and $m_0=1$. Also, for $j\in\irange{1}{s}$, \begin{equation} \frac{\lambda_j}{N_1}=\frac{\binom{r-j}{s-j}}{\binom{r}{s}}+O\left(\frac{1}{n}\right),\quad m_j=\frac{n^j}{j!}+O\left(\frac{1}{n}\right) \label{eq:eigen_distance_asymp1} \end{equation} and, for $j\in\irange{s+1}{r}$, \begin{equation} \abs{\frac{\lambda_j}{N_1}}\sim\frac{\binom{j}{s}(r-s)!}{\binom{r}{s}(r-j)!}n^{-(j-s)},\quad m_j=\frac{n^j}{j!}+O\left(\frac{1}{n}\right). \label{eq:eigen_distance_asymp2} \end{equation}
Therefore, for $t\ge2$ and $j\in\irange{s+1}{r}$, \begin{equation} \frac{m_j\lambda_j^t}{m_s\lambda_s^t}=(O(1))^t\cdot{n^{-(j-s)(t-1)}}=\pr{O\pr{n^{-(j-s)}}}^{t-1}=O\pr{\frac{1}{n}} \label{eq:compare_eigen_dist_large_j} \end{equation} implying that \begin{equation} P^t(x,x)=\frac{1+O\pr{\frac{1}{n}}}{N}\sum\limits_{j=0}^{s}m_j\pr{\frac{\lambda_j}{N_1}}^t= \frac{1}{N}\left(1+O\left(\frac{1}{n}\right)+\sum\limits_{j=1}^{s}m_j\left(\frac{\binom{r-j}{s-j}}{\binom{r}{s}}+O\left(\frac{1}{n}\right)\right)^t\right). \label{eq:hom_eigen_asymp} \end{equation}
\subsection{Random walk on $G(n,r,s)$} \label{sec:rw_Janson}
Here, we consider the random walk $(X_n,\,n\in\mathbb{Z}_+)$ on $G(n,r,s)$. Since $G(n,r,s)$ is vertex-transitive, for any $x\in{V}$, $$
\mono{C_t}=NN_1^t\Pb{X_t=x,X_0\ne{X_1}\ne\ldots\ne{X_{t-1}}|X_0=x}. $$ In order to prove Theorem~\ref{th_mon_asymp_eq_hom}, we bound the deviation of $\frac{\mono{C_t}}{\homo{C_t}}$ from $1$. For convenience, in this section, we express the bound in terms of the diagonal elements of $P^t$.
Due to (\ref{eq:homo_prob}), we get
\begin{align*} 0\le\frac{\homo{C_t}-\mono{C_t}}{\homo{C_t}}
&=\frac{P^t(x,x)-\Pb{X_t=x,X_0\ne{X_1}\ne\ldots\ne{X_{t-1}}|X_0=x}}{P^t(x,x)}=\\
&=\frac{\Pb{X_t=x,\exists{i,j}\in\irange{0}{t-1}:i\ne{j},X_i=X_j|X_0=x}}{\Pb{X_t=x|X_0=x}}. \end{align*} Note that the expression to the right is exactly the probability that the random walk meets itself somewhere on $[0,t-1]$ subject to $X_0=x$ and $X_t=x$.
By the union bound, \begin{align*}
\frac{\homo{C_t}-\mono{C_t}}{\homo{C_t}}&\le\sum\limits_{0\le{i}<j<t}\frac{\Pb{X_j=X_i,X_t=x|X_0=x}}{\Pb{X_t=x|X_0=x}}\\
&=\sum\limits_{0\le{i}<j<t}\frac{\sum\limits_{z\in{V}}\Pb{X_t=x|X_j=z}\Pb{X_j=z|X_i=z}\Pb{X_i=z|X_0=x}}{\Pb{X_t=x|X_0=x}}. \end{align*}
Due to vertex-transitivity of $G(n,r,s)$ the probabilities $\Pb{X_j=z|X_i=z}$ are equal for all $z$. Therefore, \begin{equation} \begin{split} \frac{\homo{C_t}-\mono{C_t}}{\homo{C_t}}
&\leq\sum\limits_{0\le{i}<j<t}\frac{\sum\limits_{z\in{V}}\Pb{X_{t-j+i}=x|X_i=z}P^{j-i}(x,x)\Pb{X_i=z|X_0=x}}{\Pb{X_t=x|X_0=x}}\\ &=\sum\limits_{0\le{i}<j<t}\frac{P^{t-j+i}(x,x)P^{j-i}(x,x)}{P^{t}(x,x)} =\sum\limits_{k=1}^{t-1}(t-k)\frac{P^{k}(x,x)P^{t-k}(x,x)}{P^{t}(x,x)}\\ &\le{t}\sum\limits_{k=1}^{t-1}\frac{P^{k}(x,x)P^{t-k}(x,x)}{P^{t}(x,x)}={t}\sum\limits_{k=2}^{t-2}\frac{P^{k}(x,x)P^{t-k}(x,x)}{P^{t}(x,x)}. \label{eq_gap_upper_bound} \end{split} \end{equation}
\section{Proof of Theorem~\ref{th_hom}} \label{proof_hom} From (\ref{eq:homo_prob}) and (\ref{eq:hom_eigen_asymp}), we get \begin{equation} \homo{C_t}= N_1^t\pr{1+O\left(\frac{1}{n}\right)+\sum\limits_{j=1}^{s}\frac{n^j}{j!}\pr{\frac{\binom{r-j}{s-j}}{\binom{r}{s}}+O\left(\frac{1}{n}\right)}^t}. \label{eq:hom_expansion} \end{equation} Let $j\in[s-1]$, $\varepsilon>0$. If $t<(1-\varepsilon)\frac{\ln n}{\ln{r-j\choose s-j}-\ln{r-j-1\choose s-j-1}}$, then $$
\left(\frac{{r-j\choose s-j}}{{r\choose s}}+o(1)\right)^t=
o\left[n\left(\frac{{r-j-1\choose s-j-1}}{{r\choose s}}+o(1)\right)^t\right] $$ implying that the $(j+1)$th term in the sum in (\ref{eq:hom_expansion}) is asymptotically bigger than the $j$th term. Similarly, if $t>(1+\varepsilon)\frac{\ln n}{\ln{r-j\choose s-j}-\ln{r-j-1\choose s-j-1}}$, then the $(j+1)$th term in the sum in (\ref{eq:hom_expansion}) is asymptotically smaller than the $j$th term. From this, we immediately get
$$ \homo{C_t}\sim \begin{cases} N_1^t\frac{n^s}{s!}{r\choose s}^{-t}, & t<(1-\varepsilon)\frac{\ln n}{\ln{r-s+1\choose s-s+1}-\ln{r-s\choose s-s}}; \\ N_1^t\frac{n^j}{j!}{r-j\choose s-j}^t{r\choose s}^{-t}, & \frac{(1+\varepsilon)\ln n}{\ln{r-j\choose s-j}-\ln{r-j-1\choose s-j-1}}<t<\frac{(1-\varepsilon)\ln n}{\ln{r-j+1\choose s-j+1}-\ln{r-j\choose s-j}},\,j\in[s-1]; \\ N_1^t, & t>(1+\varepsilon)\frac{\ln n}{\ln{r\choose s}-\ln{r-1\choose s-1}}. \end{cases}
$$ which proves Theorem~\ref{th_hom}. $\Box$
\section{Proof of sufficiency in Theorem~\ref{th_mon_asymp_eq_hom}} \label{proof_mon_asymp_eq_hom}
Here we prove that if $t=o\pr{\min\{\sqrt{N},N_1\}}$, then $$ \frac{\homo{C_t}-\mono{C_t}}{\homo{C_t}}\to 0,\quad n\to\infty. $$ Since this fraction is non-negative, it is sufficient to prove that the upper bound from (\ref{eq_gap_upper_bound}) approaches 0. Clearly, we may assume that $t\geq 4$.\\
By (\ref{eq_t_step_trans_prob}), for every $x\in V$, \begin{equation} \begin{split} \sum\limits_{k=2}^{t-2}P^{k}(x,x)P^{t-k}(x,x) &=\frac{1}{N^2}\sum\limits_{k=2}^{t-2}\sum\limits_{i,j=0}^{r}m_im_j \left(\frac{\lambda_j}{N_1}\right)^k\left(\frac{\lambda_i}{N_1}\right)^{t-k}\\ &\le\frac{t}{N^2}\sum\limits_{i=0}^{r}m_i^2\abs{\frac{\lambda_i}{N_1}}^t+ \frac{1}{N^2}\sum\limits_{i\ne{j}}{m_i{m_j}\abs{\frac{\lambda_i}{N_1}}^t\sum\limits_{k=2}^{t-2}\abs{\frac{\lambda_j}{\lambda_i}}^{k}}. \end{split} \label{eq:diagonal_decompose} \end{equation} Note that $\abs{\lambda_i}^t\sum\limits_{k=2}^{t-2}\abs{\frac{\lambda_j}{\lambda_i}}^{k}=\abs{\lambda_j}^t\sum\limits_{k=2}^{t-2}\abs{\frac{\lambda_i}{\lambda_j}}^{k}$. Therefore, for $n$ large enough, due to (\ref{eq:eigen_distance_asymp1})~and~(\ref{eq:eigen_distance_asymp2}), we get $$ \sum\limits_{i\ne{j}}{m_i{m_j}\abs{\frac{\lambda_i}{N_1}}^t\sum\limits_{k=2}^{t-2}\abs{\frac{\lambda_j}{\lambda_i}}^{k}}= 2\sum\limits_{0\le{i}<j\le{r}}{m_i{m_j}\abs{\frac{\lambda_i}{N_1}}^t\sum\limits_{k=2}^{t-2}\abs{\frac{\lambda_j}{\lambda_i}}^{k}}\leq 2\sum\limits_{0\le{i}<j\le{r}}{m_i{m_j}\abs{\frac{\lambda_i}{N_1}}^t\frac{\abs{\frac{\lambda_j}{\lambda_i}}^{2}}{1-\abs{\frac{\lambda_j}{\lambda_i}}}}. $$ Moreover, from (\ref{eq:eigen_distance_asymp1})~and~(\ref{eq:eigen_distance_asymp2}) we get that, for $j>i$ and $j>s$, $\frac{\lambda_j}{\lambda_i}=O\left(\frac{1}{n}\right)$, while, for $i<j\leq s$, $$ \frac{\lambda_j}{\lambda_i}=\frac{(s-i)\ldots(s-j+1)}{(r-i)\ldots(r-j+1)}\left(1+O\left(\frac{1}{n}\right)\right). $$ The latter expression is less than $\frac{s}{r}$ if $j\neq 1$ and $n$ is large enough. If $i=0$, $j=1$, then, from (\ref{eq:eigen_dist_exact}), we get $$ \frac{\lambda_1}{\lambda_0}=\frac{{r-1\choose r-s}{n-r-1\choose r-s}-{r-1\choose r-s-1}{n-r-1\choose r-s-1}}{{r\choose r-s}{n-r\choose r-s}}< \frac{{r-1\choose r-s}{n-r-1\choose r-s}}{{r\choose r-s}{n-r\choose r-s}}=\frac{s(n-2r+s)}{r(n-r)}<\frac{s}{r}. 
$$ Then, for $n$ large enough, \begin{align*} \sum\limits_{i\ne{j}}{m_i{m_j}\abs{\frac{\lambda_i}{N_1}}^t\sum\limits_{k=2}^{t-2}\abs{\frac{\lambda_j}{\lambda_i}}^{k}} &\leq\frac{2}{1-s/r}\sum\limits_{0\le{i}<j\le{r}}{m_i{m_j}\abs{\frac{\lambda_i}{N_1}}^{t-2}\abs{\frac{\lambda_j}{N_1}}^{2}}\\ &\leq\frac{2}{1-s/r}\sum\limits_{i,j=0}^r{m_i{m_j}\abs{\frac{\lambda_i}{N_1}}^{t-2}\abs{\frac{\lambda_j}{N_1}}^{2}}\\ &=\frac{2}{1-s/r}\left(\sum\limits_{i=0}^r m_i\abs{\frac{\lambda_i}{N_1}}^{t-2}\right)\left(\sum\limits_{j=0}^r m_j\abs{\frac{\lambda_j}{N_1}}^{2}\right). \end{align*} By (\ref{eq:hom_eigen_asymp}) and the definition of $P^2$, $$ \sum\limits_{j=0}^r m_j\abs{\frac{\lambda_j}{N_1}}^{2}=NP^2(x,x)=\frac{N}{N_1}. $$ Moreover, by (\ref{eq:eigen_distance_asymp1}), (\ref{eq:compare_eigen_dist_large_j}) and (\ref{eq:hom_eigen_asymp}), \begin{align*}
\sum\limits_{i=0}^r m_i\abs{\frac{\lambda_i}{N_1}}^{t-2}\sim
\sum\limits_{i=0}^s m_i\abs{\frac{\lambda_i}{N_1}}^{t-2}\sim
\sum\limits_{i=0}^s m_i\left(\frac{\lambda_i}{N_1}\right)^{t-2}
&=O\left(\sum\limits_{i=0}^sm_i\left(\frac{\lambda_i}{N_1}\right)^{t}\right)\\
&=O\left(\sum\limits_{i=0}^r m_i\left(\frac{\lambda_i}{N_1}\right)^t\right)=
O(NP^t(x,x)). \end{align*} It remains to estimate the first summand in the rightmost expression in (\ref{eq:diagonal_decompose}). From (\ref{eq:eigen_distance_asymp1})~and~(\ref{eq:eigen_distance_asymp2}), we get that, for $j\in\irange{s+1}{r}$, $$ \frac{m_j^2\lambda_j^t}{m_s^2\lambda_s^t}=(O(1))^t\cdot{n^{-(j-s)(t-2)}}=\pr{O\pr{n^{-(j-s)}}}^{t-2}=o(1) $$ implying that $$ \sum\limits_{i=0}^{r}m_i^2\abs{\frac{\lambda_i}{N_1}}^t\sim \sum\limits_{i=0}^{s}m_i^2\left(\frac{\lambda_i}{N_1}\right)^t. $$ Putting everything together and applying (\ref{eq:hom_eigen_asymp}), we conclude that, for every $x\in V$, $$ t\sum\limits_{k=2}^{t-2}\frac{P^{k}(x,x)P^{t-k}(x,x)}{P^{t}(x,x)}\leq \frac{t^2}{N}\frac{\sum\limits_{i=0}^{s}m_i^2\left(\frac{\lambda_i}{N_1}\right)^t}{\sum\limits_{i=0}^{s}m_i \left(\frac{\lambda_i}{N_1}\right)^t}(1+o(1))+ O\left(\frac{t}{N_1}\right). $$ We finish with proving that the condition $t=o(\sqrt{N})$ implies $$
\frac{t^2}{N}\frac{\sum\limits_{i=0}^{s}m_i^2\left(\frac{\lambda_i}{N_1}\right)^t}{\sum\limits_{i=0}^{s}m_i \left(\frac{\lambda_i}{N_1}\right)^t}=o(1). $$ If $t>C\ln{n}$, where $C$ is a sufficiently large constant, then, by (\ref{eq:eigen_distance_asymp1}), for every $i\in[s]$, $m_i\left(\frac{\lambda_i}{N_1}\right)^t=o(1)$ and $m_i^2\left(\frac{\lambda_i}{N_1}\right)^t=o(1)$. Therefore, $$
\frac{t^2}{N}\frac{\sum\limits_{i=0}^{s}m_i^2\left(\frac{\lambda_i}{N_1}\right)^t}{\sum\limits_{i=0}^{s}m_i \left(\frac{\lambda_i}{N_1}\right)^t}\sim\frac{t^2}{N}=o(1). $$ If $t\leq C\ln{n}$, then, by (\ref{eq:eigen_distance_asymp1}), $$
\frac{t^2}{N}\frac{\sum\limits_{i=0}^{s}m_i^2\left(\frac{\lambda_i}{N_1}\right)^t}{\sum\limits_{i=0}^{s}m_i \left(\frac{\lambda_i}{N_1}\right)^t}\leq\frac{t^2 m_s}{N}(1+o(1))=O\left(\frac{n^s\ln^2 n}{N}\right)=O\left(\frac{\ln^2 n}{n^{r-s}}\right)=o(1). $$ The sufficiency in Theorem~\ref{th_mon_asymp_eq_hom} is proved. $\Box$
\section{Proof of necessity in Theorem~\ref{th_mon_asymp_eq_hom}} \label{sec:th3_proof}
As was already noted, the necessity of the condition in Theorem~\ref{th_mon_asymp_eq_hom} follows from a more general fact about spectral expanders. Consider a sequence of graphs $\{G_N, N\in\mathbb{N}\}$ such that $[N]$ is the set of vertices of $G_N$, and $G_N$ is non-bipartite, connected and $N_1$-regular ($N_1$ depends on $N$). Let $\lambda_1=\lambda_1(N)$ be the second largest in absolute value eigenvalue of the adjacency matrix of $G_N$. We call the sequence $\{G_N, N\in\mathbb{N}\}$ a \textit{spectral expander}, if there exists $\delta>0$ such that, for all large enough $N$, $\abs{\lambda_1}/N_1<1-\delta$.
\begin{theorem} \label{th_mon_asymp_non_eq_hom} Let $\{G_N, N\in\mathbb{N}\}$ be a spectral expander such that $N_1=\omega(\ln{N})$. Then for any $c>0$ there exists $\varepsilon>0$ such that, for sufficiently large $N$, if $t>c\min\{\sqrt{N},N_1\}$, then $\mono{C_t,G_N}<(1-\varepsilon)\homo{C_t,G_N}$. \end{theorem}
Obviously, Theorem~\ref{th_mon_asymp_non_eq_hom} implies the necessity of the condition $t=o\pr{\min\{\sqrt{N},N_1\}}$ for $\mono{C_t}\sim\homo{C_t}$ stated in Theorem~\ref{th_mon_asymp_eq_hom}.\\
To prove Theorem~\ref{th_mon_asymp_non_eq_hom}, we introduce a random walk on $G_N$ (see Section~\ref{sec:trans_matrix_and_mixing_time}).
In Section~\ref{sec:rw_Janson}, we note that the proportion of self-intersecting cycles in $G(n,r,s)$ is exactly the probability that the random walk meets itself somewhere on $[0,t-1]$ subject to $X_0=x$ and $X_t=x$. It is easy to see that (almost) the same is true for $G_N$ since $X_0$ is chosen uniformly at random from $[N]$. Indeed, \begin{align*}
\homo{C_t,G_N}=\sum_{x\in[N]}P^t(x,x)N_1^t
&=NN_1^t\sum_{x\in[N]}P^t(x,x)\Pb{X_0=x}\\
&=NN_1^t\sum_{x\in[N]}\Pb{X_t=X_0=x}=NN_1^t\Pb{X_t=X_0}. \end{align*} In the same way, $\mono{C_t,G}=NN_1^t\Pb{X_t=X_0,X_0\neq X_1\neq\ldots\neq X_{t-1}}$. Therefore, \begin{equation}
\frac{\homo{C_t,G}-\mono{C_t,G}}{\homo{C_t,G}}=\Pb{\exists{i,j}\in\irange{0}{t-1}:i\ne{j},X_i=X_j|X_t=X_0}. \label{eq:relative_error} \end{equation}
Further, we separately consider cases $\sqrt{N}=o(N_1)$ and $N_1=O(\sqrt{N})$.
\subsection{$\sqrt{N}=o(N_1)$} \label{sec:th3_proof_1}
Let us first bound from above $P^t(x,y)$ for arbitrary $x,y$ from $[N]$ and an integer $t\geq 1$. Since $P^t(x,y)=\sum_{v\in [N]}P^{t-1}(x,v)P^1(v,y)$, we get that \begin{equation} P^t(x,y)\leq\max_{v\in[N]} P^1(v,y)\leq\frac{1}{N_1}. \label{eq:transition_above} \end{equation} Let us also notice that from (\ref{eq:mix_time}) it immediately follows that there exists a constant $\kappa>0$ such that, for all $t\geq\kappa\ln N$ and all $x,y\in[N]$, we have \begin{equation}
\left|P^t(x,y)-\frac{1}{N}\right|<\frac{1}{N^2}. \label{eq:mixing_Janson} \end{equation}
Let us fix a positive $\tilde c<\min\{c,1\}$ and prove that the random walk subject to $X_t=X_0$ intersects itself during the first $\tilde{c}\sqrt{N}$ steps with non-zero probability. Let $$ \mathcal{J}:=\setdef{(i,j)\in[t]^2}{\kappa\ln N<i<j-3\kappa\ln N<j<\tilde{c}\sqrt{N}}. $$
Note that $|\mathcal{J}|=\frac{\tilde c^2+o(1)}{2}N$
and that $j<t-\kappa\ln{N}$ for every $(i,j)\in\mathcal{J}$.
For all $(i,j)\in\mathcal{J}$, we have that $$
\frac{\Pb{X_i=X_j,X_t=X_0}}{\Pb{X_t=X_0}}= \sum_{x,u\in [N]} \frac{P^i(x,u)P^{j-i}(u,u)P^{t-j}(u,x)}{\sum_{y\in [N]}P^t(y,y)} = \frac{1+o(1)}{N} $$ due to (\ref{eq:mixing_Janson}). Note that $o(1)$ in the expression above converges to $0$ uniformly over all $(i,j)\in\mathcal{J}$. Therefore, \begin{equation} \sum_{(i,j)\in\mathcal{J}}\frac{\Pb{X_i=X_j,X_t=X_0}}{\Pb{X_t=X_0}}
= \frac{\tilde c^2}{2}+o(1). \label{eq:relative_error_first_moment} \end{equation}
Let $$ \begin{aligned} \mathcal{J}_0&:= \setdef{\{(i_1,j_1),(i_2,j_2)\}\in{\mathcal{J}\choose 2}}{\pr{\forall\br{i,j}\subset\{i_1,i_2,j_1,j_2\}:\abs{i-j}\ge\kappa\ln{N}}},\\ \mathcal{J}_1&:= \setdef{\{(i_1,j_1),(i_2,j_2)\}\in{\mathcal{J}\choose 2}}{\pr{\exists!\br{i,j}\subset\{i_1,i_2,j_1,j_2\}:\abs{i-j}<\kappa\ln{N}}},\\ \mathcal{J}_2&:= \setdef{\{(i_1,j_1),(i_2,j_2)\}\in{\mathcal{J}\choose 2}}{\max\br{\abs{i_1-i_2},\abs{j_1-j_2}}<\kappa\ln{N}}. \end{aligned}
$$
It is clear from the definition of $\mathcal{J}$ that $\mathcal{J}_0\sqcup\mathcal{J}_1\sqcup\mathcal{J}_2={\mathcal{J}\choose 2}$ (recall that $j-i>3\kappa\ln{N}$ for every $(i,j)\in\mathcal{J}$). We have $$
|\mathcal{J}_0|=\frac{1}{2}|\mathcal{J}^2|(1+o(1))=\frac{\tilde c^4+o(1)}{8}N^2,\quad
|\mathcal{J}_1|<4\kappa\ln N(\tilde c\sqrt{N})^3,\quad
|\mathcal{J}_2|<(2\kappa\ln N\tilde c\sqrt{N})^2. $$ As above, uniformly over all $\{(i_1,j_1),(i_2,j_2)\}\in\mathcal{J}_0$, \eqref{eq:mixing_Janson} implies $$ \frac{\Pb{X_{i_1}=X_{j_1},X_{i_2}=X_{j_2},X_t=X_0}}{\Pb{X_t=X_0}}
= \frac{1+o(1)}{N^2}. $$ Uniformly over all $\{(i_1,j_1),(i_2,j_2)\}\in\mathcal{J}_1$, the relations (\ref{eq:transition_above}) and (\ref{eq:mixing_Janson}) imply $$ \frac{\Pb{X_{i_1}=X_{j_1},X_{i_2}=X_{j_2},X_t=X_0}}{\Pb{X_t=X_0}}
\leq\frac{1+o(1)}{NN_1}. $$ Uniformly over all $\{(i_1,j_1),(i_2,j_2)\}\in\mathcal{J}_2$, \eqref{eq:transition_above} implies $$ \frac{\Pb{X_{i_1}=X_{j_1},X_{i_2}=X_{j_2},X_t=X_0}}{\Pb{X_t=X_0}}
\leq\frac{1+o(1)}{N_1^2}. $$ Summing up and recalling that, in the current case, $\sqrt{N}=o(N_1)$, \begin{multline} \sum_{\{(i_1,j_1),(i_2,j_2)\}\in{\mathcal{J}\choose 2}} \frac{\Pb{X_{i_1}=X_{j_1},X_{i_2}=X_{j_2},X_t=X_0}}{\Pb{X_t=X_0}}\\
\leq \frac{\tilde c^4+o(1)}{8}+\frac{(4\kappa\tilde c^3+o(1))\sqrt{N}\ln N}{N_1}+ \frac{(2\kappa\tilde c+o(1))^2 N \ln N}{N_1^2}= \frac{\tilde c^4+o(1)}{8}. \label{eq:relative_error_second_moment} \end{multline} From (\ref{eq:relative_error}), (\ref{eq:relative_error_first_moment}) and (\ref{eq:relative_error_second_moment}) we get \begin{align*} \frac{\homo{C_t,G}-\mono{C_t,G}}{\homo{C_t,G}} & \geq \sum_{(i,j)\in\mathcal{J}} \frac{\Pb{X_i=X_j,X_t=X_0}}{\Pb{X_t=X_0}}\\
&-\sum_{\{(i_1,j_1),(i_2,j_2)\}\in{\mathcal{J}\choose 2}} \frac{\Pb{X_{i_1}=X_{j_1},X_{i_2}=X_{j_2},X_t=X_0}}{\Pb{X_t=X_0}}\\
&=\frac{\tilde c^2}{2}-\frac{\tilde c^4}{8}+o(1). \end{align*} Since $\frac{\tilde c^2}{2}-\frac{\tilde c^4}{8}>0$, we conclude that $\mono{C_t,G}/\homo{C_t,G}$ is bounded away from $1$ as needed.
\subsection{$N_1=O(\sqrt{N})$}
\label{sec:th3_proof_2} W.l.o.g. we may assume that $c<1$ and prove that the random walk subject to $X_t=X_0$ intersects itself during the first $cN_1$ steps with non-zero probability. In the same way as in Section~\ref{sec:th3_proof_1}, we use~(\ref{eq:relative_error}). However, here we consider all $(i,j)$ such that $i$ is even and $j=i+2\leq cN_1$. Let $\mathcal{J}:=\setdef{2i}{i\in\irange{0}{\floor{\frac{cN_1-2}{2}}}}$. For every $i\in\mathcal{J}
$, we have $$ \frac{\Pb{X_i=X_{i+2},X_t=X_0}}{\Pb{X_t=X_0}}
= \frac{1}{N}\sum_{x,u\in[N]} \frac{P^i(x,u)P^2(u,u)P^{t-i-2}(u,x)}{\Pb{X_t=X_0}} =
\frac{1}{N_1}\frac{\Pb{X_{t-2}=X_0}}{\Pb{X_t=X_0}} \sim \frac{1}{N_1} $$ since $P^2(u,u)=\frac{1}{N_1}$ for all $u\in[N]$ and $\Pb{X_{t-2}=X_0}\sim\Pb{X_t=X_0}\sim1/N$ due to (\ref{eq:mixing_Janson}). Thus, $$ \sum_{i\in\mathcal{J}} \frac{\Pb{X_i=X_{i+2},X_t=X_0}}{\Pb{X_t=X_0}}
= \frac{c}{2}+o(1). $$ Now, let $i_1,i_2\in\mathcal{J}$, $i_1<i_2$. Then, similarly,
\begin{align*} &\frac{\Pb{X_{i_1}=X_{i_1+2},X_{i_2}=X_{i_2+2},X_t=X_0}}{\Pb{X_t=X_0}}=\\
&=\frac{1}{N}\sum_{x,u,v\in[N]} \frac{P^{i_1}(x,u)P^2(u,u)P^{i_2-i_1-2}(u,v)P^2(v,v)P^{t-i_2-2}(v,x)}{\Pb{X_t=X_0}}\\ &=\frac{1}{N}\sum_{x,u,v\in[N]} \frac{P^{i_1}(x,u)P^{i_2-i_1-2}(u,v)P^{t-i_2-2}(v,x)}{N_1^2\,\Pb{X_t=X_0}}\\ &=\frac{\Pb{X_{t-4}=X_0}}{N_1^2\,\Pb{X_t=X_0}} \sim\frac{1}{N_1^2}. \end{align*}
Therefore, \begin{align*} \frac{\homo{C_t,G}-\mono{C_t,G}}{\homo{C_t,G}} & \geq \sum_{i\in\mathcal{J}} \frac{\Pb{X_i=X_{i+2},X_t=X_0}}{\Pb{X_t=X_0}}\\
&-\sum\limits_{i_1,i_2\in\mathcal{J}:i_1<i_2} \frac{\Pb{X_{i_1}=X_{i_1+2},X_{i_2}=X_{i_2+2},X_t=X_0}}{\Pb{X_t=X_0}}\\
&=\frac{c}{2}-\frac{c^2}{8}+o(1). \end{align*} Since $c/2-c^2/8>0$, this finishes the proof of Theorem~\ref{th_mon_asymp_non_eq_hom} and therefore of Theorem~\ref{th_mon_asymp_eq_hom}. $\Box$
\end{document} |
\begin{document}
\title{Heralded quantum memory for single-photon polarization qubits } \author{G. W. Lin$^{1}$} \author{X. B. Zou$^{1}$} \email{xbz@ustc.edu.cn} \author{X. M. Lin$^{2}$} \email{xmlin@fjnu.edu.cn} \author{G. C. Guo$^{1}$} \affiliation{$^{1}$Key Laboratory of Quantum Information, Department of Physics, University of Science and Technology of China, Hefei 230026, People's Republic of China} \affiliation{$^{2}$School of Physics and Optoelectronics Technology, Fujian Normal University, Fuzhou 350007, People's Republic of China}
\pacs{03.67.Hk, 03.67.-a, 42.50.-p
}
\begin{abstract} We propose a scheme to implement a heralded quantum memory for single-photon polarization qubits with a single atom trapped in an optical cavity. In this scheme, an injected photon only exchanges quantum state with the atom, so that the heralded storage can be achieved by detecting the output photon. We also demonstrate that the scheme can be used for realizing the heralded quantum state transfer, exchange and entanglement distribution between distant nodes. The ability to detect whether the operation has succeeded or not is crucial for practical application.
\end{abstract} \maketitle
\emph{Introduction}.---A quantum memory, a storage device that can faithfully store and retrieve quantum state of traveling light pulse, is requisite to long-distance quantum communication networks and distributed quantum computers \cite{Kimble,Briegel,Duan,Cirac,D. Boozer,Zhao,Jiang,Kok,Brassard}. By following the seminal protocol proposed by Duan et al. \cite{Duan}, significant advance has been made in storing single photon by using atomic ensembles as storage mediums \cite{Chou,Matsukevich,Chane,Eisaman,Black,Thomp,Choi,Karpa,B. Julsg,Julsgaa,Takano,Alexey,Chuu,Honda,Appel,Chen}. Early experiments using atomic ensembles mainly focused on the storage and retrieval of single photon with the fixed polarization \cite{Chane,Eisaman}. Recent experiments show that single-photon polarization qubits can be stored in two atomic ensembles \cite{Choi,Karpa}. However, above studies are thwarted by photon losses since one can't determine whether or not the incoming photon has been stored or lost. Fortunately, such unpredictable failure may be largely remedied by a heralding feature that announces photon arrival and successful storage without destroying the stored quantum state. In Ref. \cite{Haruka}, a heralded storage has been achieved by means of a spontaneous Raman process that can absorb a single-photon with arbitrary polarization and simultaneously emit a photon with fixed polarization. However, due to small spontaneous Raman scattering probability, the heralded storage occurs rarely, while its polarization state is restored with high fidelity. Cavity quantum electrodynamics (QED) provides another ideal interface between atoms and photons \cite{Wilk,Wang,Cirac,van Enk,Boozer,Fleischhauer,Lloyd}. In the initial proposal for the implementation of quantum networks in cavity QED \cite{Cirac}, the quantum information of photons is encoded in the Fock basis, i.e., the zero- and one-photon Fock states \cite{Cirac,Boozer}. Ref. 
\cite{Lloyd} proposed a robust method for transmitting entangled polarization state over long distances and teleportation of atomic state via measurements of all four Bell states, using a novel method of sequential elimination.
In this paper, we propose a heralded quantum memory for an arbitrary polarization state with a single atom trapped in an optical cavity. Our scheme is based on a quantum-state swap between a single-photon pulse and a trapped atom. The heralded storage can be achieved by detecting the output photon. Numerical simulation results show that our scheme has a high success probability and that the retrieved photon has such well-defined waveforms that it can easily interfere with other photons. We also show that the scheme can allow the heralded quantum state transfer, exchange and entanglement distribution between distant nodes. The ability to detect whether the operation has succeeded or not is crucial for practical application \cite{Briegel,Haruka}.
\emph{The building block and numerical simulations}.---As shown in Fig.1(a), our model consists of a single atom inside a one-sided optical cavity. \begin{figure}
\caption{(Color online) (a) Schematic setup to reflect a single photon pulse from one-sided cavity. (b) The relevant atomic level structure and transitions.}
\label{1}
\end{figure}The relevant atomic levels are depicted in Fig. 1(b), the states $\left\vert l\right\rangle $ and $\left\vert r\right\rangle $ correspond to two stable ground states, and $\left\vert e\right\rangle $ denotes an excited state. The cavity supports two degenerate cavity modes $a^{h}$ and $a^{v}$ with different polarizations h and v. We assume that the transitions $\left\vert l\right\rangle \leftrightarrow\left\vert e\right\rangle $ and $\left\vert r\right\rangle \leftrightarrow\left\vert e\right\rangle $ are resonantly coupled to two cavity modes $a^{h}$ and $a^{v}$ with the strength $g_{h}$ and $g_{v}$, respectively. The interaction between the atom and cavity modes is described by $H_{I}=g_{h}\left\vert e\right\rangle \left\langle l\right\vert a^{h}+g_{v}\left\vert e\right\rangle \left\langle r\right\vert a^{v}+H.c.$. The time evolutions of cavity modes $a^{h}$ and $a^{v}$ are given by \cite{Cirac,Gardiner,Duan3} \begin{align} \dot{a}^{h(v)}\left( t\right) & =-i\left[ a^{h(v)}\left( t\right) ,H_{I}^{^{\prime}}\right] -\frac{\kappa}{2}a^{h(v)}\left( t\right) \nonumber\\ & -\sqrt{\kappa}a_{in}^{h(v)}\left( t\right) , \end{align} where $H_{I}^{^{\prime}}=H_{I}-i\frac{\gamma_{e}}{2}\left\vert e\right\rangle \left\langle e\right\vert $ \cite{Fleischhauer,Duan4}, $\kappa$ is the cavity decay rate for two cavity modes, and $\gamma_{e}$ is spontaneous-emission rate of the excited state. The output operator $a_{out}^{h(v)}\left( t\right) $ is connected with the input operator $a_{in}^{h(v)}\left( t\right) $ by the input-output relation $a_{out}^{h(v)}\left( t\right) =a_{in}^{h(v)}\left( t\right) +\sqrt{\kappa}a^{h(v)}\left( t\right) $.
In this paper, two cases are considered: (i) the atom is prepared in the state $\left\vert l\right\rangle $ ( or $\left\vert r\right\rangle $), and a v-polarized (h-polarized) photon $\left\vert v\right\rangle $ ($\left\vert h\right\rangle $) is injected. In this case, the Hamiltonian $H_{I}^{^{\prime }}$ does not work and the injected photon sees an empty cavity. Thus, the polarization of the input photon is not changed and we have \cite{Gardiner,Duan3} \begin{equation} a_{out}\left( \omega\right) =\frac{-\kappa/2-i\delta}{\kappa/2-i\delta }a_{in}\left( \omega\right) , \end{equation} where we assume that the center frequency of input photon pulse $\omega_{0}$ is resonant with the cavity mode and $\delta=\omega-\omega_{0}$. If $\kappa \gg\delta$, we have $a_{out}\left( \omega\right) \approx-a_{in}\left( \omega\right) $. Next we consider the case (ii): the atom is prepared in the state $\left\vert l\right\rangle $ ( or $\left\vert r\right\rangle $), and a h-polarized (v-polarized) photon $\left\vert h\right\rangle $ ($\left\vert v\right\rangle $) is injected. In this case, taking the adiabatic limit \cite{Fleischhauer,Duan4}, we can obtain the input-output relation \begin{equation} a_{out}\left( \omega\right) =a_{out}^{h(v)}\left( \omega\right) +a_{out}^{v(h)}\left( \omega\right) , \end{equation} with \begin{equation} a_{out}^{h(v)}\left( \omega\right) =[1-\frac{\kappa}{\kappa/2-i\delta +2g_{h(v)}^{2}\kappa/(4g_{v(h)}^{2}+i\kappa\gamma_{e})}]a_{in}^{h(v)}\left( \omega\right) , \end{equation} and \begin{equation} a_{out}^{v(h)}\left( \omega\right) =\frac{g_{h(v)}\kappa/g_{v(h)}} {[\kappa/2-i\delta+g_{h(v)}^{2}\kappa/(2g_{v(h)}^{2}+i\kappa\gamma_{e} /2)]}a_{in}^{h(v)}\left( \omega\right) . \end{equation} It is seen from Eq. (3-5) that if conditions $\kappa\gg\delta$, $4g_{v(h)} ^{2}\gg\kappa\gamma_{e}$, and $g_{h}\approx g_{v}$ are satisfied, we have $a_{out}^{v(h)}\left( \omega\right) \approx$ $a_{in}^{h(v)}\left( \omega\right) $. 
Therefore, the polarization of the input photon is changed and we realize state flip operation $\left\vert l\right\rangle \left\vert h\right\rangle \leftrightarrow\left\vert r\right\rangle \left\vert v\right\rangle $.
Now, we give a detailed analysis of this building block through numerical simulation with the Hamiltonian \cite{Fleischhauer,Duan4} \begin{align} H & =H_{I}^{^{\prime}}+
{\displaystyle\sum\limits_{\digamma=h,v}}
{\displaystyle\int\nolimits_{-\omega_{b}}^{+\omega_{b}}}
\omega d\omega\Theta_{\digamma}^{\dagger}(\omega)\Theta_{\digamma} (\omega)\nonumber\\ & +i\sqrt{\frac{\kappa}{2\pi}}
{\displaystyle\sum\limits_{\digamma=h,v}}
{\displaystyle\int\nolimits_{-\omega_{b}}^{+\omega_{b}}}
d\omega\lbrack a^{\digamma}\Theta_{\digamma}^{\dagger}(\omega)-a^{\digamma \dagger}\Theta_{\digamma}(\omega)], \end{align} where $\Theta_{\digamma}(\omega)$ with the standard relation $[\Theta _{\digamma}(\omega),\Theta_{\digamma}^{\dagger}(\omega^{^{\prime}} )]=\delta(\omega-\omega^{^{\prime}})$ denotes the one-dimensional free-space mode coupled to the cavity modes.
We assume that the input single-photon pulse is a Gaussian pulse of the form $f\left( t\right) \propto\exp\left[ -\left( t-\frac{T}{2}\right) ^{2}/\left( \frac{T}{5}\right) ^{2}\right] $, where $T$ is the pulse duration. We first consider the above-mentioned case (ii), in which a polarized photon $\left\vert h\right\rangle $ ($\left\vert v\right\rangle $) is injected into the cavity and the atom is prepared in the state $\left\vert l\right\rangle $ $(\left\vert r\right\rangle )$. Let $P$ be the success probability for the flip $\left\vert l\right\rangle \left\vert h\right\rangle \leftrightarrow \left\vert r\right\rangle \left\vert v\right\rangle $. In Fig. 2, we plot $P$ versus the normalized cavity coupling rate $g/\kappa$, assuming $\gamma _{e}=\kappa$, with different pulse widths: $\kappa T=\{10,20,30,40,50,60,120\}$. From Fig. 2, \begin{figure}
\caption{(Color online) The success probability P for the flip $\left\vert l\right\rangle \left\vert h\right\rangle \leftrightarrow\left\vert r\right\rangle \left\vert v\right\rangle $ as a function of $g/\kappa$ for $\kappa T=\{10,20,30,40,50,60,120\}$, on the assumption that $\gamma_{e}=\kappa$ and $g_{h}
=g_{v}$=g.}
\label{a}
\end{figure}we see that when $g\geq2\kappa$ and $\kappa T$ $\geq60$ the success probability can be up to $90\%.$ \begin{figure}
\caption{(Color online) The shape functions $|f(t)|$ for the input pulse (solid curve) and the output pulse (dashed curve) for the case $\kappa T$ = (a) 10, (b) 30, (c) 60, and (d) 90. Other common parameters: $\gamma_{e}=\kappa$, $g_{h}
=g_{v}=g$, and $g=2\kappa$.}
\label{aa}
\end{figure}Fig. 3 shows that when $\kappa T\geq60$ the output pulse shapes
$|f(t)|$ almost completely overlap with the input pulse shapes. Then, we consider the case (i) that a polarized photon $\left\vert h\right\rangle $ ($\left\vert v\right\rangle $) is injected into cavity and the atom is in the state $\left\vert r\right\rangle $ $(\left\vert l\right\rangle )$. In this case, the atom is decoupled from the cavity field, thus the influence of the atomic spontaneous emission can be ignored and the dominant noise is the distortion between the output and the input pulse. In Fig. 4, we plot the output and the input pulses shapes with different the pulse width: $\kappa T=\{30,60,90,120\}$.
\begin{figure}
\caption{(Color online) The shape functions $|f(t)|$ for the input pulse (solid curve) and the output pulse (dashed curve) for the case $\kappa T$ = (a) 30, (b) 60, (c) 90, and (d) 120.}
\label{s}
\end{figure}\emph{Quantum swap gate and Quantum memory}.---Assume that the injected photon and the atom are initially in arbitrary superposition of two polarized states and two ground states, respectively, i.e., $(\alpha\left\vert h\right\rangle +\beta\left\vert v\right\rangle )\otimes(\zeta\left\vert l\right\rangle +\eta\left\vert r\right\rangle ),$ where coefficients $\alpha$, $\beta$, $\zeta$, and $\eta$ satisfy relations $\left\vert \alpha\right\vert ^{2}+\left\vert \beta\right\vert ^{2}=1$ and $\left\vert \zeta\right\vert ^{2}+\left\vert \eta\right\vert ^{2}=1$. After the photon pulse is reflected by the cavity, the state of total system in the ideal case will evolve into $(-\eta\left\vert h\right\rangle +\zeta\left\vert v\right\rangle )\otimes(-\beta\left\vert l\right\rangle +\alpha\left\vert r\right\rangle )$, which corresponds to quantum state exchange operation between photon and atom, apart from phase factors that can be eliminated by the appropriate subsequent logic operations \cite{Lin}. We quantify the quality of swap gate through a numerical simulation, and the parameters are referred to Ref. \cite{Sauer}, i.e., $(g_{0},\kappa,\gamma_{e})/2\pi=(27,4.8,6)MHz$. Suppose that the initial state of system is given by $\left\vert \Psi(0)\right\rangle =(\left\vert h\right\rangle +\left\vert v\right\rangle )\otimes(\left\vert l\right\rangle +\left\vert r\right\rangle )/2$, Fig. 5 shows that the swap gate has a high fidelity and the variation of fidelity is about 0.01 for g varying from $g_{0}$ to $g_{0}/2$.
\begin{figure}
\caption{(Color online) The fidelity of the quantum state exchange operation with $g/g_{0}$. Here we have assumed a Gaussian shape for the input pulse with $f\left( t\right) \propto\exp[ -(t-\frac{T}{2})^{2}/(\frac{T}{5})^{2}] $, and chosen the parameters $T=5\mu s$, $(g_{0},\kappa,\gamma_{e})/2\pi=(27,4.8,6)MHz$, $g_{h}
=g_{v}=g$.}
\label{2}
\end{figure}
Based on above-mentioned swap operation, we consider the heralded quantum storage. Suppose that a photon is initially in arbitrary polarized state $(\alpha\left\vert h\right\rangle +\beta\left\vert v\right\rangle )$ and atom is prepared in the state $\left\vert r\right\rangle $. After the photon pulse is reflected by the cavity, the state of system becomes \begin{equation} (\alpha\left\vert h\right\rangle +\beta\left\vert v\right\rangle )\otimes\left\vert r\right\rangle \rightarrow\left\vert h\right\rangle \otimes(\beta\left\vert l\right\rangle -\alpha\left\vert r\right\rangle ). \end{equation} Detection of the reflected photon in the state $\left\vert h\right\rangle $ heralds the mapping of the input polarization state onto atom. We note that Fleischhauer et al. have proposed an interesting scheme for storing single-photon quantum states by adiabatic evolution of dark states, which has a high success probability, but not be heralded \cite{Fleischhauer}. For transmitting entangled polarization state over long distances in realistic noisy channels, Ref. \cite{Lloyd} proposed a robust method for capturing photon in optical cavities, and storing it in atoms, in which they detected if the atom has jumped out of the initial state, i.e., absorbed a photon, using the fluorescence technique by driving a cycling transition from initial state of the atom to an accessorial excited state.
The approach for retrieving photonic states is similar to that for storing process. A photon pulse in the state $\left\vert h\right\rangle $ is reflected by the cavity, the state evolution of system can be represented by \begin{equation} \left\vert h\right\rangle \otimes(\beta\left\vert l\right\rangle -\alpha\left\vert r\right\rangle )\rightarrow(\alpha\left\vert h\right\rangle +\beta\left\vert v\right\rangle )\otimes\left\vert r\right\rangle . \end{equation} After exchanging quantum states, the photonic qubit has been successfully retrieved and leaving atom in initial state $\left\vert r\right\rangle $ for storage of another photon.
\emph{Quantum communication protocols}.---One application of our scheme is the heralded quantum state transfer and exchange between two atomic memories, as shown in Fig. 6(a). \begin{figure}
\caption{(Color online) (a) Quantum state transfer from cavity A to cavity B. (b) Creation of entanglement between two atomic memories. Where D, PBS, C, BS, and M denote single photon detector, polarizing beam splitter, circulator, 50/50 beam splitter, and mirror, respectively.}
\label{6}
\end{figure}Suppose that the initial state of a single photon pulse and two atomic memories is $\left\vert \Phi(0)\right\rangle =\left\vert h\right\rangle \otimes(\alpha\left\vert l\right\rangle _{A}+\beta\left\vert r\right\rangle _{A})\otimes\left\vert l\right\rangle _{B}$, where $\left\vert \theta \right\rangle _{\chi}(\theta=l,r$, $\chi=A,B)$ denotes quantum state of the atom trapped in cavity $\chi$. The photon pulse is sequentially reflected by the cavities A and B, the state of the combined system in the ideal case will evolve into $\left\vert v\right\rangle \otimes\left\vert r\right\rangle _{A}\otimes(\alpha\left\vert l\right\rangle _{B}+\beta\left\vert r\right\rangle _{B})$. Detection of the photon in the state $\left\vert v\right\rangle $ heralds that quantum state transfer from cavity A to cavity B has been performed successfully. If the initial state of the photon-atom system is prepared in $\left\vert \Phi^{^{\prime}}(0)\right\rangle =\left\vert h\right\rangle \otimes(\alpha\left\vert l\right\rangle _{A}+\beta\left\vert r\right\rangle _{A})\otimes(\zeta\left\vert l\right\rangle _{B}+\eta\left\vert r\right\rangle _{B})$, the photon pulse is sequentially reflected by the cavities A and B, then reflected by the cavity A again. the state of combined system will evolve into $\left\vert h\right\rangle \otimes(\zeta\left\vert l\right\rangle _{A}+\eta\left\vert r\right\rangle _{A})\otimes(\alpha \left\vert l\right\rangle _{B}+\beta\left\vert r\right\rangle _{B})$. After the detection of the photon in the state $\left\vert h\right\rangle $, one can known the quantum states of the atoms trapped in cavities A and B have been directly exchanged.
Another immediate application is the heralded distribution of entanglement between two atomic memories, which is shown in Fig. 6(b). A single-photon pulse in the state $\left\vert v\right\rangle $ is divided into two paths by a 50/50 beam splitter (BS), one is reflected by a one-sided optical cavity trapped an single atom with the quantum state $\left\vert r\right\rangle $. The other is reflected by a mirror. The state evolution of this process can be written as $\left\vert v\right\rangle \left\vert r\right\rangle \rightarrow (\left\vert v\right\rangle \left\vert r\right\rangle +\left\vert h\right\rangle \left\vert l\right\rangle )/\sqrt{2}$, which is an atom-photon maximal entangled state. Then the state of the photon is stored into another atomic memory by detection of the reflected photon. After the successful generation of entanglement between two atomic memories within the attenuation length, one wants to extend the quantum communication distance. This is done through entanglement swapping \cite{Briegel,Duan,Razavi,Simon}. Suppose that we start with two pairs of entangled memories. First, one of each entangled pair is mapped into single photon with retrieval operation. Second, two photons from two pairs of entangled ensembles are performed Bell-state measurement with two photon interference \cite{Huang}. After an entangled state has been established between two distant memories, we can use it in entanglement-based communication protocols, such as quantum teleportation, cryptography, and Bell inequality detection with linear optics elements \cite{Duan}, since the quantum states between atomic memories and photons can be transferred in a reversible manner \cite{Kimble}.
Next we briefly address the experiment feasibility of the proposed schemes. We consider a $^{87}$Rb atom trapped in a one-sided Fabry--Perot cavity \cite{Sauer}. The states $\left\vert l\right\rangle $ and $\left\vert r\right\rangle $ correspond to $\left\vert F=1,m=-1\right\rangle $ and $\left\vert F=1,m=1\right\rangle $ of $5S_{1/2}$ ground levels, respectively, while $\left\vert e\right\rangle $ corresponds to $\left\vert F=1,m=0\right\rangle $ of $5P_{3/2}$ excited level. The relevant cavity QED parameters for this system are assumed as $(g_{0},\kappa,\gamma_{e} )/2\pi=(27,4.8,6)MHz$ \cite{Sauer}, which fit well the condition $4g_{0} ^{2}/\kappa\gamma_{e}\sim102\gg$ $1$. Suppose that the average photon absorption rate in a fiber is $1-e^{-L/L_{att}}$, where $L$ is the length of the fiber and the channel attenuation length $L_{att}\sim22km$ \cite{Simon}. If the single-photon detection efficiency is $\eta_{d}\sim0.95$ \cite{Simon}, the success probability for heralded quantum state or generation of entanglement between two atomic memories with the distance $L\sim10km$ is $P_{1}\sim P^{2}\eta_{d}e^{-L/L_{att}}\sim0.55$. For quantum communication over long distance $L_{m}=2^{m}L$, one needs mth (m = 1, 2,...,N) entanglement connection. The total probability to create entanglement across two communication nodes at a distance of 1280 km is about $P_{1}
{\displaystyle\prod\nolimits_{n=1}^{6}}
P_{n}\sim P_{1}\Bigl(\tfrac{1}{2}P^{2}\eta_{d}^{2}e^{-L/L_{att}}\Bigr)^{6}\sim0.0002$.
\emph{Conclusion}.---In summary, we have analyzed the heralded quantum memory for single-photon polarization state with a single atom trapped in an optical cavity \cite{Sauer}, and demonstrated its applications in quantum communication network \cite{Kimble}. In our scheme, storage and retrieval of photon polarization states have a high probability and fidelity. Heralded storage, entanglement distribution, and quantum communication are achieved by detecting the reflected photon. Thus in a realistic application operation errors due to all sources of photon loss, including atomic spontaneous emission, cavity mirror absorption and scattering, and photon collection, are always signaled by the absence of a photon count. As a result, the photon loss only decreases the success probability but has no contribution to the gate infidelity if the operation succeeds (i.e., if a photon count is registered). The ability to detect whether the operation has succeeded or not, is crucial for practical application.
\textbf{Acknowledgments:} This work was funded by National Natural Science Foundation of China (Grant No. 10574022 and Grant No. 60878059), the Natural Science Foundation of Fujian Province of China (Grant No. 2007J0002), the Foundation for Universities in Fujian Province (Grant No. 2007F5041), and \textquotedblleft Hundreds of Talents \textquotedblright\ program of the Chinese Academy of Sciences.
\end{document} |
\begin{document}
\title{MGProx: A nonsmooth multigrid proximal gradient method with adaptive restriction
for strongly convex optimization
\thanks{Submitted to the editors DATE.
\funding{
Supported in part by a joint postdoctoral fellowship by
the Fields Institute for Research in Mathematical Sciences and the University of Waterloo, and in part by Discovery Grants from the Natural Sciences and Engineering Research Council (NSERC) of Canada.
}}}
\begin{abstract} We study the combination of proximal gradient descent with multigrid for solving a class of possibly nonsmooth strongly convex optimization problems. We propose a multigrid proximal gradient method called MGProx, which accelerates the proximal gradient method by multigrid, based on utilizing hierarchical information of the optimization problem. MGProx applies a newly introduced adaptive restriction operator to simplify the Minkowski sum of subdifferentials of the nondifferentiable objective function across different levels. We provide a theoretical characterization of MGProx. First we show that variables at all levels exhibit a fixed-point property at convergence. Next, we show that the coarse correction is a descent direction for the fine variable in the general nonsmooth case. Lastly, under some mild assumptions we provide the convergence rate for the algorithm. In the numerical experiments, we show that MGProx has a significantly faster convergence speed than proximal gradient descent and proximal gradient descent with Nesterov's acceleration on nonsmooth convex optimization problems such as the Elastic Obstacle Problem. \end{abstract}
\begin{keywords} multigrid, restriction, proximal gradient, subdifferential, convex optimization,
obstacle problem \end{keywords}
\begin{MSCcodes} 49J52, 49M37, 65K05, 65N55, 90C25, 90C30, 90C90, \end{MSCcodes}
\section{Introduction}\label{sec:mgopt} We study the combination of two iterative algorithms: proximal gradient descent and multigrid, to solve the following class of optimization problems \begin{equation}\label{prob:minfg} \argmin_x ~ F_0(x) \coloneqq f_0(x) + g_0(x), \end{equation} where function $f_0 : \mathbb{R}^n \rightarrow \mathbb{R}$ is
proper, $L_0$-smooth and $\mu_0$-strongly convex, and function $g_0 : \mathbb{R}^n \rightarrow \overline{\mathbb{R}} \coloneqq \mathbb{R} \cup \{+\infty\}$ is proper, possibly nonsmooth, convex, lower semi-continuous, and separable. We recall that a function $\zeta(x) : \mathbb{R}^n \rightarrow \mathbb{R}$ \begin{itemize} \item is $L$-smooth if $\zeta$ is $C^{1,1}_L$ and $\nabla \zeta$ is $L$-Lipschitz. I.e., for all $x,y$ in $\mathbb{R}^n$, $\nabla \zeta(x)$ exists and \begin{equation}
\zeta(y) \leq \zeta(x) + \big\langle \nabla \zeta(x), y-x \big\rangle + \frac{L}{2}\|y-x\|_2^2; \label{def:Lsmooth} \end{equation}
\item is $\mu$-strongly convex (and thus coercive) with $\mu>0$ if $\zeta(x) - \frac{\mu}{2}\| x \|_2^2$ is convex;
\item is separable if $\zeta(x) = \sum_i \zeta_i(x_i)$. \end{itemize}
\paragraph{Modern models are nonsmooth} Advancements in nonsmooth (i.e., nondifferentiable) optimization \cite{moreau1962fonctions,rockafellar1970convex,shor1985minimization,combettes2005signal,beck2009fast,bauschke2011convex,beck2017first} enable the use of nonsmooth $g_0$ in the formulation \eqref{prob:minfg}. Then \eqref{prob:minfg} captures many models in machine learning and computational science (e.g., see \cite{combettes2005signal,parikh2014proximal} and the references therein), where $f_0$ is a smooth data fitting term and the nonsmooth $g_0$ models the constraint(s) and/or regularization(s) of the application. A popular tool for solving \eqref{prob:minfg} is the proximal gradient method \cite{passty1979ergodic,fukushima1981generalized,combettes2005signal,parikh2014proximal,beck2017first}, to be reviewed in \cref{sec:intro:subsec:prox}.
\paragraph{Classical problems in scientific computing are smooth} Setting $g \equiv 0$ in \eqref{prob:minfg} gives the problem class of smooth strongly convex optimization, i.e., \begin{equation}\label{prob:mgopt} \min_{x \in \mathbb{R}^n} ~ f_0(x), \end{equation} which subsumes many problems in scientific computing, e.g., in solving classes of partial differential equations (PDEs). If problem \eqref{prob:mgopt} comes from the discretization of certain classes of PDE problems,
multigrid methods \cite{fedorenko1962relaxation,brandt1977multi,hackbusch1980convergence,hackbusch1985multi,nash2000multigrid}, to be reviewed in \cref{sec:intro:subsec:mgopt}, are among the fastest known methods for solving \eqref{prob:mgopt}.
\paragraph{This work: bridging smoothness and nonsmoothness} Multigrid and nonsmooth optimization are two communities that seldom interact. In this work we link these two fields and develop a method that can handle nonsmooth problems while enjoying the fast convergence from multigrid. We propose \texttt{MGProx} that accelerates the proximal-gradient method by multigrid to solve Problem \eqref{prob:minfg} efficiently. Below we review multigrid and the proximal gradient method.
\subsection{Classical multigrid and notation}\label{sec:intro:subsec:mgopt} Multigrid dates back to the 1960s with works by Fedorenko \cite{fedorenko1962relaxation} on solving the Poisson equation, and was then further developed by Brandt \cite{brandt1977multi} and Hackbusch \cite{hackbusch1980convergence}. There are many multigrid frameworks; in this work we focus on \texttt{MGOPT}: a full approximation scheme \cite{brandt1977multi} nonlinear multigrid method which was applied and extended to optimization problems by Nash \cite{nash2000multigrid}. \texttt{MGOPT} speeds up the convergence of an iterative algorithm (called smoothing or relaxation) by using a hierarchy of coarse discretizations of $f_0$: it first constructs a series of coarse auxiliary problems of the form \begin{equation} \min_{x_\ell} \, f_\ell(x_\ell) - \langle \tau_{\ell-1 \rightarrow \ell}, \,x_\ell \rangle, \qquad \ell \in \{ 1,2,\dots, L \}, \label{prob:mgopt_coarse} \end{equation} where $\tau_{\ell-1 \rightarrow \ell}$ carries information from level $\ell-1$ to level $\ell$. \texttt{MGOPT} then makes use of the solution of \eqref{prob:mgopt_coarse} to solve \eqref{prob:mgopt}. The convergence of the overall algorithm is sped up by the correction from the coarse levels and by the fact that the coarse problems are designed to be ``less expensive'' to solve than the given ones.
\paragraph{Notation} The symbol $x_0$ (or $x$) is called the fine variable. The symbol $x_\ell$ in \eqref{prob:mgopt_coarse} with $\ell \geq 1$ is called coarse variable. The subscript $\ell \in \{0,1,\dots, L \}$ denotes the level. A larger $\ell$ means a coarser level with lower resolution (fewer variables). We use $\ell=0$ to denote the finest level with the highest resolution and $\ell = L$ to denote the coarsest level with the lowest resolution. For the remainder of the paper, $L$ without a subscript stands for the number of levels, whereas $L_{\ell}$ denotes the smoothness parameter for the level-$\ell$ problem.
At a level $\ell$, the coarse version of the vector $x_\ell \in \mathbb{R}^{n_\ell}$ is $x_{\ell+1} = \overline{\mathcal{R}}(x_\ell) \coloneqq \overline{R}x_\ell$ where $\overline{R} \in \mathbb{R}^{n_{\ell+1} \times n_{\ell}}$ with $n_{\ell+1} \leq n_{\ell}$ is called a restriction matrix. Similarly, given $x_{\ell+1} \in \mathbb{R}^{n_{\ell+1}}$ and a prolongation matrix $\overline{P} \in \mathbb{R}^{ n_{\ell} \times n_{\ell +1}}$, we obtain the level-$\ell$ version of $x_{\ell+1}$ as $x_{\ell} = \overline{\mathcal{P}}(x_{\ell+1}) \coloneqq \overline{P} x_{\ell+1}$. We let $\overline{P} = c \overline{R}^\top$ where scaling factor $c>0$ and $\overline{R}$ are pre-defined. In multigrid, choosing $(\overline{R},\overline{P})$ depends on the application. In this work, for the applications we consider commonly chosen $(\overline{R},\overline{P})$.
Let $x_{\ell}^{k}$ be the level-$\ell$ variable at iteration $k$, Algorithm~\ref{algo:mgopt} shows 2-level ($\ell \in \{0,1\}$) \texttt{MGOPT} \cite{nash2000multigrid} for solving \eqref{prob:mgopt}, with the steps in the algorithm explained as follows: \begin{itemize} \item (i): $\sigma: \mathbb{R}^n \rightarrow \mathbb{R}^n$ denotes an update iteration that we refer to as pre-smoothing. In this work we focus on $\sigma$ being the proximal gradient operator.
\item (ii): the restriction step.
\item (iii): the vector $\tau_{0 \rightarrow 1}^{k+1}$ carries the information at level $\ell = 0$ to level $\ell = 1$.
\item (iv): the coarse problem \eqref{prob:mgopt_coarse} is a ``smaller version'' of the original fine problem. The function $f_1 = \overline{\mathcal{R}}(f_0)$ is the coarsening of $f_0$ and the linear term $\langle \tau_{0 \rightarrow 1}^{k+1}, \xi \rangle$ links the coarse variable with the $\tau$-correction information from the fine variable.
\item (v): the updated coarse variable $x_1^{k+1}$ is used to update the fine variable $y_0^{k+1}$.
Here $\alpha >0$ is a stepsize.
\item (vi): this step is the same as (i). \end{itemize} Furthermore, due to the $\tau$-correction, steps (i)--(vi) guarantee a fixed-point property when the sequence $\big\{x^k_0\big\}_{k \in \mathbb{N}}$ produced by the 2-level \texttt{MGOPT} algorithm converges.
\begin{algorithm} \caption{2-level \texttt{MGOPT} \cite{nash2000multigrid} for an approximate solution of \eqref{prob:mgopt}} \label{algo:mgopt} \begin{algorithmic} \STATE{Initialize $x_0^1$, $\overline{R}$ and $\overline{P}$} \FOR{$k = 1,2,\dots$}
\STATE{(i)~~~~ $y^{k+1}_0 = \sigma(x^k_0)$
pre-smoothing
} \STATE{(ii)~~~ $y^{k+1}_{1} = \overline{R} y_0^{k+1}$
coarse variable
} \STATE{(iii)~~ $\tau^{k+1}_{0 \rightarrow 1} = \nabla f_1( y^{k+1}_1 ) - \overline{R} \nabla f_0( y^{k+1}_0 )$
create the tau vector
} \STATE{(iv)~~ $x^{k+1}_1 = \displaystyle \argmin_{\xi} \, f_1(\xi) - \langle \tau_{0\rightarrow 1}^{k+1}, \xi \rangle$
solve the coarse problem (exactly) } \STATE{(v)\,~~~ $z^{k+1}_0 = y^{k+1}_0 + \alpha \overline{P}\big( x^{k+1}_1 - y_1^{k+1} \big)$
coarse correction
} \STATE{(vi)~\,~ $x^{k+1}_0 = \sigma (z^{k+1}_0)$
post-smoothing~\, }
\ENDFOR \end{algorithmic} \end{algorithm}
\begin{remark}[\texttt{MGOPT} has no theoretical convergence guarantee]\label{remark:NashWrong}
The proof of \cite[Theorem 1]{nash2000multigrid} on the convergence of \texttt{MGOPT} requires additional assumptions. In short the proof states the following: on solving \eqref{prob:mgopt} with an iterative algorithm $x^{k+1} \coloneqq \sigma (x^k)$ where the update map $\sigma : \mathbb{R}^n \rightarrow \mathbb{R}^n$ is assumed to be converging from any starting point $x^1$, now suppose $\rho : \mathbb{R}^n \rightarrow \mathbb{R}^n$ is some other operator with the descending property that $f_0 (\rho(x)) \leq f_0(x)$. Then \cite[Theorem 1]{nash2000multigrid} claimed that an algorithm consisting of interlacing $\sigma$ with $\rho$ repeatedly is also convergent. This is generally not true without further assumptions. E.g., consider a function $f(x_1,x_2)$ that is equal to $\frac{1}{1+x_2^2}$ on the set $U \coloneqq \{ (x_1,x_2) : |x_1| \geq 1 \}$ and on the complementary set $\mathbb{R}^2 \setminus U$ that $f(x_1,x_2)$ has a unique minimizer at $(0,0)$. Then $\sigma : (x_1,x_2) \mapsto \frac{9}{10}(x_1,x_2)$ and $\rho|_U : (x_1,x_2) \mapsto (\frac{10}{9}x_1,2x_2)$ satisfies the hypothesis but diverges from any stationary point in $\{(x_1,x_2) : |x_1| \geq \frac{10}{9}\}$. \end{remark}
\subsection{Proximal gradient method}\label{sec:intro:subsec:prox} Nowadays subgradient \cite{rockafellar1970convex} and proximal operator \cite{moreau1962fonctions} are standard tools for designing first-order algorithms to solve nonsmooth optimization problems \cite{combettes2005signal,parikh2014proximal,beck2017first}, especially for large-scale optimization where computing higher-order derivatives (e.g. the Hessian) is not feasible. Here we give a quick review of the proximal operator and proximal gradient operator. We review subgradient in \cref{sec:mgprox:subsec:subdifferential}.
Rooted in the concept of Moreau's envelope \cite{moreau1962fonctions}, the proximal gradient method was first introduced in the 1980s in \cite[Eq. (4)]{fukushima1981generalized} as a generalization of the proximal point method \cite{rockafellar1976monotone}. Under the abstraction of monotone operators, the proximal gradient method is understood as a forward-backward algorithm \cite{passty1979ergodic}, and it was later popularized by \cite{combettes2005signal} as the proximal forward-backward splitting. Nowadays the proximal gradient method is a ubiquitous tool in machine learning \cite{parikh2014proximal,beck2017first}.
The proximal gradient method solves problems of the form \eqref{prob:minfg} as follows. Starting from an initial guess $x^1$, the method updates the variable by a gradient descent step (with a stepsize $\alpha > 0$) followed by a proximal step associated with $g_0$: \begin{subequations}\label{eq:5} \begin{align} x^{k+1} &\,= \textrm{prox}_{\alpha g_0} \Big( x^k - \alpha \nabla f_0(x^k) \Big),
\label{updt:proxgrad} \\ \textrm{prox}_{\alpha g_0}(x) & \coloneqq
\underset{u}{\argmin} ~ \Big\{\, \alpha g_0(u) + \frac{1}{2} \| u - x \|_2^2 \,\Big\}
= \underset{u}{\argmin} ~ \Big\{\, g_0(u) + \frac{1}{2\alpha} \| u - x \|_2^2 \,\Big\}. \label{def:prox_opertor} \end{align} \end{subequations} If $f_0$ is $L_0$-smooth \eqref{def:Lsmooth}, we can set stepsize $\alpha$ in \eqref{updt:proxgrad} as $\alpha \in (0, \frac{2}{L_0} )$ \cite{beck2017first}. The proximal operator \eqref{def:prox_opertor} itself is also an optimization problem, and in practice many commonly used $g_0$ are ``proximable'' that \eqref{def:prox_opertor} has an efficiently computable closed-form solution. The proximal gradient method has many useful properties. To keep the introduction short, we introduce these properties later when needed.
\subsection{Contributions} In this work, our contributions are: \begin{enumerate} \item We propose \texttt{MGProx} (multigrid proximal gradient method) to solve \eqref{prob:minfg}. It generalizes \texttt{MGOPT} on smooth problems to nonsmooth problems using proximal gradient as the smoothing method. A key ingredient in \texttt{MGProx} is a newly introduced adaptive restriction operator in the multigrid process to handle the Minkowski sum of subdifferentials, to be explained in \cref{sec:mgprox}.
\item We provide theoretical results for 2-level \texttt{MGProx}: we show that \begin{itemize} \item \texttt{MGProx} exhibits a fixed-point property, see Theorem~\ref{thm:fpp};
\item the coarse correction update (in step (v) of Algorithm~\ref{aglo:2levelMG_exact}) is a descent direction for the fine variable, see the subdifferential obtuse angle condition in Theorem~\ref{thm:coarse_dir_descent} and Lemma~\ref{lemma:alphaexists} for the existence of a coarse correction stepsize that provides a descent condition;
\item the sequence $\{ x^k_0 \}_{k \in \mathbb{N}}$ at the finest level converges to the optimal value asymptotically with a rate $ \frac{1}{k}$ and $\big( 1 - \frac{\mu_0}{L_0} \big)^k$ , see Theorems~\ref{thm:rate1k} and \ref{thm:mgprox_converge_proxPL}; this result also establishes the convergence of \texttt{MGOPT} (for $\sigma$ being gradient update) in the convex case (see remark~\ref{remark:NashWrong}). \end{itemize}
\item We apply \texttt{MGProx} to Elastic Obstacle Problem, and show that multigrid can accelerate the standard proximal gradient method; we also show that \texttt{MGProx} runs faster than the Nesterov-accelerated proximal gradient method. See \cref{sec:exp}.
\end{enumerate}
\begin{remark}[On the convexity of $f_0$] We assume $f_0$ to be $\mu_0$-strongly convex for proving the theorems. In practice, \texttt{MGProx} can also be used if $f_0$ is not strongly convex but we do not have theoretical guarantees. \end{remark}
\subsection{Literature review} The idea of multigrid is natural when handling large-scale elliptic PDE problems.
\subsubsection{Nonlinear multigrid} The 2-level MGOPT algorithm (Algorithm \ref{algo:mgopt}) is an example of a \emph{full approximation scheme} (FAS) multigrid method for nonlinear problems. The FAS approach, which was first described in \cite{brandt1977multi}, adds a $\tau$-correction term to the coarse nonlinear problem to ensure that the multigrid cycle satisfies a fixed-point property.
There is an alternative multigrid approach for solving nonlinear problems, which is the so-called Newton-multigrid method (see, e.g., \cite[Ch. 6]{briggs2000multigrid}), where the fine-level problem is first linearized using Newton's method and the linear systems in each Newton iteration are solved approximately using a linear multigrid method.
In the context of optimization problems, nonlinear multigrid methods can be devised to either work directly on the optimization problem and coarse versions of the optimization problem (as \texttt{MGOPT} does), or they can be designed to work on the fine-level optimality conditions and coarse versions of them.
\subsubsection{A review of multigrid on nonsmooth composite minimization} \paragraph{Early multigrid methods} Early multigrid methods for non-smooth problems like \eqref{prob:minfg} pertain to the case of constrained optimization problems where $g_0$ is an indicator function on the feasible set. For example, \cite{brandt1983multigrid} and \cite{mandel1984multilevel} develop multigrid methods for a symmetric positive definite (SPD) quadratic optimization problem with a bound constraint, which is equivalent to a linear complementarity problem. This applies, for example, to linear Elastic Obstacle Problems where $g_0$ is a box indicator function that models non-penetration constraints. In \cite{hackbusch1983multi} this is extended to more general constrained nonlinear variational problems with SPD Fr\'echet derivatives, and to their associated nonlinear variational inequalities. In more recent work, \cite{graser2009truncated} develops a Newton-MG method for an SPD quadratic optimization problem with more general but separable nonsmooth $g_0$. This is extended in \cite{graser2019truncated} to a nonlinear objective function with nonsmooth $g_0$.
\paragraph{How our approach differs} Our approach is similar to \cite{brandt1983multigrid,hackbusch1983multi,mandel1984multilevel} in that we use a FAS approach, but our approach applies to general $g_0$ functions that go beyond indicator functions and include nonsmooth regularizations. While \cite{brandt1983multigrid} and \cite{mandel1984multilevel} deal with linear problems, our approach applies to general nonlinear $f_0$. In contrast to \cite{brandt1983multigrid} and \cite{hackbusch1983multi}, we don't use injection for the restriction operation, which often leads to slow multigrid convergence, but instead we use an adaptive restriction and interpolation mechanism that precludes coarse-grid updates to active points. Our adaptive restriction and interpolation mechanism is similar to the truncation process used in \cite{graser2009truncated,graser2019truncated}, but our approach uses a FAS framework while \cite{graser2009truncated,graser2019truncated} use Newton-MG, and, most importantly, we provide a convergence proof with convergence rates $\mathcal{O}(\frac{1}{k})$ and $(1-\frac{\mu_0}{L_0})^k$, while \cite{graser2019truncated} does not provide results on the rate of convergence. Furthermore, Newton-MG requires the computation of second order information (the Hessian), while MGProx is a first-order method.
\paragraph{Multigrid in image processing} Besides PDEs, multigrid was used in the 1990s in image processing for solving problems with a nondifferentiable total variation semi-norm in image recovery (e.g., \cite{vogel1996iterative,chantextordmasculine1998multigrid}). Note that these works bypassed the non-smoothness by \textit{smoothing} the total variation term, making them technically only solving \eqref{prob:mgopt} but not \eqref{prob:minfg}.
\paragraph{Multigrid in machine learning} In the 2010s multigrid started to appear in machine learning, e.g., $\ell_1$-regularized least squares \cite{treister2012multilevel} and Nonnegative Matrix Factorization \cite{gillis2012multilevel}. We remark that these works are not true multigrid methods as there is no $\tau$ in the schemes, nor is the information of the fine variable carried to the coarse variable when solving the problem.
\paragraph{Recent work} Lastly we note that a recent paper \cite{parpas2017multilevel} proposed a multilevel proximal gradient method with a FAS multigrid structure, however it also bypassed the technically challenging part of nonsmoothness by using a smooth approximation of $g_0$, making it similar to \cite{vogel1996iterative,chantextordmasculine1998multigrid} in that they are only solving \eqref{prob:mgopt} but not \eqref{prob:minfg}.
\subsection{Organization} In \cref{sec:mgprox} we present the 2-level \texttt{MGProx} method and discuss its theoretical properties. In \cref{sec:multilevel} we present the general multi-level \texttt{MGProx} method and discuss computational issues. We then demonstrate the performance of \texttt{MGProx} compared with other methods on several test problems in \cref{sec:exp}. In \cref{sec:conc} we conclude the paper.
\section{A two-level multigrid proximal gradient method}\label{sec:mgprox} In \cref{sec:mgprox:subsec:RP}-\ref{sec:mgprox:subsec:adaptiveR}, we review subgradients for nonsmooth functions, discuss their interaction with restriction (the coarsening operator), introduce the notion of adaptive restriction, and define the $\tau$ vector that carries the cross-level information. We introduce the 2-level \texttt{MGProx} method in \cref{sec:mgprox:subsec:2lv} and we provide theoretical results about the algorithm: fixed-point property (Theorem~\ref{thm:fpp}), descent property (Theorem~\ref{thm:coarse_dir_descent}), existence of coarse correction stepsize (Lemma~\ref{lemma:alphaexists}) and convergence rates (Theorems~\ref{thm:rate1k} and \ref{thm:mgprox_converge_proxPL}). Lastly in \cref{sec:mgprox:subsec:tau} we discuss some additional details of the $\tau$ vector selection.
\subsection{Functions at different levels}\label{sec:mgprox:subsec:RP} Following \cref{sec:intro:subsec:mgopt}, we use $f_\ell(x_{\ell}) : \mathbb{R}^{n_\ell} \rightarrow \mathbb{R}$ to denote functions at different coarse levels.
We denote the restriction of the fine objective function $F_0$ in \eqref{prob:minfg} as $ F_1 \coloneqq \mathcal{R}(F_0) = \mathcal{R}(f_0 + g_0)$, where $\mathcal{R}$ is defined below.
\begin{definition}[Restriction]\label{def:restriction} At level $\ell \in \mathbb{N}$, given a function $f_\ell : \mathbb{R}^{n_\ell} \rightarrow \mathbb{R}$, the restriction $\mathcal{R}$ of $f_\ell$, denoted as $f_{\ell+1} \coloneqq \mathcal{R}(f_\ell)$, is defined as $f_{\ell+1}(x_{\ell+1}) \coloneqq f_{\ell}(P x_{\ell+1})$, where $P : \mathbb{R}^{n_{\ell+1}} \rightarrow \mathbb{R}^{n_\ell}$ is a prolongation matrix associated with the restriction matrix $R : \mathbb{R}^{n_\ell} \rightarrow \mathbb{R}^{n_{\ell+1}}$ as $P = cR^\top$ where $c>0$ is a predefined constant. \end{definition}
\paragraph{Adaptive restriction and non-adaptive restriction} We shall recall that a contribution of this work is the introduction of the adaptive restriction, to be discussed in \cref{sec:mgprox:subsec:adaptiveR}. To differentiate the classical non-adaptive restriction (and the associated prolongation) from the adaptive version, we denote the non-adaptive restriction by $\overline{\mathcal{R}}, \overline{R},\overline{\mathcal{P}},\overline{P}$, and denote the adaptive one by $\mathcal{R}, R,\mathcal{P},P$. We remark that Definition~\ref{def:restriction} can be used for both versions of restriction and prolongation.
Composition with an injective affine map preserves strong convexity, so $f_{\ell+1}$ is strongly convex provided $f_{\ell}$ is.
Now we give an example of $\overline{R},\overline{P}$. We give another example in \cref{sec:exp}. \begin{example}\label{eg:RP} (Full weighting restriction and prolongation matrices for a vector)
\[ \overline{R} \coloneqq \dfrac{1}{4} \begin{bmatrix} 2 & 1 & \\
& 1 & 2 & 1 \\
& & & \ddots & \end{bmatrix} \in \mathbb{R}^{ n' \times n}, ~~ \overline{P} \coloneqq \frac{1}{2} \begin{bmatrix} 2 &
\\ 1 & 1
\\
& 2
\\
& 1 & 1
\\
& & \ddots \end{bmatrix} \in \mathbb{R}^{n \times n'}, ~~ n' = {\Big\lceil \frac{n-1}{2} \Big\rceil}.
\] $\overline{R}$ reduces the dimension by half, and $\overline{P} = c\overline{R}^\top$ doubles the dimension. Here, $c =2$. \end{example}
\subsection{Review of subgradient and subdifferential of nonsmooth functions}\label{sec:mgprox:subsec:subdifferential} The subdifferential \cite{rockafellar1970convex,shor1985minimization,bauschke2011convex} is a standard framework used in convex analysis to deal with nondifferentiable functions. A convex function $g(x) : \mathbb{R}^n \rightarrow \overline{\mathbb{R}}$ is called nonsmooth if it is not differentiable for some $x$ in $\mathbb{R}^n$.
A point $q \in \mathbb{R}^n$ is called a subgradient of $g$ at $x$ if for all $y \in \mathbb{R}^n$ the inequality $g(y) \geq g(x) + \langle q, y -x \rangle$ holds. The subdifferential of $g$ at a point $x$ is defined as the set of all subgradients of $g$ at $x$, i.e., \begin{equation}
\partial g(x) \coloneqq \Big\{~
q \in \mathbb{R}^n ~~\big|~~ g(y) \geq g(x) + \langle q, y -x \rangle ~\forall y \in \mathbb{R}^n ~\Big\} ~ \subset \mathbb{R}^n, \label{def:subdifferential_II} \end{equation} so $\partial g(x)$ is generally set-valued. If $g$ is differentiable at $x$, then $\nabla g(x)$ exists and the set $\partial g(x)$ reduces to the singleton $\big\{\nabla g(x)\big\}$.
For the purpose of this work we now review a second definition of subdifferential \cite[Def.1.1.4, p.165]{hiriart2004fundamentals}: $\partial g(x)$ is the nonempty compact convex set $\mathcal{S} \subset \mathbb{R}^n$ whose support function $d \mapsto \displaystyle \sup_{s}\{ \langle s, d\rangle \,|\, s \in \mathcal{S} \}$ is the directional derivative of $g$ at $x$, viewed as a function of the direction $d$. See \cite[Theorem 1.2.2]{hiriart2004fundamentals} for the equivalence between definitions.
\paragraph{Subdifferential sum rule}~~ For two functions $f, g$, generally $\partial (f+g) \neq \partial f \oplus \partial g$, where $\oplus$ denotes Minkowski sum, since subdifferentials are generally set-valued. The sum rule $\partial (f+g) = \partial f \oplus \partial g$ holds if $f,g$ satisfy a qualification condition (e.g. \cite[Theorem 3.36]{beck2017first}): the relative interior of the domain of $f$ has a non-empty intersection with the relative interior of the domain of $g$, i.e., $\forall x \in \textrm{dom} f \,\cap\, \textrm{dom} g$, \begin{equation} \textrm{ri} (\textrm{dom}\, f) \,\cap\, \textrm{ri} (\textrm{dom}\, g) \neq \varnothing
\implies
\partial \Big( f(x) + g(x) \Big) = \partial f(x) \oplus \partial g(x). \label{fact:subgrad_sum_rule} \end{equation} We have \eqref{fact:subgrad_sum_rule} for $f_0, g_0$ as $\textrm{dom}\, f_0
= \mathbb{R}^n$ by assumption. By Definition~\ref{def:restriction}, we have \eqref{fact:subgrad_sum_rule} for the coarse functions. To sum up, in this work the sum rule \eqref{fact:subgrad_sum_rule} holds for all levels $\ell$: \begin{equation} \partial F_\ell(x_\ell) \coloneqq \partial \Big( f_\ell(x_\ell) + g_\ell(x_\ell) \Big) = \partial f_\ell(x_\ell) \oplus \partial g_\ell(x_\ell) = \nabla f_\ell(x_\ell) + \partial g_\ell (x_\ell) , \label{ass:F_sum_rule} \end{equation} where $+$ is used instead of $\oplus$ in $\nabla f_\ell(x_\ell) + \partial g_\ell (x_\ell)$ because $\nabla f_\ell(x_\ell)$ is a singleton.
\paragraph{Notation for sets} From now on, when we encounter an expression containing both set-valued vector(s) and singleton vector(s), we underline the set-valued term(s) for visual clarity.
\subsection{Adaptive restriction and the \texorpdfstring{$\tau$}{} vector}\label{sec:mgprox:subsec:adaptiveR} Since subdifferentials are set-valued, we define $\tau$ in \texttt{MGProx} as a set. At a level $\ell$, we define a set $\underline{\tau_{\ell \rightarrow \ell+1}} ~\coloneqq~ \underline{\partial F_{\ell+1}(x_{\ell+1})} \oplus (- R) \underline{\partial F_\ell (x_\ell)} $, where $\ell \rightarrow \ell+1$ specifies that $\tau$ is connecting level $\ell$ to $\ell+1$, and the matrix $R$ here is an adaptive restriction operator that we will define soon. In \texttt{MGProx} we choose an element of $\underline{\tau_{\ell \rightarrow \ell+1}}$ as the tau vector. That is, at level $\ell=0$, \begin{subequations} \begin{align} \tau_{0 \rightarrow 1} ~\, \in \,~ \underline{\tau_{0 \rightarrow 1}} & ~\coloneqq~ \underline{\partial F_1(x_1)} \oplus (- R) \underline{\partial F_0(x_0)} \label{def:tau_mgprox1} \\ &\overset{\eqref{ass:F_sum_rule}}{=} \nabla f_1(x_1) - R \nabla f_0(x_0) + \underline{\partial g_1(x_1)} \oplus (- R) \underline{\partial g_0(x_0)}. \label{def:tau_mgprox2} \end{align} \end{subequations} Note that $\underline{\tau_{0 \rightarrow 1}}$ is a function of two points at two different levels. In \eqref{def:tau_mgprox1} $\underline{\tau_{0 \rightarrow 1}}$ is the Minkowski sum of two subdifferentials $\partial F_1(x_1) $ and $-R\partial F_0(x_0)$ which are generally set-valued. To obtain a tractable coarse-grid optimization problem (corresponding to line (iv) in Algorithm~\ref{algo:mgopt}) we need to avoid complications coming from the Minkowski sum, and we do this by modifying the standard restriction (and prolongation) as in \cref{eg:RP} by zeroing out columns in $\overline{R}$ to form $R$ such that the second subdifferential $R \partial g_0(x_0)$ in \eqref{def:tau_mgprox2} is a singleton vector. 
Similarly, we zero out the corresponding rows in $\overline{P}$ to form $P$ for the coarse correction step, such that non-differentiable fine points are not corrected by the coarse grid. This zeroing out process is adapted to the current point $x_0$, so we call this $R$ an adaptive restriction operator. In other words, the purpose of the adaptive restriction is to reduce a generally set-valued subdifferential $R\partial g_0(x_0)$ to a singleton. We denote the adaptive operator $R$ corresponding to a point $x$ as $R(x)$ and thereby the adaptive restriction of $x$ is denoted as $R(x)x$. Sometimes we just write $Rx$ if the meaning is clear from the context. Based on the above discussion, we now formally define adaptive restriction.
\begin{definition}[Adaptive restriction operator for separable $g$]\label{eg:adaptiveR_L1} For a possibly nonsmooth function $g : \mathbb{R}^n \rightarrow \mathbb{R}$ that is separable, i.e., with $x = [x_1,x_2,\dots,x_n]$, $g(x) = \sum g_i(x_i)$ where $g_i$ is a function only of component $x_i$, given a full restriction operator $\overline{R}$ and a vector $x$, the adaptive restriction operator $R$ with respect to a function $g$ at $x$ is defined by zeroing out the columns of $\overline{R}$ corresponding to the elements in $\partial g$ that are set-valued. \label{def:adaptiveR} \end{definition}
Now we give an illustrative example. See \cref{sec:exp} for other examples.
\begin{example}[$\ell_1$ norm]
$g(x) = \| x \|_1$ has subdifferential $[ \frac{\partial}{\partial x} g(x)]_i = \frac{\partial}{\partial x_i} |x_i|$. Absolute value is nondifferentiable at 0. The subdifferential of $|x|$ at 0 is the closed interval $[-1,1]$. Let $A_g(x) \coloneqq \big\{\, i \,|\, x_i = 0 \, \big\}$. Then the adaptive $R$ is obtained by zeroing out the columns of $\overline{R}$ with indices in $A_g$. Then $R \partial g(x)$ is a singleton vector. \end{example}
\paragraph{$\tau$ is an element of a set}~~ Now it is clear that the subdifferential $R \partial g_0(x_0)$ in \eqref{def:tau_mgprox2} is a singleton by definition. Note that the first subdifferential $\partial g_1(x_1)$ in \eqref{def:tau_mgprox2} is possibly set-valued, thus the right-hand side of \eqref{def:tau_mgprox2} is generally set-valued and so is $\underline{\tau_{0 \rightarrow 1}}$, and we define $\tau_{0 \rightarrow 1}$ to be a member of the set $\underline{\tau_{0 \rightarrow 1}}$. We emphasize that in the algorithm to be discussed below we can pick any value for $\tau_{0 \rightarrow 1}$ in the set. We will explore the choice of $\tau$ after we have given a complete picture of \texttt{MGProx}.
We are now ready to present \texttt{MGProx}. Here we present the 2-level \texttt{MGProx} method for illustration, and we move to the general multi-level version in \cref{sec:multilevel}. By the adaptive $R$, now all the Minkowski additions are trivial additions, so we use $+$ instead of $\oplus$.
\subsection{A 2-level \texorpdfstring{\texttt{MGProx} }{}algorithm}\label{sec:mgprox:subsec:2lv} Similar to the 2-level \texttt{MGOPT} method for solving Problem \eqref{prob:mgopt}, the 2-level \texttt{MGProx} method (Algorithm \ref{aglo:2levelMG_exact}) solves Problem \eqref{prob:minfg} by utilizing a coarse problem defined as \begin{equation}\label{prob:minfg_coarse} \argmin_{\xi \in \mathbb{R}^{n_1}}\, \bigg\{\, F^{\tau}_1 (\xi) ~\coloneqq~ F_1(\xi) - \langle \tau_{0 \rightarrow 1}^{k+1}, \xi \rangle ~=~ f_1 (\xi) + g_1(\xi) - \langle \tau_{0 \rightarrow 1}^{k+1}, \xi \rangle \,\bigg\}. \end{equation}
\begin{algorithm} \caption{2-level \texttt{MGProx} for an approximate solution of \eqref{prob:minfg}} \label{aglo:2levelMG_exact} \begin{algorithmic} \STATE{Initialize $x^1_0$, $R$ and $P$} \FOR{$k = 1,2,\dots$}
\STATE{(i)~~~~ $y^{k+1}_0 = \textrm{prox}_{\frac{1}{L_0}g_0} \Big (x^k_0 - \frac{1}{L_0} \nabla f_0(x^k_0) \Big)$
level-0 proximal gradient step
} \STATE{(ii)~~~ $y^{k+1}_1 = R(y^{k+1}_0) y^{k+1}_0 $
construct the level-1 coarse variable
} \STATE{(iii)~~ $\tau_{0 \rightarrow 1}^{k+1} \hspace{-1mm} \in\, \underline{\partial F_1(y^{k+1}_1 )} - R(y^{k+1}_0) \, \underline{\partial F_0(y^{k+1}_0)}$
construct the tau vector
} \STATE{(iv)~~ $ x^{k+1}_1 = \underset{\xi}{\argmin} \, \Big\{ F_1^{\tau}(\xi) \coloneqq F_1(\xi) - \langle \tau_{0 \rightarrow 1}^{k+1}, \xi \rangle \Big\} $
solve the level-1 coarse problem } \STATE{(v)\,~~~ $z^{k+1}_0 = y^{k+1}_0 + \alpha P\big( x^{k+1}_1 - y^{k+1}_1 \big)$
coarse correction
} \STATE{(vi)~~\, $x^{k+1}_0 = \textrm{prox}_{\frac{1}{L_0}g_0} \Big (z^{k+1}_0 - \frac{1}{L_0} \nabla f_0(z^{k+1}_0) \Big)$
level-0 proximal gradient step~~ }
\ENDFOR \end{algorithmic} \end{algorithm}
Here are some remarks for the steps in Algorithm~\ref{aglo:2levelMG_exact}. \begin{itemize}
\item (i): we perform one or more proximal gradient iterations on the fine variable with a constant stepsize $\frac{1}{L_0}$, where $L_0$ is the Lipschitz constant of $\nabla f_0$.
\item (iii): we pick a value within the set to define $\tau$;
as we are now using adaptive $R$, we use $+$ instead of $\oplus$ in the expression of $\tau$.
\item (v): $\alpha > 0$ is a stepsize; for its selection see \cref{sec:mgprox:subsec:alpha}. \end{itemize}
\subsubsection{Fixed-point property} Algorithm~\ref{aglo:2levelMG_exact} exhibits the following fixed-point property.
\begin{theorem}[Fixed-point]\label{thm:fpp} In Algorithm~\ref{aglo:2levelMG_exact}, if $x^k_0$ solves \eqref{prob:minfg}, then we have the fixed-point properties $x^{k+1}_0 =y^{k+1}_0 = x^k_0$ and $x^{k+1}_1 = y^{k+1}_1$. \begin{proof} The fixed-point property of the proximal gradient operator \cite[page 150]{parikh2014proximal} gives \begin{equation}\label{thm:fpp:e1}
y^{k+1}_0 \overset{\textrm{fixed-point}}{=}
x^k_0
\overset{\textrm{assumption}}{=} \argmin \, F_0(x). \end{equation} As a result, the coarse variable satisfies \begin{equation}\label{thm:fpp:e2} y^{k+1}_1 \coloneqq R y^{k+1}_0 \overset{\eqref{thm:fpp:e1}}{=} Rx^k_0. \end{equation} The subgradient first-order optimality condition for $ y^{k+1}_0 \overset{\eqref{thm:fpp:e1}}{\in} \argmin ~ F_0(x)$ gives $0 \in \underline{\partial F_0( y^{k+1}_0 )}$. Multiplying by $-R$ (which reduces the set $\underline{\partial F_0(x^k_0)}$ to a singleton) gives \begin{equation}\label{thm:fpp:e3}
0 = - R \underline{\partial F_0(x^k_0)}. \end{equation} Then adding $\partial F_1( y^{k+1}_1)$ on both sides of \eqref{thm:fpp:e3} gives \begin{subequations} \begin{align} &&\underline{\partial F_1( y^{k+1}_1)} &= \underline{\partial F_1( y^{k+1}_1)} - R(x^k_0) \underline{\partial F_0(x^k_0) } \label{thm:fpp:e4a} \\ && &\overset{\eqref{def:tau_mgprox1}}{\ni} \tau_{0 \rightarrow 1}^{k+1} \label{thm:fpp:e4b} \end{align} \end{subequations} In \eqref{thm:fpp:e3}, $- R \underline{\partial F_0(x^k_0)} $ is the zero vector, so the equality in \eqref{thm:fpp:e4a} holds since we are adding zero to a (non-empty) set. The inclusion \eqref{thm:fpp:e4b} follows from \eqref{def:tau_mgprox1} as $\underline{\partial F_1( y^{k+1}_1)} - R(x^k_0) \underline{\partial F_0(x^k_0) }$ is the set $\underline{\tau_{0 \rightarrow 1}^{k+1}}$.
Now rearranging \eqref{thm:fpp:e4b} gives $0 \in \underline{\partial F_1( y^{k+1}_1)} - \tau_{0 \rightarrow 1}^{k+1}$, which is exactly the subgradient 1st-order optimality condition for the coarse problem $ \underset{\xi}{\argmin} \, F_1( \xi ) - \big\langle \tau_{0 \rightarrow 1}^{k+1} , \xi \big\rangle$. By strong convexity of $ F_1( \xi ) - \big\langle \tau_{0 \rightarrow 1}^{k+1} , \xi \big\rangle$, the point $y^{k+1}_1$ is the unique minimizer of the coarse problem, so $x^{k+1}_1 = y^{k+1}_1$ by step (iv) of the algorithm and $x^{k+1}_0 = y^{k+1}_0 \overset{\eqref{thm:fpp:e1}}{=} x^k_0$ by steps (v) and (vi). \end{proof} \end{theorem}
Theorem~\ref{thm:fpp} shows that at convergence, we have fixed-point $x_0^{k+1}=y_0^{k+1}$ at fine level and also $x_1^{k+1} = y_1^{k+1}$ at the coarse level. Next we show that when $x_1^{k+1} \neq y_1^{k+1}$, the functional value sequence is converging.
\subsubsection{Coarse correction descent: angle condition} In nonsmooth optimization, descent direction properties are drastically different from smooth optimization \cite{noll2014convergence}. For example for the subgradient method, the classical angle condition no longer describes a useful set of search directions for the subgradient. In \texttt{MGProx} the coarse correction direction $P(x^{k+1}_1 - y^{k+1}_1)$ is a nonsmooth descent direction, and we will show that $P(x^{k+1}_1 - y^{k+1}_1)$ decreases the objective function value, based on the theorem below and Lemma~\ref{lemma:alphaexists}.
\begin{theorem}[Angle condition of coarse correction]\label{thm:coarse_dir_descent} For $P(x^{k+1}_1 - y^{k+1}_1) \neq 0$, the following directional derivative is strictly negative \begin{equation}\label{iq:descent_dir} \Big\langle \underline{\partial F_0(y^{k+1}_0)} , P(x^{k+1}_1 - y^{k+1}_1) \Big\rangle
< 0. \end{equation}
\end{theorem}
Before we prove the theorem we emphasize that \eqref{iq:descent_dir} applies for any subgradient in the set $\underline{\partial F_0(y^{k+1}_0)} $. Furthermore, \[ \eqref{iq:descent_dir}
\iff
\Big\langle P^\top \underline{\partial F_0(y^{k+1}_0)} , x^{k+1}_1 - y^{k+1}_1 \Big\rangle < 0
\overset{P^\top = cR, \, c >0}{\iff}
c \Big\langle R \underline{\partial F_0(y^{k+1}_0)} , x^{k+1}_1 - y^{k+1}_1 \Big\rangle < 0 . \] As $c, R,P$ are all element-wise nonnegative, showing \eqref{iq:descent_dir} is equivalent to showing \begin{equation} \Big\langle R \underline{\partial F_0(y^{k+1}_0)} , x^{k+1}_1 - y^{k+1}_1 \Big\rangle < 0, \label{pf:descent_to_prove} \end{equation} where $R \underline{\partial F_0(y^{k+1}_0)} $ is a singleton vector for all subgradients in $\underline{\partial F_0(y^{k+1}_0)}$ due to the adaptive $R$.
\begin{proof} By definition $\tau_{0 \rightarrow 1}^{k+1} \overset{\eqref{def:tau_mgprox1}}{\in} \underline{\partial F_1(y_1^{k+1})} - R \underline{\partial F_0(y_0^{k+1})}$ hence \begin{equation} R \underline{\partial F_0(y_0^{k+1})} \in \underline{\partial F_1(y_1^{k+1})} - \tau_{0 \rightarrow 1}^{k+1} \overset{\eqref{prob:minfg_coarse}}{=} \underline{\partial F_1^\tau(y_1^{k+1})}, \label{thm:coarse_dir_descent:e1} \end{equation} showing that $R \underline{\partial F_0(y_0^{k+1})} $ is a subgradient of $F_1^{\tau}$ at $y_1^{k+1}$. For any subgradient in the subdifferential $\underline{\partial F_1^\tau(y_1^{k+1})} $, we have the following which implies \eqref{pf:descent_to_prove}: \[ \begin{array}{rcl} \Big\langle \underline{\partial F_1^{\tau}(y^{k+1}_1)}, x^{k+1}_1 - y^{k+1}_1 \Big\rangle ~<~ F_1^{\tau}(x^{k+1}_1) - F_1^{\tau}(y^{k+1}_1) ~<~ 0, \end{array} \] where the first strict inequality is due to $F_1^\tau$ being a strongly convex function (which implies strict convexity)
; the second inequality is by $x^{k+1}_1 \coloneqq \underset{\xi}{\argmin}\, F_1^{\tau}(\xi)$ and the assumption that $x_1^{k+1} \neq y_1^{k+1}$. \end{proof}
\begin{remark} Theorem~\ref{thm:coarse_dir_descent} holds for convex but not strongly convex $f_0$ by replacing $<$ with $\leq$. \end{remark}
\subsubsection{Existence of coarse correction stepsize \texorpdfstring{$\alpha_k$}{}} \label{sec:mgprox:subsec:alpha} Based on Theorem~\ref{thm:coarse_dir_descent}, we now show that there exists a stepsize $\alpha_k > 0$ such that \begin{equation} F_0(z^{k+1}_0) \coloneqq F_0\Big(\, y^{k+1}_0 + \alpha_k P(x^{k+1}_1 - y^{k+1}_1) \,\Big) < F_0(y^{k+1}_0). \label{iq:coarse_descent_on_F} \end{equation}
\begin{lemma}[Existence of stepsize]\label{lemma:alphaexists} There exists $\alpha_k > 0$ such that \eqref{iq:coarse_descent_on_F} is satisfied for $P(x^{k+1}_1 - y^{k+1}_1) \neq 0$. \end{lemma}
To prove the lemma, we make use of the second definition of subdifferential we discussed in \cref{sec:mgprox:subsec:subdifferential}: $\underline{\partial F_0(y_0^{k+1})}$ is a compact convex set whose support function is the directional derivative of $F_0$ at $y_0^{k+1}$. Note that $F_0 : \mathbb{R}^{n_0} \rightarrow \overline{\mathbb{R}}$ will never reach $+\infty$ at $z_0^{k+1}$ since $z_0^{k+1}$ is obtained by the proximal gradient step, so we can make use of the result on directional derivative in \cite[Def. 1.1.4, p.165]{hiriart2004fundamentals} associated with subdifferential.
\begin{proof} We prove the lemma in 3 steps. \begin{enumerate}
\item (Halfspace) The strict inequality in Theorem~\ref{thm:coarse_dir_descent} means that $\underline{\partial F_0(y_0^{k+1})}$ is strictly inside a halfspace with normal vector $p = P(x^{k+1}_1 - y^{k+1}_1)$.
\item (Strict separation) Being a compact convex set, $\underline{\partial F_0(y_0^{k+1})}$ lying strictly on one side of the hyperplane must be a positive distance (say $\alpha_k > 0$) from that hyperplane.
\item (Support and directional derivative) Evaluating the support function of $\underline{\partial F_0(y_0^{k+1})}$, i.e., the directional derivative of $F_0$ at $y_0^{k+1}$ in the direction $p$, we have \eqref{iq:coarse_descent_on_F}. \end{enumerate} \end{proof}
Now we see that Theorem~\ref{thm:coarse_dir_descent} implies Lemma~\ref{lemma:alphaexists} which then implies the descent condition \eqref{iq:coarse_descent_on_F}. Now by \eqref{iq:coarse_descent_on_F} together with the sufficient descent property of proximal gradient (\cref{lemma:suff_descent_proxgrad}), the sequence $\big\{F_0(x^k_0)\big\}_{k \in \mathbb{N}}$ produced by Algorithm~\ref{aglo:2levelMG_exact} converges, because the sequence $\big\{F_0(x^k_0)\big\}_{k \in \mathbb{N}}$ is monotonically decreasing and $F_0$ is bounded below.
\begin{lemma}[Sufficient descent property of proximal gradient]\label{lemma:suff_descent_proxgrad} For step (i) in Algorithm~\ref{aglo:2levelMG_exact}, we have \begin{equation} F_0( y^{k+1}_0) \leq
F_0( x^k_0) - \dfrac{1}{2L_0}\big\| G_0(x_0^k) \big\|_2^2, \quad G_0( x_0^k) = L_0 \Big[ x_0^k - \textrm{prox}_{ \frac{1}{L_0} g_0 }\Big( x_0^k - \frac{1}{L_0} \nabla f_0(x_0^k) \Big) \Big]. \label{eqn:suffdescentPGD} \end{equation} Here, $L_0$ is the Lipschitz constant of $\nabla f_0$ and $G_0( x_0^k)$ is called the proximal gradient map of $F_0$ at $x_0^k$. The inequality also holds for step (vi). See \cite[Lemma 10.4]{beck2017first} for more details. \end{lemma}
\subsubsection{Tuning the coarse correction stepsize \texorpdfstring{$\alpha_k$}{}} \label{sec:mgprox:subsec:alpha_tune} First, exact line search is impractical: finding $\alpha_k \coloneqq \underset{\alpha \geq 0}{\argmin}\, F_0\Big( y^{k+1}_0 + \alpha P (x^{k+1}_1 - y^{k+1}_1) \Big) $ is generally expensive. Next, classical inexact line searches such as the Wolfe conditions, Armijo rule, Goldstein line search (e.g., see \cite[Chapter 3]{nocedal1999numerical}) cannot be used here as they were developed for smooth functions. While it is possible to develop nonsmooth versions of these methods, such as a nonsmooth Armijo rule in tandem with backtracking on functions that satisfy the Kurdyka-\L ojasiewicz inequality with other additional conditions in \cite{noll2014convergence}, this is outside the scope of this work.
For this paper, we use simple naive backtracking as shown in Algorithm~\ref{aglo:naivelinesearch}, which just enforces \eqref{iq:coarse_descent_on_F} without any sufficient descent condition. While we acknowledge that the traditional wisdom in optimization tells us that naive descent conditions such as \eqref{iq:coarse_descent_on_F} are generally not enough to obtain convergence to the optimal point, we note that \texttt{MGProx} is not solely using the coarse correction to update the variable; instead it is a chain of interlaced iterations of proximal gradient descent and coarse correction, and we will show next that the sufficient descent property of proximal gradient descent \eqref{eqn:suffdescentPGD} alone provides enough descending power for the function value $F_0$ to converge to the optimal value.
\begin{algorithm} \caption{Naive line search}\label{aglo:naivelinesearch} \begin{algorithmic} \STATE{Set $\alpha > 0$ and let $\epsilon > 0$ to be a small number (e.g. $10^{-15}$)} \WHILE{true} \IF{$F_0\Big( y^{k+1}_0 + \alpha P \big(x^{k+1}_1 - y^{k+1}_1 \big) \Big) \leq F_0\Big( y^{k+1}_0 \Big)$} \STATE{Return $z_0^{k+1}=y^{k+1}_0 + \alpha P \big(x^{k+1}_1 - y^{k+1}_1\big) $, break.} \ELSIF{$\alpha > \mathcal{O}(\epsilon) $} \STATE{$\alpha = \alpha/2 $.} \ELSE \STATE{Return $z_0^{k+1}=y^{k+1}_0$, break.} \ENDIF \ENDWHILE
\end{algorithmic} \end{algorithm}
\subsubsection{Asymptotic \texorpdfstring{$\mathcal{O}(1/k)$}{} convergence rate} Inequality \eqref{iq:coarse_descent_on_F} implies that in the worst case the coarse correction $P(x^{k+1}_1 - y_1^{k+1})$ in the multigrid process is ``doing nothing'' on $y_0^{k+1}$, which occurs when $P(x^{k+1}_1 - y_1^{k+1}) = 0$ or $x^{k+1}_1 = y^{k+1}_1$.
We now show that the descent inequality $F_0(x^{k+1}_0) \leq F_0(z^{k+1}_0) \leq F_0(y^{k+1}_0)$ implies that the convergence rate of the sequence $\big\{ F_0(x_0^k) \big\}_{k \in \mathbb{N}} $ for $\{x_0^{k} \}_{k\in \mathbb{N}}$ generated by \texttt{MGProx} (Algorithm~\ref{aglo:2levelMG_exact}) follows the (asymptotic) convergence rate of the proximal gradient method, which is $\mathcal{O}(\frac{1}{k})$ \cite{beck2017first}. In Theorem~\ref{thm:rate1k} we show that $\big\{F_0(x^k_0)\big\}_{k \in \mathbb{N}}$ converges to $F_0^* \coloneqq \inf F_0(x)$ asymptotically with such a classical rate.
\begin{theorem}\label{thm:rate1k} The sequence $\{x_0^k\}_{k \in \mathbb{N}}$ generated by \texttt{MGProx} (Algorithm~\ref{aglo:2levelMG_exact}) for solving Problem \eqref{prob:minfg} satisfies $ F_0(x_0^{k+1}) - F_0^* \leq \max\Big\{8 \delta^2 L_0,\, F_0(x^1_0) - F_0^* \Big\}\frac{1}{k} $, where $F_0^* = F_0(x^*)$ for $x^* = \argmin F_0$, the point $x^1_0 \in \mathbb{R}^n$ is the initial guess, $L_0$ is the Lipschitz constant of $\nabla f_0$, and $\delta$ is the diameter of the sublevel set $\mathcal{L}_{\leq F_0(x^1_0)}$ defined in Lemma~\ref{lemma:diamSubLvSet}. \end{theorem}
Note that we cannot invoke a standard theorem about the convergence of proximal gradient descent such as \cite[Theorem 10.21]{beck2017first}, because we interlace proximal gradient steps with coarse corrections.
Before the proof, we note that all the functions and variables in this subsubsection are at level $\ell=0$ so we omit the subscript. The constant $L$ should be understood as the Lipschitz constant of $\nabla f(x)$. The proof is based on standard techniques in first-order methods. To make the proof more accessible, we divide the proof into four lemmas: \begin{itemize}
\item Lemma~\ref{lemma:mgprox_suff_descent}: we derive a sufficient descent inequality for the \texttt{MGProx} iteration.
\item Lemma~\ref{lemma:quadOverestimator}: we derive a quadratic overestimator of $F$.
\item Lemma~\ref{lemma:diamSubLvSet}: we give an upper bound for $\| x^k -x^*\|_2$ and $\| y^{k+1} -x^*\|_2$ for all $k$.
\item Lemma~\ref{lemma:sequence}: we recall a convergence rate for a certain monotonic sequence. \end{itemize} Using these lemmas, we follow the strategy used in \cite{karimi2017imro} to prove Theorem~\ref{thm:rate1k}.
\begin{lemma}[Sufficient descent of \texttt{MGProx} iteration] \label{lemma:mgprox_suff_descent} For all iterations $k$, we have \begin{equation} F(x^{k+1}) - F^* \leq
\frac{L}{2}\Big(\| x^k - x^* \|_2^2 - \| y^{k+1} - x^* \|_2^2\Big). \label{lemma:mgprox_suff_descent_iq} \end{equation} \end{lemma}
\begin{proof} By convexity and $L$-smoothness of $f$, for all $y^{k+1}, x^k, \xi$ we have \[ \begin{array}{rlr} f(y^{k+1})
&\leq f(x^k) + \langle \nabla f(x^k) , y^{k+1}-x^k\rangle + \frac{L}{2} \| y^{k+1} - x^k \|_2^2 & f \text{ is }L\text{-smooth} \dots (i) \\ f(x^k) &\leq f(\xi) - \langle \nabla f(x^k) , \xi - x^k \rangle & f \text{ is convex} \dots (ii) \\ f(y^{k+1})
&\leq f(\xi) - \langle \nabla f(x^k) , \xi- y^{k+1} \rangle + \frac{L}{2} \| y^{k+1} - x^k \|_2^2 & (i) + (ii) \\ &=
f(\xi) - \big\langle \nabla f(x^k) , \xi - x^k + \frac{1}{L}G(x^k) \big\rangle + \frac{1}{2L} \| G(x^k) \|_2^2 & y^{k+1} = x^k - \frac{1}{L}G(x^k) \end{array} \] where $G(x^k)$ is the proximal gradient map of $F$ at $x^k$, see \eqref{eqn:suffdescentPGD}.
Next, adding $g(y^{k+1}) = g\big( x^k - \frac{1}{L}G(x^k) \big)$ on the both sides of the last inequality gives \begin{equation} F(y^{k+1}) \leq f(\xi) - \Big\langle \nabla f(x^k) , \xi - x^k + \frac{1}{L}G(x^k) \Big\rangle
+ \frac{1}{2L} \| G(x^k) \|_2^2 + g\Big( x^k - \frac{1}{L}G(x^k) \Big). \label{eqn:proof:zyx} \end{equation} Based on the properties of the coarse correction (Theorem~\ref{thm:coarse_dir_descent} and Lemma~\ref{lemma:alphaexists}) and the sufficient descent property of the proximal gradient update \eqref{eqn:suffdescentPGD}, we have \[ F(x^{k+1}) ~\overset{\eqref{eqn:suffdescentPGD}}{\leq}~ F(z^{k+1}) ~\overset{\cref{thm:coarse_dir_descent}, \,\cref{lemma:alphaexists}}{\leq}~ F(y^{k+1}), \] so we can replace $F(y^{k+1}) $ in \eqref{eqn:proof:zyx} by $F(x^{k+1})$ and obtain \begin{equation} F(x^{k+1}) \leq f(\xi) - \Big\langle \nabla f(x^k), \, \xi - x^k + \frac{1}{L}G(x^k) \Big\rangle
+ \frac{1}{2L} \| G(x^k) \|_2^2 + g\Big( x^k - \frac{1}{L}G(x^k) \Big). \label{lemma:mgprox_suff_descent1} \end{equation} In the following we deal with the term $g\Big( x^k - \frac{1}{L}G(x^k) \Big)$ in \eqref{lemma:mgprox_suff_descent1}. First, by the convexity of $g$, for all $\xi$ we have \[ \begin{array}{lrcl} & g(\xi) &\geq& g\Big( x^k - \frac{1}{L}G(x^k) \Big) + \Big\langle\, \partial g \big(x^k - \frac{1}{L}G(x^k) \big),\, \xi - \big(x^k - \frac{1}{L}G(x^k) \big) \,\Big\rangle \\ \iff & g\Big( x^k - \frac{1}{L}G(x^k) \Big) &\leq& g(\xi) - \Big\langle\, \partial g \big(x^k - \frac{1}{L}G(x^k) \big),\, \xi - \big(x^k - \frac{1}{L}G(x^k) \big) \,\Big\rangle. \end{array} \] By the subgradient optimality of the proximal subproblem associated with $g$, we can show that $G(x^k) - \nabla f(x^k) \in \partial g \big(x^k - \frac{1}{L}G(x^k) \big) $, hence \begin{equation} g\Big( x^k - \frac{1}{L}G(x^k) \Big) ~~\leq~~ g(\xi) - \Big\langle G(x^k) - \nabla f(x^k), \, \xi -\big(x^k - \frac{1}{L}G(x^k) \big) \Big\rangle. \label{lemma:mgprox_suff_descent2} \end{equation} Combining \eqref{lemma:mgprox_suff_descent1} and \eqref{lemma:mgprox_suff_descent2} with $\xi=x^* \coloneqq \argmin F$ gives \[ \begin{array}{rcl} F(x^{k+1}) &\leq& F^* - \Big\langle G(x^k) ,\, x^* - x^k + \frac{1}{L}G(x^k) \Big\rangle
+ \frac{1}{2L} \| G(x^k) \|_2^2 \\ &=& F^* - \Big\langle G(x^k) ,\, x^* - x^k \Big\rangle
- \frac{1}{2L} \| G(x^k) \|_2^2 \\ &=& F^* + \frac{L}{2}\Big(
\| x^k - x^* \|_2^2 - \| x^k - x^* - \frac{1}{L}G(x^k) \|_2^2 \Big) \quad\overset{x^k - \frac{1}{L}G(x^k) \,\eqqcolon\, y^{k+1}}{\iff}~~~ \eqref{lemma:mgprox_suff_descent_iq} \end{array} \] where completing the square is used in the second equality. \end{proof} \begin{remark} We name the inequality \eqref{lemma:mgprox_suff_descent_iq} sufficient descent because it resembles the sufficient descent property of the proximal gradient iteration \eqref{eqn:suffdescentPGD}.
Also, by definition, $F(x^{k+1}) \geq F^*$, hence \eqref{lemma:mgprox_suff_descent_iq} implies $\| x^k - x^* \|_2^2 \geq \| y^{k+1} - x^* \|_2^2$. \end{remark}
The following lemma is similar to \cite[Lemma 2.3]{beck2009fast} and \cite[Lemma 3, Eq.(5.9)]{karimi2017imro} . \begin{lemma}[A quadratic overestimator] \label{lemma:quadOverestimator} For all $x$, we have \begin{equation} F(x) - F(x^{k+1}) \geq L \langle x^k - y^{k+1}, x - x^k \rangle
+ \dfrac{L}{2} \| y^{k+1} - x^k \|_2^2. \label{lemma:QI} \end{equation} \end{lemma}
\begin{proof} By the convexity of $f$ and $g$, \begin{align} f(x)\geq&\, f(x^k) + \langle \nabla f(x^k) , x - x^k \rangle & f \text{ is convex} \dots (i) \nonumber \\ g(x) \geq&\, g(y^{k+1}) + \langle \partial g (y^{k+1}) , x- y^{k+1} \rangle & g \text{ is convex} \dots (ii) \nonumber \\ F(x)\geq &\, f(x^k) + \langle \nabla f(x^k) , x - x^k \rangle + g(y^{k+1}) + \langle \partial g (y^{k+1}) , x- y^{k+1} \rangle & (i)+(ii) \label{lemma:QI1} \end{align} By definitions \eqref{updt:proxgrad}, \eqref{def:prox_opertor}, the proximal gradient iteration is a majorization-minimization process that updates $x^k$ based on minimizing a local quadratic overestimator $Q$ of $x^k$, i.e., $y^{k+1} = \textrm{prox}_{\frac{1}{L}g}\big( x^k - \frac{1}{L}\nabla f(x^k)\big)$ is equivalent to \begin{equation} y^{k+1} ~=~ \underset{\xi }{\argmin} \, \Big\{~ Q(\xi ; x^k) ~\coloneqq~ f(x^k) + \big\langle \nabla f(x^k) , \xi - x^k \big\rangle
+ \dfrac{L}{2} \|\xi - x^k \|_2^2 + g(\xi ) ~\Big\}. \label{lemma:IQ_MM} \end{equation} Being an overestimator, we have $F(x) \leq Q(x;x^k)$, which implies for all $x$ \begin{equation} \begin{array}{cl} & \hspace{-2mm} F(x) - F(y^{k+1}) \\ \geq& \hspace{-2mm} F(x) - Q(y^{k+1} ; x^k) \\ \overset{\eqref{lemma:IQ_MM}}{=}& \hspace{-2mm} F(x) - f(x^k) - \big\langle \nabla f(x^k) , y^{k+1}- x^k \big\rangle
- \dfrac{L}{2} \| y^{k+1} - x^k \|_2^2 - g(y^{k+1}) \\ \overset{\eqref{lemma:QI1}}{\geq}& \hspace{-2mm} \langle \nabla f(x^k) , x - x^k \rangle +\langle \partial g (y^{k+1}) , x- y^{k+1} \rangle - \big\langle \nabla f(x^k) , y^{k+1} - x^k \big\rangle
- \dfrac{L}{2} \| y^{k+1} - x^k \|_2^2 \\ =& \hspace{-2mm} \langle \nabla f(x^k) + \partial g (y^{k+1}), x - y^{k+1} \rangle
- \dfrac{L}{2}\| x^k - y^{k+1} \|_2^2. \end{array} \label{lemma:IQ2} \end{equation}
Applying the subgradient optimality condition to \eqref{lemma:IQ_MM} at $y^{k+1}$ gives \[ 0 ~\in~ \nabla f(x^k) + L(y^{k+1}-x^k) + \partial g (y^{k+1}) ~~~\iff~~~ L(x^k-y^{k+1}) ~\in~ \nabla f(x^k) + \partial g (y^{k+1}), \] so $L(x^k-y^{k+1})$ can be substituted in the first term of
the last line of \eqref{lemma:IQ2} and we have \[ \begin{array}{rcl} F(x) - F(y^{k+1}) &\geq& L \langle x^k-y^{k+1}, x - y^{k+1} \rangle
- \dfrac{L}{2}\| x^k - y^{k+1} \|_2^2 \\ &=& L\langle x^k-y^{k+1}, x - x^k + x^k - y^{k+1} \rangle
- \dfrac{L}{2}\| x^k - y^{k+1} \|_2^2 \quad \iff \eqref{lemma:QI}. \end{array} \] \end{proof}
\begin{lemma}[Diameter of sublevel set] \label{lemma:diamSubLvSet} At initial guess $x^1 \in \mathbb{R}^n$, define \[ \begin{array}{rll} \mathcal{L}_{\leq F(x^1)} \hspace{-2mm} &\coloneqq
\Big\{\, x \in \mathbb{R}^n ~|~ F(x) \leq F(x^1) \,\Big\}, & \textrm{(sublevel set of $x^1$)} \\ \delta = \text{diam }\mathcal{L}_{\leq F(x^1)} \hspace{-2mm} &\coloneqq \sup \Big\{ \,
\| x - y \|_2 ~|~ F(x) \leq F(x^1), F(y) \leq F(x^1) \,\Big\}. & \textrm{(diameter of $\mathcal{L}_{\leq F(x^1)}$)} \end{array} \] Then for $x^* \coloneqq \argmin F(x) $, we have
$\| x^k - x^* \|_2 \leq \delta $ and $\| y^k - x^* \|_2 \leq \delta $ for all $k$. \begin{proof} We have $F(x^*) \leq F(x^1)$ by definition. By the descent property of the coarse correction and proximal gradient updates, we have $F(x^k) \leq F(x^1)$ and $F(y^k) \leq F(x^1)$ for all $k$. These results mean that $x^k,y^{k+1}$ and $x^*$ are inside $\mathcal{L}_{\leq F(x^1)}$, therefore both
$\| x^k - x^* \|_2$ and $\| y^{k+1} - x^* \|_2$ are bounded above by $\delta $. Lastly, $F$ is strongly convex so $\mathcal{L}_{\leq F(x^1)}$ is bounded and $\delta < +\infty$. \end{proof} \end{lemma}
\begin{lemma}[Monotone sequence] \label{lemma:sequence} For a nonnegative sequence $\{\omega_k\}_{k\in \mathbb{N}} \rightarrow \omega^*$ that is monotonically decreasing with $\omega_1 - \omega^* \leq 4\mu$ and $\omega_k - \omega_{k+1} \geq \frac{(\omega_{k+1} - \omega^*)^2}{\mu}$, it holds that $\omega_k - \omega^* \leq \frac{4\mu}{k}$ for all $k$.
\begin{proof} By induction. See proof in \cite[Lemma 4]{karimi2017imro}. \end{proof} \end{lemma}
Now we are ready to prove Theorem~\ref{thm:rate1k}. \begin{proof} Rearranging the sufficient descent inequality in Lemma~\ref{lemma:mgprox_suff_descent} gives \[ \begin{array}{rcl} F^* - F(x^{k+1}) &\geq& \displaystyle
\frac{L}{2}\Big( \| y^{k+1} - x^* \|_2^2 - \| x^k - x^* \|_2^2\Big)
\\ &=& \displaystyle
\frac{L}{2} \Big(\| y^{k+1} - x^* \|_2 - \| x^k - x^* \|_2 \Big)\,
\Big( \| y^{k+1} - x^* \|_2 + \| x^k - x^* \|_2 \Big)
\\ &\geq& \displaystyle -\frac{L}{2}
\| x^k - y^{k+1} \|_2 \,
\Big(\| x^k - x^* \|_2 + \| y^{k+1} - x^* \|_2\Big), \end{array} \] where the last inequality is by the triangle inequality
$\| y^{k+1} - x^* \|_2 + \| x^k - y^{k+1} \|_2 \geq \| x^* - x^k \|_2$. Rearranging the inequality gives \begin{equation}
\| x^k - y^{k+1} \|_2 \geq \dfrac{-2}{L}
\frac{F^* - F(x^{k+1}) }{\| x^k - x^* \|_2 + \| y^{k+1} - x^* \|_2} = \dfrac{2}{L}
\frac{F(x^{k+1}) - F^* }{\| x^k - x^* \|_2 + \| y^{k+1} - x^* \|_2}. \label{thm:rate_4} \end{equation} Applying Lemma~\ref{lemma:diamSubLvSet} to \eqref{thm:rate_4} gives \begin{equation}
\| x^k - y^{k+1} \|_2 \geq \frac{F(x^{k+1}) - F^*}{\delta L}. \label{thm:rate_5} \end{equation} Note that \eqref{thm:rate_5} implies that if the fine sequence converges (i.e., $x^k = y^{k+1}$), then we have $F(x^{k+1}) = F^*$.
Now applying Lemma~\ref{lemma:quadOverestimator} with $x = x^k$ gives \[
F(x^k) - F(x^{k+1}) \geq \dfrac{L}{2} \| y^{k+1} - x^k \|_2^2 \overset{\eqref{thm:rate_5}}{\geq} \dfrac{ (F(x^{k+1}) - F^*)^2}{2\delta^2 L}. \] This inequality shows that the sequence $\{ \omega_k \}_{k\in \mathbb{N}}$ with $\omega_k \coloneqq F(x^k)$ satisfies the condition $\omega_k - \omega_{k+1} \geq \frac{(\omega_{k+1} - \omega^* )^2}{\mu}$ in Lemma~\ref{lemma:sequence}. To complete the proof, applying Lemma~\ref{lemma:sequence} to the monotonically decreasing sequence $\{F(x^k)\}_{k \in \mathbb{N}}$ with $\mu = 2\delta^2L$, we have \[ F_0(x^{k+1}_0) - F^*_0 \leq \max\big\{\, 8 \delta^2 L_0, F_0(x^1_0) - F_0^* \,\big\} \dfrac{ 1} {k}, \] where we put back the subscript 0 for clarity. \end{proof}
Theorem~\ref{thm:rate1k} shows that $\{F_0(x^k_0)\}_{k \in \mathbb{N}}$ for solving Problem \eqref{prob:minfg} satisfies a sublinear asymptotic convergence bound of $\mathcal{O}(\frac{1}{k})$. Below we show that $\{F_0(x^k_0)\}_{k \in \mathbb{N}}$ also satisfies a linear convergence bound.
\subsubsection{Linear convergence rate by Proximal P{\L}\, inequality} All the functions and variables here are at level 0 so we omit the subscripts. Now we show that $\big\{F( x^k) \big\}_{k \in \mathbb{N}}$ converges to $F^*$ with a linear rate using the \emph{Proximal Polyak-{\L}ojasiewicz inequality} \cite[Section 4]{karimi2016linear}. The function $F$ in Problem \eqref{prob:minfg} is called ProxP\L\, if there exists $\mu > 0$ such that \begin{equation} \dfrac{1}{2} \mathcal{D}_g(x,L) \geq \mu \big( F(x) - F^* \big) \qquad \forall x, \tag{ProxP{\L}} \label{def:ProxPL} \end{equation} where $\mu$ is called the ProxP\L\, constant and \begin{equation} \mathcal{D}_g(x,\alpha) \coloneqq -2\alpha \min_z \, \bigg\{
\dfrac{\alpha}{2} \| z - x \|_2^2 + \big\langle z - x, \nabla f(x) \big\rangle + g(z) - g(x) \bigg\} . \label{def:dg} \end{equation} Intuitively, $\mathcal{D}_g$ is defined based on the proximal gradient operator: \[ \begin{array}{rcl} \textrm{prox}_{\frac{1}{L}g} \bigg( x - \dfrac{\nabla f(x)}{L} \bigg) &\overset{\eqref{lemma:IQ_MM}}{=}&
\underset{z}{\argmin} \, \dfrac{L}{2} \| z - x \|_2^2 + \big\langle z - x, \nabla f(x) \big\rangle + g(z) - g(x). \end{array} \] It has been shown in \cite{karimi2016linear} that if $f$ in \eqref{prob:minfg} is $\mu$-strongly convex, then $F$ is $\mu$-ProxP\L. Now we prove the linear convergence rate of Algorithm~\ref{aglo:2levelMG_exact}.
Note that a standard result such as \cite[Theorem 10.29]{beck2017first} on convergence of proximal gradient for strongly convex functions is not directly applicable because, as mentioned above, we interleave proximal gradient steps with coarse correction steps.
\begin{theorem}\label{thm:mgprox_converge_proxPL} Let $x^1_0$ be the initial guess of the algorithm, $F^*_0 = F_0(x_0^*)$ and $x_0^* = \argmin \, F_0(x)$. The sequence $\{x_0^k\}_{k \in \mathbb{N}}$ generated by \texttt{MGProx} (Algorithm~\ref{aglo:2levelMG_exact}) for solving Problem \eqref{prob:minfg} satisfies $ F_0(x_0^{k+1}) - F_0^* \leq \Big( 1-\dfrac{\mu_0}{L_0} \Big)^k \Big( F_0(x_0^1) - F_0^* \Big). $
\begin{proof} First, by assumption $F$ is strongly convex, so $F$ is $\mu$-ProxP\L\, with $\mu > 0$ and \[ \renewcommand*{\arraystretch}{1.33} \begin{array}{rcl} F(x^{k+1}) &\overset{\eqref{eqn:suffdescentPGD}}{\leq}& \hspace{-2mm} F(z^{k+1}) \\ &\overset{\eqref{iq:coarse_descent_on_F}}{\leq}& \hspace{-2mm} F(y^{k+1}) \\ &=& \hspace{-2mm} f(y^{k+1}) + g(y^{k+1}) + g(x^k) - g(x^k) \\ &\overset{\eqref{def:Lsmooth}}{\leq}& \hspace{-2mm}
f(x^k) + \langle \nabla f(x^k), y^{k+1} - x^k \rangle + \frac{L}{2} \| y^{k+1} - x^k \|_2^2 + g(x^k) + g(y^{k+1}) - g(x^k) \\ &=& \hspace{-2mm}
F(x^k) + \langle \nabla f(x^k), y^{k+1} - x^k \rangle + \frac{L}{2} \| y^{k+1} - x^k \|_2^2 + g(y^{k+1}) - g(x^k) \\ &=& \hspace{-2mm} F(x^k) - \frac{1}{2L} \underbrace{(-2L)\Big(
\langle \nabla f(x^k), y^{k+1} - x^k \rangle + \frac{L}{2} \| y^{k+1} - x^k \|_2^2 + g(y^{k+1}) - g(x^k)
\Big)}_{\overset{\eqref{def:dg}}{ = } \, \mathcal{D}_g(x^k, L), \, \text{ since }y^{k+1} \,\coloneqq\, \underset{z}{\argmin} \, \frac{L}{2} \| z - x^k \|_2^2 + \langle z - x^k, \nabla f(x^k)\rangle + g(z) - g(x^k).} \\ &\overset{\eqref{def:ProxPL}}{\leq}& \hspace{-2mm} F(x^k) - \frac{\mu}{L} \big(F(x^k) - F^*\big). \end{array} \] Adding $-F^*$ on both sides of the inequality gives $ F(x^{k+1}) - F^* \leq \big( 1 - \frac{\mu}{L}\big) \big(F(x^k) - F^*\big) $. Applying this inequality recursively completes the proof. \end{proof} \end{theorem}
\begin{remark} We now give several remarks about the result. \begin{itemize} \item Convergence rate: for a $\mu_0$-strongly convex and $L_0$-smooth $f_0$, we have $0 < \mu_0 \leq L_0$ and $0 \leq 1 - \frac{\mu_0}{L_0} < 1$.
\item Since $x^*$ is unique, we also conclude that the sequence $\{x^k_0\}_{k \in \mathbb{N}}$ converges to $x^*$.
\item Depending on the value of $\mu_0$, for $k$ not too large, the sublinear convergence rate $\frac{1}{k}$ from Theorem~\ref{thm:rate1k} gives a better bound than the linear rate $(1-\frac{\mu_0}{L_0})^k$ from Theorem~\ref{thm:mgprox_converge_proxPL}, this is the case when $\mu_0 \ll L_0$. \end{itemize} \end{remark} Lastly, we emphasize that the bounds in Theorem~\ref{thm:rate1k} and Theorem~\ref{thm:mgprox_converge_proxPL} are loose bounds for Algorithm~\ref{aglo:2levelMG_exact} since we only show that the coarse correction can guarantee the descent condition $F_0(z_0^{k+1}) \leq F_0(y_0^{k+1})$ but not a stronger sufficient descent condition.
\subsubsection{On the selection of \texorpdfstring{$\tau$}{}}\label{sec:mgprox:subsec:tau} Recall that the tau vector comes from a set: \[ \tau_{0 \rightarrow 1}^{k+1} ~\overset{\eqref{def:tau_mgprox2}}{\in}~ \underline{\tau_{0 \rightarrow 1}^{k+1}} \coloneqq \underbrace{ \nabla f_1(y_1^{k+1}) - R \partial F_0(y_0^{k+1}) }_{\textrm{singleton}} + \underbrace{ \partial g_1(y_1^{k+1} ) }_{\textrm{set-valued}}. \] We emphasize that for our theoretical results to hold we can choose any value in the set $\underline{\tau_{0 \rightarrow 1}^{k+1}}$ to define $\tau_{0 \rightarrow 1}^{k+1}$ in Algorithm~\ref{aglo:2levelMG_exact}. First, the results in Theorems~\ref{thm:fpp} and~\ref{thm:coarse_dir_descent} hold for any $\tau$ in the set. Second, all the convergence bounds (Theorems~\ref{thm:rate1k} and \ref{thm:mgprox_converge_proxPL}) only contain constants at level $\ell = 0$ and are independent of the choice of tau vector.
\paragraph{Optimal tau selection seems difficult} Recall the two steps in the algorithm related to the coarse correction, \[ \begin{array}{rcl} x_1^{k+1}(\tau) &=& \underset{\xi}{\argmin}\, F_1(\xi) - \Big\langle \underbrace{\nabla f_1(y_1^{k+1}) - R \partial F_0(y_0^{k+1}) + \partial g_1(y_1^{k+1}) }_{\ni \tau} ,\,\xi \Big\rangle, \\ x_0^{k+1}(\tau) &=& y_0^{k+1} + \alpha(\tau) P \Big( x_1^{k+1}(\tau) - y_1^{k+1} \Big), \end{array} \] where $x_0^{k+1}, x_1^{k+1}$ and $\alpha$ are all a function of $\tau$. Now it seems tempting to ``optimally tune'' $\tau_{0 \rightarrow 1}^{k+1}$ so that it maximizes the gap $F_0(y_0^{k+1}) - F_0(x_0^{k+1}) $: \[ \tau_{0\rightarrow 1}^{k+1} \in \underset{\tau \,\in\, \underline{\tau_{0 \rightarrow 1}^{k+1}}}{\argmax} \, F(y_0^{k+1}) - F_0\big(x_0^{k+1}(\tau) \big) = \underset{\tau}{\argmin} \, F_0\big(x_0^{k+1}(\tau) \big). \] However, this problem generally has no closed-form solution and it is intractable to solve numerically.
In the numerical experiments we will verify that the sequence produced by \texttt{MGProx} converges for different values of $\tau$ confirming the theory. We also find that the convergence speed does not depend much on the choice of $\tau$ in practical computations.
\section{A multi-level MGProx}\label{sec:multilevel} Now we generalize the 2-level \texttt{MGProx} to multiple levels. The 2-level \texttt{MGProx} method constructs a coarse problem at level ($\ell = 1$), and uses the solution of such problem to help solve the original fine-level problem ($\ell = 0$). If the fine problem has a large problem size, solving the coarse problem exactly is generally expensive. Hence it is natural to consider applying multigrid recursively until the coarse problem on the coarsest level is no longer expensive to solve. An $L$-level \texttt{MGProx} cycle with a V-cycle structure is shown in Algorithm~\ref{algo:LlevelMG_exact}. We clarify the naming of the variables in the algorithm as follows: at each iteration $k$, we have $x_\ell^k$: variable before pre-smoothing on level $\ell$; $y_\ell^{k+1}$: variable after pre-smoothing on level $\ell$; $z_\ell^{k+1}$: variable after coarse-grid correction on level $\ell$; and $w_\ell^{k+1}$: variable after post-smoothing on level $\ell$. Note that, to obtain a well-defined recursion in Algorithm~\ref{algo:LlevelMG_exact}, we choose the superscript for the $x$ variables equal to $k$ on all levels. In the 2-level algorithm we chose a different convention, writing the $x$ variable on level 1 as $x_{1}^{k+1}$.
\begin{algorithm} \caption{$L$-level \texttt{MGProx} with V-cycle structure for an approximate solution of \eqref{prob:minfg}} \label{algo:LlevelMG_exact} \begin{algorithmic} \STATE{Initialize $x^1_0$ and the full version of $R_{\ell \rightarrow \ell+1}, P_{\ell+1 \rightarrow \ell }$ for $\ell \in \{0,1,\dots,L-1\}$ }
\FOR{$k = 1,2,\dots$}
\STATE{Set $\tau_{-1 \rightarrow 0 }^{k+1}=0$}
\FOR{$\ell = 0,1,\dots, L-1$}
\STATE{$y^{k+1}_\ell \,~~~= \textrm{prox}_{\frac{1}{L_{\ell}}g_{\ell}}
\bigg (
x^{k}_\ell - \dfrac{\nabla f_\ell(x^k_\ell) - \tau_{\ell-1 \rightarrow \ell }^{k+1}}{L_\ell}
\bigg)$
pre-smoothing
}
\STATE{$x^{k}_{\ell+1} \,~~~= R_{\ell \rightarrow \ell+1}(y^{k+1}_{\ell}) \, y^{k+1}_{\ell} $
restriction to next level
}
\STATE{
$\tau_{\ell \rightarrow \ell+1}^{k+1} \in \underline{\partial F_{\ell+1}(x^{k}_{\ell+1} )} - R_{\ell \rightarrow \ell+1}(y^{k+1}_{\ell}) \, \underline{\partial F_\ell(y^{k+1}_{\ell})}$
create tau vector ~
}
\ENDFOR
\STATE{$w_L^{k+1} = \underset{\xi}{\argmin} \, \Big\{\, F_L^{\tau}(\xi) \coloneqq F_L(\xi) - \langle \tau_{L-1 \rightarrow L}^{k+1}, \xi \rangle \,\Big\}$
solve the level-$L$ coarse problem}
\FOR{$\ell = L-1, L - 2, \dots, 0$}
\STATE{
$z_\ell^{k+1} = y^{k+1}_\ell + \alpha P_{\ell+1 \rightarrow \ell}\big( w^{k+1}_{\ell+1} - x^{k}_{\ell+1} \big)$
coarse correction
}
\STATE{
$w^{k+1}_\ell = \textrm{prox}_{\frac{1}{L_\ell}g_\ell}
\bigg(
z^{k+1}_\ell - \dfrac{\nabla f_\ell(z^{k+1}_\ell) -\tau_{\ell-1 \rightarrow \ell }^{k+1}}{L_\ell}
\bigg)$
post-smoothing
}
\ENDFOR
\STATE{$x^{k+1}_{0} \,~~~= w^{k+1}_0$
update the fine variable}
\ENDFOR \end{algorithmic} \end{algorithm}
Here are some further remarks about Algorithm~\ref{algo:LlevelMG_exact}. \begin{itemize} \item $L_\ell$ is the Lipschitz constant of $\nabla f_\ell$.
\item At level $\ell \neq L $, we are essentially performing two proximal gradient iterations (pre-smoothing + post-smoothing) and a coarse correction.
At the coarsest level $\ell = L$, we perform an exact update by solving the coarse problem exactly.
\item From the traditional wisdom of classical multigrid, more than one pre-smoothing and post-smoothing steps can be beneficial to accelerate the overall convergence. We implemented such multiple smoothing steps in the numerical tests. \end{itemize}
\begin{remark}[Convergence of Algorithm~\ref{algo:LlevelMG_exact}] Regarding the finest level function value $\{F_0(x_0^k)\}_{k \in \mathbb{N}}$, Theorems~\ref{thm:rate1k} and \ref{thm:mgprox_converge_proxPL} both hold for the multilevel Algorithm~\ref{algo:LlevelMG_exact}, since the angle condition of the coarse correction (Theorem~\ref{thm:coarse_dir_descent}) also holds for multilevel \texttt{MGProx} when the coarse problem is solved inexactly. To be specific, the last inequality $F_1^{\tau}(x^{k+1}_1) - F_1^{\tau}(y^{k+1}_1) < 0$ in the proof of Theorem~\ref{thm:coarse_dir_descent} holds when the coarse problem on $x^{k+1}_1$ is solved inexactly by a combination of proximal gradient iterations and a coarse-grid correction with line search. \end{remark}
\begin{comment}
\subsection{Approximate per-iteration computational complexity}\label{sec:multilevel:cost} We now discuss the per-iteration cost of a $L$-level \texttt{MGProx}. Let the cost of one prox-grad iteration at the finest level be one work unit, $\textrm{WU}$. Suppose Problem \eqref{prob:minfg} comes from solving a two-dimensional PDE. On level $\ell=1$ with variable $x_1 \in \mathbb{R}^{n_1}$ we have $n_1 = \frac{1}{2^2} n_0 = \frac{1}{4}n_0$ and the cost of one prox-grad iteration on $x_1$ is $\frac{1}{4}\textrm{WU}$. Similarly, the cost of one prox-grad iteration on $x_2$ is $\frac{1}{16}\textrm{WU}$. Now assume Algorithm~\ref{algo:LlevelMG_exact} takes $N_L^k > 0$ iterations of prox-grad steps at iteration $k$ to solve the coarsest problem on $x_L$ until convergence, then the total cost of all prox-grad operations in one iteration of the algorithm is $ 2 \left( 1 + \frac{1}{4} + \dots + N_L^k\frac{1}{4^{L-1}} \right) \textrm{WU}. $ We now assume that the level parameter $L$ is chosen appropriately such that the problem on the coarsest level is a scalar problem, and further assume that the computational cost of such a scalar problem has a cost of $\mathcal{O}(1)$ complexity, then we can ignore $N_L^k$ in the sum and get \[ 2 \left( 1 + \frac{1}{4} + \dots + \frac{1}{4^{L-1}} \right) \textrm{WU}
= 2 \left( \dfrac{1-\frac{1}{4^{L-1}}}{1-\frac{1}{4}} \right) \text{WU}
\leq \frac{8}{3}\text{WU}. \] Hence, all the prox-grad steps per V-cycle iteration amount to at most 2.67 times the cost of performing one fine prox-grad iteration. \end{comment}
\section{Numerical results}\label{sec:exp} We now demonstrate the capability of \texttt{MGProx} to solve application problems in the form of \eqref{prob:minfg}. We test \texttt{MGProx} on three problems related to the Elastic Obstacle Problem (EOP). In \cref{sec:exp:subsec:EOP} we review the EOP and derive a linearized EOP problem. In \cref{sec:exp:subsec:mgprox_test} we provide results for the linearized EOP in one dimension: we compare \texttt{MGProx} with the standard proximal gradient method and proximal gradient method with Nesterov's acceleration, and investigate results for \texttt{MGProx} under different parameter settings. Then we show results for \texttt{MGProx} compared with Nesterov's acceleration for the linearized EOP in two dimensions in \cref{sec:exp:subsec:2dEOP_exp}. Next, in \cref{sec:exp:subsec:EOP:penalty} we derive a penalty formulation for the linearized EOP problem and we show results for it. In \cref{sec:exp:subsec:EOP:origin} we formulate the original nonlinear EOP in penalty form and show results for it.
\subsection{Elastic Obstacle Problem (EOP)}\label{sec:exp:subsec:EOP} The EOP \cite{brandt1983multigrid,mandel1984multilevel,rodrigues1987obstacle,caffarelli1998obstacle} describes the shape of an elastic membrane covering an obstacle $\phi$: find the equilibrium state of the membrane under a non-penetration constraint defined by $\phi$. Given $\phi : \Omega \rightarrow \mathbb{R}$ in a two-dimensional domain $\Omega \subset \mathbb{R}^2$ (we use $\Omega = [0,1] \times [0,1]$ in our numerical tests), determine the location of a membrane $u(x,y) : \Omega \rightarrow \mathbb{R}$ with the lowest elastic potential energy.
\paragraph{EOP as a surface area model} Assuming the elastic potential energy of the membrane is proportional to the surface area, the EOP can be written as
\begin{comment} \begin{equation} \begin{array}{rll} \displaystyle \min_{ u } &
\displaystyle \int_\Omega \sqrt{1 +\| \,\nabla u \, \|^2_{L^2}} \textrm{dx} & ~~~\text{minimal surface area}, \\ ~~~\text{s.t.} & u \geq \phi ~~ \text{on } \Omega & ~~~\text{non-penetration constraint}, \\ & u = 0 \,\,\, \text{on } \partial \Omega & ~~~\text{boundary condition}, \end{array} \tag{EOP} \label{EOP_org} \end{equation} \end{comment} \begin{equation} \min_{ u }
\int_\Omega \sqrt{1 +\| \,\nabla u \, \|^2_{L^2}} \textrm{dx} ~~~\text{s.t.}~~~ u \geq \phi ~~ \text{on } \Omega, ~~ u = 0 \,\,\, \text{on } \partial \Omega, \tag{EOP} \label{EOP_org} \end{equation}
where $\textrm{dx}$ denotes the total differential, $\nabla u : \Omega \rightarrow \mathbb{R}^2$ is the gradient field of $u$, and the norm $\| \cdot \|_{L^2}$ induced by the inner product $\langle \,\cdot\,,\,\cdot\,\rangle$ is the $L^2$ norm for functions. Note that we take $\phi \leq 0$ on $\partial \Omega$, so the boundary condition $u = 0$ on $\partial \Omega$ makes sense. Figure~\ref{fig:peakWrap} shows an example of covering nine conic obstacles.
\begin{figure}
\caption{Covering nine conic obstacles. Left: nine cones. Right: the solution $u$ obtained by solving \eqref{EOP} using \texttt{MGProx} proposed in this work. }
\label{fig:peakWrap}
\end{figure}
\paragraph{Linearized EOP} An approximate version of Problem (EOP) can be obtained by linearization. The function $\sqrt{1+x^2}$ is strictly convex. Near $x=0$ it has a Taylor series $1 + \frac{1}{2}x^2 - \frac{1}{8}x^4 + \frac{1}{16}x^6 - \frac{5}{128}x^8 + o(x^9)$. Ignoring the constant 1 and higher order terms gives \begin{equation} \displaystyle \min_{ u }
\displaystyle \int_\Omega \dfrac{1}{2} \| \,\nabla u \, \|^2_{L^2} \textrm{dx} ~~~\text{s.t.}~~~ u \geq \phi ~~ \text{on } \Omega, ~ u = 0 \,\,\, \text{on } \partial \Omega,
\tag{aEOP} \label{EOP} \end{equation} where ``a'' in aEOP stands for approximate.
\paragraph{Applications of EOP} The EOP was initially motivated by mathematical physics \cite{rodrigues1987obstacle} such as thin-plate solid dynamics and elastostatics (see e.g., \cite[Section 4]{tran20151} for related problems). EOP also finds applications in stochastic optimal control and financial mathematics of American options \cite[Proposition 12.4]{tankov2003financial}. Here we solely focus on solving EOP as a nonsmooth optimization problem and use it to illustrate the effectiveness of \texttt{MGProx}. Many methods have been proposed to numerically solve EOP: apart from the multigrid approaches mentioned in the introduction, there are adaptive finite element methods \cite{hoppe1994adaptive}, penalty methods \cite{scholz1984numerical}, level set methods \cite{majava2004level}, and $\ell_1$ penalty methods \cite{tran20151}.
\subsubsection{Discretization of Problem (aEOP)} We apply direct discretization of the integral in (aEOP) to derive a minimization problem in the form of \eqref{prob:minfg}. Using a grid of $N\times N$ internal points with $\Delta x=\Delta y=h =\frac{1}{N+1}$ on $\Omega$, let $u_{ij} = u( ih, jh)$ with $i,j$ ranging from $1$ to $N$. Then $
{\displaystyle \int_\Omega} \frac{1}{2} \| \nabla u(x,y) \|^2 dxdy \approx {\displaystyle \sum_{i,j}}
\frac{1}{2} \| \nabla u_{ij} \|^2 h^2 = \frac{h^2}{2} \langle Q_0 u,u\rangle, $ where $u$ is the vectorized representation of the matrix $u_{ij}$, and $Q_0 \in \mathbb{R}^{N^2 \times N^2}$ is a discrete Laplacian operator that approximates $-\nabla^2$: it is a block tridiagonal matrix consisting of tridiagonal blocks $B \in \mathbb{R}^{N \times N}$ and negated identity blocks $-I$, where $I \in \mathbb{R}^{N \times N}$ is the identity, \[ Q_0 \coloneqq \frac{1}{h^2} \begin{bmatrix} B & -I \\ -I & B & \ddots \\
& \ddots & \ddots & -I \\
& & -I & B \end{bmatrix}, \qquad B \coloneqq \begin{bmatrix} 4 & -1 \\ -1 & 4 & \ddots \\ & \ddots & \ddots & -1 \\ & & -1 & 4 \end{bmatrix}. \]
Note that $L_0 \coloneqq \| Q_0\|_2 = \frac{1}{h^2}8\sin^2\big( \frac{N\pi}{2(N+1)} \big) \approx \frac{8}{h^2}$ \cite{leveque2007finite}, which will be used for the gradient descent stepsize.
Using indicator function $i_{\geq \phi}$ (which $=\infty$ at grid point $(i,j)$ if $u_{ij} < \phi_{ij}$ and $=0$ otherwise) to represent the constraint $u \geq \phi$ and noting that $i_{\geq \phi}$ is invariant to scalar scaling, (aEOP) is discretized as $ \min
\frac{h^2}{2} \langle Q_0 u,u\rangle
+ h^2 i_{\geq \phi}(u).$ Ignoring $h^2$ in the whole expression, performing a change of variable $v = u - \phi \geq 0$ and letting $p_0 = -Q_0\phi$ yields the problem: \begin{equation} \min_v \dfrac{1}{2} \langle Q_0 v, v \rangle - \langle p_0, v \rangle + i_+(v). \tag{Shifted aEOP} \label{prob:elastic:displace} \end{equation} The reason we perform a change of variable is that in \eqref{prob:elastic:displace} the constraint is simplified to nonnegativity.
Since $Q_0$ is positive definite, Problem \eqref{prob:elastic:displace} is a nonsmooth strongly convex optimization problem and it has a unique minimizer.
\subsubsection{Implementation}\label{sec:exp:subsec:elastic:subsubset:mgprox_iter} Now we implement \texttt{MGProx} to solve (Shifted aEOP).
\paragraph{Proximal gradient iteration}
Let $\frac{1}{\| Q_0 \|_2} > 0$ be the stepsize and let $(x)_+ \coloneqq \max\{ 0 , x \}$ defined element-wise. Then the proximal gradient iteration for \eqref{prob:elastic:displace} is \begin{equation} v^{k+1} ~=~
\textrm{prox}_{\frac{1}{\| Q_0 \|_2} i_+} \left(
v - \frac{1}{\| Q_0 \|_2}(Q_0 v^k - p_0 ) \right) ~=~
\left( v^k - \frac{1}{\| Q_0 \|_2}(Q_0 v^k - p_0 )\right)_+. \label{iterate:ProjGD} \end{equation}
\paragraph{Restriction and prolongation} Consider the matrix $u$ at a particular resolution level $\ell$. The (full) restriction of such variable, denoted as $\hat{u} \coloneqq \overline{\mathcal{R}}(u)$ at level $\ell+1$, can be defined using the so-called full weighting operator: for all $1 \leq i,j \leq \frac{n}{2}-1$, \[ \hat{u}_{ij} \coloneqq [\overline{\mathcal{R}}(u)]_{ij} = \frac{1}{16} \left[ \begin{array}{l} 4 u_{2i,2j} + 2 \big( u_{2i,2j+1} + u_{2i,2j-1} + u_{2i+1,2j} + u_{2i-1,2j} \big) \\ + u_{2i+1,2j+1} + u_{2i+1,2j-1} + u_{2i-1,2j+1} + u_{2i-1,2j-1} \end{array} \right]. \] Using a block-tridiagonal matrix $\overline{R}$, the expression $\hat{u} \coloneqq \overline{\mathcal{R}}(u)$ can be written as a linear equation $\textrm{vec}(\hat{u}) = \overline{R}\textrm{vec}(u)$ where $\textrm{vec}$ is vectorization. We abuse notation by treating $u$ as $\textrm{vec}(u)$. For the adaptive version of $R$, we follow Definition~\ref{def:adaptiveR}. For the prolongation matrix $P$, we take $P=c R^\top$ with constant $c=2$.
\paragraph{Subdifferential} To proceed with \texttt{MGProx} we need to compute the subdifferential of the constraint function $i_+(v)$. The subdifferential of the indicator function is the normal cone.
\begin{definition}[Normal cone to a set] Given a non-empty closed convex set $\mathcal{C} \subset \mathbb{R}^n$, the normal cone of $\mathcal{C}$ at a point $\bar{x} \in \mathbb{R}^n$, denoted by $N_{\mathcal{C}}(\bar{x})$, is defined as (e.g., \cite[Eq. 6.35]{bauschke2011convex}) \[ N_\mathcal{C}(\bar{x}) \coloneqq \begin{cases}
\Big\{ v \in \mathbb{R}^n ~\Big|~ \langle v, x - \bar{x}\rangle \leq 0 \text{ for all } x \in \mathcal{C} \Big\} & \bar{x} \in \mathcal{C} \\ \varnothing & \bar{x} \notin \mathcal{C} \end{cases}. \] \end{definition} For the objective function \eqref{prob:elastic:displace} with $g_0(v) = i_+(v)$, we have \begin{equation} \big[ \partial i_{+}(v) \big]_i = \begin{cases} 0 & v_i > 0 \\ -t, t \geq 0 & v_i = 0 \\ \varnothing & v_i < 0 \end{cases}, \label{eqn:subdiff_indicator} \end{equation} i.e., if a component $v_i=0$, the subdifferential $\big[ \partial i_{+}(v) \big]_i$ is a set of rays pointing to $-\infty$.
\paragraph{\texttt{MGProx} iteration} The operations of \texttt{MGProx} at level $\ell=0$ in the top inner for loop in Algorithm~\ref{algo:LlevelMG_exact} for solving \eqref{prob:elastic:displace} are \[ \begin{array}{rcl} y_0^{k+1} &\overset{\eqref{iterate:ProjGD}}{=}& \max\left\{ 0 \,,\, x^k_0 - \frac{Q_0 x^k_0 - p_0 }{L_0} \right\}, \\ x_1^{k+1} &=& R_{0 \rightarrow 1}(y_0^{k+1}) y_0^{k+1}, \\ \tau_{0 \rightarrow 1}^{k+1} &\in& \nabla f_1(x_1^{k+1}) + \underline{\partial g_1(x_1^{k+1})} - R_{0 \rightarrow 1} \nabla f_0(y_0^{k+1}) - R_{0 \rightarrow 1} \underline{\partial g_0(y_0^{k+1})}, \\ &=& (Q_1 x_1^{k+1} - p_1) + \underline{N_{\mathbb{R}_+^{n_1} }(x_1^{k+1})} - R_{0 \rightarrow 1} (Q_0 y_0^{k+1} - p_0) - R_{0 \rightarrow 1} \underline{N_{\mathbb{R}_+^{n_0} } (y_0^{k+1})}, \end{array} \] where $N_{\mathbb{R}_+^{n}} $ is the normal cone of $\mathbb{R}^n_+$. For the next level, we define the coarse matrix $Q_1 = \overline{R}_{0 \rightarrow 1}Q_0\overline{P}_{1 \rightarrow 0}$ and the coarse vector $p_1 = \overline{R}_{0 \rightarrow 1}p_0$. Note that we are using the non-adaptive full restriction operator for $Q_1$ and $p_1$, see Remark~\ref{remark:fullR_f0}. The term $\overline{R}_{0 \rightarrow 1}Q_0\overline{P}_{1 \rightarrow 0}$ is called the \textit{Galerkin product}, which is a classical way to generate the coarse operator in multigrid. We use $Q_1, p_1$ to define the level $\ell= 1$ coarse problem as ${\displaystyle \argmin_\xi} \, \dfrac{1}{2} \langle Q_1 \xi, \xi \rangle - \langle p_1, \xi \rangle - \langle \tau_{0 \rightarrow 1}^{k+1}, \xi \rangle + i_+(\xi) $, which has the same form as (Shifted aEOP), and the same \texttt{MGProx} iteration can be used for all coarse problems recursively.
\begin{remark}[$\tau$ is nonempty] $\tau$ in \texttt{MGProx} is never an empty set. Based on the \texttt{MGProx} iteration, $y_0^{k+1} \in \mathbb{R}_+^n$ so $N_{\mathbb{R}_+^{n_0}}(y_0^{k+1}) \neq \varnothing$. Furthermore both $R$ and $y_0^{k+1}$ are (element-wise) nonnegative, so $y_1^{k+1} = R y_0^{k+1}$ is also nonnegative and therefore $N_{\mathbb{R}_+^{n_1}}(y_1^{k+1}) \neq \varnothing$. \end{remark}
\subsection{Test problem 1: 1-dimensional (Shifted aEOP) on truncated sine wave} \label{sec:exp:subsec:mgprox_test} Here we consider 1-dimensional (Shifted aEOP) with the obstacle $\phi(\xi) = [\sin \xi]_+ \coloneqq \max(0, \sin \xi)$ over the domain $\Omega \coloneqq [0,3\pi]$. It is easy to see that this problem has a closed-form solution. \begin{proposition}\label{thm:truncated_sine} Let $u^*$ be the exact solution to the 1-dimensional (Shifted aEOP). We have $u^*(\xi) = \sin \xi$ for $ \xi \in [0, \frac{1}{2}\pi]$ or $\xi \in [\frac{5}{2}\pi,3\pi]$ and $u^*(\xi) = 1$ for $ \xi \in [\frac{1}{2}\pi, \frac{5}{2}\pi]$.
\end{proposition}
\paragraph{Experimental setup}
We initialize $x_0^1 = x^\textrm{ini}_0$ as a random nonnegative vector and compute the initial function value $F_0(x_0^1)$ and the initial norm of the proximal gradient map $\|G_0(x^ \textrm{ini}_0)\|_2$. We stop the algorithm using the proximal first-order optimality condition, i.e., we stop the algorithm when
$\|G_0(x^k_0)\|_2 / \|G_0(x^\textrm{ini}_0)\|_2 \leq 10^{-15}$. All the experiments are conducted in MATLAB R2022a using a Mac mini (2018) running MacOS on a 3GHz 6-Core Intel i5 processor with 16 GB memory.
We report the following values:
$ \|G_0(x^k_0)\|_2 / \|G_0(x^\textrm{ini}_0)\|_2$ and $ ( F_0(x^k_0) - F_0^\textrm{min} )/ F_0(x^\textrm{ini}_0) $, where $F_0^\textrm{min}$ is the lowest objective function value achieved among all the tested methods. The MATLAB code is available at \url{angms.science}.
\paragraph{MGProx setup} We run two versions of multilevel \texttt{MGProx}: \texttt{MGProx-1} and \texttt{MGProx-10}, where \texttt{MGProx-n} refers to \texttt{MGProx} with $n$ pre-smoothing and post-smoothing steps on all levels. We take enough levels to make the coarsest problem sufficiently small. Specifically, for the 1-dimensional (Shifted aEOP), the coarsest problem has $2^2-1 = 3$ variables. For the $\tau$ selection, on all the levels we use $t = 0$ for the normal cone $\partial i_+$ in \eqref{eqn:subdiff_indicator}.
\paragraph{Line search setup for coarse correction} For the line search of the coarse stepsize, we simply run Algorithm~\ref{aglo:naivelinesearch} (i.e., naive line search) up to machine accuracy. We implement the line search in a way that catastrophic cancellation \cite{vavasis2013some} is avoided so that we can check $F_0\Big( y^{k+1}_0 + \alpha P \big(x^{k+1}_1 - y^{k+1}_1 \big) \Big) \leq F_0\Big( y^{k+1}_0 \Big)$ with machine precision.
For $n>1$, we also consider the \texttt{MGProx}$^+$$-n$ variant of the algorithm, where we put Nesterov's acceleration into the pre-smoothing and post-smoothing steps of \texttt{MGProx}$-n$.
\subsubsection{Comparisons with proximal gradient and with Nesterov's acceleration} \label{sec:exp:compare_nest} We compare \texttt{MGProx} with the proximal gradient method (\texttt{Prox}) with update iteration $x_0^{k+1} = \textrm{prox}_{\frac{1}{L}g_0} \Big( x_0^k - \frac{\nabla f_0(x_0^k)}{L_0} \Big)$, and the proximal gradient with Nesterov's acceleration (\texttt{Nest}) \cite{nesterov1983method,beck2009fast}: starting from $y_0^1 = x_0^1$, \texttt{Nest} performs the iteration $ x_0^{k+1} = \textrm{prox}_{\frac{1}{L}g_0} \Big( y_0^k - \frac{\nabla f_0(y_0^k)}{L_0} \Big) $, $ y_0^{k+1} = x_0^{k+1} + \beta_k \big( x_0^{k+1} - x_0^{k} \big) $, where $\beta_k$ is an extrapolation parameter. Let $F^* = \inf_x F(x)$. It was proved in \cite{beck2009fast} that Nesterov's acceleration improves the functional asymptotic convergence rate of the proximal gradient method under our general conditions for Problem \eqref{prob:minfg} from $F(x^k) - F^* \leq \mathcal{O}(\frac{1}{k})$ to $F(x^k) - F^* \leq \mathcal{O}(\frac{1}{k^2})$, in which the $\mathcal{O}(\frac{1}{k^2})$ rate is optimal for all gradient-based first-order optimization methods \cite{nesterov1983method}. There are many ways to choose $\beta_k$ as long as it satisfies certain conditions (e.g. see \cite[Eq.15]{tseng2008accelerated} or the book by Nesterov \cite{nesterov2003introductory}). In this work we choose $\beta_k = \frac{k-1}{k+2}$, which is a simple and efficient choice for $\beta_k$ \cite{attouch2016rate}. It should be noted that the Nesterov's acceleration method described here is accelerated gradient for the case when the smooth term $f_0$ is convex but not strongly convex. A different choice of extrapolation factor is recommended for strongly convex $f_0$ (e.g., see \cite[Section 10.7.7]{beck2017first}). 
However, in the problem under consideration, the ratio $\frac{\mu_0}{L_0}$ tends rapidly to 0 as the mesh size is increased, a well-known property of discretizations of second-order elliptic problems. Therefore, we decided to treat the problem as non-strongly convex to match the limiting case.
\begin{table}[ht] \centering \arraycolsep=0.01pt
\begin{tabular}{c||ccccc} & \multicolumn{5}{c}{Iterations ($k$)} \\ \hline \# variables & \texttt{Prox} & \texttt{Nest} & \texttt{MG-1} & \texttt{MG-10} & \texttt{MG$^+$-10} \\ \hline\hline $2^8-1=255$ & $3.07\times 10^5$ & $1.65\times 10^5$ & $2.69\times 10^2$ & $4.90\times 10^1$ & $4.20\times 10^1$ \\ $2^{10}-1=1023$& $4.38\times 10^6$ & $9.91\times 10^5$ & $7.87\times 10^2$ & $7.54\times 10^2$ & $1.09\times 10^2$ \\ $2^{12}-1=4095$& $6.18\times 10^7$ & $7.73\times 10^6$ & $5.37\times 10^3$ & $6.13 \times 10^3$ & $2.66 \times 10^3$ \\ \hline \hline & \multicolumn{5}{c}{Time (sec.)} \\ \hline \# variables & \texttt{Prox} & \texttt{Nest} & \texttt{MG-1} & \texttt{MG-10} & \texttt{MG$^+$-10} \\ \hline\hline $2^8-1=255$ & $6.30\times 10^{-1}$ & $3.70\times 10^{-1}$ & $6.33\times 10^{-2}$ & $3.57\times 10^{-2}$ & $3.34\times 10^{-2}$ \\ $2^{10}-1=1023$& $2.05\times 10^1$ & $4.96\times 10^0$ & $2.62\times 10^{-1}$ & $4.21\times 10^{-1}$ & $2.91\times 10^{-1}$ \\ $2^{12}-1=4095$& $1.27\times 10^3$ & $1.88\times 10^2$ & $4.48\times 10^0$ & $8.34\times 10^0$ & $3.81\times 10^0$ \end{tabular} \caption{Convergence results for 1-dimensional (Shifted aEOP). \texttt{Prox} is the proximal gradient method, \texttt{Nest} is the proximal gradient method with Nesterov's acceleration, \texttt{MG-1} is $L$-level \texttt{MGProx} with 1 pre-smoothing and post-smoothing step, \texttt{MG-10} is \texttt{MGProx} with 10 pre-smoothing and post-smoothing steps, and \texttt{MG$^+$-10} is \texttt{MG-10} with Nesterov's acceleration embedded in the pre-smoothing and post-smoothing steps. The \texttt{MGProx} are $L$-level (with $L=6,8,10$ levels for the three rows in the table). } \label{table:1dEOP_sine_across_lv} \end{table}
Referring to Table~\ref{table:1dEOP_sine_across_lv}, we can see that in general \texttt{MGProx} has faster convergence speed than the proximal gradient method and the proximal gradient method with Nesterov's acceleration, both in terms of number of iterations and total execution time. Typical convergence plots are shown in Fig.~\ref{fig:typical}.
\begin{figure}
\caption{Typical convergence plots of \texttt{Prox}, \texttt{Nest}, \texttt{MGProx-1}, \texttt{MGProx-10} and \texttt{MGProx$^+$-10} for 1-dimensional (Shifted aEOP). The number of variables in this experiment is $2^9-1=511$. All \texttt{MGProx} methods use 7 levels. }
\label{fig:typical}
\end{figure}
\begin{comment} \begin{remark}[Combining MGProx with Nesterov's acceleration] In each iteration of \texttt{MGProx} the algorithm builds coarse problems based on the current fine variable at level $\ell = 0$. In other words, if the quality of $x_{0}^k$ is ``bad'', the whole algorithm will propagate such ``bad information'' to the coarse grids. As Nesterov's acceleration has a ripple effect \cite{o2015adaptive}, i.e., Nesterov's acceleration is not monotone, one can expect that it is not beneficial to add Nesterov's acceleration as an outer iteration to \texttt{MGProx} for speed up. We confirmed this in numerical tests (not shown). \end{remark} \end{comment}
\subsubsection{On varying \texttt{MGProx}'s settings} For completeness, we investigate the effect of choosing different $\tau$ in the subdifferential. The subdifferential of $\partial i_+(v)$ in \eqref{eqn:subdiff_indicator} is a normal cone of rays pointing to $-\infty$ with a parameter $t \in [0,\infty)$. In this experiment we test \texttt{MGProx} with $t \in \{0, 0.1, 0.5, 1, 2\}$ and show numerically that the convergence speed of \texttt{MGProx}-$1$ is not sensitive to the choice of $t$, see Fig.\ \ref{fig:t}. In the same figure, for \texttt{MGProx}-$n$ with $n>1$ the convergence speed is sensitive to the choice of $t$, but most of this sensitivity disappears for \texttt{MGProx}$^+$-$n$ with $n>1$, which is the method we use in our numerical tests because it gives the best performance. Here, all the other settings of \texttt{MGProx} are held fixed.
\begin{figure}
\caption{Typical convergence pattern for varying $\tau$ for 1-dimensional (Shifted aEOP), showing that the asymptotic convergence speed of \texttt{MGProx-1} and \texttt{MGProx$^+$-10} is not very sensitive to how $\tau$ is selected within the subdifferential. Here we fix the number of fine-level variables as $2^9-1=511$ and use 7 levels for all \texttt{MGProx} methods. }
\label{fig:t}
\end{figure}
\subsection{Test problem 2: 2-dimensional (Shifted aEOP)}\label{sec:exp:subsec:2dEOP_exp} We report the results of tests for the 2-dimensional (Shifted aEOP). Here $\phi(x,y) = \max\{0,\sin(x)\} \max\{0,\sin(y)\}$ where $x,y \in [0,3\pi]$. This obstacle is the direct generalization of the 1-dimensional (Shifted aEOP) in \cref{sec:exp:compare_nest}. We use the same setting as in test 1. Here we only compare \texttt{Nest} and \texttt{MGProx-25}. The numerical results are shown in Table~\ref{table:2d_peak_across_lv}. In general we have the same conclusions as in \cref{sec:exp:compare_nest}.
\begin{comment} \begin{table}[ht]\label{table:2d_peak_across_lv} \centering \arraycolsep=0.01pt
\begin{tabular}{c||cc|cc}
& \multicolumn{2}{c|}{Iterations ($k$)} & \multicolumn{2}{c}{Time (sec.)} \\ \hline Number of variables & \texttt{Nest} & \texttt{MGProx$^+$-25} & \texttt{Nest} & \texttt{MGProx$^+$-25} \\ \hline\hline $(2^5 - 1)^2 = 961$ & $6.81\times 10^4$ & $1.40\times 10^1$ & $3.74\times 10^{-1}$ & $1.10\times 10^{-2}$ \\ $(2^6 - 1)^2 = 3969$ & $2.40 \times 10^5$ & $2.00\times 10^1$ & $1.32\times 10^{0}$ & $4.63\times 10^{-2}$ \\ $(2^7 - 1)^2 = 16129$ & $7.35\times 10^5$ & $5.00\times 10^1$ & $7.16\times 10^{0}$ & $4.11\times 10^{-1}$ \\ $(2^8 - 1)^2 = 65025$ & $2.51\times 10^6$ & $1.65\times 10^2$ & $1.01\times 10^{2}$ & $4.57\times 10^{0}$ \\ $(2^9 - 1)^2 = 261121$ & $4.84\times 10^6$ & $2.15\times 10^2$ & $8.79\times 10^{2}$ & $3.50\times 10^{1}$ \end{tabular} \caption{Convergence results for 2-dimensional (Shifted aEOP). \texttt{Nest} is the proximal gradient method with Nesterov's acceleration. \texttt{MGProx$^+$-25} is $L$-level ({\color{red}with $L=3,4,5,6,7$ for each row of the table}) \texttt{MGProx} $^+$ with 25 pre-smoothing and post-smoothing steps. } \end{table} \end{comment}
\begin{table}[ht] \centering \arraycolsep=0.01pt
\begin{tabular}{c||cccc} & \multicolumn{4}{c}{Iterations ($k$)} \\ \hline \# variables & \texttt{Prox} & \texttt{Nest} & \texttt{MGProx-1} & \texttt{MGProx$^+$-25} \\ \hline\hline $(2^5 - 1)^2 = 961$ & $7.93 \times 10^3$ & $7.49 \times 10^3$ & $9.30 \times 10^1$ & $1.60 \times 10^1$ \\ $(2^7 - 1)^2 = 16129$ & $1.23 \times 10^5$ & $7.12 \times 10^4$ & $4.63 \times 10^2$ & $5.70 \times 10^1$ \\ $(2^9 - 1)^2 = 261121$ & - & $4.84\times 10^6$ & $6.36\times 10^3$ & $7.85\times 10^2$ \\ \hline \hline & \multicolumn{4}{c}{Time (sec.)} \\ \hline \# variables & \texttt{Prox} & \texttt{Nest} & \texttt{MGProx-1} & \texttt{MGProx$^+$-25} \\ \hline\hline $(2^5 - 1)^2 = 961$ & $3.98\times 10^{-2}$ & $3.97\times 10^{-2}$ & $3.80\times 10^{-2}$ & $1.23\times 10^{-2}$ \\ $(2^7 - 1)^2 = 16129$ & $1.11\times 10^{1}$ & $7.42\times 10^{0}$ & $9.55\times 10^{-1}$ & $5.11\times 10^{-1}$ \\ $(2^9 - 1)^2 = 261121$ & - & $8.79\times 10^{2}$ & $2.55\times 10^2$ & $1.23\times 10^2$ \end{tabular} \caption{Convergence results for 2-dimensional (Shifted aEOP). \texttt{Nest} is the proximal gradient method with Nesterov's acceleration. \texttt{MGProx$^+$-25} is $L$-level \texttt{MGProx}$^+$ with 25 pre-smoothing and post-smoothing steps. Here $L=3,5,7$ for \texttt{MGProx} in the three rows of the table.} \label{table:2d_peak_across_lv} \end{table}
\subsection{Test problem 3: (Shifted aEOP) in penalty form}\label{sec:exp:subsec:EOP:penalty} We now consider \begin{equation} \displaystyle \min_{ u }
\displaystyle \int_\Omega \dfrac{1}{2} \| \,\nabla u \, \|^2_{L^2} \textrm{dx}
+ \lambda \int_{\Omega} \| \,(\phi - u)_+ \, \|_{L^1} \textrm{dx} ~\text{ s.t. }~ u = 0 \text{ on } \partial \Omega, \tag{aEOP-penalty} \label{EOP_pen} \end{equation}
where $\| \,\cdot\, \|_{L^1}$ is the $L^1$ norm of a function and $\lambda >0$ is a pre-defined penalty parameter. Following a similar procedure as in \cref{sec:exp:subsec:EOP} to discretize (aEOP-penalty) yields the problem $\displaystyle \min_u \dfrac{1}{2} \langle Q_0 u, u \rangle
+ \lambda \| \,(\phi - u)_+ \, \|_1 $. Since $u \geq \phi$ is equivalent to $v \coloneqq u - \phi \geq 0$, a change of variables yields \begin{equation} \min_v \dfrac{1}{2} \langle Q_0 v, v \rangle - \langle p_0, v \rangle
+ \lambda \| \,(-v)_+ \, \|_1. \label{prob:elastic:displace:penalty} \tag{Shifted aEOP-penalty} \end{equation}
Given a vector $c$, the subdifferential and proximal operator of $\lambda \|\, (c-v)_+ \,\|_1$ are \begin{equation} \Big[
\partial \| \, (c-v)_+ \, \|_1 \Big]_i = \begin{cases} -1 & v_i < c_i \\ [-1,0] & v_i = c_i \\ 0 & v_i > c_i \end{cases} , ~\, \Big[
\textrm{prox}_{\lambda \| \, (c-\,\cdot\,)_+ \, \|_1}(v) \Big]_i = \begin{cases} v_i + \lambda & v_i + \lambda < c_i \\ c_i & v_i \leq c_i \leq v_i + \lambda \\ v_i & v_i > c_i \end{cases}. \label{eqn:subdiff_prox_maxpen} \end{equation} In the experiments, we use the same setting as in \cref{sec:exp:compare_nest}, with the penalty parameter $\lambda = 90$. Table~\ref{table:1dEOPpen_sine_across_lv} shows 1-dimensional experimental results, and Fig.~\ref{fig:EOPpen1d} shows typical convergence curves of the algorithms. In general we have the same conclusion as in \cref{sec:exp:compare_nest}, and \texttt{MGProx} performs clearly better than competing methods. For the 2-dimensional (Shifted aEOP-penalty), we have similar results as in \cref{sec:exp:subsec:2dEOP_exp} and we do not show them here.
\begin{table}[ht] \centering \arraycolsep=0.01pt
\begin{tabular}{c||cccc} & \multicolumn{4}{c}{Iterations ($k$)} \\ \hline \# variables & \texttt{Prox} & \texttt{Nest} & \texttt{MGProx-1} & \texttt{MGProx$^+$-10} \\ \hline\hline
$2^8-1=255$ & $2.99\times 10^5$ & $7.62\times 10^5$ & $7.60\times 10^2$ & $3.90\times 10^1$ \\ $2^{10}-1=1023$& $4.27\times 10^6$ & $8.71\times 10^5$ & $3.01\times 10^2$ & $5.90\times 10^1$ \\ $2^{12}-1=4095$& $6.01\times 10^7$ & $7.93\times 10^6$ & $1.32\times 10^3$ & $1.03 \times 10^2$ \\ \hline \hline & \multicolumn{4}{c}{Time (sec.)} \\ \hline \# variables & \texttt{Prox} & \texttt{Nest} & \texttt{MGProx-1} & \texttt{MGProx$^+$-10} \\ \hline\hline
$2^8-1=255$ & $8.56\times 10^{-1}$ & $2.23\times 10^{-1}$ & $2.50\times 10^{-2}$ & $1.74\times 10^{-2}$ \\ $2^{10}-1=1023$& $3.10\times 10^1$ & $6.65\times 10^0$ & $1.36\times 10^{-1}$ & $4.46\times 10^{-2}$ \\ $2^{12}-1=4095$& $2.06\times 10^3$ & $3.01\times 10^2$ & $1.13\times 10^0$ & $2.12\times 10^{-1}$ \end{tabular} \caption{Convergence results for 1-dimensional (Shifted aEOP-penalty). } \label{table:1dEOPpen_sine_across_lv} \end{table}
\begin{figure}
\caption{Typical convergence pattern for 1-dimensional (Shifted aEOP-penalty). The problem has $2^{9}-1 = 511$ variables. Here $L=7$ for \texttt{MGProx} and $\lambda=90$. }
\label{fig:EOPpen1d}
\end{figure}
\subsection{Test problem 4: Nonlinear EOP}\label{sec:exp:subsec:EOP:origin} We now report results on solving the original EOP without linearization. In particular, we consider the most complicated problem expression: the penalty form of EOP without the change of variable. We assume zero boundary conditions. The discretized problem is \begin{equation}
\min_{u} \sqrt{1 + \langle Q u, u \rangle } + \lambda \| \, (\phi - u )_+\, \|_1. \tag{EOP-penalty} \end{equation} Note that this problem has not been considered in most of the EOP literature. In particular, previous multigrid methods for aEOP do not apply to (EOP-penalty).
\paragraph{\texttt{MGProx} can be used to solve EOP-penalty}
Before we report results, we first show in the following proposition that (EOP-penalty) is within the framework of Problem \eqref{prob:minfg}. For the subdifferential and proximal operator of $\lambda \| \, (\phi - u )_+\, \|_1$, see Equation \eqref{eqn:subdiff_prox_maxpen}.
\begin{proposition}
$\sqrt{1 + \langle Q u, u \rangle }$ is strongly convex on bounded domains and its gradient is $\|Q\|_2$-Lipschitz. \begin{proof} Let $f(u) \coloneqq \sqrt{1 + \langle Q u, u \rangle }$. Then $\nabla f(u) = \frac{Qu}{\sqrt{1 + \langle Q u, u \rangle }}$ and \begin{equation} \nabla^2 f(u) ~=~ \frac{Q}{\sqrt{1 + \langle Q u, u \rangle }} - \frac{Quu^\top Q}{(1 + \langle Q u, u \rangle )^{\frac{3}{2}} } ~=~ \frac{A^\top A }{\sqrt{1 + \langle A u, Au \rangle }} - \frac{A^\top A uu^\top A^\top A}{(1 + \langle A u, Au \rangle )^{\frac{3}{2}} }, \label{eqn:hess_eop} \end{equation} where $A^\top A = Q$ and the columns of $A$ are linearly independent. With $Q$ being the discretization of the Laplacian $-\nabla^2$, the matrix $A$ is the discretization of the gradient operator $\nabla$ and $A^\top$ is the discretization of the gradient transpose $\nabla^\top$, which is the negative of divergence, $-\textrm{div} \,\cdot\,$.
We now establish that $f(u)$ is strongly convex in $u$ on a bounded domain by showing $\nabla^2 f(u) $ is positive definite. For $\zeta \neq 0$, \begin{equation} \begin{array}{rcl} \langle \nabla^2 f(u) \zeta, \, \zeta \rangle &\overset{\eqref{eqn:hess_eop}}{=}& \displaystyle \frac{\zeta^\top A^\top A \zeta}{\sqrt{1 + \langle A u, Au \rangle }} - \frac{ \zeta^\top A^\top A uu^\top A^\top A \zeta}{(1 + \langle A u, Au \rangle )^{\frac{3}{2}} } \\ &=& \displaystyle
\frac{\| A \zeta \|_2^2}{\sqrt{1 + \langle A u, Au \rangle }} -
\frac{ \big| \langle Au , A \zeta \rangle \big|^2}{(1 + \langle A u, Au \rangle )^{\frac{3}{2}} } \\ &\overset{\text{Cauchy-Schwarz}}{\geq}& \displaystyle
\frac{\| A \zeta \|_2^2}{\sqrt{1 + \langle A u, Au \rangle }} -
\frac{ \| Au\|_2^2 \, \|A \zeta\|_2^2}{(1 + \langle A u, Au \rangle )^{\frac{3}{2}} } \\ &=& \displaystyle
\frac{ \| A \zeta \|_2^2}{(1 + \langle A u, Au \rangle )^{\frac{3}{2}} } \\ &\overset{\zeta\neq0}{>}& 0. \end{array} \label{eqn:hess_eop_derive} \end{equation}
Now we show $\nabla f(u)$ is $\| Q\|_2$-Lipschitz. For all $\zeta \neq 0$, the second line of \eqref{eqn:hess_eop_derive} gives \[
\langle \nabla^2 f(u) \zeta, \, \zeta \rangle ~\overset{\eqref{eqn:hess_eop_derive}}{\leq}~
\frac{\| A \zeta \|_2^2}{\sqrt{1 + \langle A u, Au \rangle }} ~\leq~
\| A \zeta \|_2^2 ~=~
\langle A \zeta, \, A \zeta \rangle ~\overset{A^\top A = Q}{=}~
\langle Q \zeta, \, \zeta \rangle \] which implies $\nabla^2 f(u) \preceq Q$ for all $u$, and equality holds when $u = 0$. We finish the proof by the fact that the largest eigenvalue of $\nabla^2 f$ is the Lipschitz constant of $\nabla f$ \cite[Lemma 1.2.2]{nesterov2003introductory}. \end{proof} \end{proposition}
\paragraph{Experimental results} We use the same settings as in the previous experiments. In general we have the same conclusions as in \cref{sec:exp:compare_nest} on both the 1-dimensional (EOP-penalty) and 2-dimensional (EOP-penalty). For example, for the 1-dimensional (EOP-penalty) with $2^8-1 = 255$ variables, \begin{itemize}
\item the proximal gradient method took $1.72\times 10^7$ iterations (65.79 seconds) to converge,
\item the proximal gradient method with Nesterov's acceleration took $9.73\times 10^6$ iterations (38.44 seconds) to converge,
\item \texttt{MGProx-1} took $2.28\times 10^4$ iterations (11.03 seconds) to converge,
\item \texttt{MGProx-10} took $1.73\times 10^3$ iterations (1.13 seconds) to converge,
\item \texttt{MGProx$^+$-10} took $1.73\times 10^3$ iterations (1.15 seconds) to converge. \end{itemize} Here we set $\lambda=5$ in the model and $L=6$ for all \texttt{MGProx} methods. The convergence patterns look similar to Fig.~\ref{fig:EOPpen1d}.
\begin{comment} {\color{red} \paragraph{On the grid-independent convergence of MGProx} A reason why classical multigrid is popular on certain structured smooth problems is that the number of iterations needed to each convergence is independent of problem size. This is not the case for \texttt{MGProx} applied to EOP problems as the number of iterations needed to reach convergence for \texttt{MGProx} increases with problem size. We comment that this may be due to the fact that we are employing a coordinate-descent type update in the coarse correction step: in the coarse correction step, due to the definition of the adaptive prolongation $P$, we only update a portion of the components of the fine variable vector. Leaving some elements in the fine variable untouched may be is the reason why \texttt{MGProx} does not achieve classical multigrid convergence.
When the active sets on all levels are fixed, \texttt{MGProx} achieves grid-independent convergence. For example, when $\phi = -1$ the projection is never activated, and in this case \texttt{MGProx} reduces to classical multigrid. } \end{comment}
\section{Conclusion}\label{sec:conc} In this work we study the combination of proximal gradient descent and multigrid method for solving a class of possibly non-smooth strongly convex optimization problems. We propose the \texttt{MGProx} method, introduce the adaptive restriction operator and provide theoretical convergence results. Numerical results confirm the efficiency of \texttt{MGProx} compared with the proximal gradient method and proximal gradient method with Nesterov's acceleration for solving various Elastic Obstacle Problems.
\end{document} |
\begin{document}
\title{Scheduling of Operations in Quantum Compiler $^*$ \thanks{*) © 2020 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, including reprinting/republishing this material for advertising or promotional purposes, collecting new collected works for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works. Full citation of this paper: Toshinari Itoko and Takashi Imamichi. ``Scheduling of Operations in Quantum Compiler,'' in \textit{Proceedings of the International Conference on Quantum Computing and Engineering}. IEEE, 2020, pp. 337-344.} }
\author{\IEEEauthorblockN{Toshinari Itoko} \IEEEauthorblockA{\textit{IBM Quantum} \\ \textit{IBM Research - Tokyo}} \and \IEEEauthorblockN{Takashi Imamichi} \IEEEauthorblockA{\textit{IBM Quantum} \\ \textit{IBM Research - Tokyo}} }
\maketitle
\begin{abstract} When scheduling quantum operations, a shorter overall execution time of the resulting schedule yields a better throughput and higher fidelity output. In this paper, we demonstrate that quantum operation scheduling can be interpreted as a special type of job-shop problem. On this basis, we provide its formulation as Constraint Programming while taking into account commutation between quantum operations. We show that this formulation improves the overall execution time of the resulting schedules in practice through experiments with a real quantum compiler and quantum circuits from two common benchmark sets. \end{abstract}
\section{Introduction} \label{sec:introduction}
Although rapid progress in quantum computing device technology has dramatically increased the coherence time of quantum bits (or qubits), the currently available quantum computers remain in the so-called noisy intermediate scale quantum regime~\cite{preskill2018quantum}. For noisy quantum computers, it is important to schedule the operations on qubits to be as short as possible because this increases the probability of completing all of the operations before any qubit decoheres, thus obtaining computational results with higher fidelity. Even for fault-tolerant quantum computers, shortening the duration of compiled schedules would increase the throughput.
Compilers for quantum computers (or quantum compilers) take a quantum circuit, which is a sequence of quantum operations, as an input program and generate a corresponding sequence of control instructions that are executable on the target hardware. For example, in the case of quantum computers using superconducting qubits, a quantum operation is compiled into several controls (e.g., a microwave pulse) for a certain period of time. In general, any given quantum operation has its own processing time and occupies its acting qubits for the duration as a computational resource. For this reason, scheduling, through which the execution start time of each quantum operation is determined without any overlapping, is an essential task in quantum compilers. We call this task \emph{quantum operation scheduling}. In this paper, we aim to minimize the overall execution time. In the context of scheduling tasks across multiple resources (qubits, in the case of quantum operation scheduling), the time between the start of the first task and the end of the final task across all resources is known as the makespan of the schedule. Schedule length, overall execution time, and makespan are used interchangeably in this work.
The rough compilation flow considering the quantum operation scheduling task independently is as follows. \begin{enumerate} \item Gate decomposition: A task decomposes unitary operations called \emph{gates} with three or more qubits into those with one or two qubits. \item Local simplification: A task simplifies a specific sequence of gates into one gate (or cancels them out). \item Qubit routing: A task transforms a given circuit into an equivalent circuit so that all two-qubit gates are executed on limited pairs of qubits (depending on the physical implementation of the quantum computing device). \item Quantum operation scheduling. \item Control instruction mapping: A task maps each quantum operation to the corresponding control instructions. \end{enumerate}
Most of the previous studies on quantum compilers have handled quantum operation scheduling within the context of its before and after tasks, e.g., qubit routing and control instruction mapping ~\cite{venturelli2017temporal,metodi2006scheduling,guerreschi2018two,shi2019optimized}. However, in practice, decomposing an entire compilation job into independent tasks is becoming more common in the software architecture of quantum compilers, similar to that of classical compilers, e.g.,~\cite{qiskit}. Therefore, we focus on the following research question: How much can we optimize the resulting schedule in quantum operation scheduling by itself?
In this paper, we examine quantum operation scheduling (QOS) and analyze its theoretical properties and practical usefulness. Our main contributions are as follows. \begin{itemize} \item We show that QOS obtains greater degrees of freedom for optimizing the resulting schedule by further considering the commutativity of particular quantum operations (Section~\ref{sec:qos}). \item We demonstrate that QOS can be reduced to a special type of job-shop problem that has a disjunctive graph representation (Section~\ref{sec:qos-jsp}) so that we can formulate QOS as Constraint Programming and Mixed Integer Programming, which are common techniques for the job-shop problem or scheduling in general (Section~\ref{sec:solution}). \item We demonstrate through experiments with two common benchmark sets that the consideration of commutativity in QOS reduces the schedule length by up to 7.36\%~(Section~\ref{sec:experiment}). \end{itemize}
\section{Related Work} \label{sec:related-work}
The job-shop problem, also known as job shop scheduling, is a well known optimization problem in computer science and operations research, and many variations of it have been studied~\cite{blazewicz1996job, zhang2019review}. See Section~\ref{sec:jsp} for its definition. Algorithms to solve this problem include exact ones such as branch-and-bound based on a Mixed Integer Programming (MIP) formulation~\cite{manne1960job}, heuristic ones such as shifting bottleneck~\cite{adams1988shifting}, and meta-heuristic ones such as simulated annealing~\cite{van1992job}. In this paper, we mainly focus on the exact algorithm, as we want to determine the effect of optimizing quantum operation scheduling (QOS).
Task scheduling, which is the scheduling of computational tasks on multiple classical processors, has been extensively studied~\cite{topcuoglu2002performance,sinnen2007task}. A special type of task scheduling, Directed Acyclic Graph (DAG) scheduling, which deals with heterogeneous processors~\cite{canon2008comparative,valouxis2013dag}, is most similar to QOS, but it differs in the way that resource constraints are handled. In DAG scheduling, every task can be executed on any processor with a different cost, i.e., the resource constraint is soft, while in quantum operation scheduling, every quantum operation has fixed qubit operands that are not interchangeable, i.e., the resource constraint is hard.
Qubit routing is a task that transforms a given circuit into an equivalent circuit so that all two-qubit operations in it can be executed on limited pairs of qubits. Schedule length can be approximated by circuit depth, which is the schedule length when assuming all operations have the same unit processing time. Therefore, qubit routing with the objective of minimizing the circuit depth~\cite{maslov2008quantum,bhattacharjee2017depth} or two-qubit gate depth~\cite{cowtan2019qubit,childs2019circuit} can be viewed as approximate quantum operation scheduling with qubit routing. Although the algorithms for this may be applicable to scheduling without qubit routing, they provide only approximate solutions, not the exact ones to QOS.
While we define quantum operation scheduling independently of hardware technology, there are several studies on scheduling specialized for quantum computers based on ion trap technology~\cite{mohammadzadeh2009improving, bahreini2015minlp}. These works consider a combination of scheduling and qubit routing under a hardware structure model, called macroblocks,
and propose heuristic algorithms to solve it.
Several studies have considered the commutation of quantum operations in scheduling~\cite{venturelli2017temporal,metodi2006scheduling,guerreschi2018two,shi2019optimized}.
Venturelli et al.~\cite{venturelli2017temporal} examined the scheduling of quantum operations as a subproblem of qubit routing. They proposed an exact method using a temporal planner and showed it works well for QAOA circuits, which have many commuting gates. Although their method is applicable to quantum operation scheduling without qubit routing, our methods discussed in Section~\ref{sec:solution} are simpler and perform sufficiently well for the specific scheduling problem considered in this paper.
Guerreschi and Park~\cite{guerreschi2018two} proposed a two-step solution that decomposes the problem with qubit routing and solves quantum operation scheduling (without qubit routing) in the first step. They provide a list scheduling heuristic algorithm using upward ranking but not any exact algorithm for scheduling.
Other studies have considered quantum operation scheduling as a subtask of qubit routing~\cite{metodi2006scheduling} or control optimization~\cite{shi2019optimized}. While they provide practical heuristic algorithms for solving the task that includes scheduling, the exact algorithm for scheduling is not discussed.
\section{Problem} \label{sec:problem}
\subsection{Quantum Operation Scheduling} \label{sec:qos} We define quantum operation scheduling as the problem of finding a \emph{schedule} for a given quantum circuit. A quantum circuit is a sequence of quantum operations. Many of them are unitary operations called \emph{gates}. Each of the quantum operations has acting qubits and its own processing time. A quantum circuit is given as a sequence: e.g., $[H(1), \ensuremath{\mathit{CX}}(1, 2), X(2)]$. Here, $H(1)$ denotes a Hadamard gate acting on qubit $1$, $\ensuremath{\mathit{CX}}(1, 2)$ denotes a Controlled-NOT (or CNOT) gate acting on control qubit $1$ and target qubit $2$, and $X(2)$ denotes a NOT gate acting on qubit $2$. Quantum circuits are typically depicted in a circuit diagram, as shown in Fig.~\ref{fig:qc}.
\begin{figure}
\caption{Diagram representation of a quantum circuit}
\label{fig:qc}
\end{figure}
For simplicity, we assume all of the operations have the same unit processing time in Fig.~\ref{fig:qc}. If we ignore commutation between gates, the gate dependency graph is linear, i.e., $H(1)$ must precede $\ensuremath{\mathit{CX}}(1, 2)$ and $\ensuremath{\mathit{CX}}(1, 2)$ must precede $X(2)$, and we obtain a trivial schedule (makespan = 3), as shown in Fig.~\ref{fig:std-sched}.
We call the graph representing the dependencies among gates in a circuit the \emph{dependency graph}. In contrast, if we consider that $\ensuremath{\mathit{CX}}(1, 2)$ and $X(2)$ commute, we have a different dependency graph: $H(1)$ must precede $\ensuremath{\mathit{CX}}(1, 2)$, but there is no restriction on $X(2)$, so we can obtain a shorter schedule (makespan = 2), as shown in Fig.~\ref{fig:ext-sched}. This is compelling evidence that commutation rules should be considered when scheduling circuit operations.
\begin{figure}
\caption{Standard DAG}
\label{fig:dag-std}
\caption{Schedule with makespan = 3}
\label{fig:sched-3}
\caption{Standard dependency graph and resulting schedule}
\label{fig:std-sched}
\end{figure} \begin{figure}
\caption{Extended DAG}
\label{fig:dag-ext}
\caption{Schedule with makespan = 2}
\label{fig:sched-2}
\caption{Extended dependency graph and resulting schedule}
\label{fig:ext-sched}
\end{figure}
A schedule is defined by the start times of the operations in a given circuit. Any schedule must satisfy two elementary constraints: \emph{precedence} and \emph{non-overlap}. The precedence constraint restricts the execution order of operations to obey a partial order represented as a dependency graph. The non-overlap constraint allows only the processing of one operation on a qubit at a time.
Generally, the supported basic operations and the processing times depend on the target hardware. Hereafter, we assume that basic operations are given and that all circuits have already been decomposed into them. We also assume that each processing time of the basic operations is fixed and given as a parameter. The dependency graph of a provided quantum circuit varies depending on which commutation rules are considered. Taking these details into account, we formally define a quantum operation scheduling problem as follows. \newtheorem{qos}{Quantum Operation Scheduling} \renewcommand{\theqos}{} \begin{qos} Given a quantum circuit as a sequence of basic operations, each processing time of each operation, and a set of commutation rules between basic operations, find a schedule that satisfies precedence and non-overlap constraints with the minimum makespan. \end{qos}
\subsection{Job-shop Problem and its Disjunctive Graph Representation} \label{sec:jsp} We review a basic version of the job-shop problem as follows. Let $J=\{J_1,\ldots,J_n\}$ be a set of $n$ jobs and $M=\{M_1,\ldots,M_m\}$ be a set of $m$ machines. Each job $J_j$ has an operation sequence $O_j$ to be processed in a specific order, called the \emph{precedence constraint}. We denote the $k$-th operation in $O_j$ by $O_{jk}$. Each operation $O_{jk}$ requires exclusive use of a specific machine for its processing time $p_{jk}$, called the \emph{non-overlap constraint}. A schedule is a set of start (or completion) times $t_{jk}$, one for each operation, that satisfies both constraints. The objective of the job-shop problem is minimization of the makespan.
The job-shop problem is often represented by a disjunctive graph $G = (V, C \cup D)$, where \begin{itemize} \item $V$ is a set of nodes representing the operations $O_{jk}$, \item $C$ is a set of conjunctive (directed) edges representing the order of the operations in any job, and \item $D$ is a set of disjunctive edges representing pairs of operations that must be processed on the same machine. \end{itemize} For each node, the processing time and the required machine of its corresponding operation are attached. Conjunctive edges $C$ represent the precedence constraint and disjunctive edges $D$ represent the non-overlap constraint. Note that disjunctive edges whose direction is fixed by some conjunctive edges can be omitted. That means any disjunctive edge can be removed if there exists a path from one end of the edge to the other on a conjunctive graph $(V, C)$. Figure~\ref{fig:jsp-graph} shows an example of a disjunctive graph representing the job-shop problem. The operation $O_{11}$ must be processed on machine $M_1$ and it takes $1$ time unit. The disjunctive edge $(O_{12}, O_{13})$ is omitted since its direction is fixed by the conjunctive edge at the same place.
\begin{figure}
\caption{Disjunctive graph representation of a job-shop problem}
\label{fig:jsp-graph}
\end{figure}
On the basis of this disjunctive graph representation, the job-shop problem can be seen as a problem of determining the direction of disjunctive edges while keeping the resulting graph acyclic. This is equivalent to determining the ordering of the operations processed on the same machine, and such ordering yields a unique schedule, called a \emph{semi-active schedule}~\cite{fleming1997genetic}, by sequencing operations as early as possible.
\begin{figure}
\caption{Two solutions to the job-shop problem in Fig.~\ref{fig:jsp-graph}}
\label{fig:jsp-solutions}
\end{figure}
Figure~\ref{fig:jsp-solutions} shows two solutions to the job-shop problem defined by the disjunctive graph depicted in Fig.~\ref{fig:jsp-graph}. As shown, a different selection of the direction of the disjunctive edges results in a different solution. Among the operations $\{O_{12}, O_{13}, O_{22}\}$, we cannot select \{$(O_{22}, O_{12})$, $(O_{13}, O_{22})$\} because it produces a cycle $O_{12} \rightarrow O_{13} \rightarrow O_{22} \rightarrow O_{12}$. In Solution A, the directed edge $(O_{11}, O_{21})$ determines the order of operations processed on machine $M_1$, and \{$(O_{12}, O_{22})$, $(O_{13}, O_{22})$\} determine that on machine $M_2$.
\subsection{Disjunctive Graph Representation of Quantum Operation Scheduling} \label{sec:qos-jsp} Technically, quantum operation scheduling can be seen as a special type of job-shop problem with the following properties: (1) one job has one operation, (2) a precedence constraint is given as a partial ordering among all operations
instead of a total ordering of operations per job, and (3) multiple machines (i.e., qubits) can be occupied by a single operation at the same time. Those properties preserve enough conditions to represent the problem by a disjunctive graph $G = (V, C \cup D)$. For property (1), we need to define the problem on operations without jobs, but this does not change the fact that nodes $V$ represent operations. For properties (2) and (3), we need to modify the definition of conjunctive edges $C$ and disjunctive edges $D$, respectively, as follows.
The quantum operation scheduling can be represented by a disjunctive graph $G = (V, C \cup D)$, where \begin{itemize} \item $V$ is a set of nodes representing the quantum operations in a given circuit, \item $C$ is a set of conjunctive edges representing dependencies among the operations, i.e., edges of the dependency graph, and \item $D$ is a set of disjunctive edges representing pairs of operations that act on the same qubit and (possibly) commute with one another. \end{itemize} For each operation (i.e., node) $i \in V$, the processing time $p_i$ and the acting qubits are attached. Note that the conjunctive graph $(V, C)$ is a DAG given as a dependency graph. Conjunctive edges $C$ and disjunctive edges $D$ still represent the precedence constraint and non-overlap constraint, respectively. It is known that the dependency graph for any circuit can be computationally constructed under several popular commutation rules~\cite{itoko2020optimization}. When provided with the dependency graph of a quantum circuit, the disjunctive graph representation can be computationally constructed. In fact, it is possible to define disjunctive edges by all the pairs of nodes that act on the same qubit. This definition is redundant, but there is no problem with including edges (pairs of operations) that do not commute with one another. The point is that it must include all of the commuting pairs. Starting from the redundant edges, we can define the minimal disjunctive edges by removing edges that have a path on the conjunctive graph. However, this comes at a significant computation cost. There is an in-between definition that picks up operations acting on any qubit and splits them into sets of operations that commute with each other within each of the sets. We used this definition for the experiments discussed in Section~\ref{sec:experiment}. 
Although this cannot provide the minimal edges because the operations within a set may not commute with each other when considering operations acting on the other qubits, it has fewer edges than the most redundant definition and requires less computation cost than the minimal definition.
With the disjunctive graph representation of the quantum operation scheduling, we take the commutation of operations into account on the basis of the difference of the dependency graph. We call the dependency graph that considers only the trivial commutation between operations not sharing their acting qubits \emph{standard DAG} and the dependency graph that considers commutation rules in addition to the trivial ones \emph{extended DAG}.
\begin{figure}
\caption{Standard DAG}
\label{fig:qos-std}
\caption{Extended DAG}
\label{fig:qos-ext}
\caption{Disjunctive graphs representing quantum operation scheduling for different dependency graphs}
\label{fig:qos-graphs}
\end{figure}
Figure~\ref{fig:qos-graphs} shows disjunctive graphs representing two quantum operation scheduling problems that have the same operations but different dependency (conjunctive) graphs (standard DAG and extended DAG). In the case of extended DAG, we can select the order of $O_2$ and $O_3$, while in standard DAG, all the orders among operations are fixed.
It generally holds that if we consider a standard DAG, there are no disjunctive edges, i.e., $D=\emptyset$. This means there is a unique semi-active schedule because each ordering of the operations processed on the same qubit is uniquely determined by the conjunctive DAG. However, if we consider an extended DAG, we have fewer edges in $C$ and some edges in $D$. This creates the room for selecting a better ordering of the operations processed on the same qubit, i.e., optimizing the schedule.
\section{Formulation} \label{sec:solution} We provide a Constraint Programming (CP) formulation and a Mixed Integer Programming (MIP) formulation for the quantum operation scheduling problem (QOS) defined in the previous section. By solving them, we can find the optimal solution of QOS and analyze how much we can improve the resulting schedule in QOS. In this section, we assume that the disjunctive graph representation of quantum operation scheduling $G = (V, C \cup D)$ for a given circuit has already been constructed.
\subsection{Constraint Programming Formulation} \label{sec:cp} Let $x_i$ be an interval variable describing the start and end time of operation $i \in V$, whose duration is fixed to its processing time $p_i$. Using functions commonly supported by CP solvers, e.g. $\mathbf{interval\_var}$, quantum operation scheduling is formulated as a CP:
\[
\begin{array}{ll}
\mbox{minimize} & \max \{\mathbf{end\_of}(x_i) \mid i \in V\} \\
\mbox{subject to} & \mathbf{end\_before\_start}(x_i, x_j),\ \forall (i, j) \in C,\\
& \mathbf{no\_overlap}(x_k, x_l),\ \forall (k, l) \in D,\\
& x_i \equiv \mathbf{interval\_var}(\mathrm{duration}=p_i),\ \forall i \in V.\\
\end{array}
\] Here $\mathbf{end\_of}(x_i)$ takes the end time of $x_i$, so the objective is the minimization of the makespan. The constraint $\mathbf{end\_before\_start}(x_i, x_j)$ means the end time of $x_i$ must precede the start time of $x_j$, so it represents the precedence constraint. The constraint $\mathbf{no\_overlap}(x_k, x_l)$ means the interval $x_k$ must not overlap the interval $x_l$, so it represents the non-overlap constraint. Note that, for any operation pair $(i, j) \not\in D$, the precedence constraint guarantees no overlap between them. All of $\mathbf{end\_of}$, $\mathbf{end\_before\_start}$, $\mathbf{no\_overlap}$, and $\mathbf{interval\_var}$ are supported in the IBM ILOG CP Optimizer.
\subsection{Mixed Integer Programming Formulation} \label{sec:mip} Let $x_i$ be a variable representing the start time of operation $i \in V$ and $p_i$ be a constant parameter representing its processing time. Let $t$ be a makespan of the schedule defined by $x$. Let $y_{kl}$ be an indicator (Boolean) variable that takes True (1) if operation $k$ precedes operation $l$ and False (0) if $l$ precedes $k$. Using these, quantum operation scheduling is formulated as a MIP:
\[
\begin{array}{ll}
\mbox{minimize} & t \\
\mbox{subject to} & x_i + p_i \leq x_j,\ \forall (i, j) \in C,\\
& \ y_{kl} \Rightarrow x_k + p_k \leq x_l,\ \forall (k, l) \in D,\\
& \neg y_{kl} \Rightarrow x_l + p_l \leq x_k,\ \forall (k, l) \in D,\\
& x_i + p_i \leq t,\ \forall i \in V,\\
& 0 \leq x_i \in \mathbb{R},\ \forall i \in V,\\
& y_{kl} \in \{0, 1\},\ \forall (k, l) \in D.\ \\
\end{array}
\] The inequality $x_i + p_i \leq x_j$ represents the precedence constraint. The two inequalities using $\Rightarrow$ represent the non-overlap constraint.
Note that the constraints with indicator variables $y$ can be translated into linear constraints by applying the so-called big-M technique. However, recent MIP solvers have the capability to handle such indicator constraints very well, so we keep the formulation with indicator variables.
\section{Experiment} \label{sec:experiment}
We conducted two experiments. In the first one, we evaluate how much the consideration of commutation between operations improves the schedule in quantum operation scheduling. In the second one, we investigate how much the extent of improvement in scheduling can be affected by the optimization level in a previous task.
\subsection{Common Experimental Settings} Both of the experiments were conducted in a real compiling environment. As the target quantum computing device for compilation, we used the IBM Q Johannesburg, which has 20 qubits (see~\cite{corcoles2019challenges} for the details). We implemented our scheduling algorithms within Qiskit 0.18.0 (Terra 0.13.0), which is an open-source quantum computing software development framework~\cite{qiskit}.
We first transpiled all of the circuits to make them executable on the IBM Q Johannesburg, i.e., we solved the qubit routing problem to map given circuits onto the device topology. For this, we used the ``transpile()'' function in Qiskit and set the ibmq\_johannesburg backend, fixed the seed\_transpiler to 1, the optimization\_level to 2, and left the other options as the default. During transpiling, all of the circuits were decomposed into the basis gates $\{u1, u2, u3, \ensuremath{\mathit{CX}}\}$, which are elementary gates supported by the backend. Here $u1, u2, u3$ are single-qubit gates and $\ensuremath{\mathit{CX}}$ is a two-qubit gate. The execution time for each gate (gate length) is provided as the backend properties. Note that it can differ depending on which qubit(s) the gate acts on, e.g. the length of $u3(1)$ can be different from that of $u3(2)$. We used those as of April 11, 2020.
We then applied our scheduling algorithms to the transpiled circuits. We used the real processing time for each of the basis gates provided as backend properties. We considered three commutation rules on the basis gates---$u1(i)\leftrightarrow\ensuremath{\mathit{CX}}(i,j)$, $\ensuremath{\mathit{CX}}(i,j)\leftrightarrow\ensuremath{\mathit{CX}}(i,k)$, and $\ensuremath{\mathit{CX}}(i,k)\leftrightarrow\ensuremath{\mathit{CX}}(j,k)$---in the construction of the extended DAGs for the transpiled circuits.
We used the IBM ILOG CP Optimizer and CPLEX 12.9.0 to solve the scheduling problem based on the CP and MIP formulation described in Section~\ref{sec:solution}, respectively.
\subsection{Improvement by Considering Commutation in Quantum Operation Scheduling} \label{sec:experiment-improve} In the first experiment, we quantified the significance of considering the commutation of operations in quantum operation scheduling. Specifically, we evaluated the improvement by comparing the best solutions (makespans) of the formulation constrained by standard DAG with those by extended DAG as shown in Table~\ref{tab:improvement}.
\begin{table}[htbp]
\centering
\caption{Comparison of makespans [$dt$] ($1\,dt=2/9\,$ns) obtained by the formulation based on standard-DAG (Std-DAG) and those based on extended DAG (Ext-DAG) for 16 circuits from the RevLib benchmark.
For the Ext-DAG formulation, the solutions by the Constraint Programming solver with the time limit of ten seconds are listed.
The Qubits and Gates columns list the number of qubits and gates in the input circuits.
The $\Delta$ column lists the improvement rate from Std-DAG to Ext-DAG.}
\label{tab:improvement}
\begin{tabular}{lrr|rrr} \hline Circuit name & Qubits & Gates & Std-DAG & Ext-DAG & $\Delta$ \\ \hline mini\_alu\_305 & 10 & 173 & 24,940 & 24,308 & 2.53\% \\ qft\_10 & 10 & 200 & 24,358 & 24,074 & 1.17\% \\ sys6-v0\_111 & 10 & 215 & 30,604 & 30,114 & 1.60\% \\ rd73\_140 & 10 & 230 & 35,848 & 35,484 & 1.02\% \\ ising\_model\_10 & 10 & 480 & 4,210 & 4,210 & 0.00\% \\ wim\_266 & 11 & 986 & 141,914 & 138,658 & 2.29\% \\ sym9\_146 & 12 & 328 & 50,458 & 50,170 & 0.57\% \\ rd53\_311 & 13 & 275 & 40,420 & 40,164 & 0.63\% \\ ising\_model\_13 & 13 & 633 & 4,210 & 4,210 & 0.00\% \\ 0410184\_169 & 14 & 211 & 40,356 & 39,074 & 3.18\% \\ sym6\_316 & 14 & 270 & 47,178 & 46,404 & 1.64\% \\ rd84\_142 & 15 & 343 & 39,812 & 38,490 & 3.32\% \\ cnt3-5\_179 & 16 & 175 & 19,630 & 19,366 & 1.34\% \\ cnt3-5\_180 & 16 & 485 & 69,854 & 68,326 & 2.19\% \\ qft\_16 & 16 & 512 & 50,674 & 50,088 & 1.16\% \\ ising\_model\_16 & 16 & 786 & 4,370 & 4,370 & 0.00\% \\ \hline \end{tabular} \end{table}
For this experiment, we used quantum circuits from the test dataset provided by Zulehner et al.~\cite{zulehner2018efficient}, which originated from the RevLib benchmark~\cite{soeken2011revkit}. We selected 16 circuits with 10--16 qubits and fewer than 1000 gates from among them. We used the as-soon-as-possible heuristic scheduling algorithm implemented in Qiskit to find the unique solutions from the standard DAG formulation (Std-DAG). We also used the CP solver with a 10-sec time limit to find the best possible solutions from the extended DAG formulation (Ext-DAG).
Looking at the $\Delta$ column in Table~\ref{tab:improvement}, i.e., the improvement rates from Std-DAG to Ext-DAG, we can see they were non-negative and varied depending on the circuit structures from 0.00\% to 3.32\% (median 1.26\%). These results demonstrate that the commutation-aware formulation we proposed in Section~\ref{sec:qos} can improve the resulting schedule length in a practical situation. Although they may look marginal, they may be welcomed by those who have abundant time for compilation and need more optimization.
\begin{table*}[htbp] \centering \caption{Difference in improvement rates ($\Delta$ column) of makespans from scheduling with standard DAG (Std-DAG) compared to those with extended DAG (Ext-DAG) using CP solver after applying a naive gate decomposition or optimized gate decomposition. } \label{tab:diff}
\begin{tabular}{l|rrr|rrr}
\hline Circuit name & \multicolumn{3}{|c|}{Naive gate decomposition} & \multicolumn{3}{c}{Optimized gate decomposition} \\
& Std-DAG & Ext-DAG & $\Delta$ & Std-DAG & Ext-DAG & $\Delta$ \\ \hline Mod 5\_4 & 6,328 & 5,984 & 5.44\% & 6,016 & 5,578 & 7.28\% \\ VBE-Adder\_3 & 19,126 & 18,852 & 1.43\% & 11,416 & 11,148 & 2.35\% \\ CSLA-MUX\_3 & 22,238 & 21,438 & 3.60\% & 19,774 & 18,778 & 5.04\% \\ RC-Adder\_6 & 28,606 & 27,564 & 3.64\% & 20,408 & 18,906 & 7.36\% \\ Mod-Red\_{21} & 33,160 & 32,348 & 2.45\% & 28,320 & 27,494 & 2.92\% \\ Mod-Mult\_{55} & 13,942 & 13,740 & 1.45\% & 14,478 & 14,070 & 2.82\% \\ Toff-Barenco\_3 & 12,388 & 12,388 & 0.00\% & 4,812 & 4,812 & 0.00\% \\ Toff-NC\_3 & 9,108 & 9,108 & 0.00\% & 3,818 & 3,818 & 0.00\% \\ Toff-Barenco\_4 & 15,992 & 15,582 & 2.56\% & 9,006 & 8,678 & 3.64\% \\ Toff-NC\_4 & 12,272 & 11,834 & 3.57\% & 8,016 & 7,626 & 4.87\% \\ Toff-Barenco\_5 & 21,600 & 21,378 & 1.03\% & 19,424 & 19,138 & 1.47\% \\ Toff-NC\_5 & 13,536 & 12,984 & 4.08\% & 9,262 & 8,920 & 3.69\% \\ Toff-Barenco\_{10} & 90,940 & 89,248 & 1.86\% & 59,282 & 57,282 & 3.37\% \\ Toff-NC\_{10} & 43,156 & 42,546 & 1.41\% & 28,720 & 28,322 & 1.39\% \\ GF($2^4$)-Mult & 33,160 & 32,492 & 2.01\% & 33,646 & 33,148 & 1.48\% \\ GF($2^5$)-Mult & 37,948 & 36,480 & 3.87\% & 44,702 & 44,030 & 1.50\% \\ GF($2^6$)-Mult & 63,856 & 61,870 & 3.11\% & 64,784 & 64,180 & 0.93\% \\ \hline \end{tabular} \end{table*}
We also examined the MIP solver (with the same 10-sec time limit) to find solutions of the Ext-DAG; however, all of the solutions were slightly worse or equal to those by the CP solver. Hence, we omitted these results in Table~\ref{tab:improvement}. As for the solutions (i.e., makespans) from the standard DAG formulation, we verified that those by the CP and MIP solvers were exactly the same as those by the as-soon-as-possible heuristic scheduling algorithm implemented in Qiskit as expected.
\subsection{Performance Variation by Optimization Level of Previous Task}
In the second experiment, we investigated how a previous task affects the solution quality in the quantum operation scheduling task. To this end, as a previous task, we picked the gate decomposition task that decomposes gates with three or more qubits into those with one or two qubits. We changed the optimization level in the gate decomposition task and observed how it affects the improvement rates of makespans from scheduling with standard DAG (Std-DAG) compared to those with extended DAG (Ext-DAG), as shown in Table~\ref{tab:diff}.
For this experiment, we used 17 circuits from the test dataset provided by Nam et al.~\cite{nam2018automated}. To change the optimization level in the gate decomposition task, we used both their input circuit data (with ``\_before'' suffix in their file names) and output circuit data after the heavy optimization proposed in~\cite{nam2018automated} (with ``\_after\_heavy'' suffix) as our input circuits to be scheduled. Those correspond with the Naive gate decomposition column and Optimized gate decomposition column, respectively. Note that, in the Naive case, gates are decomposed by a simple rule-based algorithm implemented in Qiskit before scheduling. As in the previous experiment, we used the as-soon-as-possible heuristic scheduling algorithm implemented in Qiskit to find the unique solutions from the Std-DAG formulation and the CP solver with a 10-sec time limit to find the best possible solutions from the Ext-DAG formulation.
As shown in the two $\Delta$ columns in Table~\ref{tab:diff}, we can observe clear improvement from Std-DAG to Ext-DAG no matter which gate decomposition algorithm we used before QOS: Min: 0.00\%--Median: 2.45\%--Max: 5.44\% (Naive) and Min: 0.00\%--Median: 2.82\%--Max: 7.36\% (Optimized). This again confirms that our commutation-aware formulation proposed in Section~\ref{sec:qos} can improve the resulting schedule length in a practical situation.
When we compare the improvement rates ($\Delta$ column) from scheduling after the naive gate decomposition with those after the optimized gate decomposition in Table~\ref{tab:diff}, there are two key findings. First, they have a similar median: 2.45\% (Naive) and 2.82\% (Optimized). This suggests that, on average, our commutation-aware scheduling can stably improve the resulting schedule no matter how much the circuits have been optimized in a previous task (at least in the gate optimization task). Second, the improvement rates for each individual circuit differ between Naive and Optimized. Specifically, they increase from Naive to Optimized for ten circuits and decrease for five circuits. This suggests that the optimization level of the previous task significantly affects the optimization gain in QOS.
Comparing the makespans in the Std- or Ext-DAG column under naive gate decomposition with those under optimized gate decomposition in Table~\ref{tab:diff}, we can see that they decrease for 13 out of 17 circuits, as expected, but increase for four circuits. The latter four exceptional cases stem from negative interference among optimization tasks before scheduling, i.e., between gate decomposition and some tasks done within ``transpile()'' in Qiskit, and they are not caused by any errors in the scheduling.
\section{Discussion} The basic version of the job-shop problem as a decision problem is known to be NP-complete~\cite{garey2002computers}. Since QOS is a special variant of the job-shop problem, as discussed in Section~\ref{sec:qos-jsp}, it is not necessarily NP-complete. Identifying the theoretical complexity of QOS would be an interesting avenue for future work.
Throughout this paper, we have investigated how to minimize the overall execution time of the resulting schedule. Although this certainly contributes to obtaining computational results with higher fidelity, there should be more direct approaches that attempt to maximize the output fidelity by considering gate-dependent errors. In fact, such approaches have recently been proposed in qubit routing~\cite{tannu2019not,murali2019noise,nishio2020extracting}. Utilizing such techniques for QOS is also left for future work.
The two formulations (CP/MIP) discussed in Section~\ref{sec:solution} are useful for the theoretical best case analysis because their solvers implement exact algorithms that can find the optimal solution in the long run. They may also be sufficient for certain practical applications, since CP/MIP solvers usually implement problem-agnostic heuristic algorithms to find the best possible solution within a limited time. However, for the use cases where the compilation time is too critical to use CP/MIP solvers, it is worth considering heuristic algorithms specialized for QOS.
As an example, we developed a heuristic algorithm based on the Heterogeneous-Earliest-Finish-Time (HEFT) algorithm for task scheduling. We provide its details in Appendix.
Such a heuristic algorithm can complement the CP/MIP-based approach.
\section{Conclusion} We investigated quantum operation scheduling, the problem of scheduling the quantum operations in a given circuit so as to minimize the total execution time. We demonstrated that quantum operation scheduling can be interpreted as a special type of job-shop problem where we consider the commutation between quantum operations to make room for optimization. We provided Constraint Programming and Mixed Integer Programming formulations and showed through experiments with real circuits and a compiler that solving quantum operation scheduling independently improved the schedule length by a modest rate of up to 7.36\%.
\section*{Acknowledgment} We thank Dmitri Maslov, Lauren Capelluto, Thomas A. Alexander, and Rudy Raymond for their helpful comments.
\begin{table*}[htbp] \centering \caption{Makespans and their improvement rates ($\Delta$) from scheduling with standard DAG (Std-DAG) compared to those with extended DAG (Ext-DAG) using HEFT heuristic algorithm or CP solver after applying a naive gate decomposition or optimized gate decomposition. } \label{tab:heft}
\begin{tabular}{l|rrr|rrr}
\hline Circuit name & \multicolumn{3}{|c|}{Naive gate decomposition}
& \multicolumn{3}{c}{Optimized gate decomposition} \\
& Std-DAG & \multicolumn{2}{c|}{Ext-DAG}
& Std-DAG & \multicolumn{2}{c}{Ext-DAG} \\
& & \multicolumn{1}{c}{HEFT ($\Delta$) } & \multicolumn{1}{c|}{CP ($\Delta$) }
& & \multicolumn{1}{c}{HEFT ($\Delta$) } & \multicolumn{1}{c}{CP ($\Delta$) } \\ \hline Mod 5\_4 & 6,328 & 6,076 (3.98\%) & 5,984 (5.44\%) & 6,016 & 5,670 (5.75\%) & 5,578 (7.28\%) \\ VBE-Adder\_3 & 19,126 & 18,972 (0.81\%) & 18,852 (1.43\%) & 11,416 & 11,148 (2.35\%) & 11,148 (2.35\%) \\ CSLA-MUX\_3 & 22,238 & 21,444 (3.57\%) & 21,438 (3.60\%) & 19,774 & 18,914 (4.35\%) & 18,778 (5.04\%) \\ RC-Adder\_6 & 28,606 & 27,584 (3.57\%) & 27,564 (3.64\%) & 20,408 & 19,136 (6.23\%) & 18,906 (7.36\%) \\ Mod-Red\_{21} & 33,160 & 32,640 (1.57\%) & 32,348 (2.45\%) & 28,320 & 27,570 (2.65\%) & 27,494 (2.92\%) \\ Mod-Mult\_{55} & 13,942 & 13,822 (0.86\%) & 13,740 (1.45\%) & 14,478 & 14,270 (1.44\%) & 14,070 (2.82\%) \\ Toff-Barenco\_3 & 12,388 & 12,388 (0.00\%) & 12,388 (0.00\%) & 4,812 & 4,998 ($-$3.87\%) & 4,812 (0.00\%) \\ Toff-NC\_3 & 9,108 & 9,108 (0.00\%) & 9,108 (0.00\%) & 3,818 & 3,818 (0.00\%) & 3,818 (0.00\%) \\ Toff-Barenco\_4 & 15,992 & 15,786 (1.29\%) & 15,582 (2.56\%) & 9,006 & 8,678 (3.64\%) & 8,678 (3.64\%) \\ Toff-NC\_4 & 12,272 & 11,834 (3.57\%) & 11,834 (3.57\%) & 8,016 & 7,698 (3.97\%) & 7,626 (4.87\%) \\ Toff-Barenco\_5 & 21,600 & 21,388 (0.98\%) & 21,378 (1.03\%) & 19,424 & 19,220 (1.05\%) & 19,138 (1.47\%) \\ Toff-NC\_5 & 13,536 & 13,002 (3.95\%) & 12,984 (4.08\%) & 9,262 & 8,920 (3.69\%) & 8,920 (3.69\%) \\ Toff-Barenco\_{10} & 90,940 & 89,418 (1.67\%) & 89,248 (1.86\%) & 59,282 & 57,678 (2.71\%) & 57,282 (3.37\%) \\ Toff-NC\_{10} & 43,156 & 42,600 (1.29\%) & 42,546 (1.41\%) & 28,720 & 28,494 (0.79\%) & 28,322 (1.39\%) \\ GF($2^4$)-Mult & 33,160 & 32,508 (1.97\%) & 32,492 (2.01\%) & 33,646 & 33,160 (1.44\%) & 33,148 (1.48\%) \\ GF($2^5$)-Mult & 37,948 & 36,734 (3.20\%) & 36,480 (3.87\%) & 44,702 & 44,464 (0.53\%) & 44,030 (1.50\%) \\ GF($2^6$)-Mult & 63,856 & 62,174 (2.63\%) & 61,870 (3.11\%) & 64,784 & 64,278 (0.78\%) & 64,180 (0.93\%) \\ \hline \end{tabular} \end{table*}
\appendix
We show how the Heterogeneous-Earliest-Finish-Time (HEFT) algorithm for task scheduling can be used for quantum operation scheduling with a slight modification. The original HEFT algorithm is designed for scheduling with a soft resource constraint, i.e., every operation can be executed on any processor with a different cost. We adjust it here so that it can work with a hard resource constraint, i.e., every operation has fixed qubit operands that are not interchangeable.
The original HEFT algorithm consists of two phases: an \emph{operation prioritizing phase} for computing the priorities of all operations based on upward ranking and a \emph{processor selection phase} for scheduling the highest priority operation at the moment on the processor, which minimizes the operation's finish time~\cite{topcuoglu2002performance}. In the processor selection phase, the algorithm considers the possibility of inserting an operation in the earliest idle time-slot between two already scheduled operations. Only the idle time-slots that preserve precedence constraints, i.e., that comply with the dependency graph, are considered in this phase. This \emph{insertion-based policy} allowing the insertion in the idle time-slot characterizes the HEFT algorithm.
While keeping this insertion-based policy, we adjust the HEFT algorithm so that every operation is assigned to the fixed qubits (i.e., processors in the original term), which means we no longer need to select qubits in the processor selection phase. Note that it is necessary for the adjusted HEFT algorithm to maintain scheduled time-slots across qubits, whereas the original algorithm simply maintains the time-slots by processors. The process flow of the HEFT algorithm for quantum operation scheduling is shown in Algorithm \ref{algo:heuristic}.
\begin{algorithm}[htb]
\small
\caption{HEFT algorithm for QOS}
\label{algo:heuristic}
\begin{algorithmic}[1]
\State $G = (V, C)$: dependency graph of a QOS problem
\State Compute upward rank $r(u)$ for each operation $u \in V$ by
$$ r(u) = d(u) + \max_{v \in succ(u)}{r(v)} $$
where $succ(u)$ is the set of immediate successors of $u$,
$d(u)$ is the duration of $u$, and $r(e) = d(e)$ for any exit operation $e$.
\State $ready\_time(u) = 0$ for all $u \in V$.
\For{all $u \in V$ in descending order of $r(u)$}
\State Insert $u$ at the start time $t$ of the earliest idle time-slot (whose duration $> d(u)$) after $ready\_time(u)$.
\For{all $v \in succ(u)$}
\State $ready\_time(v) = \max(ready\_time(v), t + d(u))$.
\EndFor
\EndFor
\end{algorithmic} \end{algorithm}
We conducted experiments to check the solution quality of the adjusted HEFT algorithm with the same benchmark sets and experimental settings as used in Section~\ref{sec:experiment}. For all of the instances under the formulation with extended DAG (Ext-DAG), the HEFT algorithm always succeeded in finding solutions slightly worse than or equal to those by the CP solver. The medians of improvement rates from Std-DAG to Ext-DAG with the HEFT algorithm (the CP solver) were 1.16\% (1.26\%) for circuits by Zulehner et al.~\cite{zulehner2018efficient} used in Table~\ref{tab:improvement} and 1.67\% (2.45\%) and 2.35\% (2.82\%) for circuits by Nam et al.~\cite{nam2018automated} used in Table~\ref{tab:diff} with naive and optimized gate decomposition, respectively. This suggests that the HEFT algorithm is a good option for adding a bit more optimization in cases where not much compilation time is available.
All the results of these latter two experiments are listed in Table~\ref{tab:heft}. We can see a negative improvement at Toff-arenco\_3 using optimized gate decomposition and the HEFT algorithm. This can happen because considering further commutation in the formulation with Ext-DAG yields a broader search space for algorithms, and it provides an opportunity to find not only a better solution than Std-DAG but also a worse one. However, this is not a big issue in practice in cases where we can afford to select the better of the solution by the as-soon-as-possible algorithm with Std-DAG and that by the HEFT algorithm with Ext-DAG.
\end{document}
\begin{document}
\title{Adaptive Refinement for $hp$--Version Trefftz Discontinuous
Galerkin Methods for the Homogeneous Helmholtz Problem}
\begin{abstract} In this article we develop an $hp$--adaptive refinement procedure for Trefftz discontinuous Galerkin methods applied to the homogeneous Helmholtz problem. Our approach combines not only mesh subdivision ($h$--refinement) and local basis enrichment ($p$--refinement), but also incorporates local directional adaptivity, whereby the elementwise plane wave basis is aligned with the dominant scattering direction. Numerical experiments based on employing an empirical {\em a posteriori} error indicator clearly highlight the efficiency of the proposed approach for various examples. \\ \\ {\bfseries Keywords}\quad Homogeneous Helmholtz problem, Discontinuous Galerkin methods, Trefftz methods, Adaptivity, $hp$-finite element methods \\ \\ {\bfseries Mathematics Subject Classification (2010)}\quad 65N30, 65N50, 35J05 \end{abstract}
\section{Introduction} Trefftz discontinuous Galerkin (TDG) methods are finite element schemes which employ discontinuous test and trial functions whose restriction to each mesh element belongs to the kernel of the differential operator to be discretized. For time-harmonic wave problems, Trefftz discretization spaces are made of oscillating functions with the same frequency as that of the underlying analytical solution. This results in improved approximation properties, as compared to standard piecewise polynomial spaces. Moreover, based on Trefftz spaces, one can construct discontinuous Galerkin methods which feature unconditional unique solvability, as well as coercivity of the discrete bilinear forms in suitable (mesh-dependent) norms. We focus here on the case of the Helmholtz problem and refer, e.g., to the survey~\cite{Hiptmair2016} for a review of the construction, properties, and relevant literature of Trefftz methods for its numerical approximation.
The purpose of this article is to develop an efficient $hp$--adaptive refinement algorithm for TDG methods applied to the homogeneous Helmholtz problem; we will specifically consider the ultra-weak variational formulation with plane wave basis functions~\cite{Cessenat1998}. Within the adaptive procedure, elements will be marked for refinement based on employing an empirical {\em a posteriori} error indicator, stimulated by the upper bounds derived in~\cite{Kapita2015} for the $h$--version of the TDG method. For the $h$--version of the plane wave discontinuous Galerkin method, incorporating Lagrange multipliers, a similar error indicator has been presented in \cite{Amara2009}. Once an element has been marked for refinement, a decision must then be made regarding the type of refinement to be undertaken, i.e., whether the element should be subdivided ($h$--refinement), or whether the local basis should be enriched ($p$--refinement). The choice of whether to $h$-- or $p$--refine an element is typically based on the observation that when the underlying solution is smooth, then $p$--refinement will be more efficient in terms of reducing the error, for a given increase in the number of degrees of freedom, than if the element is subdivided. On the other hand, if the solution is not smooth, then $h$--refinement should be employed. In general, {\em a posteriori} error estimators only provide an estimate of the local elementwise error, but do not indicate which type of refinement should be employed. Within the existing literature a number of algorithms have been devised for determining the type of refinement ($h$-- or $p$--) to be undertaken. For a comprehensive review of this subject, we refer to~\cite{Mitchell2011,Mitchell2014}, and the references cited therein. 
In the present context, given the oscillatory nature of solutions to high-frequency scattering problems, the exploitation of $hp$--strategies based on local regularity estimation techniques is not generally applicable. Thereby, we consider an alternative approach based on estimating the predicted decay rate of the {\em a posteriori} estimator, given the refinement history of each element; see, for example, \cite{Melenk2001}. For {\em a posteriori} error estimation of conforming finite element approximations of the Helmholtz problem, we refer, e.g., to \cite{BabI,BabII} and~\cite{SauterDoerfler}; analogous bounds have been established for polynomial-based discontinuous Galerkin finite element methods in \cite{SauterZech, Zech}.
In addition to standard $h$-- and $hp$--adaptivity, we also consider the issue of directional refinement of the underlying plane wave basis employed within our TDG scheme. In particular, we rotate the underlying elementwise plane wave basis in order that the first basis function is aligned with the local dominant propagation direction; strategies for determining the local dominant propagation direction have been proposed in~\cite{Amara2014,Betcke2011,Betcke2012,Gittelson2008}, for example. Stimulated by the work undertaken on anisotropic mesh adaptation in~\cite{Formaggia2001,Formaggia2003}, cf., also,~\cite{ghh-paper,hall-thesis}, we propose an alternative approach based on studying the properties of the Hessian of the computed TDG solution. More precisely, the principal eigenvector of the Hessian of the solution indicates the dominant direction of wave propagation. However, since eigenvectors are only unique up to scalar multiples, the precise wave direction must be fixed, based on exploiting an impedance condition. In this way, we can locally orientate the elementwise plane wave basis to reduce the error in the underlying computed TDG solution in a simple and computationally cheap manner. When combined with $hp$--refinement, the resulting adaptive procedure is capable of generating highly optimized $hp$--refined Trefftz spaces. Indeed, the efficiency of the proposed strategy is illustrated for a number of test problems, where we compare the performance between an $h$-- and $hp$--refinement algorithm, both with and without directional adaptivity.
The outline of this article is as follows: in Section~\ref{sec:pde_tdg} we introduce the model problem to be studied within this article, together with its TDG discretization. Then in Section~\ref{section:adaptive_refinement} we develop an $hp$--refinement algorithm, based on employing both local mesh subdivision and local basis enrichment, together with directional adaptivity for the underlying Trefftz space. The performance of this procedure is studied in Section~\ref{sec:numerical_examples} through a series of two-- and three--dimensional examples. Finally, in Section~\ref{sec:conclusions} we summarize the work undertaken within this article and highlight potential future directions of research.
\section{Model problem and TDG discretization} \label{sec:pde_tdg}
In this section we state the model problem to be studied in this article, together with its TDG discretization; for further details, we refer to \cite{Hiptmair2016}, for example.
\subsection{Model problem}
We study the homogeneous Helmholtz equation; to this end, we let $\Omega\subset\mathbb{R}^d$, $d=2,3$, be an open bounded, Lipschitz domain with boundary $\partial \Omega$. Thereby, we seek $u\colon\Omega\to {\mathbb C}$ such that \begin{equation} \label{eqn:helmholtz}
\begin{aligned}
-\Delta u-k^2 u &=0 && \text{in } \Omega\;,\\
\frac{\partial u}{\partial \vect{n}} + ik\vartheta u &= g_R && \text{on } \Gamma_R\;, \\
u &=g_D && \text{on } \Gamma_D\;,
\end{aligned} \end{equation} where $\vect{n}$ denotes the unit outward normal vector on the boundary $\partial\Omega$, and $\Gamma_R$ and $\Gamma_D$ are non-overlapping open subsets of $\partial\Omega$, such that $\partial\Omega=\overline{\Gamma}_R\cup\overline{\Gamma}_D$. Furthermore, $i$ is the imaginary unit, $\vartheta=\pm 1$, $g_R\in L^2(\Gamma_R)$, and we assume, for the moment, that the (real-valued) wavenumber $k$ is constant in $\Omega$.
\subsection{Meshes and spaces} We partition $\Omega$ into computational meshes $\{\mesh\}_{h>0}$ consisting of non-overlapping (curvilinear) polygons/polyhedra $K$, which potentially include hanging nodes, such that $\overline{\Omega}=\bigcup_{K\in\mesh}{\overline{K}}$. Moreover, we assume that the family of subdivisions $\{\mesh\}_{h>0}$ is shape regular \cite[pp. 61, 114, and 118]{Braess}. For each element $K\in\mesh$, we write $h_K$ to denote its diameter and $\vect{n}_K$ signifies the unit outward normal vector to $K$ on~$\partial K$; we set $h:=\max_{K\in\mesh}h_K$. Furthermore, we introduce the mesh skeleton $\face$, defined by $\face=\cup_{K\in\mesh}\partial K$; we write $\face[I]$ and $\face[B]$ to denote the interior and boundary skeletons, respectively, defined by $\face[I]=\face\setminus\partial\Omega$ and $\face[B]=\partial\Omega$. Implicitly, we assume that the finite element mesh $\mesh$ respects the decomposition of the boundary, in the sense that, given an element face $f\subset \partial K$, $K\in\mesh$, which lies on the boundary $\partial\Omega$, i.e., $f\subset\partial\Omega$, then $f$ is entirely contained within either $\Gamma_R$ or $\Gamma_D$.
Let $K$ and $K'$ be two adjacent elements of~ $\mesh$, and $\vect{x}$ an arbitrary point on the interior face $f\subset \face[I]$ given by $f=(\partial K\cap\partial K')^\circ$. Furthermore, let $v$ and~$\vect{w}$ be scalar- and vector-valued functions, respectively, that are sufficiently smooth inside each element~$K,K'$. Then, the averages of $v$ and $\vect{w}$ at $\vect{x}\in f$ are given by \[
\avg{v}=\frac{1}{2}(v|_{K}+v|_{K'}), \qquad \avg{\vect{w}}
=\frac{1}{2}(\vect{w}|_{K}+\vect{w}|_{K'}), \] respectively. Similarly, the jumps of $v$ and $\vect{w}$ at $\vect{x}\in f$ are given by \[
\jmp{v} =v|_{K}\,\vect{n}_{K}+v|_{K'}\,\vect{n}_{K'},\qquad
\jmp{\vect{w}}=\vect{w}|_{K}\cdot\vect{n}_{K}+\vect{w}|_{K'}\cdot\vect{n}_{K'}, \] respectively.
Given $K\in\mesh$ the local Trefftz space is defined by \[
T(K) \coloneqq \{ v\in H^1(K) : -\Delta v - k^2 v = 0 \}; \] with this notation, we write \[
T(\mesh) \coloneqq \{ v\in L^2(\Omega) : v\vert_K \in T(K), K\in\mesh\}. \] Thereby, given a local space $V_{p_K}(K) \subset T(K)$, of finite dimension $p_K\geq 1$, the corresponding TDG finite element space is defined by \[
V_{\vect{p}}(\mesh) \coloneqq \{ v \in T(\mesh) : v\vert_K \in V_{p_K}(K), K\in\mesh \}, \] where $\vect{p} = \{p_K:K\in\mesh\}$.
\subsection{TDG discretization}
Equipped with the TDG finite element space $V_{\vect{p}}(\mesh)$ defined on the mesh partition $\mesh$ of $\Omega$, the TDG approximation of~\eqref{eqn:helmholtz} is given by: find $u_{hp}\in V_{\vect{p}}(\mesh)$ such that \begin{equation}\label{eqn:bilinear_form} \mathcal{A}_{h}(u_{hp},v_{hp})=\ell_{h}(v_{hp}) \end{equation} for all $v_{hp}\in V_{\vect{p}}(\mesh)$, where \begin{align*}
\mathcal{A}_{h}(u,v)=
& \int_{\face[I]} \left( \avg{u}\jmp{\nabla_h \conj{v}}
-\beta(ik)^{-1} \jmp{\nabla_h u}\jmp{\nabla_h \conj{v}}
-\avg{\nabla_h u}\cdot\jmp{\conj{v}}
+\alpha ik \jmp{u}\cdot\jmp{\conj{v}} \right)\, \mathrm{d}s\\
&+\int_{\Gamma_R}( (1-\delta)(u\nabla_h \conj{v}\cdot\vect{n}+ik\vartheta u\conj{v})
-\delta((ik\vartheta)^{-1}(\nabla_h u\cdot\vect{n})(\nabla_h \conj{v}\cdot\vect{n})
+\nabla_h u\cdot\vect{n}\,\conj{v}))\, \mathrm{d}s \\
&+\int_{\Gamma_D}(-\nabla_h u\cdot\vect{n}\,\conj{v}+ \alpha ik u\conj{v})\, \mathrm{d}s, \\
\ell_{h}(v)=&
\int_{\Gamma_R}g_R((1-\delta)\conj{v}
-\delta(ik\vartheta)^{-1}\nabla_h \conj{v}\cdot\vect{n})\, \mathrm{d}s
+\int_{\Gamma_D}g_D(\alpha ik \conj{v} -\nabla_h \conj{v}\cdot\vect{n})\, \mathrm{d}s, \end{align*} and $\nabla_h$ denotes the broken gradient operator, defined elementwise. Here, $\alpha>0$, $\beta>0$ and $0<\delta\leq \nicefrac12$ are given penalty parameters. We note that the selection of these penalty parameters has been studied in a number of different contexts within the literature; in particular, here we mention the ultra-weak variational formulation (UWVF), cf. \cite{Cessenat1998}, the DG-type scheme studied in~\cite{Gittelson2009}, and \cite{Hiptmair2014} which considered their selection on locally refined meshes; cf.~\cite[Table 1]{Hiptmair2016}. For the purposes of this article we consider the UWVF, corresponding to the choice $\alpha=\beta=\delta=\nicefrac{1}{2}$.
\subsection{Plane wave basis functions}
Finally, in this section we outline the choice of the underlying discrete space $V_{p_K}(K)$, $K\in\mesh$. To this end, we select $V_{p_K}(K)$ to be a local space consisting of plane waves in $p_K$ different directions, all with the same wavenumber $k$. We note that, under suitable assumptions on $K$ and the choice of plane wave directions, $V_{p_K}(K)$ approximates smooth Trefftz functions with the same order of convergence as polynomials of degree $q_K$, where \begin{equation}
p_K =
\begin{cases}
2q_K + 1, & d=2, \\
(q_K+1)^2, & d= 3;
\end{cases}
\label{eqn:effective_poly_deg} \end{equation} see~\cite{Moiola2011}. Thereby, $q_{K}$ is referred to as the \emph{effective polynomial degree} of the discrete Trefftz space; we set $\vect{q} = \{q_K:K\in\mesh\}$. More precisely, we write \begin{equation}\label{eqn:pw_basis} V_{p_K}(K) \coloneqq \left\{ v \in T(K) : v(\vect{x}) = \sum_{\ell=0}^{p_K-1} \alpha_\ell \mathrm{e}^{ik\vect{d}_{K,\ell}\cdot(\vect{x}-\vect{x}_K)}, \alpha_\ell\in\mathbb{C}\right\}, \end{equation} where $\vect{x}_K$ is the center of mass of element $K$ and $\vect{d}_{K,\ell}$, $\ell=0,\dots,p_K-1$, are $p_K$ evenly distributed unit direction vectors (with respect to the unit ball). For $d=2$ we can simply define \begin{equation} \label{eqn:plane_wave:2d} \vect{d}_{K,\ell} = (\cos(\nicefrac{2\pi\ell}{p_K}), \sin(\nicefrac{2\pi\ell}{p_K}))^\top, \qquad \ell=0,\dots,p_K-1; \end{equation} for $d=3$ we employ the directions determined by the extremal (maximum determinant) points on $S^2$, cf. \cite{Sloan2004,Womersley2007Online}.
\section{Adaptive mesh refinement}\label{section:adaptive_refinement}
In this section we develop an automatic adaptive refinement algorithm which is capable of not only marking elements for refinement, but also determining the type of refinement to be undertaken. In particular, here we consider both $h$-- and $p$--refinement, whereby the local element is subdivided, or the number of elementwise plane wave directions is enriched, respectively, as well as directional refinement which seeks to rotate the local plane wave basis in order to align it with the principal scattering direction.
\subsection{\emph{A posteriori} error indicator}\label{sec:indicator}
In the absence of rigorous {\em a posteriori} error bounds for the numerical approximation of \eqref{eqn:helmholtz} by the TDG scheme \eqref{eqn:bilinear_form}, which are sharp with respect to both the local mesh size $h_K$ and the number of local plane waves $p_K$ employed on each element $K\in\mesh$, we employ an empirical error estimator stimulated by the work undertaken in \cite{Kapita2015} in the $h$--version setting. To this end, we first introduce the dual problem: find $z\in H^1(\Omega)$, such that \begin{equation*}
\begin{aligned}
-\Delta z-k^2 z &=u-u_{hp} && \text{in } \Omega\;,\\
\frac{\partial z}{\partial \vect{n}} + ik\vartheta z &= 0 && \text{on } \Gamma_R\;, \\
z &=0 && \text{on } \Gamma_D\;.
\end{aligned} \end{equation*} Noting that $z\in H^{\nicefrac{3}{2}+s}(\Omega)$, $0<s\leq\nicefrac{1}{2}$, cf.~\cite{Hiptmair2014}, we recall the following (second) {\em a posteriori} error bound from \cite{Kapita2015}.
\begin{theorem} \label{thm:apost} Assume that the mesh $\mesh$ is shape-regular, locally quasi-uniform, in the sense that, for two elements $K$ and $K'$ which share a face $f\subset\face[I]$, there is a constant $\tau$, independent of $h$, such that $$ \tau^{-1} \leq \nicefrac{h_K}{h_{K'}} \leq \tau $$ for all choices of $K$ and $K'$, and that $\mesh$ is quasi-uniform in the vicinity of $\Gamma_R$, i.e., for all $K\in\mesh$ which lie on the boundary $\Gamma_R$, i.e., so that $\partial K\cap \Gamma_R \neq \emptyset$, there exists $\tau_R$ such that $$ \nicefrac{h}{h_K} \leq \tau_R. $$ Then, for $g_D\equiv 0$ and fixed $p_K$, $K\in\mesh$, the following {\em a posteriori} bound holds:
\begin{equation*}
\norm{u-u_{hp}}_{L^2(\Omega)} \leq {\mathfrak E}(u_{hp},h) \equiv C \left(\sum_{K\in\mesh} \eta_K^2 \right)^{\nicefrac{1}{2}}, \end{equation*} where \begin{equation} \begin{aligned} \eta_K^2 &=\norm*{\alpha^{\nicefrac{1}{2}} h_K^{s} \jmp{u_{hp}}}_{L^2(\partial K \setminus \partial\Omega)}^2
+k^{-2}\norm*{\beta^{\nicefrac12} h_K^{s} \jmp{\nabla u_{hp}}}_{L^2(\partial K \setminus \partial\Omega )}^2 \\
&\quad+ k^{-2}\norm*{\delta^{\nicefrac{1}{2}} h_K^{s} \left( g_R - \nabla u_{hp}\cdot\vect{n}_K + ik u_{hp}\right)}_{L^2(\partial K \cap \Gamma_R)}^2
+\norm*{\alpha^{\nicefrac{1}{2}} h_K^{s} u_{hp}}_{L^2(\partial K \cap \Gamma_D)}^2, \end{aligned} \label{eqn:error_indicator_kmw} \end{equation} where $C$ is a positive constant, which is independent of $h$. \end{theorem}
We stress that the {\em a posteriori} error bound stated in Theorem~\ref{thm:apost} depends on the regularity index $s$; thereby, {\em a priori} knowledge of $s$ is required in order to yield a fully computable bound. Moreover, the dependence of $C$ on $\vect{p}$, or equivalently $\vect{q}$, is unclear; indeed, to the best of our knowledge, an $hp$--version generalization of Theorem~\ref{thm:apost} is not currently available within the literature. Thereby, we propose the following {\em empirical} error estimator, where for simplicity of notation we also denote it by ${\mathfrak E}$, for the $hp$--version TDG method: \begin{equation} \label{eqn:error_bound}
{\mathfrak E}(u_{hp},h,\vect{p}) = \left( \sum_{K\in\mesh} \eta_K^2\right)^{\nicefrac{1}{2}}, \end{equation} where \begin{equation} \begin{aligned} \eta_K^2 &=\norm*{\alpha^{\nicefrac{1}{2}} h_K^{\nicefrac12}q_K^{-\nicefrac12} \jmp{u_{hp}}}_{L^2(\partial K \setminus \partial\Omega )}^2
+\norm*{\beta^{\nicefrac12}
h_K^{\nicefrac32}q_K^{-\nicefrac32} \jmp{\nabla
u_{hp}}}_{L^2(\partial K \setminus \partial\Omega)}^2 \\
&\quad+ \norm*{\delta^{\nicefrac{1}{2}} h_K^{\nicefrac32}q_K^{-\nicefrac32} \left( g_R - \nabla u_{hp}\cdot\vect{n}_K + ik u_{hp}\right)}_{L^2(\partial K \cap \Gamma_R)}^2
+\norm*{\alpha^{\nicefrac{1}{2}} h_K^{\nicefrac12}q_K^{-\nicefrac12} (g_D-u_{hp})}_{L^2(\partial K \cap \Gamma_D)}^2.
\label{eqn:error_indicator} \end{aligned} \end{equation}
We stress that the exponents of $h_K$ and $q_K$ have been selected on the basis of numerical experimentation on a problem with a smooth analytical solution; for details, see Section~\ref{section:aposteriori_effectivity} below. Compared to the error indicator \eqref{eqn:error_indicator_kmw} from \cite{Kapita2015}, we note that we have a factor of $h$ instead of $k^{-1}$ in the terms with $\jmp{\nabla u_{hp}}$ and in the Robin boundary terms. These are both dimensionally correct, but reproducing the numerical experiments conducted in Section~\ref{section:aposteriori_effectivity} with the dependency on $k^{-1}$ results in different effectivities for different wavenumbers $k$.
\subsection{Plane wave directional adaptivity}\label{sec:direction_adapt}
In this section, we discuss the design of a practical algorithm for determining the direction vectors $\vect{d}_{K,\ell}$, $\ell=0,\dots,p_K-1$, used to define the plane wave basis within each element $K$ in the computational mesh $\mesh$. The key observation is that many wave propagation problems typically exhibit a dominant direction of propagation of the underlying wave within each element in $\mesh$. Thereby, by aligning the plane wave basis in an appropriate fashion, we expect to attain a significant reduction of the error in the computed TDG solution. Indeed, in the simple case when the analytical solution is a plane wave, then if the direction for one of the plane wave basis functions is selected such that it is aligned with this plane wave direction, then the TDG method will exactly recover the analytical solution, subject to rounding errors.
The essential idea here is to simply rotate the element basis according to the predicted elementwise dominant direction. For simplicity of presentation, let us consider the two-dimensional case, i.e., $d=2$; we note that $d=3$ follows in an analogous manner, cf.~Remarks~\ref{remark:potential_direction} \& \ref{remark:direction} below. In two-dimensions, the standard plane wave directions are generally selected to be evenly spaced, with the first direction $\vect{d}_{K,0} = (1,0)^\top$ always pointing along the $x$-axis, cf.~\eqref{eqn:plane_wave:2d} (in the three-dimensional setting, the first direction vector typically points along the $z$-axis). Alternatively, assuming that a dominant elementwise direction, denoted by $\vect{\frak{d}}_K$, can be determined within each $K\in\mesh$, then the direction vectors for the plane wave basis functions in $K$ are chosen such that the first plane wave direction is aligned with $\vect{\frak{d}}_K$, i.e.,~\eqref{eqn:plane_wave:2d} is replaced by \begin{equation} \label{eqn:plane_wave:2d:rotated} \vect{d}_{K,\ell} = (\cos(\nicefrac{2\pi\ell}{p_K} + \theta_K), \sin(\nicefrac{2\pi\ell}{p_K} + \theta_K))^\top, \end{equation} $\ell=0,\ldots,p_K-1$, where $\theta_K$ is the angle between $\vect{\frak{d}}_K$ and the $x$-axis.
Clearly, in general, the dominant elementwise direction $\vect{\frak{d}}_K$, $K\in\mesh$, cannot be determined {\em a priori}, but instead must be numerically estimated as part of the solution process. In this regard, a number of algorithms have been proposed within the literature; here we mention the ray-tracing approach developed in~\cite{Betcke2011,Betcke2012}, though this includes terms involving integrals over the elements within the underlying TDG formulation. In \cite{Amara2014}, the optimal angle of rotation was numerically estimated based on adding an extra unknown into the problem; however, this leads to a system of nonlinear equations to be computed. Finally, \cite{Gittelson2008} uses an approximation of \[
\frac{\nabla e(\vect{x}_0)}{ike(\vect{x}_0)}, \] at a given point $\vect{x}_0\in K$, $K\in\mesh$, where $e$ denotes the error.
Stimulated by the work undertaken in~\cite{Formaggia2001,Formaggia2003}, cf. also~\cite{ghh-paper,hall-thesis}, on the design of anisotropically refined computational meshes, in this section we compute an estimate of $\vect{\frak{d}}_K$, $K\in\mesh$, based on the properties of the Hessian of the TDG solution $u_{hp}$. Indeed, we note that the principal eigenvector, i.e., the eigenvector corresponding to the largest eigenvalue in absolute value, of the Hessian of a given function indicates the direction of most rapid variation, and thereby, in our context, the dominant direction of wave propagation. With this in mind, writing $\boldsymbol{\mathcal{H}}(\varphi,\vect{x}_0)$ to denote the Hessian matrix of a given function $\varphi$, evaluated at the point $\vect{x}_0\in\mathbb{R}^d$, in Algorithm~\ref{algo:potential_direction} we outline the steps involved in computing a {\em potential} dominant plane wave direction $\hat{\vect{\frak{d}}}_K$ for a given element $K\in\mesh$. Table~\ref{table:potential_direction} summarizes how this potential first plane wave direction $\hat{\vect{\frak{d}}}_K$ is selected; for the numerical experiments presented in Section~\ref{sec:numerical_examples}, we set $\Lambda =2$. We note that in the case when no primary propagation direction is determined, then we leave the first plane wave direction unchanged.
\begin{algorithm}[t!] \caption{Computation of the potential first plane wave direction $\hat{\vect{\frak{d}}}_K$ for element $K$.} \label{algo:potential_direction} \begin{algorithmic}[1] \State Input: the TDG solution $u_{hp}$ of the discrete problem~\eqref{eqn:bilinear_form} and the parameter $\Lambda>1$. \State Writing $\vect{x}_K$ to denote the centroid of $K$, $K\in\mesh$, evaluate the eigenpairs $(\lambda_1, \vect{v}_1), (\lambda_2, \vect{v}_2)$ of $\boldsymbol{\mathcal{H}}(\Real(u_{hp}\vert_K),\vect{x}_K)$, and $(\mu_1, \vect{w}_1), (\mu_2, \vect{w}_2)$ of $\boldsymbol{\mathcal{H}}(\Imag(u_{hp}\vert_K),\vect{x}_K)$, such that $\abs{\lambda_1} \geq \abs{\lambda_2}$ and $\abs{\mu_1} \geq \abs{\mu_2}$. \If{$\abs{\lambda_1} \geq \Lambda \abs{\lambda_2}$}
\If{$\abs{\mu_1} \geq \Lambda \abs{\mu_2}$}
\If{$\abs{\lambda_1} \geq \Lambda \abs{\mu_1}$}
\State $\hat{\vect{\frak{d}}}_K \gets \vect{v}_1$
\ElsIf{$\abs{\mu_1} \geq \Lambda \abs{\lambda_1}$}
\State $\hat{\vect{\frak{d}}}_K \gets \vect{w}_1$
\Else
\State $\hat{\vect{\frak{d}}}_K \gets \frac{\vect{v}_1+\vect{w}_1}{\norm{\vect{v}_1+\vect{w}_1}}$
\EndIf
\Else
\If{$\abs{\lambda_1} \geq \Lambda \abs{\mu_1}$}
\State $\hat{\vect{\frak{d}}}_K \gets \vect{v}_1$
\Else
\State No primary propagation direction
\EndIf
\EndIf \Else
\If{$\abs{\mu_1} \geq \Lambda \abs{\mu_2}$}
\If{$\abs{\mu_1} \geq \Lambda \abs{\lambda_1}$}
\State $\hat{\vect{\frak{d}}}_K \gets \vect{w}_1$
\Else
\State No primary propagation direction
\EndIf
\Else
\State No primary propagation direction
\EndIf \EndIf \end{algorithmic} \end{algorithm}
\begin{remark}\label{remark:potential_direction} We note that in the case when $d=3$, $\boldsymbol{\mathcal{H}}(\Real(u_{hp}\vert_K),\vect{x}_K)$ and $\boldsymbol{\mathcal{H}}(\Imag(u_{hp}\vert_K),\vect{x}_K)$ each have a third eigenpair, $(\lambda_3,\vect{v}_3)$ and $(\mu_3, \vect{w}_3)$, respectively. However, if the eigenpairs are sorted such that $\abs{\lambda_1} \geq \abs{\lambda_2} \geq \abs{\lambda_3}$ and $\abs{\mu_1} \geq \abs{\mu_2} \geq \abs{\mu_3}$, the third eigenpairs \emph{never} represent a dominant direction, and thereby Algorithm~\ref{algo:potential_direction} can be used to identify $\hat{\vect{\frak{d}}}_K$, $K\in\mesh$, without modification. \end{remark}
\begin{table}[t!] \centering
\begin{tabular}{c|c|c|c||c} $\abs{\lambda_1} \geq C \abs{\lambda_2}$ & $\abs{\mu_1} \geq C\abs{\mu_2}$ & $\abs{\lambda_1} \geq C\abs{\mu_1}$ & $\abs{\mu_1}\geq C\abs{\lambda_1}$ & First Plane Wave $\hat{\vect{\frak{d}}}_K$ \\\hline\hline \ding{51} & \ding{51} & \ding{51} & \ding{55} & $\vect{v}_1$ \\ \ding{51} & \ding{51} & \ding{55} & \ding{51} & $\vect{w}_1$ \\ \ding{51} & \ding{51} & \ding{55} & \ding{55} & $\frac{(\vect{v}_1+\vect{w}_1)}{\norm{\vect{v}_1+\vect{w}_1}}$ \\ \ding{51} & \ding{55} & \ding{51} & \ding{55} & $\vect{v}_1$ \\ \ding{51} & \ding{55} & \ding{55} & --- & --- \\ \ding{55} & \ding{51} & \ding{55} & \ding{51} & $\vect{w}_1$ \\ \ding{55} & \ding{51} & --- & \ding{55} & --- \\ \ding{55} & \ding{55} & --- & --- & --- \end{tabular} \caption{Summary of selection of first plane wave direction $\hat{\vect{\frak{d}}}_K$ using Algorithm~\ref{algo:potential_direction}.} \label{table:potential_direction} \end{table}
Noting that eigenvectors are only unique up to scalar multiples, the vector $\hat{\vect{\frak{d}}}_K$, $K\in\mesh$, evaluated according to Algorithm~\ref{algo:potential_direction} may be pointing in precisely the opposite direction to the primary wave propagation direction. Thereby, to ensure that $\hat{\vect{\frak{d}}}_K$, $K\in\mesh$, is correctly oriented, we study the impedance trace on the boundary of a ball $B_\delta(\vect{x}_K)$ of radius $\delta$, centered at $\vect{x}_K$, of both the numerical solution and a plane wave with (the desired) propagation direction $\vect{\frak{d}}_K$. As we let $\delta\to 0$, we expect that the numerical solution should be closely approximated by the plane wave in the primary propagation direction.
More precisely, given $K\in\mesh$, the impedance trace of the plane wave \[
\tilde{u}_K(\vect{x}) = \mathrm{e}^{ik\vect{\frak{d}}_K \cdot(\vect{x}-\vect{x}_K)} \] on $\partial B_\delta(\vect{x}_K)$ is given by \begin{eqnarray}
(\nabla\tilde{u}_K(\vect{x})\cdot\vect{n}_{B_\delta} + ik\tilde{u}_K(\vect{x}))
|_{\partial B_\delta(\vect{x}_K)} &=& (ik(\vect{\frak{d}}_K \cdot\vect{n}_{B_\delta}+1)\,\mathrm{e}^{ik \vect{\frak{d}}_K \cdot(\vect{x}-\vect{x}_K)}
)|_{\partial B_\delta(\vect{x}_K)}, \label{eqn:impedance_trace} \end{eqnarray} where $\vect{n}_{B_\delta}$ denotes the unit outward normal vector on $\partial B_\delta(\vect{x}_K)$. Setting $\vect{x}=\vect{x}_K + \delta \hat{\vect{\frak{d}}}_K$ in \eqref{eqn:impedance_trace} and noting that, at this point of evaluation, $\vect{n}_{B_\delta} = \hat{\vect{\frak{d}}}_K$, we deduce that \[ \frac{\nabla\tilde{u}_K(\vect{x}_K + \delta\hat{\vect{\frak{d}}}_K )\cdot\vect{n}_{B_\delta} + ik\tilde{u}_K(\vect{x}_K + \delta \hat{\vect{\frak{d}}}_K )}{ik} = \begin{cases}
2 \mathrm{e}^{ik\delta}, & \text{if } \hat{\vect{\frak{d}}}_K = \vect{\frak{d}}_K , \\
0, & \text{if } \hat{\vect{\frak{d}}}_K = -\vect{\frak{d}}_K . \end{cases} \] Thereby, the (potential) dominant direction of propagation $\hat{\vect{\frak{d}}}_K$, $K\in\mesh$, predicted according to Algorithm~\ref{algo:potential_direction} may be corrected to yield the dominant direction $\vect{\frak{d}}_K$ on the basis of Algorithm~\ref{algo:direction}; this direction will then be selected as the first plane wave direction on element $K$, $K\in\mesh$. For simplicity, throughout this article we set $\delta=0$.
\begin{algorithm}[t!] \caption{Evaluation of the first plane wave direction $\vect{\frak{d}}_K$ for element $K$.} \label{algo:direction} \begin{algorithmic}[1] \State Input: the TDG solution $u_{hp}$ of the discrete problem~\eqref{eqn:bilinear_form}, the parameter $\delta\geq 0$, and $\hat{\vect{\frak{d}}}_K$ computed by Algorithm~\ref{algo:potential_direction}. \State The first plane wave direction $\vect{\frak{d}}_K$ on element $K$, $K\in\mesh$, is given by \[
\vect{\frak{d}}_K = \begin{cases}
-\hat{\vect{\frak{d}}}_K , & ~\mbox{ if } ~\Real\left(\frac{\nabla u_{hp}(\vect{x}_K + \delta\hat{\vect{\frak{d}}}_K)\cdot\hat{\vect{\frak{d}}}_K + ik u_{hp}(\vect{x}_K + \delta\hat{\vect{\frak{d}}}_K)}{ik}\right) < \Real(\mathrm{e}^{ik\delta}), \\
\hat{\vect{\frak{d}}}_K , & ~\mbox{ if } ~\Real\left(\frac{\nabla u_{hp}(\vect{x}_K + \delta\hat{\vect{\frak{d}}}_K )\cdot\hat{\vect{\frak{d}}}_K + ik u_{hp}(\vect{x}_K + \delta\hat{\vect{\frak{d}}}_K)}{ik}\right) \geq \Real(\mathrm{e}^{ik\delta}). \\
\end{cases} \] \end{algorithmic} \end{algorithm}
\begin{remark}\label{remark:direction} In the three--dimensional setting, once the selection of the primary wave propagation direction $\vect{\frak{d}}_K$ has been computed on the basis of Algorithms~\ref{algo:potential_direction} \&~\ref{algo:direction}, we then select the remaining wave directions, $\vect{d}_{K,\ell}$, $\ell=1,\dots,p_K-1$, by applying a transformation matrix $T\in\mathbb{R}^{3\times 3}$ to the original `reference' directions $\vect{\tilde{d}}_{K,\ell}$, $\ell=1,\dots,p_K-1$, respectively, where $\vect{\tilde{d}}_{K,0}$ points along the $z$-axis, cf. above. Thereby, $$ \vect{d}_{K,\ell}=T \vect{\tilde{d}}_{K,\ell} , $$ $\ell=1,\dots,p_K-1$, where $T$ is selected such that \[ \vect{\frak{d}}_K \equiv \vect{d}_{K,0} = T \begin{pmatrix} 0 \\ 0 \\ 1 \end{pmatrix} \equiv T \vect{\tilde{d}}_{K,0}. \] We note that the selection of $T$ is not unique; writing $\vect{\frak{d}}_K = (d_x, d_y, d_z)^\top$, we define $T$ to be the identity matrix if $d_x=d_y=0$; otherwise, we set \[ T = \begin{pmatrix} \frac{d_x d_z}{\sqrt{d_x^2+d_y^2}} & \frac{d_y}{\sqrt{d_x^2+d_y^2}} & d_x \\ \frac{d_y d_z}{\sqrt{d_x^2+d_y^2}} & -\frac{d_x}{\sqrt{d_x^2+d_y^2}} & d_y \\ -\sqrt{d_x^2+d_y^2} & 0 & d_z \end{pmatrix}. \] \end{remark}
\subsection{$hp$--Adaptive mesh refinement}\label{sec:hp}
In this section we discuss the design of an automatic algorithm for generating sequences of $hp$--adaptively refined TDG finite element spaces in an efficient manner. This topic has been extensively studied within the finite element literature in the case when the local element spaces consist of polynomial functions; for a comprehensive review, we refer to~\cite{Mitchell2011, Mitchell2014}. In general, the key underlying principle of most $hp$--refinement strategies is to employ local mesh subdivision ($h$--refinement) in regions where the solution is not smooth, while local enrichment of the finite element space ($p$--refinement) is undertaken elsewhere. Given that such regularity information is generally unknown {\em a priori}, several strategies have been developed to {\em a posteriori} estimate the local smoothness of the analytical solution, based on its numerical approximation; cf. \cite{Houston2005}, for example. However, in the context of TDG schemes for the numerical approximation of high-frequency time-harmonic wave problems, the extraction of such regularity information is expected to be unreliable due to the oscillatory nature of the computed numerical solution.
Thereby, as an alternative to directly estimating local smoothness of the solution, we employ the {\em a posteriori} error indicator~\eqref{eqn:error_indicator} to select the type of refinement to be undertaken on the basis of the refinement history of the current element, cf.~\cite{Melenk2001}. More precisely, following \cite{Melenk2001}, refinements are selected based on checking if the local error estimate has decayed according to the expected rate of convergence based on the last type of refinement employed. If the expected rate of convergence is achieved, then $p$--refinement is performed; otherwise, $h$--refinement is undertaken. The variant of \cite[Algorithm 4.4]{Melenk2001} we employ here is summarized in Algorithm~\ref{algo:refinement}. Here, we note that $\gamma_h$, $\gamma_p$, and $\gamma_n$ are control parameters; for the purposes of this article, we select $\gamma_h=4$, $\gamma_p=0.4$, and $\gamma_n=1$. Furthermore, the number of child elements, $N$, cf. step 10 in Algorithm~\ref{algo:refinement}, is dependent on the type of subdivision, i.e., isotropic/anisotropic, undertaken, as well as the element shape; for isotropic refinement of tensor-product elements, we have that $N=2^d$.
\begin{algorithm}[t!] \begin{algorithmic}[1] \caption{$hp$--Adaptive refinement algorithm.}\label{algo:refinement} \State Input the control parameters $\gamma_h$, $\gamma_p$, and $\gamma_n$. \State{Choose a coarse initial mesh~$\mesh[h,0]$ of~$\Omega$ and a corresponding low-order starting (effective) polynomial degree vector~$\vect{q}_0$, together with the total dimension vector $\vect{p}_0$ defined as in \eqref{eqn:effective_poly_deg}.} \State{Set the initial predicted error indicator $\eta_{K,0}^{\mathrm{pred}}=\infty$ for all $K\in\mesh[h,0]$.} \For{$i=0,1,\ldots,$ until sufficiently many iterations have been performed.}
\State{Solve \eqref{eqn:bilinear_form} for $u_{hp} \in V_{\vect{p}_i}(\mesh[h,i])$.}
\State{Compute the {\em a posteriori} error indicators $\eta_{K,i}\equiv \eta_K$,
$K\in \mesh[h,i]$, and mark elements for refinement based on their relative magnitude.}
\For{$K\in\mesh[h,i]$}
\If{$K$ is marked for refinement}
\If{$\eta_{K,i} > \eta_{K,i}^{\mathrm{pred}}$}
\State Perform $h$--refinement: Subdivide $K$ into $N$ children $K_s, s = 1,\dots,N$, and set
\State $(\eta_{K_s,i+1}^{\mathrm{pred}})^2\gets \frac{1}{N}\gamma_h \left(\frac{1}{2}\right)^{2q_K} \eta_{K,i}^2$, $1\leq s \leq N$.
\Else
\State Perform $p$--refinement: $q_K \gets q_K+1$
\State $(\eta_{K,i+1}^{\mathrm{pred}})^2\gets \gamma_p \eta_{K,i}^2$
\EndIf
\Else
\State $(\eta_{K,i+1}^{\mathrm{pred}})^2\gets \gamma_n (\eta_{K,i}^{\mathrm{pred}})^2$
\EndIf
\EndFor
\State Construct the new mesh $\mesh[h,i+1]$ and corresponding Trefftz space $V_{\vect{p}_{i+1}}(\mesh[h,i+1])$. \EndFor \end{algorithmic} \end{algorithm}
\begin{remark} We note that in \cite{Melenk2001} the initial values of the predicted error indicator $\eta_{K,0}^{\mathrm{pred}}$, $K\in\mesh[h,0]$, are set to zero; thereby, this ensures that $h$--refinement is undertaken the first time an element is refined. In contrast, in Algorithm~\ref{algo:refinement} we set $\eta_{K,0}^{\mathrm{pred}}=\infty$ for all $K\in\mesh[h,0]$ which instead leads to $p$--enrichment being undertaken as the first refinement of a given element, since the TDG method for the numerical approximation of the Helmholtz equation is intrinsically a high-order method. \end{remark}
\begin{remark} Plane wave directional adaptivity can be performed at different stages within Algorithm~\ref{algo:refinement}; for example, the following options are available: \begin{itemize} \item undertake directional adaptivity only on elements marked for $p$--refinement, \item undertake directional adaptivity on all elements marked for refinement, with $h$--refinement performed after plane wave direction adaptivity, or \item undertake directional adaptivity on every element $K\in\mesh$, even if the element $K$ has not been marked for refinement. \end{itemize} In Section~\ref{sec:numerical_examples} we shall numerically investigate each of these approaches in order to assess their relative computational performance in terms of error reduction. \end{remark}
\begin{remark} As a final remark, we note that within Algorithm~\ref{algo:refinement} we employ the fixed fraction refinement strategy to select elements for refinement, cf. step 6; throughout this article we set the refinement fraction equal to $25\%$. \end{remark}
\section{Numerical experiments} \label{sec:numerical_examples}
In this section, we present a series of numerical experiments to highlight the practical performance of the $hp$--refinement algorithm, with directional adaptivity, proposed in Algorithm~\ref{algo:refinement}. Throughout this section we shall compare the performance of the proposed $hp$--adaptive refinement strategy with the corresponding algorithm based on exploiting only local mesh subdivision, i.e., $h$--refinement. The numerical experiments presented within this section have been undertaken using the AptoFEM software package~\cite{aptofem}.
\subsection{Plane wave direction adaptivity}\label{section:plane_wave_refine} \begin{table}[pt] \centering
\begin{tabular}{c|c|r@{.}l@{$\times$}l|r@{.}l@{$\times$}l|c} & & \multicolumn6{c}{Relative $L^2(\Omega)$-Error} & \\
$q$ & No of Dofs & \multicolumn3{c|}{Standard TDG} & \multicolumn3c{Direction Adaptivity} & \% Reduction\\ \hline 3 & 112 & \quad 2&015 & $10^{0}$ & \quad 1&959 & $10^{0}$ & 2.7\% \\ 4 & 144 & 5&027 & $10^{-1}$ & 3&194 & $10^{-1}$ & 36.5\% \\ 5 & 176 & 7&414 & $10^{-2}$ & 2&658 & $10^{-2}$ & 64.1\% \\ 6 & 208 & 1&616 & $10^{-2}$ & 6&320 & $10^{-3}$ & 60.9\% \\ 7 & 240 & 3&420 & $10^{-3}$ & 1&435 & $10^{-3}$ & 58.0\% \\ 8 & 272 & 5&154 & $10^{-4}$ & 3&011 & $10^{-4}$ & 41.6\% \\ 9 & 304 & 8&928 & $10^{-5}$ & 6&908 & $10^{-5}$ & 22.6\% \\ \end{tabular} \caption{Plane Wave Refinement: Comparison of the relative $L^2$-error for uniform $p$--refinement (without direction adaptivity), and $p$--refinement with direction adaptivity (Algorithm~\ref{algo:direction}).} \label{table:pw_adapt} \end{table}
\begin{figure}
\caption{Plane Wave Refinement: Plane wave directions of \protect\subref{fig:eff:20} initial mesh and after \protect\subref{fig:eff:30} $1$, \protect\subref{fig:eff:40} $2$ and \protect\subref{fig:eff:50} $3$ $p$--refinements with plane wave refinement (Algorithm~\ref{algo:direction})}
\label{fig:directions:p3}
\label{fig:directions:p4}
\label{fig:directions:p5}
\label{fig:directions:p6}
\label{fig:directions}
\end{figure}
In this first example, we study the effect of adjusting the plane wave directions while employing a fixed computational mesh with uniform $p$--refinement. To this end, we consider problem~\eqref{eqn:helmholtz} with $\Omega=(0,1)^2$, $\Gamma_R=\partial\Omega$, and $\Gamma_D\equiv\emptyset$; furthermore, the Robin boundary condition $g_R$ is selected such that the analytical solution $u$ of~\eqref{eqn:helmholtz} is given by \begin{equation} \label{eqn:hankal_anal} u(x,y) = \mathcal{H}_0^{(1)} \left( k \sqrt{ (x+\nicefrac14)^2+y^2 } \right), \end{equation} where $\mathcal{H}_0^{(1)}$ denotes the Hankel function of the first kind of order 0. Throughout this section, we set $k=20$; note that for this problem the analytical solution $u$ is smooth in $\Omega$.
Here, the underlying computational mesh consists of 16 uniform square elements; on each element we initially select the effective polynomial degree $q=2$, i.e., $p=5$. In Table~\ref{table:pw_adapt} we compare the computed relative $L^2$-error based on employing uniform $p$--refinement of the underlying TDG space $V_{\vect{p}}(\mesh)$ in the two cases when the standard TDG scheme is employed, i.e., when the local plane wave directions are kept fixed, and when plane wave directional adaptivity is utilised, based on exploiting Algorithm~\ref{algo:direction} (direction adaptivity). We note that, since uniform $p$--refinement is employed in both cases, then at each step of the refinement, both schemes possess the same number of degrees of freedom. At each step of the refinement algorithm, we observe that the exploitation of directional adaptivity leads to roughly 50\% reduction in the relative $L^2$-error when compared to the corresponding quantity computed for the standard TDG method (without direction adaptivity). We note, however, that in the case when $q=3$, the relative $L^2$-error is only reduced by a small amount when directional adaptivity is employed; this is due to the fact that the local plane wave directions are computed based on the numerical solution evaluated with $q=2$, which is numerically too inaccurate to reliably predict the correct local direction of wave propagation. Furthermore, we also note that, as the number of plane waves increases, the improvement in the relative $L^2$-error decreases; this is caused by the fact that, as the number of plane waves increases for the standard TDG scheme, one of the directions will get closer to the actual dominant direction.
In Figure~\ref{fig:directions} we plot, for each element, the initial plane wave directions and the plane wave directions computed after $1$, $2$, and $3$ uniform $p$--refinements employing directional adaptivity. We emphasize the first plane wave direction with a larger arrow, i.e., the dominant wave direction as determined by Algorithm~\ref{algo:direction}. Moreover, we overlay the directions on top of a contour plot showing the real part of the analytical solution~\eqref{eqn:hankal_anal}. From Figure~\ref{fig:directions}, we can clearly observe that the directional adaptivity algorithm is able to accurately determine the dominant wave direction after a few refinements.
\begin{table}[pt] \centering
\begin{tabular}{c|c|r@{.}l@{$\times$}l|r@{.}l@{$\times$}l|r@{.}l@{$\times$}l} & & \multicolumn9{c}{Relative $L^2(\Omega)$-Error} \\
$q$ & No of Dofs & \multicolumn3{c|}{Initial} & \multicolumn3c{One Direction Adapt.} & \multicolumn3c{Two Direction Adapts.} \\ \hline 3 & 112 & 2&015 & $10^{0}$ & \qquad 8&755 & $10^{-1}$ & \qquad 5&856 & $10^{-1}$ \\ 4 & 144 & 5&027 & $10^{-1}$ & 1&267 & $10^{-1}$ & 1&149 & $10^{-1}$ \\ 5 & 176 & 7&414 & $10^{-2}$ & 2&614 & $10^{-2}$ & 2&584 & $10^{-2}$ \\ 6 & 208 & 1&616 & $10^{-2}$ & 6&330 & $10^{-3}$ & 6&327 & $10^{-3}$ \\ 7 & 240 & 3&420 & $10^{-3}$ & 1&435 & $10^{-3}$ & 1&435 & $10^{-3}$ \\ 8 & 272 & 5&154 & $10^{-4}$ & 3&011 & $10^{-4}$ & 3&011 & $10^{-4}$ \\ \end{tabular} \caption{Plane Wave Refinement: Comparison of the relative $L^2$-error with fixed effective polynomial degree, $q=3,\dots,8$, and direction adaptivity (Algorithm~\ref{algo:direction}).} \label{table:pw_adapt_fixed_q} \end{table}
Finally, in this section we consider performing more than one directional adaptivity step after each uniform $p$--refinement. To this end, in Table~\ref{table:pw_adapt_fixed_q} we compare the relative $L^2$-error for the initial directions, as well as after one and two steps of directional adaptivity have been performed, for the case when $q=3,\dots,8$. Here, we observe that additional application of the direction adaptivity algorithm does not lead to a significant reduction in the relative $L^2$-error; indeed, most of the reduction, when compared to the standard TDG scheme, without directional adaptivity, is attained after one step of Algorithm~\ref{algo:direction}. Moreover, we emphasise that this first step may be undertaken in a very computationally cheap manner.
\begin{figure}
\caption{Effectivities for $h$--refinement with fixed effective polynomial degree of the smooth analytical Hankel solution with different wavenumbers.}
\label{fig:eff:20}
\label{fig:eff:30}
\label{fig:eff:40}
\label{fig:eff:50}
\label{fig:eff}
\end{figure} \begin{figure}
\caption{Effectivities of individual components of the error indicators for $h$--refinement with fixed effective polynomial degree of the smooth analytical Hankel solution with different wavenumbers.}
\label{fig:eff:u:20}
\label{fig:eff:gradu:20}
\label{fig:eff:robin:20}
\label{fig:eff:u:30}
\label{fig:eff:gradu:30}
\label{fig:eff:robin:30}
\label{fig:eff:u:40}
\label{fig:eff:gradu:40}
\label{fig:eff:robin:40}
\label{fig:eff:u:50}
\label{fig:eff:gradu:50}
\label{fig:eff:robin:50}
\label{fig:eff:parts}
\end{figure}
\subsection{Efficiency of the \emph{a posteriori} error indicator}\label{section:aposteriori_effectivity}
The selection of the exponents of $h_K$ and $q_K$ in the weights present in~\eqref{eqn:error_indicator}, together with the independence of the wavenumber $k$, have been determined by numerical experimentation. To this end, we considered the example presented in the previous section, cf.~\eqref{eqn:hankal_anal}, whereby the numerical approximation is computed on a series of uniform computational meshes, with uniform effective polynomial degrees $q$, for a range of wave numbers $k$. In each case, we computed the effectivity index of each constituent term arising in ${\mathfrak E}(u_h,h,\vect{p})$, whereby the dependency on $h_K$, $q_K$, and $k$ was eliminated; note that with the removal of $h_K$, $q_K$, and $k$, the effectivity index of each term is computed by dividing by $\norm{u-u_{hp}}_{L^2(\Omega)}$. More precisely, effectivity indices were computed for $q=3,\dots,8$ and $k=20,30,40,50$, based on starting from a uniform $4\times 4$ mesh consisting of square elements. On the basis of these results, the dependence of each term on $h_K$, $q_K$, and $k$ was established. The final effectivity indices for the correctly scaled empirical {\em a posteriori} error indicator ${\mathfrak E}(u_h,h,\vect{p})$, i.e., ${\mathfrak E}(u_h,h,\vect{p})/\norm{u-u_{hp}}_{L^2(\Omega)}$ are presented in Figure~\ref{fig:eff}. Here, we observe that the effectivity indices have roughly the same values for all the selected values of $h$, $q$, and $k$; however, at higher wave numbers, pre-asymptotic behaviour leads to some increase in the effectivity indices as the mesh is refined, due to the fact that the mesh size is too large for the wavelength. We note that this behaviour is more noticeable in the case when $q=3$.
Finally, we compute the effectivity index for each individual term arising in the definition of the error indicator ${\mathfrak E}(u_h,h,\vect{p})$, cf.~\eqref{eqn:error_indicator}. More precisely, we define \begin{align*} \mathcal{E}_{\jmp{u_{hp}}} &\coloneqq \frac{\left(\sum_{K\in\mesh} \norm*{\alpha^{\nicefrac{1}{2}} h_K^{\nicefrac12}q_F^{-\nicefrac12} \jmp{u_{hp}}}_{L^2(\partial K \setminus \partial\Omega)}^2\right)^{\nicefrac12}}{\norm*{u-u_{hp}}_{L^2(\Omega)}}, \\ \mathcal{E}_{\jmp{\nabla u_{hp}}} &\coloneqq \frac{\left(\sum_{K\in\mesh} \norm*{\beta^{\nicefrac12} h_K^{\nicefrac32}q_K^{-\nicefrac32} \jmp{\nabla u_{hp}}}_{L^2(\partial K \setminus \partial\Omega)}^2\right)^{\nicefrac12}}{\norm*{u-u_{hp}}_{L^2(\Omega)}}, \\ \mathcal{E}_{R} &\coloneqq \frac{\left(\sum_{K\in\mesh} \norm*{\delta^{\nicefrac{1}{2}} h_K^{\nicefrac32}q_K^{-\nicefrac32} \left( g_R - \nabla u_{hp}\cdot\vect{n}_F + ik u_{hp}\right)}^2_{L^2(\partial K \cap \Gamma_R)}\right)^{\nicefrac12}}{\norm*{u-u_{hp}}_{L^2(\Omega)}}; \end{align*} the results for the case when $k=20,30,40,50$ are depicted in Figure~\ref{fig:eff:parts}. Here, we observe that each individual effectivity index is roughly constant for all the selected values of $h$, $q$, and $k$, except within the pre-asymptotic region. For this smooth problem, we clearly observe that the dominant part of the error indicator involves the jump in the gradient of the numerical solution.
\begin{remark} We note here that we have only computed the weightings for the interior and Robin faces. In the case of Dirichlet boundary conditions we assume that the weighting scales in the same way as the term involving $\jmp{u_{hp}}$. \end{remark}
\subsection{$hp$--Adaptive refinement}
In this section we consider the computational performance of the proposed $hp$--adaptive refinement algorithm, with directional adaptivity, for a range of test problems in both two- and three-dimensions. To this end, we employ the fixed fraction refinement strategy to mark elements for refinement; throughout this section, we set the refinement fraction equal to 25\% of the elements with the largest contribution to the error bound. Furthermore, we allow the meshes $\mesh$ to be `1-irregular', i.e., each face of any element $K\in\mesh$ contains at most one hanging node (which, for simplicity, we assume to be at the barycenter of the corresponding face) and each edge of each face contains at most one hanging node (yet again assumed to be at the barycenter of the edge). We also only allow the effective polynomial degree $q_K$ to vary by one between neighbouring elements.
For each test problem, we compare the performance of employing $hp$--adaptive refinement with $h$--adaptivity. In the latter case, we consider a standard $h$--adaptive algorithm, i.e., adaptive mesh refinement without directional adaptivity, as well as an $h$--adaptive strategy which incorporates directional adaptivity; here, we shall consider the two cases when directional adaptivity is either undertaken only on the elements marked for refinement, as well as the case when it is performed on all elements in the computational mesh. In the $hp$--setting, similar comparisons will be made, in addition to studying the case when directional adaptivity is only performed on elements marked for $p$--refinement.
We note that when $hp$--refinement is exploited we often reach a point where the $L^2$--norm of the error and {\em a posteriori} error bound stagnates, in the sense that both quantities no longer tend to zero, and indeed may start to oscillate, as further refinement is undertaken. This is caused by the fact that as the relative magnitude of $q_K$, with respect to $h_Kk$, becomes large, the local plane wave bases are very ill-conditioned. In this situation, we simply stop the numerical experiments and discard further results; however, possible improvements based on ensuring $q_K$ is well behaved with respect to $h_Kk$ could be implemented; cf. \cite{Cessenat1998, Huttunen2002, Luostari2013} for details.
\subsubsection{Example 1 --- Smooth solution (Hankel function)} \label{sec-hprefine-hankel} \begin{figure}
\caption{Example 1: \protect\subref{fig:hankel:error:20h} $L^2$-error and \protect\subref{fig:hankel:eff:20h} Effectivity index for $h$--refinement with wavenumber $k=20$; \protect\subref{fig:hankel:error:20hp} $L^2$-error and \protect\subref{fig:hankel:eff:20hp} Effectivity index for $hp$--refinement with $k=20$; \protect\subref{fig:hankel:error:50h} $L^2$-error and \protect\subref{fig:hankel:eff:50h} Effectivity index for $h$--refinement with $k=50$; \protect\subref{fig:hankel:error:50hp} $L^2$-error and \protect\subref{fig:hankel:eff:50hp} Effectivity index for $hp$--refinement with $k=50$.}
\label{fig:hankel:error:20h}
\label{fig:hankel:eff:20h}
\label{fig:hankel:error:20hp}
\label{fig:hankel:eff:20hp}
\label{fig:hankel:error:50h}
\label{fig:hankel:eff:50h}
\label{fig:hankel:error:50hp}
\label{fig:hankel:eff:50hp}
\end{figure} \begin{figure}
\caption{Example 1: Comparison of relative $L^2$-error for $h$-- and $hp$--refinement, with direction adaptivity on all elements, for wavenumbers \protect\subref{fig:hankel:errorcompare:20} $k=20$ and \protect\subref{fig:hankel:errorcompare:50} $k=50$.}
\label{fig:hankel:errorcompare:20}
\label{fig:hankel:errorcompare:50}
\label{fig:hankel:errorcompare}
\end{figure} \begin{figure}
\caption{Example 1: Meshes after 8 \protect\subref{fig:hankel:mesh:h:20} $h$-- and \protect\subref{fig:hankel:mesh:hp:20} $hp$--refinements for wavenumber $k=20$; meshes after 8 \protect\subref{fig:hankel:mesh:h:50} $h$-- and \protect\subref{fig:hankel:mesh:hp:50} $hp$--refinements for wavenumber $k=50$.}
\label{fig:hankel:mesh:h:20}
\label{fig:hankel:mesh:hp:20}
\label{fig:hankel:mesh:h:50}
\label{fig:hankel:mesh:hp:50}
\label{fig:hankel}
\end{figure}
In this section, we again consider the problem outlined in Section~\ref{section:plane_wave_refine}. Furthermore, we select the initial mesh to consist of $8\times 8$ uniform square elements and set $q_K=3$ on each $K\in\mesh$. Firstly, in Figures~\ref{fig:hankel:error:20h} and \ref{fig:hankel:error:50h} we compare the relative error in the $L^2$-norm to the number of degrees of freedom in the TDG space $V_{\vect{p}}(\mathcal{T}_h)$, when $h$--refinement is employed, with the wavenumbers $k=20$ and $k=50$, respectively. In each case, we consider the performance of the underlying adaptive algorithm when both the standard TDG scheme (without direction adaptivity) is employed, as well as the corresponding method with directional adaptivity; in this latter setting, we consider the cases when either directional adaptivity is undertaken on only the elements marked for refinement, as well as when it is exploited on every element in the computational mesh $\mesh$. Analogous results are presented in Figures~\ref{fig:hankel:error:20hp} and \ref{fig:hankel:error:50hp} in the $hp$--setting, respectively; here, we compare standard $hp$--refinement, with $hp$--adaptivity incorporating directional adaptivity. In the latter case, different directional adaptivity strategies are considered: firstly, directional adaptivity is performed only on elements marked for $p$--refinement; secondly, directional adaptivity is undertaken on all elements marked for refinement; finally, directional adaptivity is applied to every element in $\mesh$. In the $hp$--setting we observe exponential convergence of the error as the finite element space is adaptively enriched: indeed, on a linear-log scale, the convergence lines are roughly straight. 
Thereby, it is clear that the exploitation of the proposed $hp$--refinement algorithm, with directional adaptivity, leads to a significant reduction in the $L^2$-norm of the error, for a given number of degrees of freedom, when compared to the same quantity computed with $h$--refinement alone; cf. Figure~\ref{fig:hankel:errorcompare}.
In both the $h$-- and $hp$--refinement cases, we generally observe that the error is decreased when directional refinement is employed. Moreover, it is evident in the $hp$--setting that selecting more elements for directional refinement generally leads to a smaller error, for a given number of degrees of freedom; this is particularly noticeable in the case when $k=50$. In Figures~\ref{fig:hankel:eff:20h}, \ref{fig:hankel:eff:20hp}, \ref{fig:hankel:eff:50h}, and \ref{fig:hankel:eff:50hp} we plot the effectivity indices for each of the above refinement strategies for the case when $k=20,50$; here, we observe that they remain roughly constant during adaptive $h$--/$hp$--mesh refinement, and are roughly the same for the two different wavenumbers, with the notable exception of the pre-asymptotic region for $k=50$.
Finally, in Figures~\ref{fig:hankel:mesh:h:20}--\ref{fig:hankel:mesh:hp:50}, we show the meshes after 8 $h$-- and $hp$--refinements, with directional adaptivity employed on all elements, for both $k=20$ and $k=50$; here, the $hp$--meshes show the effective polynomial degree $q_K$ for each element. Given the smoothness of the analytical solution on $\Omega$, we observe that the resulting computational meshes are almost uniform; indeed, in the $hp$--setting almost uniform $p$--refinement has been undertaken.
\subsubsection{Example 2 --- Singular solution} \begin{figure}
\caption{Example 2: \protect\subref{fig:besselsin:error:20h} $L^2$-error and \protect\subref{fig:besselsin:eff:20h} Effectivity index for $h$--refinement with wavenumber $k=20$; \protect\subref{fig:besselsin:error:20hp} $L^2$-error and \protect\subref{fig:besselsin:eff:20hp} Effectivity index for $hp$--refinement with $k=20$; \protect\subref{fig:besselsin:error:50h} $L^2$-error and \protect\subref{fig:besselsin:eff:50h} Effectivity index for $h$--refinement with $k=50$; \protect\subref{fig:besselsin:error:50hp} $L^2$-error and \protect\subref{fig:besselsin:eff:50hp} Effectivity index for $hp$--refinement with $k=50$.}
\label{fig:besselsin:error:20h}
\label{fig:besselsin:eff:20h}
\label{fig:besselsin:error:20hp}
\label{fig:besselsin:eff:20hp}
\label{fig:besselsin:error:50h}
\label{fig:besselsin:eff:50h}
\label{fig:besselsin:error:50hp}
\label{fig:besselsin:eff:50hp}
\label{fig:besselsin}
\end{figure} \begin{figure}
\caption{Example 2: Meshes after 8 \protect\subref{fig:besselsin:mesh:h:20} $h$-- and \protect\subref{fig:besselsin:mesh:hp:20} $hp$--refinements for wavenumber $k=20$; meshes after 8 \protect\subref{fig:besselsin:mesh:h:50} $h$-- and \protect\subref{fig:besselsin:mesh:hp:50} $hp$--refinements for wavenumber $k=50$.}
\label{fig:besselsin:mesh:h:20}
\label{fig:besselsin:mesh:hp:20}
\label{fig:besselsin:mesh:h:50}
\label{fig:besselsin:mesh:hp:50}
\label{fig:besselsin:mesh}
\end{figure}
In this second example, we consider problem~\eqref{eqn:helmholtz} posed on the L-shaped domain $\Omega=(-1,1)^2\setminus\left((0,1)\times(-1,0)\right)$, $\Gamma_R=\partial\Omega$, and $\Gamma_D\equiv\emptyset$, with Robin boundary condition $g_R$ selected so that the analytical solution is given, in polar coordinates $(r,\theta)$, by \[
u(r,\theta) = \mathcal{J}_{\nicefrac23}(kr)\sin(\nicefrac{2\theta}{3}); \] we note that the gradient of $u$ has a singularity at the origin.
As in the previous example, we again compare the performance of the $h$-- and $hp$--adaptive refinement algorithms, both in the standard setting, as well as when directional adaptivity is employed; here, we again consider the analogous directional refinement strategies employed in Section~\ref{sec-hprefine-hankel}. To this end, in Figures~\ref{fig:besselsin:error:20h} and \ref{fig:besselsin:error:50h} we compare the relative error in the $L^2$-norm with the number of degrees of freedom in the TDG space $V_{\vect{p}}(\mathcal{T}_h)$ when $h$--refinement is employed for $k=20$ and $k=50$, respectively; the respective convergence plots in the $hp$--setting are given in Figures~\ref{fig:besselsin:error:20hp} and \ref{fig:besselsin:error:50hp}. Here, we observe that although exploiting $hp$--refinement leads to exponential convergence of the relative $L^2$-norm of the error as $V_{\vect{p}}(\mathcal{T}_h)$ is enriched, in both the $h$-- and $hp$--settings, we observe that the magnitude of the error, computed both with and without directional refinement, is roughly identical; i.e., directional refinement does not lead to any reduction in the error of the computed TDG solution when either $h$--/$hp$--refinement is employed. We note that, for this particular problem, this behaviour is expected, since the error in the computed TDG solution is dominated by the influence of the singularity at the origin, rather than local wave propagation.
In Figures~\ref{fig:besselsin:eff:20h}, \ref{fig:besselsin:eff:20hp}, \ref{fig:besselsin:eff:50h}, and \ref{fig:besselsin:eff:50hp} we plot the effectivity indices when both $h$-- and $hp$--refinement is employed for the case when $k=20,50$. In all cases, we observe that the effectivity indices are roughly constant for this singular problem, though when $h$--refinement is employed, on highly refined meshes, we see a slight drop in the computed effectivity indices. Finally, in Figures~\ref{fig:besselsin:mesh:h:20}--\ref{fig:besselsin:mesh:hp:50}, we show the meshes after 8 $h$-- and $hp$--refinements, with direction adaptivity employed on all elements, for both $k=20$ and $k=50$. As we would expect, in both the $h$-- and $hp$--settings, mesh subdivision is concentrated in the vicinity of the singularity located at the origin; away from this region, the $h$--refinement algorithm employs almost uniform mesh subdivision, while the $hp$--refinement strategy employs the necessary combination of local mesh refinement and local polynomial enrichment, as required, to reduce the error in the computed TDG solution.
\subsubsection{Example 3 --- Transmission/internal reflection}
We now consider the case of transmission and internal reflection of a plane wave across a fluid-fluid interface in the domain $\Omega=(-1,1)^2$, $\Gamma_R\equiv \emptyset$, and $\Gamma_D=\partial\Omega$, with two different refractive indices, cf.~\cite[Section 6.3]{Kapita2015}. The interface between the two regions is located at $y=0$; in this setting the wavenumber $k$ is given by the piecewise constant function \[
k(x,y) = \begin{cases}
k_1 \coloneqq \omega n_1 & \text{if } y \leq 0,\\
k_2 \coloneqq \omega n_2 & \text{if } y > 0,
\end{cases} \] where we select $\omega=11$, $n_1=2$, and $n_2=1$. Throughout this section we impose an appropriate inhomogeneous Dirichlet boundary condition, so that the analytical solution $u$ to \eqref{eqn:helmholtz} is given, for a constant $0\leq\theta_i\leq\nicefrac\pi2$, by \[
u(x,y) = \begin{cases}
T \mathrm{e}^{i(K_1x+K_2y)} & \text{if } y>0 ,\\
\mathrm{e}^{ik_1(x\cos(\theta_i)+y\sin(\theta_i))} + R \mathrm{e}^{ik_1(x\cos(\theta_i)-y\sin(\theta_i))} & \text{if } y<0,
\end{cases} \] where $K_1=k_1\cos(\theta_i)$, $K_2 = \sqrt{k_2^2-k_1^2\cos^2(\theta_i)}$, \[
R=-\frac{K_2-k_1\sin(\theta_i)}{K_2+k_1\sin(\theta_i)}, \] and $T=1+R$. We note that there exists a critical angle $\theta_{crit}$, such that when $\theta_i>\theta_{crit}$ the wave is refracted, while $\theta_i<\theta_{crit}$ results in internal reflection, cf.~\cite[Section 6.3]{Kapita2015}. As in \cite{Kapita2015} we perform numerical experiments for both internal reflection ($\theta_i=29^\circ$) and refraction ($\theta_i=69^\circ$). To highlight the reflection and refraction behaviour, in Figures~\ref{fig:reflection:anal} and~\ref{fig:refraction:anal} we show the analytical solution when $\theta_i=29^\circ$ and $\theta_i=69^\circ$, respectively.
\begin{figure}
\caption{Example 3: Analytical solutions (real part) when \protect\subref{fig:reflection:anal} $\theta_i=29^\circ$ resulting in internal reflection, and \protect\subref{fig:refraction:anal} $\theta_i=69^\circ$ resulting in refraction.}
\label{fig:reflection:anal}
\label{fig:refraction:anal}
\label{fig:reflection_anal}
\end{figure} \begin{figure}
\caption{Example 3: \protect\subref{fig:reflection:error:h} $L^2$-error and \protect\subref{fig:reflection:eff:h} Effectivity index for $h$--refinement with reflection ($\theta_i=29^\circ$); \protect\subref{fig:reflection:error:hp} $L^2$-error and \protect\subref{fig:reflection:eff:hp} Effectivity index for $hp$--refinement with reflection ($\theta_i=29^\circ$); \protect\subref{fig:refraction:error:h} $L^2$-error and \protect\subref{fig:refraction:eff:h} Effectivity index for $h$--refinement with refraction ($\theta_i=69^\circ$); \protect\subref{fig:refraction:error:hp} $L^2$-error and \protect\subref{fig:refraction:eff:hp} Effectivity index for $hp$--refinement with refraction ($\theta_i=69^\circ$).}
\label{fig:reflection:error:h}
\label{fig:reflection:eff:h}
\label{fig:reflection:error:hp}
\label{fig:reflection:eff:hp}
\label{fig:refraction:error:h}
\label{fig:refraction:eff:h}
\label{fig:refraction:error:hp}
\label{fig:refraction:eff:hp}
\label{fig:reflection}
\end{figure} \begin{figure}
\caption{Example 3: Meshes after 7 \protect\subref{fig:reflection:mesh:h} $h$-- and \protect\subref{fig:reflection:mesh:hp} $hp$--refinements for reflection ($\theta_i=29^\circ$); meshes after 7 \protect\subref{fig:refraction:mesh:h} $h$-- and \protect\subref{fig:refraction:mesh:hp} $hp$--refinements for refraction ($\theta_i=69^\circ$).}
\label{fig:reflection:mesh:h}
\label{fig:reflection:mesh:hp}
\label{fig:refraction:mesh:h}
\label{fig:refraction:mesh:hp}
\label{fig:reflection:mesh}
\end{figure}
To account for the jump in the wavenumber $k$, the value of $k$ present in the integrals along the interface $y=0$ in the TDG scheme \eqref{eqn:bilinear_form} is replaced by $\omega$. We select the initial mesh to consist of $8 \times 8$ uniform square elements, so that the interface between the two materials is captured by the mesh; thereby, the wavenumber is constant in every element, and hence the TDG space~\eqref{eqn:pw_basis} and error indicators~\eqref{eqn:error_indicator} can be easily modified to treat this example by setting the wavenumber for each element equal to the wavenumber of the material within which the element is contained. Firstly, we consider the case when there is an internal reflection, i.e., when $\theta_i=29^\circ$; to this end, in Figures~\ref{fig:reflection:error:h} and \ref{fig:reflection:error:hp} we plot the relative error in the $L^2$-norm against the number of degrees of freedom in $V_{\vect{p}}(\mathcal{T}_h)$ using both $h$-- and $hp$--refinement, respectively. As for the previous numerical experiments, here we again observe exponential convergence of the error when $hp$--refinement is employed. Furthermore, in both the $h$-- and $hp$--version settings, we observe that employing directional adaptivity does not improve the magnitude of the error; indeed, in the $hp$--version setting, initially the standard refinement approach is superior, though as $V_{\vect{p}}(\mathcal{T}_h)$ is enriched, we again observe the benefits of employing directional adaptivity. This behaviour is perhaps expected, since for the internal reflection case, no waves are present above the $y=0$ line and moreover it does not possess a dominant wave propagation direction below the $y=0$ line due to the reflected waves, cf.~Figure~\ref{fig:reflection:anal}. 
In Figures~\ref{fig:reflection:eff:h} and \ref{fig:reflection:eff:hp}, we plot the effectivity indices for both refinement strategies, respectively; here we observe that, apart from an initial pre-asymptotic region, the effectivity indices are roughly constant.
The corresponding convergence plots for the refraction case, i.e., when $\theta_i=69^\circ$, are presented in Figures~\ref{fig:refraction:error:h} and \ref{fig:refraction:error:hp} when both $h$-- and $hp$--refinement are employed, respectively; in the latter setting, we again observe exponential convergence of the computed relative $L^2$-norm of the error. Moreover, in contrast to the case when there is an internal reflection, here we observe the computational benefits of employing directional adaptivity, in the sense that this typically leads to a reduction in the error, for a given fixed number of degrees of freedom, when compared to the standard refinement strategy; this is particularly evidenced in the $hp$--setting. Indeed, in this case there is a dominant propagation direction throughout the domain, cf.~Figure~\ref{fig:refraction:anal}. Figures~\ref{fig:refraction:eff:h} and~\ref{fig:refraction:eff:hp} show the effectivity indices computed using both $h$-- and $hp$--refinement, respectively; analogous behaviour is observed as for the internal reflection case, i.e., the effectivity indices become roughly constant, after an initial pre-asymptotic region.
Finally, in Figures~\ref{fig:reflection:mesh:h} \&~\ref{fig:reflection:mesh:hp} we show the meshes after 7 $h$-- and $hp$-- adaptive mesh refinements have been performed, respectively, in the case of an internal reflection, i.e., $\theta_i=29^\circ$. Here, the $h$--refinement strategy concentrates most of the elements in the $y<0$ region; although, there is some refinement above $y=0$ to resolve the exponentially decaying solutions present there. Additional mesh smoothing has also been undertaken here to ensure that there is only one hanging node per face, cf.~\cite{Kapita2015}. The $hp$--refinement algorithm also performs some $h$--refinement below the $y=0$ line, though this region is largely $p$--refined; however, most of the refinement occurs around the $y=0$ line to resolve the exponentially decaying solutions. Some $p$--refinement occurs in the rest of the $y>0$ region, which is caused by enforcing the condition that the effective polynomial degree may only vary by one between neighboring elements. In the refraction case, i.e., $\theta_i=69^\circ$, cf. Figures~\ref{fig:refraction:mesh:h} \&~\ref{fig:refraction:mesh:hp}, we note a sharp boundary at the $y=0$, with more refinement undertaken in the $y<0$ region than the region $y>0$.
\subsubsection{Example 4 --- 3D smooth solution (plane wave)} \begin{figure}
\caption{Example 4: \protect\subref{fig:cube:error:20h} $L^2$-error and \protect\subref{fig:cube:eff:20h} Effectivity index for $h$--refinement with wavenumber $k=20$; \protect\subref{fig:cube:error:20hp} $L^2$-error and \protect\subref{fig:cube:eff:20hp} Effectivity index for $hp$--refinement with $k=20$; \protect\subref{fig:cube:error:50h} $L^2$-error and \protect\subref{fig:cube:eff:50h} Effectivity index for $h$--refinement with $k=50$; \protect\subref{fig:cube:error:50hp} $L^2$-error and \protect\subref{fig:cube:eff:50hp} Effectivity index for $hp$--refinement with $k=50$.}
\label{fig:cube:error:20h}
\label{fig:cube:eff:20h}
\label{fig:cube:error:20hp}
\label{fig:cube:eff:20hp}
\label{fig:cube:error:50h}
\label{fig:cube:eff:50h}
\label{fig:cube:error:50hp}
\label{fig:cube:eff:50hp}
\label{fig:cube}
\end{figure}
In this final example, we consider problem \eqref{eqn:helmholtz} posed on the domain $\Omega=(0,1)^3$, $\Gamma_R=\partial\Omega$, and $\Gamma_D\equiv\emptyset$, with Robin boundary condition $g_R$ selected so that the analytical solution $u$ to \eqref{eqn:helmholtz} is given by \[
u(\vect{x}) = \mathrm{e}^{ik\vect{d}\cdot\vect{x}}, \] where $\vect{d}_j = \nicefrac{1}{\sqrt{3}}$ for $j=1,2,3$.
In Figures~\ref{fig:cube:error:20h} and \ref{fig:cube:error:50h} we present the performance of the proposed directional adaptivity algorithm employing $h$--refinement with wavenumbers $k=20$ and $k=50$, respectively; the analogous results for $hp$--refinement are given in Figures~\ref{fig:cube:error:20hp} and \ref{fig:cube:error:50hp}, respectively. As in the two--dimensional setting, we observe that selecting more elements for directional adaptivity at each step of the proposed refinement strategy, leads to a greater reduction in the relative $L^2$-norm of the error, for a fixed number of degrees of freedom, when compared to the standard case when directional adaptivity is not employed. Of course, given the simple nature of the analytical solution for this problem, we clearly expect directional adaptivity to be advantageous. In the case when the wavenumber $k=50$ we note that both $h$-- and $hp$--refinement strategies are essentially in the pre-asymptotic region; however, performing directional adaptivity ensures that the method leaves this pre-asymptotic region after only a few mesh refinements. Finally, in Figures~\ref{fig:cube:eff:20h}, \ref{fig:cube:eff:20hp}, \ref{fig:cube:eff:50h}, and \ref{fig:cube:eff:50hp} we plot the effectivity indices of both the $h$-- and $hp$--refinement algorithms for the case when $k=20,50$. We note, especially in the $hp$--refinement case, that the effectivity indices are roughly constant but do slightly rise after the pre-asymptotic region.
\section{Concluding remarks} \label{sec:conclusions}
In this article we have developed an automatic $hp$--adaptive refinement algorithm for the TDG approximation of the homogeneous Helmholtz equation. In addition to employing both local mesh subdivision and local basis enrichment, we also locally rotate the underlying plane wave basis in such a manner so that the first basis function is aligned with the dominant wave direction. The choice to $h$-- or $p$--refine an element is based on a prediction of how much reduction we expect to observe in the elementwise error indicator, when a particular refinement is performed. The alignment of the local basis with the dominant wave direction is undertaken on the basis of an eigenvalue analysis of the Hessian of the numerical solution, together with a correction computed from an impedance condition. The computational efficiency of the proposed adaptive strategy has been studied through a series of numerical examples; indeed, the application of $hp$--refinement, with directional adaptivity, leads to a significant reduction in the computed error compared to standard refinement strategies. We also note that performing directional adaptivity on all elements generally leads to a greater reduction in the error than the corresponding case when only elements marked for refinement are directionally adapted; clearly, this error reduction is attained while keeping the number of degrees of freedom in the underlying TDG space fixed. Future work will be devoted to the derivation of robust $hp$--version {\em a posteriori} error bounds, as well as the application to problems of engineering interest.
\end{document} |
\begin{document}
\title{{\bf Pythagorean triangles within Pythagorean triangles }} \author{Konstantine Zelator\\ Department of Mathematics\\ 301 Thackeray Hall\\ 139 University Place\\ The University of Pittsburgh\\ Pittsburgh, PA 15260\\ USA\\ and\\ P.O. Box 4280\\ Pittsburgh, PA 15203\\ kzet159@pitt.edu\\ e-mails: konstantine\_zelator@yahoo.com}
\maketitle
\section{Introduction}
\parbox[b]{1.5in}{Suppose that $CBA$ is a Pythagorean triangle with sidelengths
$\left| \overline{AB}\right| = c,\ \left|\overline{CA}\right|=b$, and
$\left|\overline{CB}\right| =a$; that is, a right triangle with the right
angle at $C$; and with $a,b,c$ being positive integers such that
$a^2+b^2=c^2$. Then (without loss of generality -- $a$ and $b$ may be
switched),}\hspace{.25in} \hspace{1.0in} \epsfig{file=fig1.eps,width=1.5in}
\begin{equation}\left\{ \begin{array}{l} a=d(m^2-n^2),\ b=d(2mn),\ c=d(m^2+n^2)\\ \\ {\rm where}\ d,m,n\ {\rm are\ positive\ integers\ such\ that}\\ \\ m > n,\ (m,n) =1,\ {\rm and}\ m+n\equiv 1({\rm mod}\ 2)\end{array} \right\}\label{E1} \end{equation}
{\bf Note:} Throughout this paper, $(X,Y)$ will stand for the greatest common divisor of two integers $X$ and $Y$.
Thus, the condition $(m,n) =1$ says that $m$ and $n$ are relatively prime, their greatest common divisor is $1$. Also, the condition $m+n\equiv 1({\rm
mod}2)$ says that $m$ and $n$ have different parities; one of them is even, the other odd. The formulas in (\ref{E1}), are the well known parametric formulas describing the entire family of Pythagorean triangles or triples.
A derivation of the formulas can be found in references \cite{1} and \cite{2}. For a wealth of historic information on Pythagorean triangles see \cite{2} or \cite{3}.
Now, consider a point $P$ on the hypotenuse $\overline{AB}$, and let $D$ and $E$ be the intersection points of the two lines through $P$ and parallel to $\overline{CA}$ and $\overline{CB}$; with the sides $\overline{CB}$ and $\overline{CA}$ respectively. Two right triangles are formed; the triangles $BDP$ and $APE$. Let $x$ and $y$ denote the lengths of line segments $\overline{DP}$ and $\overline{PE}$ respectively. Also, let $h_1 =
\left|\overline{BP}\right|$ and $h_2 = \left|\overline{AP}\right|$. Then,
\begin{equation} \left\{ \begin{array}{l}
\left|\overline{DP}\right| = \left|\overline{CE}\right| = x\ {\rm and}\
\left|\overline{PE} \right| = \left|\overline{DC}\right| = y.\\ \\
{\rm Thus},\ \left|\overline{BD}\right| = \left|\overline{BC}\right| -
\left|\overline{DC}\right| = a-y;\\ \\
{\rm and}\ \left|\overline{AE}\right| = \left|\overline{AC}\right| -
\left|\overline{CE}\right| = b-x \end{array} \right\} \label{E2} \end{equation}
Both right triangles $BDP$ and $APE$ are similar to the right triangle $CBA$. We have the similarity ratios,
$\left\{ \begin{array}{ccccc}\dfrac{x}{b} & = & \dfrac{a-y}{a} & = &
\dfrac{h_1}{c} \\ \\ \dfrac{y}{a} & = & \dfrac{b-x}{b} & = & \dfrac{h_2}{c} \end{array}\right\}$
\hspace{2.75in} \begin{tabular}{l} (3i)\\ \\ \\ (3ii) \end{tabular}
\setcounter{equation}{3}
Since $a,b,c$ are (positive) integers, it follows, by inspection, from (3i) that if one of $x,y$, or $h_1$ is a rational number, then all three of them must be rational numbers. Hence, either all three $x,y,h_1$ are rationals, or otherwise, all three of them must be irrational. Likewise, it follows from (3ii) that either all three $x,y,h_2$ are rational or all three are irrational. Combining these two observations, we infer that
\fbox{\parbox{4.5in}{{\it Either all four $x,y,h_1,h_2$ are rational numbers
or, otherwise, all four of them are irrationals.}}}
In Section 2, we state three lemmas from number theory. One of them (Euclid's Lemma) is well known. We offer proofs for the other two.
In Section 3, we prove Theorems 1 and 2; Theorem 2 is a corollary of Theorem 1.
In Section 4, we consider and analyze three special cases. These are the cases when the point $P$ is the midpoint $M$ of the hypotenuse $\overline{AB}$; when $P$ is the point $I$ where the angle bisector of the $90^{\circ}$ angle at $C$ intersects the hypotenuse $\overline{AB}$, and when the point $P$ is the foot $F$ of the perpendicular from $C$ to the hypotenuse $\overline{AB}$.
Back to Section 3. In Theorem 1 we prove that the two right triangles $BDP$ and $PEA$ in Figure 1 are either both Pythagorean or neither of them is a Pythagorean triangle (assuming, of course, that $BCA$ is a Pythagorean triangle). It then follows, and this is part of Theorem 2, that when the triangle $BCA$ is a primitive Pythagorean triangle, neither of the triangles, $BDP$ and $PEA$ are Pythagorean for any position of the point $P$ along the hypotenuse $\overline{AB}$.
In Section 5 (Theorem 6), we prove that, given a Pythagorean triangle with side lengths $a=d(m^2-n^2),\ b=d(2mn)$, and $c=d(m^2+n^2)$, where $d,m,n$ are positive integers such that $d \geq 2$, $(m,n) =1$, $m > n$, and $m+n \equiv 1({\rm mod}\ 2)$, there are exactly $d-1$ positions of the point $P$ such that the triangles $BDP$ and $PEA$ are both Pythagorean.
In Section 6, we will examine the general question of when, in addition to the two triangles $BDP$ and $APE$ being Pythagorean, the four congruent right triangles (within the rectangle $CDPE$) $CDP,\ CEP,\ DCE$, and $EPD$ are also Pythagorean. We derive a family of non-primitive Pythagorean triangles $CBA$ with that property.
\noindent{\bf Note:} In addition to the notation $(k,\ell)$ denoting the greatest common divisor of two integers, $k$ and $\ell$, the notation $t|v$, will stand for ``The integer $t$ is a divisor of the integer $v$''.
\section{Three lemmas from number theory}
\begin{lemma} (Euclid's Lemma): Suppose that $a,b,c$, are natural numbers
such that $c|ab$ (i.e., $c$ is a divisor of the product $ab$). If
$(c,a)=1$, then $c|b$.
\end{lemma}
For a proof of this well-known result, the reader
may refer to \cite{1} or \cite{2}.
\begin{lemma} Let $m,n$ be positive integers such that $m > n,\ (m,n)=1$, and
$m+n \equiv 1({\rm mod}2)$. Then
\begin{enumerate} \item[(i)] $(m^2+n^2,2mn) =1$
\item[(ii)] $(m^2+n^2, m^2-n^2) = 1$
\item[(iii)] $(m^2-n^2, 2mn)=1$ \end{enumerate} \end{lemma}
\begin{proof}
\ \ \newline
\begin{enumerate} \item[(i)] We show that $m^2+n^2$ and $2mn$
have no prime divisors in common. If, to the contrary, $p$ were a prime
divisor of both $m^2+n^2$ and $2mn$, then $p$ would be odd, since
$m^2+n^2 \equiv 1({\rm mod}2)$, by virtue of the hypothesis
$m+n\equiv1({\rm mod}2)$. Thus, $p|2mn$ implies, since $(p,2) =1$, that
$p|mn$ (by Lemma 1). But $p$ is a prime, so $p|mn$ implies that $p$ must
divide at least one of $m,n$. If $p|m$, then from $p|m^2+n^2$, it follows
that $p|n^2$, and so $p|n$. Thus, $p|m$ and $p|n$ contradicting the
hypothesis that $(m,n) = 1$.
\item[(ii)] A similar argument left to the reader ($p$ must divide the sum of
$m^2+n^2$ and $m^2-n^2$, and their difference. Hence, $p|2n^2$ and $p|2m^2$, which since $p$ is odd, eventually implies $p|n$ and $p|m$, a contradiction).
\item[(iii)] A similar argument as in (i). \end{enumerate}
\end{proof}
\section{A theorem and a corollary}
\begin{theorem} Suppose that $ABC$ is a Pythagorean triangle with the
right angle at $C$; and with the three sidelengths satisfying the formulas in
(\ref{E1}), namely $a=d(m^2-n^2),\ b=d(2mn),\ c=d(m^2+n^2)$, where $d,m,n$
are positive integers such that $m>n,\ (m,n)=1$, and $m+n\equiv 1({\rm
mod}\ 2)$.
Let $P$ be a point on the hypotenuse $\overline{AB}$, distinct from $A$ and $B$. Furthermore, suppose that $D$ is the foot of the perpendicular from $P$ to the side $\overline{CB}$; and $E$ the foot of the perpendicular from $P$ to the side $\overline{CA}$, as in Figure 1. Then, either both right triangles $BDP$ and $APE$ are Pythagorean or neither of them is.
Moreover, if they are both Pythagorean, then the sidelengths\linebreak
$\left|\overline{BD}\right| = a-y,\ \left|\overline{DP}\right| =x$, and
$\left|\overline{BP}\right| = h_1$ of the triangle $BDP$ satisfy the formulas,
$$ a-y=\delta (m^2-n^2),\ x= \delta(2mn),\ h_1 = \delta(m^2+n^2). $$
While the sidelengths $\left|\overline{PE}\right|=y,\
\left|\overline{EA}\right| = b-x,\ \left|\overline{PA}\right| = h_2$ of the triangle $PEA$ satisfy the formulas
$$ y=(d-\delta)(m^2-n^2),\ b - x = (d-\delta)(2mn),\ h_2 = (d-\delta)(m^2+n^2) $$
\noindent where $\delta$ is a positive integer such that $1 \leq \delta \leq d-1$ \end{theorem}
\begin{proof} Suppose that the triangle $BDP$ is Pythagorean. We will prove
that the triangle $APE$ must then also be Pythagorean; by an entirely symmetric argument, if the triangle $APE$ is Pythagorean, then so must the triangle $BDP$ be.
Since the triangle $BDP$ is Pythagorean, its three sidelengths, $x,\ a-y$, and $h_1$ (see Figure 1) must be natural numbers. From (3i)
\begin{equation} \begin{array}{rlll} \Rightarrow & x & = & \dfrac{b\cdot h_1}{c}
\underset{{\rm by}\ (1)}{=} \dfrac{d(2mn)}{d(m^2+n^2)} \cdot h_1;\\ \\ & x & = & \dfrac{2mnh_1}{m^2+n^2} \end{array} \label{E4} \end{equation}
From the conditions $(m,n)=1$ and $m+n \equiv 1({\rm mod}\ 2)$, it follows by Lemma 2(i) that
\hspace*{1.75in} $(m^2+n^2, 2mn)=1$
(4i)
Since $x$ is a natural number, equation (\ref{E4}) says that the integer $m^2+n^2$ must be a divisor of the product $2mnh_1$, which clearly implies, by (4i) and Lemma 1, that $h_1$ must be divisible by $m^2+n^2$.
\hspace{1.75in} $h_1 = \delta \cdot (m^2+n^2) $
(4ii)
\noindent for some positive integer $\delta$; and since $h_1$ is the length of the hypotenuse $\overline{BP}$ (triangle $BDP$), and the point $P$ lies strictly between $A$ and $B$, it is clear that
$$
h_1 = \left|\overline{BP}\right| < c = \left|\overline{BA}\right| = d(m^2+n^2),$$
\noindent which together with (4ii) clearly show that
\hspace{1.75in} $\begin{array}{l} 1 \leq \delta < d;\ {\rm or\
equivalently},\\ \\ 1 \leq \delta \leq d-1 \end{array}$
(4iii)
Note that by (4iii), we must have $d \geq 2$. Going back to (\ref{E4}) and using (4ii) we get
\hspace{1.75in} $x = (2mn)\delta$
(4iv)
and so, by (4iv), (3i), (4ii), and (\ref{E1}), we further obtain
\hspace{.5in} $\begin{array}{rcl} a-y & = & \delta(m^2-n^2);\ y= a-\delta
(m^2-n^2);\\ \\ y & = & d(m^2-n^2) - \delta(m^2-n^2) = (d-\delta)(m^2-n^2) \end{array}$
(4v)
By using (1), (3i), and (4v) we also get
$$ b-x=(d-\delta)(2mn)$$
\noindent and
$$h_2 = (d-\delta)(m^2+n^2). $$
The proof is complete. \end{proof}
\begin{theorem} Let $CBA$ be a Pythagorean triangle, with the 90 degree
angle at $C$. Also, let $\left|\overline{CB}\right| = a,\
\left|\overline{CA}\right| = b$ and $\left|\overline{BA}\right| = c$, be the three sidelengths so that $a = d(m^2-n^2),\ b=d(2mn), c=d(m^2+n^2)$ where $m,n,d$ are positive integers such that $m > n,\ (m,n) =1$, and $m+n \equiv 1({\rm mod}\ 2)$.
Let $P$ be a point on the hypotenuse $\left|\overline{BA}\right|$ and strictly between the end-points $B$ and $A$.
Let $D$ and $E$ be the feet of the perpendiculars from the point $P$ to the sides $\overline{CB}$ and $\overline{CA}$ respectively.
\begin{enumerate} \item[(i)] If $d=1$, i.e., if the Pythagorean triangle $CBA$ is primitive,
then neither of the right triangles $PDB$ and $PEA$ is Pythagorean.
\item[(ii)] If $d=2$, and the point $P$ is coincident with the midpoint $M$
of the hypotenuse $\overline{BA}$, then both triangles $PDB$ and $PEA$ are
Pythagorean. Otherwise, if $P \neq M$, neither of these two triangles is
Pythagorean.
\item[(iii)] If $d=3$, and the point $P$ is such that
$\dfrac{\left|\overline{PB}\right|}{\left|\overline{PA}\right|} = \dfrac{1}{3}$ or
$\dfrac{2}{3}$, then both triangles $PDB$ and $PEA$ are Pythagorean.
Otherwise, if
$\dfrac{\left|\overline{PB}\right|}{\left|\overline{PA}\right|} \neq
\dfrac{1}{3},\ \dfrac{2}{3}$, then neither of these triangles are
Pythagorean. \end{enumerate} \end{theorem}
\begin{proof}
\begin{enumerate}\item[(i)] If $d=1$, then neither of the two
right triangles, $BDP$ and $PEA$ can be Pythagorean since according to
Theorem 1, the natural number $\delta$ must
satisfy $1 \leq \delta \leq d-1$, which is impossible when $d=1$.
\item[(ii)] Suppose that $d=2$.
If the point $P$ coincides with the midpoint
$M$ of the hypotenuse $\overline{BA}$, then each of the triangles $BDP$ and
$PEA$ is half the size of the triangle $CBA$. So, by inspection,
$$ \begin{array}{rcl}
\left|\overline{BD}\right| = \left|\overline{PE}\right| & = & \dfrac{a}{2} = \dfrac{2(m^2-n^2)}{2} = m^2-n^2 \\ \\
\left|\overline{DP}\right| = \left|\overline{EA}\right| & = & \dfrac{b}{2} = \dfrac{2(2mn)}{2} = 2mn\\ \\
\left|\overline{BP}\right| = \left|\overline{PA}\right| & = & \dfrac{c}{2} = \dfrac{2(m^2+n^2)}{2} = m^2+n^2, \end{array} $$
\noindent which proves that both triangles $BDP$ and $PEA$ are (in fact primitive) Pythagorean triangles. Conversely, if both triangles are Pythagorean, then by Theorem 1, it follows that $1 \leq \delta \leq d-1=2-1=1,$ \linebreak $ 1 \leq \delta \leq 1,\ \delta = 1$ which establishes that each of the triangles is half the size of triangle of $CBA$; which implies that $P$ is the midpoint of $\overline{BA}$.
\item[(iii)] Assume that $d=3$.
Suppose $\dfrac{\left|\overline{PB}\right|}{\left|\overline{PA}\right|} = \dfrac{1}{3}$ or $\dfrac{2}{3}$. If
$\dfrac{\left|\overline{PB}\right|}{\left|\overline{PA}\right|} = \dfrac{1}{3}$, then the triangle $BDP$ is $\dfrac{1}{3}$ the size of triangle $CBA$ and the triangle $PEA$ is $\dfrac{2}{3}$ the size of $CBA$. We have,
$$
\begin{array}{rcll} \left|\overline{BD}\right| & = & \dfrac{a}{3} =
\dfrac{3(m^2-n^2)}{3} = m^2-n^2, & \left|\overline{DP}\right| = \dfrac{b}{3}
= \dfrac{3(2mn)}{3} = 2mn,\\ \\
\left|\overline{PB}\right| & = & \dfrac{c}{3} = \dfrac{3(m^2 + n^2)}{3} =
m^2+n^2 & \end{array} $$
\noindent and $\left|\overline{PE}\right| = \dfrac{2a}{3} = 2(m^2-n^2),\
\left|\overline{EA}\right| = \dfrac{2b}{3} = 2(2mn),$ \linebreak $
\left|\overline{PA}\right| = \dfrac{2c}{3}=2(m^2+n^2)$. It is clear that both triangles $BDP$ and $PEA$ are Pythagorean.
The argument for the case
$\dfrac{\left|\overline{PB}\right|}{\left|\overline{PA}\right|} = \dfrac{2}{3}$ is similar (we omit the details).
Now, the converse. Assume that both triangles, $BDP$ and $PEA$, are Pythagorean. Then by Theorem 1 we must have,
$$ 1 \leq \delta \leq d-1 = 3-1 = 2;\ \ \delta = 1\ {\rm or}\ 2. $$
Using the formulas for the sidelengths (of triangles $BDP$ and $PEA$), found in Theorem 1 we easily see that $\dfrac{\left|\overline{PB}\right|}{\left|\overline{PA}\right|} = \dfrac{1}{3}$ if $\delta = 1$, while $\dfrac{\left|\overline{PB}\right|}{\left|\overline{PA}\right|} = \dfrac{2}{3}$ if $\delta = 2$. The proof is complete. \end{enumerate} \end{proof}
\section{ Three special cases}
\begin{enumerate} \item[A.] {\bf Case 1: When the point $P$ is the midpoint $M$ of the
hypotenuse $\overline{BA}$}
By inspection, it is clear that all six right triangles $BDP,\ PEA,\ CDP$, $EPD,\ DCE$, and $PEC$ are all congruent and each of them is half the size of triangle $BCA$. Clearly then, by (\ref{E1}), these six triangles will be Pythagorean if and only if the integer $d$ in (\ref{E1}) is even.
\begin{theorem} Let $BCA$ be a Pythagorean triangle with the 90 degree angle
at $C$ and $\left|\overline{CB}\right| = a = d(m^2-n^2),\
\left|\overline{CA} \right| = b = d(2mn),$ \linebreak
$ \left|\overline{BA}\right| = c = d(m^2+n^2)$, where $d,m,n$ are positive
integers such that $m > n,\ (m,n)=1$ and $m+n\equiv 1({\rm mod}\ 2)$.
Let $M$ be the midpoint of the hypotenuse $\overline{BA}$ and $D, E$ the feet of the perpendiculars from $M$ to the sides $\overline{CB}$ and $\overline{CA}$ respectively (so $D$ and $E$ are the midpoints of $\overline{CB}$ and $\overline{CA}$). Then, the six right triangles, $BDM,\ MEA,\ CDM,\ EMD,\ DCE$, and $MEC$ are congruent and have sidelengths as follows:
\begin{tabular}{lll} length of horizontal side & $= \dfrac{d}{2}(2mn)= dmn$\\ \\ length of vertical side & $= \dfrac{d(m^2-n^2)}{2}$\\ \\ length of hypotenuse & $= \dfrac{d(m^2+n^2)}{2}$ \end{tabular}
If $d$ is an even natural number, then the above six triangles are Pythagorean; otherwise, if $d$ is odd, they are non-Pythagorean. \end{theorem}
\item[B.] {\bf Case 2: When the point $P$ is the foot $I$ of the angle
bisector of the 90$^{\circ}$ angle at $C$}
\parbox[b]{2.0in}{Using the notation of Theorem 1, we have
$\left|\overline{BD}\right| = a-y,\left|\overline{DI}\right| = x,\\ \\
\left|\overline{BP}\right| = h_1,\ \left|\overline{EA}\right| = b-x,\\ \\
\left|\overline{EI}\right| = y,\ {\rm and}\ \left|\overline{IA}\right| =
h_2.$}\hspace{1.25in} \epsfig{file=fig2.eps,width=1.5in}
Clearly, we have $x=y$ in this case. Note that the four
congruent isosceles right triangles $DCI,\ IEC,\ DCE,\ DIE$ cannot be
Pythagorean (no Pythagorean triangle is isosceles).
By Theorem 1, the two right triangles $BDI$ and $IEA$ are either both Pythagorean or neither of them are. If they are both Pythagorean, then by Theorem 1 we have, in particular, $x=\delta(2mn)$ and \linebreak
$ y=(d-\delta)(m^2-n^2)$ with $m,n,d,\delta$ being positive integers such that $m>n,\ (m,n)=1,\ m+n\equiv1({\rm mod}\ 2)$ and $1 \leq \delta \leq d-1$ (and so $d \geq 2$).
Since $x=y$, we must have
\begin{equation} \delta (2mn) = (d-\delta)(m^2-n^2) \label{E5} \end{equation}
By Lemma 2(iii), we know that $(m^2-n^2, 2mn) = 1$. So, by Lemma 1 and
(\ref{E5}) it follows that $2mn|d-\delta$ and $m^2-n^2 | \delta$ which, in turn, leads to (when we go back to (\ref{E5}))
\hspace{1.0in} $\left\{ \begin{array}{l} \delta = K\cdot (m^2-n^2)\\ \\
d-\delta = K \cdot (2mn),\\ \\ {\rm for\ some\ positive\ integer}\ K.\\ \\ {\rm Hence}\ d=K\cdot (m^2-n^2 + 2mn) \end{array} \right\}$
(5i)
\noindent Note that clearly, from (5i), $1 \leq \delta \leq d-1$. In fact, the smallest possible value of $d$ is $7$; obtained for $K=1$ and $m=2,n=1$. Moreover, $1\leq \delta \leq d-4$ since the smallest possible value of $K \cdot (2mn)$ is $4$.
Using (5i) and Theorem 1, one can compute, in terms of $m,n$, and $K$, the other four sidelengths of the triangles $BDI$ and $IEA$. Also, by (5i) we get $x=\delta(2mn) = K(2mn)(m^2-n^2) = y$. We now state the following theorem.
\begin{theorem} Let $CBA$ be a Pythagorean triangle with the $90^{\circ}$
angle at $C$ and sidelengths given by
$$
\left|\overline{CB}\right| = a = d(m^2-n^2),\ \left|\overline{CA}\right| = b =
d(2mn),\ \left|\overline{BA}\right| = c= d(m^2+n^2), $$
\noindent where $d,m,n$ are positive integers such that, $m > n,\ m+n \equiv 1({\rm mod}\ 2)$, and $(m,n) = 1$. Let $I$ be the foot of the perpendicular of the angle bisector (of the $90^{\circ}$ angle at $C$) to the hypotenuse $\overline{BA}$.
Also, let $D$ and $E$ be the feet of the perpendiculars from the point $I$ to the sides $\overline{CB}$ and $\overline{CA}$ respectively. Then, the two right triangles $BDI$ and $IEA$ are both Pythagorean precisely when (i.e., if and only if), $d=K \cdot (m^2-n^2 + 2mn)$ for some integer $K$. If $d= K\cdot
(m^2-n^2 + 2mn)$, then the sidelengths of triangle $BDI$ are given by $\left|\overline{DI}\right| = x = K \cdot (2mn)(m^2-n^2) ,\ \
h_1 = \left|\overline{BI}\right| = K(m^2-n^2)(m^2+n^2) = K(m^4-n^4)$,
and $\left| \overline{BD}\right| = a-y = K \cdot (m^2-n^2)^2$ and the sidelengths of triangle $IEA$ are given by
$\begin{array}{l} \left|\overline{IE}\right| = y =K(2mn)(m^2-n^2),\\ \\
\left|\overline{EA}\right| = b-x = K(2mn)(2mn) = K \cdot (2mn)^2, \end{array} $
and $h_2 = \left|\overline{IA}\right| = K \cdot (2mn)(m^2+n^2)$.
If the integer $d$ is not divisible by $m^2-n^2 + 2mn$, then neither of the triangles, $BDI$ and $IEA$, is Pythagorean. \end{theorem}
\item[C.] {\bf Case 3: When the point $P$ is the foot $F$ of the
perpendicular from the vertex $C$ to the hypotenuse $\overline{BA}$}
\parbox[b]{2.0in}{In this part, instead of using Theorem 1, we will first
compute the sidelengths of the triangles $BDF,\ FEA$, and the four congruent
triangles $FDC$, $DFE,\ DCE$, and $CFE$ in terms of (the sidelengths) $a,b,c$. After that we will implement the formulas in (\ref{E1}) in order to express the above sidelengths in terms of the integers $d,m,n$.} \hspace{1.0in} \epsfig{file=fig3.eps,width=1.5in}
After that we will implement Lemma 2 to be able to draw the conclusions which will lead to Theorem 5. Note that since $F$ is the foot of the perpendicular from $C$ to the hypotenuse $\overline{BA}$, the aforementioned six right triangles are all similar to the triangle $CBA$. Let $\omega$ and $\varphi$ be the degree measures of the angles $\angle CBA$ and $\angle CAB$ respectively (see Figure 3).
We have (and we set)
\begin{equation} \left\{ \begin{array}{l}
\left|\overline{CB}\right| = a,\ \left|\overline{CA}\right| = b,\
\left|\overline{BA}\right| = c\\ \\
\left|\overline{DF}\right| = \left|\overline{CE} \right| = x,\
\left|\overline{EA} \right| = b-x \\ \\
\left|\overline{DC}\right| = \left|\overline{FE}\right| = y,\
\left|\overline{BD}\right| = a-y\\ \\
\left|\overline{BF} \right| = h_1,\ \left|\overline{FA} \right|=h_2,\ \left|
\overline{CF}\right| = \left|\overline{DE} \right|=h\end{array} \right\} \label{E6} \end{equation}
Furthermore,
$$ \sin \omega = \dfrac{y}{h} = \cos\varphi = \dfrac{b}{c} \ {\rm and}\ \cos \omega = \dfrac{h}{b} = \dfrac{a}{c}; $$
\noindent and thus $h= \dfrac{ab}{c}$, which implies $y = h \cdot \cos \varphi = h \cdot \dfrac{b}{c} = \dfrac{ab}{c} \cdot \dfrac{b}{c} = \dfrac{ab^2}{c^2}$. So, $a-y = a-\dfrac{ab^2}{c^2} = \dfrac{a(c^2-b^2)}{c^2} = ({\rm since}\ c^2=a^2+b^2) \dfrac{a\cdot a^2}{c^2};\ a-y = \dfrac{a^3}{c^2}$
Next we calculate the lengths $x$ and $b-x$. We have $\tan \omega = \cot \varphi = \dfrac{y}{x}$; $\cot \omega = \tan \varphi = \dfrac{x}{y}$, and $\tan \varphi = \dfrac{a}{b}$ which gives $\dfrac{x}{y} = \dfrac{a}{b}, x = \dfrac{a}{b} \cdot y$. Since $y = \dfrac{ab^2}{c^2}$ (see above), we obtain $x = \dfrac{a}{b} \cdot \dfrac{ab^2}{c^2} = \dfrac{ba^2}{c^2}$. From this we get $b-x = b- \dfrac{ba^2}{c^2} = \dfrac{b(c^2-a^2)}{c^2} = \dfrac{b\cdot b^2}{c^2}= \dfrac{b^3}{c^2}$, since $c^2-a^2=b^2$.
Also, $\sin \omega = \dfrac{x}{h_1}; \ h_1 = \dfrac{1}{\sin \omega} \cdot x = \dfrac{c}{b} \cdot \dfrac{ba^2}{c^2} = \dfrac{a^2}{c}$. Similarly, we have $\sin \varphi = \dfrac{y}{h_2};\ h_2 = \dfrac{1}{\sin \varphi} \cdot y = \dfrac{c}{a} \cdot \dfrac{ab^2}{c^2} = \dfrac{b^2}{c}$. We summarize these lengths as follows:
\noindent {\it Sidelengths of triangle $BDF$}
\noindent $\left( \left|\overline{BD}\right| = a-y = \dfrac{a^3}{c^2},\ \left|
\overline{DF}\right| = x = \dfrac{ba^2}{c^2},\ \left| \overline{BF}\right| = h_1 = \dfrac{a^2}{c} \right)$
(6i)
\noindent{\it Sidelengths of triangle $FEA$}
\noindent$\left( \left|\overline{FE}\right| = y = \dfrac{ab^2}{c^2},\ \left|
\overline{EA}\right| = b-x = \dfrac{b^3}{c^2},\ \left| \overline{FA}\right| = h_2 = \dfrac{b^2}{c} \right)$
(6ii)
\noindent {\it Sidelengths of the four congruent triangles $FDC,\ DFE,\ DCE,\
CFE$}
\noindent $\begin{array}{rcl}\left( \left|\overline{DC}\right|\right. & = &
\left|\overline{FE}\right| = y = \dfrac{ab^2}{c^2},\ \left|\overline{DF}\right| = \left|\overline{CE}\right|\\ \\
& =& \left. x = \dfrac{ba^2}{c^2},\ \left|\overline{CF}\right| =
\left|\overline{DE}\right| = \dfrac{ab}{c} = h\right)\end{array}$
(6iii)
Next, we combine the length formulas in (6i), (6ii), and (6iii) with the formulas in (\ref{E1}), since $CBA$ is a Pythagorean triangle, to obtain the following.
\setcounter{equation}{6}
\begin{equation} \left\{ \begin{array}{rcl} a-y & = & \dfrac{d\cdot
(m^2-n^2)^3}{(m^2+n^2)^2},\ x = \dfrac{d\cdot (m^2-n^2)^2
\cdot(2mn)}{(m^2+n^2)^2} \\ \\ y & = & \dfrac{d\cdot (m^2-n^2) \cdot (2mn)^2}{(m^2+n^2)^2},\ b-x= \dfrac{d
\cdot (2mn)^3}{(m^2+n^2)^2} \\ \\ h_1 & = & \dfrac{d\cdot (m^2-n^2)^2}{m^2+n^2},\ h_2 = \dfrac{d\cdot
(2mn)^2}{m^2+n^2}\\ \\ h & =& \dfrac{d\cdot (2mn)\cdot (m^2-n^2)}{m^2+n^2} \end{array}\right\}\label{E7} \end{equation}
The following lemma from number theory is well-known and comes in handy.
\begin{lemma} Let $i_1,\ i_2,\ i_3,\ e_1,\ e_2, \ e_3$ be positive integers
such that $(i_1,i_2) = 1 = (i_1, i_3)$. Then,
\begin{enumerate} \item[(a)] $\left( i^{e_1}_1,\ i^{e_2}_2\right) = 1$
\item[(b)] $\left( i^{e_1}_1,\ i^{e_2}_2 \cdot i^{e_3}_3 \right) = 1$ \end{enumerate} \end{lemma}
It follows from Lemmas 2 and 3 that
\hspace*{-.5in}\begin{equation} \left\{ \begin{array}{l} \left(\left( m^2+n^2\right)^2, \ \left(m^2-n^2 \right)^3\right)=1,\\ \\ \left(\left( m^2+n^2 \right)^2, \ \left(2mn\right)^2\right) = 1\\ \\ \left( m^2+n^2,\ \left(m^2- n^2\right)^2\right) = 1,\\ \\
\left(m^2+n^2,\ \left(2mn\right)^2 \right) = 1\\ \\ \left(m^2+n^2,\ \left(2mn\right) \cdot \left(m^2-n^2\right)\right) = 1 \\ \\ \left(\left(m^2+n^2\right)^2,\ \left(m^2-n^2\right)^2 \cdot \left(2mn\right) \right) = 1\\ \\ \left(\left(m^2+n^2\right)^2,\ \left(m^2-n^2\right) \cdot \left(2mn\right)^2 \right) = 1 \end{array} \right\} \label{E8} \end{equation}
A careful look at formulas (\ref{E7}) and the coprimeness conditions in (\ref{E8}), in conjunction with Lemma 1, reveals that either all six triangles, $BDF,\ FEA,\ FDC,\ DFE,\ DCE$, and $CFE$ are Pythagorean; or none of them are.
They are all Pythagorean precisely when (i.e., if and only if) the integer $d$ is divisible by $(m^2+n^2)^2$, i.e., when
\begin{equation} \left\{ \begin{array}{l} d= K \cdot (m^2+n^2)^2 \\ \\ {\rm for\ some\ positive\ integer}\ K\end{array}\right\} \label{E9} \end{equation}
This is precisely when all seven numbers $y,\ a-y,\ x,\ b-x,\ h_1,\ h_2$, and $h$ are integers. When (\ref{E9}) holds true, we can compute, via (\ref{E7})
and (6i), (6ii), and (6iii) all the sidelengths in terms of the integers
$m,n$, and $K$.
We have the following theorem.
\begin{theorem} Let $CBA$ be a Pythagorean triangle, with the $90$-degree
angle at $C$. With $\left|\overline{CB}\right| = a= d(m^2-n^2),\
\left|\overline{CA}\right| = b = d(2mn),\ \left|\overline{BA}\right| =
d(m^2+n^2) = c$, where $d,m,n$ are positive integers such that $m > n,\
(m,n)=1$, and $m+n \equiv 1\ ({\rm mod}\ 2)$.
Also, let $F$ be the foot of the perpendicular from the vertex $C$ to the hypotenuse $\overline{BA}$. Then, the six similar triangles $BDF,\ FEA$, (and the four congruent ones) $FDC,\ DFE,\ DCE,\ CFE$ are either all Pythagorean or none of them are. They are all Pythagorean precisely when (i.e., if and only if) $d=K\cdot (m^2+n^2)^2$, for some positive integer $K$. When $d$ satisfies the said condition, the sidelengths of the above six triangles are given by the following formulas.
\noindent For triangle $BDF$
\noindent $\left| \overline{BD}\right| = a-y = K \cdot \left(m^2-n^2\right)^3,\
\left|\overline{DF} \right| = x = K \cdot \left( m^2 - n^2\right)^2 \cdot (2mn), \ {\rm and}\ h_1 = K \cdot \left(m^2+n^2\right) \cdot \left(m^2-n^2\right)^2$
\noindent For triangle $FEA$:
$\left| \overline{FE}\right| = y = K \cdot \left(m^2-n^2\right) \cdot (2mn)^2,\
\left|\overline{EA} \right| = b -x = K\cdot (2mn)^3$, and $h_2 = K \cdot \left(m^2+n^2\right) \cdot (2mn)^2$.
\noindent For the four congruent triangles $FDC,\ DFE,\ DCE, CFE$:
$\left|\overline{DC} \right| = \left|\overline{FE}\right| = y = K \cdot \left(m^2-n^2\right) \cdot \left(2mn\right)^2$,
$\left|\overline{DF} \right| = \left| \overline{CE} \right| = x = K \cdot \left( 2mn\right) \cdot \left(m^2 - n^2\right)^2,$
\noindent and
\noindent $h= \left|\overline{CF}\right| = \left| \overline{DE}\right| = K\cdot (2mn) \cdot \left(m^2-n^2\right) \cdot \left(m^2 + n^2\right) = K \cdot (2mn)\left(m^4-n^4\right)$. \end{theorem}
\noindent {\bf Numerical Examples}
If we take $K=1$ and $mn \leq 4$, then either $m=2,\ n=1$; or $m = 4,\ n=1$.
\begin{enumerate} \item[(a)] $K=1,\ m=2,\ n=1$. We obtain the following:
$\begin{array}{l} d=1 \cdot \left(2^2+1^2\right)^2 = 5^2 = 25, \ h=60,\ h_1 = 45,\
h_2 = 80,\\ \\ y= 48,\ a-y = 75-48 = 27,\ x = 36,\ b-x = 100 - 36 = 64,\\ \\ a = 75,\ b = 100,\ c = 125 \end{array} $
\item[(b)] $K = 1, \ m=4,\ n=1$. We have the following:
$\begin{array}{l} d=289,\ a-y = 3375,\ x=1800,\ h_1 = 3825,\\ \\ y = 960,\ b-x = 512,\ h_2 = 1088,\ h = 2040\\ \\ a = 4335,\ b= 2312,\ c = 4913 \end{array} $ \end{enumerate}
\section{Exactly $(d-1)$ positions of $P$}
Given a Pythagorean triangle $CBA$, as in Figure 1, and with the point $P$ on the hypotenuse $\overline{BA}$, and $D$ and $E$ being the perpendicular projections of $P$ on the sides $\overline{CB}$ and $\overline{CA}$ respectively. We know from Theorem 1 that either both triangles $BDP$ and $PEA$ are Pythagorean, or neither of them are. The integer $\delta$, as described in Theorem 1 must satisfy $1 \leq \delta \leq d-1$; which means that $d\geq 2$ is a necessary condition. There are $(d-1)$ choices for $\delta$. If we subdivide the hypotenuse $\overline{BA}$ into $d$ equal length segments, each segment having length $m^2+n^2$, it is easily seen that for each such position of the point $P$ both triangles $BDP$ and $PEA$ are Pythagorean. There are exactly $(d-1)$ such positions for the point $P$ along the hypotenuse $\overline{BA}$. These are the points $P_1, \ldots , P_{d-1}$; so that each of the consecutive line segments $\overline{BP}_1,\overline{P_1P_2} , \ldots , \overline{P_{d-1}A}$ (exactly $d$ line segments) has length $m^2+n^2$.
We now state the following theorem.
\begin{theorem} Let $CBA$ be a Pythagorean triangle with the $90^{\circ}$ angle at the vertex
$C$. With sidelengths given by $\left| \overline{CB}\right| = a =
d(m^2-n^2)$, \linebreak $\left|\overline{CA}\right| = b = d(2mn),\
\left|\overline{BA}\right| = d(m^2+n^2)$, where $d,m,n$ are positive integers such that $d\geq 2,\ m > n,\ (m,n)=1$, and $m+n \equiv 1\ ({\rm mod}\ 2)$. Also, let $P_1,\ldots, P_{d-1}$ be the $(d-1)$ points on the hypotenuse $\overline{BA}$ such that the $d$ consecutive line segments $\overline{BP}_1,\ \overline{P_1P_2}, \ldots , \overline{P_{d-1}A}$ have equal lengths; each having length $m^2 + n^2$. Then there are exactly $(d-1)$ points $P$ on the hypotenuse $\overline{BA}$ such that both triangles $BDP$ and $PEA$ are Pythagorean, where $D$ and $E$ are the feet of the perpendiculars from $P$ to the sides $\overline{CB}$ and $\overline{CA}$ respectively. These $(d-1)$
points are precisely the points $P_1, \ldots ,P_{d-1}$ described above. Furthermore, each pair of Pythagorean triangles $BD_iP_i$ and $P_iE_iA$ have sidelengths given by $\left|\overline{BD}_i\right| = i \cdot
\left(m^2-n^2\right),\ \left|\overline{D_iP_i}\right| = i(2mn),\
\left|\overline{BP_i}\right| = i\left(m^2+n^2\right),\ \left|
\overline{P_iE_i}\right| = (d-i)\left(m^2-n^2\right),\ \left|\overline{E_iA}
\right| = (d-i)(2mn),\ \left|\overline{P_iA}\right| = (d-i)\left(m^2+n^2\right)$, for $i=1,\ldots , d-1$; and where $D_i$ and $E_i$ are the perpendicular projections of the point $P_i$ onto the sides $\overline{CB}$ and $\overline{CA}$ respectively. \end{theorem}
\section{Other cases}
In this section, we explore the following question. If in addition to the two triangles in Figure 1, $BDP$ and $PEA$ being Pythagorean, we require that the four congruent triangles $DCE,\ PEC,\ CDP,\ EPD$, also be Pythagorean. What are the necessary and sufficient conditions for this to occur?
For these four congruent triangles to be Pythagorean, the integers\linebreak $x=
\left|\overline{DP}\right| = \left|\overline{CE}\right|$ and $y=
\left|\overline{DC}\right| = \left|\overline{PE}\right|$ must satisfy the condition,
$$ x^2+y^2 = \ {\rm perfect\ square}. $$
\noindent Combining this with Theorem 6 leads to the following theorem.
\begin{theorem} Let $CBA$ be a Pythagorean triangle with the $90$ degree angle at the vertex
$C$; and with sidelengths, $a=\left|\overline{CB}\right| =
d\left(m^2-n^2\right)$, $b = \left|\overline{CA}\right| = d(2mn),\ c =
\left|\overline{BA} \right| = d\left(m^2+n^2\right)$ where $d,m,n$ are positive integers such that $d\geq 2,\ m>n,\ (m,n)=1$, and $m+n \equiv 1({\rm
mod}\ 2)$. Let $P$ be a point on the hypotenuse $\overline{BA}$, and $D$ and $E$ be the feet of the perpendiculars from the point $P$ onto the sides
$\overline{CB}$ and $\overline{CA}$ respectively. Also, let $x=\left|
\overline{DP}\right| = \left|\overline{CE}\right|,\ y =
\left|\overline{DC}\right| = \left|\overline{PE}\right|$ so that
$$
a-y = \left|\overline{BD}\right|\ {\rm and}\ b-x =
\left|\overline{EA}\right|.$$
\noindent Then, the two right triangles $BDP$ and $PEA$, as well as the four congruent triangles, $DCE,\ PEC,\ CDP,\ EPD$, are all (six triangles) Pythagorean if and only if there exist positive integers $D,M,N$ such that
$$ M > N,\ (M,N)=1,\ M+N \equiv 1({\rm mod}\ 2) $$
\noindent and with either
$\left\{ \begin{array}{l} y = \delta \left(m^2-n^2\right) = D \cdot \left(
M^2-N^2\right)\\ \\ x = (d-\delta) \cdot (2mn) = D\cdot (2MN)\\ \\ \delta\ {\rm a\ positive\ integer\ such \ that\ } 1 \leq \delta \leq
d-1\end{array}\right\}$
(10i)
\noindent or
$\left\{ \begin{array}{l} y= \delta \left(m^2-n^2\right) = D \cdot (2MN)\\ \\ x = (d-\delta)(2mn) = D\cdot \left(M^2-N^2\right) \\ \\ \delta\ {\rm a \ positive\ integer\ such \ that\ } 1 \leq \delta \leq d-1\end{array}\right\}$
(10ii) \end{theorem}
The following example shows that there exist nonprimitive Pythagorean triangles such that there is no point $P$ on the hypotenuse $\overline{BA}$ such that all six triangles $BDP,\ PEA,\ DCE,\ PEC,\ CDP,\ EPD$, are Pythagorean.
\noindent{\bf Example:} Take $d=5,\ m=2,\ n=1$. Then the sidelengths of triangle $CBA$ are $a=5\cdot \left(2^2-1^2\right) = 15,\ b = 5 \cdot (2\cdot 2 \cdot 1)=20$, and $c= 5\cdot \left(2^2+1^2\right) = 25$. The possible values of the integer $\delta$ are $\delta = 1,2, \ldots , d-1 = 1,2,3,4$. Using the formulas $y=\delta \left(m^2-n^2\right)$ and $x = (d-\delta)(2mn)$ we have the following.
\begin{enumerate} \item[1.] $\delta = 1:\ y=3,\ x=(5-1)\cdot 4 = 16$
\noindent and $y^2+x^2 = 9 + 256 = 265$ not an integer square.
\item[2.] $\delta = 2,\ y=2\cdot 3=6,\ x= (5-2) \cdot 4 = 12$
\noindent and $y^2+x^2 = 36 + 144 = 180$, not a perfect square.
\item[3.] $\delta = 3,\ y= 3\cdot 3 = 9,\ x= (5-3)\cdot 4 = 8$
\noindent and $y^2+x^2 = 81 + 64 = 145$, not an integer square.\
\item[4.] $\delta = 4,\ y=4\cdot 3 = 12,\ x=(5-4)\cdot 4 =4$
\noindent and $y^2+x^2 = 144 + 16 = 160$, not a perfect square. \end{enumerate}
There are many ways in which one can use the conditions (10i) or (10ii) of Theorem 7 in order to produce families of Pythagorean triangles such that each member (of those families) has the property that there is a point $P$ on its hypotenuse such that all six triangles (as described in Theorem 7) are Pythagorean. We produce such a family.
\noindent {\bf Family 1:} Consider (10i):
\hspace{.5in} $\left.\begin{array}{rcl} y & = & \delta \left(m^2-n^2\right)
=D\left(M^2-N^2 \right)\\ \\ x & = & (d-\delta)(2mn) = D(2MN) \end{array}\right\}$
(10i)
Let $K$ be a positive integer.
Take $D= K \cdot mn\left(m^2-n^2\right)$.
From the second equation in (10i) we obtain
$$ d-\delta = K\cdot MN\left(m^2-n^2\right) $$
\noindent and from the first equation in (10i) we get
$$ \delta = Kmn\left(M^2 -N^2\right). $$
\noindent Hence, $d = \delta + KMN\left(m^2-n^2\right) = K \cdot \left[
mn\left(M^2 - N^2\right) + MN \left(m^2-n^2\right) \right]$. Obviously $1
\leq \delta \leq d - 1$ and $d\geq 2$, as required. We have the following.
\begin{center}
{\bf Family 1}
\end{center}
{\it Let $m,n,M,N$ be positive integers such that $m > n,\ (m,n)=1$, \linebreak
$m+n \equiv 1\ ({\rm mod}\ 2),\ M>N,\ (M,N) = 1,\ M+N \equiv 1({\rm mod}\ 2)$. Also, let
$K$ be a positive integer and $\delta = Kmn \left(M^2-N^2\right),\ d= K\cdot
\left[ mn\left(M^2-N^2\right) + MN \left(m^2-n^2\right)\right]$. Consider
the Pythagorean triangle $CBA$ with sidelengths $\left|\overline{CB}\right|
= a = d \left(m^2-n^2\right),$\linebreak $ \left| \overline{CA}\right| = b = d(2mn),\
\left|\overline{BA}\right| = c = d\left(m^2 + n^2\right)$. Let $P$ be the
point on the hypotenuse $\overline{BA}$ such that
$\left|\overline{BP}\right| = h_1 = \delta \left(m^2 + n^2 \right)$; and let
$D$ and $E$ be the perpendicular projections of $P$ onto the sides
$\overline{CB}$ and $\overline{CA}$ respectively. Then all six right
triangles $BDP,\ PEA,\ DCE,\ PEC,\ CDP$ and $EPD$ are Pythagorean.}
\end{document}
\begin{document}
\begin{abstract} Fix a symbol $\underline{a}$ in the mod-$\ell$ Milnor $K$-theory of a field $k$, and a norm variety $X$ for $\underline{a}$. We show that the ideal generated by $\underline{a}$ is the kernel of the $K$-theory map induced by $k\subset k(X)$ and give generators for the annihilator of the ideal. When $\ell=2$, this was done by Orlov, Vishik and Voevodsky. \end{abstract}
\title{Principal ideals in mod-$\ell$ Milnor $K$-theory}
Let $\ell$ be a prime and $k$ a field containing $1/\ell$. Given units $a_1,\ldots,a_n\in k^\times$ we can form the Steinberg symbol $\underline{a} = \{a_1,\ldots,a_n\}$ in $K^M_n(k)$; we wish to study the ideal $(\underline{a})$ generated by $\underline{a}$ in $K^M_n(k)/\ell$. What is the quotient ring $(K^M_*(k)/\ell)/(\underline{a})$, and what is the annihilator ideal $\ann(\underline{a})$, so that $(\underline{a}) \cong (K^M_*(k)/\ell)/\ann(\underline{a})$?
Here is the main result of this paper; it was proven for $\ell=2$ by Orlov, Vishik and Voevodsky in \cite[2.1]{ovv}.
\begin{thm}\label{ovv3.3} Suppose that $\mathrm{char}\,k = 0$, and let $X$ be a
norm variety for a nontrivial symbol $\underline{a}$ in $K^M_n(k)/\ell$. Then:
\begin{itemize}
\item[(a)] the kernel of $K^M_*(k)/\ell\map{}K^M_*(k(X))/\ell$ is the ideal of $K^M_*(k)/\ell$ generated by $\underline{a}$;
\item[(b)] the annihilator of $\underline{a}$ is the ideal of $K^M_*(k)/\ell$ generated by the norms \[
\{ N(\alpha) \in K^M_*(k)/\ell \,|\, \alpha \in K^M_*(k(x)),\ x \textrm{ a closed point in }X \}. \]
\end{itemize} \end{thm}
Theorem \ref{ovv3.3} uses the notion of a {\it norm variety}; see Definition \ref{def:splitnorm} below. The existence of norm varieties is due to Rost; the terminology comes from \cite{SJ} and \cite[1.18]{HW}.
\begin{exams}\label{ex:i=0} Theorem~\ref{ovv3.3}(a) implies that $K^M_i(k)/\ell \rto K^M_i(k(X))/\ell$ is an injection when $i<n$, that
the kernel of $K^M_n(k)/\ell\rto K^M_n(k(X))/\ell$ is exactly the cyclic subgroup generated by $\underline{a}$ and that the kernel of $K^M_{n+1}(k)/\ell\rto K^M_{n+1}(k(X))/\ell$ is the subgroup $\underline{a}\cup k^\times$.
The group of units $b$ in $k^\times/k^{\times\ell}$ such that $\{ a_1,\dots,a_n,b\}=0$ in $K^M_{n+1}(k)/\ell$ forms the degree~1 part of the ideal $\ann(\underline{a})$. This group, described in Theorem \ref{ovv3.3}(b), was originally described by Voevodsky. If $H_{p,q}(X)$ is the motivic homology of a norm variety $X$ for $\underline{a}$,
and $k$ has no extensions of degree $\ell$, Voevodsky proved in \cite[A.1 and 2.9]{SJ} that the pushforward $\pi_*:H_{-1,-1}(X)\rto H_{-1,-1}(\Spec k)=k^\times$ induces an exact sequence \addtocounter{equation}{-1} \begin{subequations} \renewcommand{\theequation}{\theparentequation\alph{equation}} \begin{equation} \label{eq:motiv} 1\rto\bar H_{-1,-1}(X) \map{\pi_*} k^\times \map{\underline{a}\cup} K^M_{n+1}(k)/\ell. \end{equation} Here $\bar H_{p,q}(X)$ denotes the coequalizer of the two projections
$H_{p,q}(X\times X)\rightrightarrows H_{p,q}(X)$. Thus the degree~1 part of $\ann(\underline{a})$ is $\bar H_{-1,-1}(X)$: $\{\underline{a},b\}=0$ if and only if $b\in\bar H_{-1,-1}(X)$.
When $n=1$, write $\underline{a}=(a)$ for $a\in k^\times$, and set $E=k(\root\ell\of{a})$. Then $X=\Spec(E)$ is a norm variety for $\underline{a}$. For simplicity, suppose that $k$ contains an $\ell$-th root of unity, $\zeta$.
The degree~2 part of $(\underline{a})$ is the group of symbols $a\cup b$; under the isomorphism $H_{\mathrm{et}}^2(k,\Z/\ell)\cong {}_\ell\!\Br(k)$, $a\cup b$ is identified with the class of the cyclic algebra $A_\zeta(a,b)$ in the Brauer group. Theorem \ref{ovv3.3} describes the units $b$ for which $A_\zeta(a,b)$ is
a matrix algebra, and the division algebras (or classes $[A]\in {}_\ell\!\Br(k)$) which are equivalent to cyclic algebras. In this case, Kummer theory gives the answer: the first group is the image $N(E^\times)$ of the norm map $E^\times\rto k^{\times}$, and the second group is the class of algebras split by $E$. (See \cite[6.4.8]{WK}.) In fact, we have the classical exact sequence
\begin{equation}\label{eq:n=1i=1} 1 \rto N(E^\times) \rto k^\times \map{a\cup} H_{\mathrm{et}}^2(k,\Z/\ell) \rto H_{\mathrm{et}}^2(E,\Z/\ell)^{\Gal(E/k)}. \end{equation}
When $n=1$, Theorem \ref{ovv3.3} states that for every unit $a$ not in $k^{\times\ell}$ there are exact sequences
\begin{equation}\label{eq:n=1} 1 \rto K^M_i(E)_{\Gal(E/k)} \rto^{N} K^M_i(k) \rto^{\cup a} K^M_{i+1}(k)/\ell \rto (K^M_{i+1}(E)/\ell)^{\Gal(E/k)}; \end{equation} \end{subequations} when $i=1$ this is exactly (\ref{eq:n=1i=1}). This follows from Voevodsky's Galois computations \cite[3.2 and 3.6]{HW} ({\it cf.\ } \cite[5.2 and 6.11]{mc/2}) and the fact that $\ell\cdot K^M_i(k)\subseteq N(K^M_i(E))$. \end{exams}
Theorem \ref{ovv3.3} follows from the more technical Theorem \ref{thm:2->l}. We note that the analysis in \cite{ovv} did not need to worry about roots of unity, as any field of characteristic $0$ contains the square roots of unity, and Pfister quadrics always have points of degree~2. For an odd prime $\ell$, the existence of a norm variety with points of degree $\ell$ is established in \cite[1.21]{SJ} modulo the Norm Principle, proven in \cite[0.3]{HW-Rost}; see Chapter 10 of \cite{HW}.
\begin{thm}\label{thm:2->l}
Let $\mathrm{char}\,k = 0$. Suppose that $X$ is a norm variety for a symbol
$\underline{a}$ in $K^M_n(k)/\ell$ containing a point $x$ with $[k(x):k]=\ell$. Write $q
= n+i$ and let $\widetilde{K}^M_{q}(k(X))/\ell$ denote the equalizer
of
maps $K^M_{q}(k(X))/\ell \rrto K_{q}^M(k(X\times X))/\ell$; $\mathfrak{X}$ denotes the
$0$-coskeleton of $X$.
\noindent(a) If $\mu_\ell\subset k^\times$, there is an exact sequence for all $i$: \[ \bar H_{-i,-i}(X) \map{\pi_*} K^M_i(k) \map{\underline{a}\cup} K^M_{q}(k)/\ell \map{\iota} \widetilde{K}^M_{q}(k(X))/\ell \rto H^{q+1,q-1}(\mathfrak{X},\Z/\ell). \]
\noindent(b) If $\mu_\ell\not\subset k^\times$, set $e=[k(\zeta):k]$ and $X'=X\times_{k_1}k(\zeta)$, where $k_1=k(\zeta)\cap k(X)$. If $\mathfrak{X}'$ denotes the $0$-coskeleton of $X'$ over $k(\zeta)$, then
for all $i$ there is an exact sequence:
\[ \bar H_{-i,-i}(X)[e^{-1}]\! \map{\pi_*}\! K^M_i(k)[e^{-1}]\! \map{\underline{a}\cup}\! K^M_{q}(k)/\ell \!\map{\iota}\! \widetilde{K}^M_{q}(k(X))/\ell\!
\map{}\! H^{q+1,q-1}(\mathfrak{X}',\Z/\ell)^G.
\]
The map $\iota$ is induced by the homomorphism $k \rto k(X)$, and $G = \Gal(k'/k_1)$. \end{thm}
The sequences \eqref{eq:motiv}, \eqref{eq:n=1i=1} and \eqref{eq:n=1} begin with an injection. This is often, but not always, the case.
\begin{question} In the situation of Theorem~\ref{thm:2->l}(a) with $\mu_\ell\subset k^\times$, when is $\pi_*$ an injection? \end{question}
For $i=0$, the map $\pi_*$ is an injection: $\bar{H}_{0,0}(X)=\Z$, and its image in $K_0(k)=\Z$ is $\ell\Z$. (This observation goes back to \cite[8.7.2]{MS}.) This calculation
shows that the mod-$\ell$ reduction $\bar{H}_{0,0}(X,\Z/\ell)\rto K^M_0(k)/\ell$ of $\pi_*$ is not always an injection.
The map $\pi_*$ is an injection for $i=1$ by equation (\ref{eq:motiv}), and for $n=1$
by Lemma \ref{lem:H90} below.
However, if $k$ does not contain the $\ell^{th}$ roots of unity, $\pi_*$ need not be an injection even for $i=n=1$, as the classical Hilbert Theorem 90 can fail; see Example \ref{ex:notgalois} below.
Theorem~\ref{ovv3.3}(b) could be strengthened to only look at norms of elements in $K^M_1(k)=k^\times$ if we knew that the answer to the following question was affirmative:
\begin{question}\label{q:BassTate} If $E/F$ is a Galois extension of prime degree, is $K^M_{n+1}(E)$ always generated by symbols $\{a_1,...,a_n,b\}$ with $a_i\in F^\times$ and $b\in E^\times$?
It suffices to check the case $n=1$: is $K^M_2(E)$ always generated by symbols $\{a,b\}$ with $a\in F^\times$ and $b\in E^\times$? \end{question}
If $\ell=2$, $\ell=3$ or $k$ is $\ell$-special, this is the case; $K^M_2 k(x)$ is generated by symbols $\{a,b\}$ with $a\in k^\times$ and $b\in k(x)^\times$; see \cite[Lemma 2]{M82}, \cite[p.\,388]{BT}. By Becher \cite[1.1]{Becher}, $K^M_n k(x)$ is also generated by symbols $\{\alpha,\beta\}$ with $\alpha\in K^M_{n-m}(k)$, $\beta\in K^M_m(k)$ if $\ell<2^{m+1}$.
The restriction to prime degree is necessary in Question \ref{q:BassTate}. Becher has pointed out in \cite[3.1]{Becher} that if $E=k(x,y)$ and $F=k(x^\ell,y^\ell)$ then $\{x,y\}$ cannot be written in this form, as the tame symbol $\partial_y:K^M_2(E)\rto k(x)^\times/k^{\times\ell}$ shows. In this case, $[E:F]=\ell^2$.
\begin{rem} Although most of our results work over perfect fields of arbitrary characteristic, the assumption that $k$ has characteristic 0 is needed in two places. \begin{itemize} \item[1)]To prove that norm varieties exist for symbols of length $n$. This
would go through for any perfect field of positive characteristic (by
induction on $n$) if we could prove that for symbols of length $n-1$ over $k$,
a norm variety $Y$ exists which satisfies the {\it Norm Principle} (see
\cite[0.3]{HW-Rost} or \cite[10.17]{HW}). The inductive step is given in
\cite[10.21]{HW}.
\item[2)] We also need characteristic~0 to show that the symmetric characteristic class $s_d(X)$ of a norm variety is nonzero modulo $\ell^2$. The proof in characteristic~0 is due to Rost (unpublished), and given in Proposition 10.13 of \cite{HW},
and depends upon the Connor--Floyd theory of equivariant cobordisms on complex $G$-manifolds (as given by Theorem 8.16 in {\it loc.\,cit.}) It is possible that a proof in characteristic $p>0$ could be given along the lines of \cite[5.2]{SJ}, if we assume resolution of singularities. \end{itemize} We will therefore state as many of our results in as much generality as possible, only restricting to characteristic zero when absolutely necessary. \end{rem}
\begin{rem} After writing this paper, we discovered that many of our results are in Yagita's paper \cite[Thm.\,10.3]{Yagita} and in the Merkurjev-Suslin paper \cite[2.1]{MS2}. The basic technique in these papers, and in ours, is the same: generalize the ideas in \cite{ovv}, using Rost's norm varieties for $\ell>2$. Yagita's proof is somewhat sketchy, as it predated a clear understanding of norm varieties. Merkurjev and Suslin prove Theorem \ref{ovv3.3}(b), but their formulation is different in the absence of roots of unity. Since neither of these results directly addresses the ring structure of $K^M_*(k)/\ell$, we feel that our exposition should be added to the public record. \end{rem}
\subsection*{Notation and conventions} We fix a prime $\ell$ and an $\ell$-th root of unity $\zeta$. We write $H^{p,q}(Y,\Z/\ell)$ for $H^p_{\text{nis}}(Y,\Z/\ell(q))$.
\section{Borel--Moore homology}
The first term in Theorem \ref{thm:2->l} uses the motivic homology group $H_{-i,-i}(X)$ of a smooth projective variety $X$ (with coefficients in $\Z$). However, it is more useful to think of it as the Borel--Moore homology group $H^{BM}_{-i,-i}(X)$, which is covariant for proper maps between smooth varieties, and contravariant for finite flat maps; see \cite[p.\,185]{biv} or \cite[16.13]{mvw}.
When $X$ is smooth projective, we have $H_{-i,-i}(X)=H^{BM}_{-i,-i}(X)$, and more generally $H_{p,q}(X,\Z)=H^{BM}_{p,q}(X,\Z)$, because the natural map from $M(X)=\Z_{\tr}(X)$ to $M^c(X)$ in $\mathbf{DM}$ is an isomorphism for smooth projective $X$. (Recall that the motivic homology groups $H_{p,q}(X,\Z)$ of $X$ are defined to be $\Hom_{\mathbf{DM}}(\Z(q)[p],M(X))$, while the Borel--Moore homology groups $H^{BM}_{p,q}(X,\Z)$ are defined to be $\Hom_{\mathbf{DM}}(\Z(q)[p],M^c(X))$; see \cite[p.\,185]{biv} or \cite[14.17, 16.20]{mvw}.)
We define $H^{BM}_{-i,-i}(X)$ to be $H^{BM}_{-i,-i}(X,\Z)$ if $\mathrm{char}\,k = 0$, and $H^{BM}_{-i,-i}(X,\Z[1/p])$ if $\mathrm{char}\,k = p>0$.
The case $i=1$ of the following result was proven in \cite{SJ}.
\begin{prop}\label{H-n-n} Let $X$ be a smooth variety over a perfect field $k$. If $i\geq 0$ we have an exact sequence
\[
\coprod\nolimits_{y}K^M_{i+1}(k(y)) \map{\textrm{tame}} \coprod\nolimits_{x}
K^M_i(k(x)) \map{} H^{BM}_{-i,-i}(X) \rto 0.
\] In addition, $H^{BM}_{-i,-i}(X)$ is isomorphic to $H^{2d+i,d+i}(X,\Z)$. More explicitly, $H^{BM}_{-i,-i}(X)$ is the abelian group generated by symbols $[x,\alpha]$, where $x$ is a closed point of $X$ and $\alpha\in K^M_i(k(x))$, modulo the relations \\
(i) $[x,\alpha][x,\alpha']=[x,\alpha+\alpha']$ and
\\
(ii) the image of the tame symbol $K^M_{i+1}(k(y))\rto\bigoplus
K^M_{i}(k(x))\rto H^{BM}_{-i,-i}(X)$ is zero for every codimension~1 point $y$
of $X$. \end{prop}
\begin{proof} Let $A$ denote the abelian group with generators $[x,\alpha]$ and relations (i) and (ii), described in the Proposition, and set $d=\dim(X)$.
We first show that $A$ is isomorphic to $H^d(X,\mathscr{H}^{d+i})$, where
$\mathscr{H}^q$ denotes the Zariski sheaf associated to the presheaf $H^{q,d+i}(-,\Z)$.
For each $q$, $\mathscr{H}^{q}$ is a homotopy invariant Zariski sheaf, by \cite[24.1]{mvw}. As such, it has a canonical flasque ``Gersten'' resolution on each smooth $X$ (given in \cite[24.11]{mvw}), whose $c^{th}$ term is the coproduct over codimension~$c$ points $z$ of the skyscraper sheaves $H^{q-c,d+i-c}(k(z))$, where $z$ has codimension $c$ in $X$. Taking $q=d+i$, and recalling that $K^M_i\cong H^{i,i}$ on fields, we see that the skyscraper sheaves in the $(d-1)^{st}$ and $d^{th}$ terms take values in $K^M_{i+1}(k(y))$ and $K^M_i(k(x))$. Moreover, the map $K^M_{i+1}(k(y))\rto K^M_i(k(x))$ is the tame symbol if $x\in\overline{\{ y\}}$, and zero otherwise. As $H^d(X,\mathscr{H}^{d+i})$ is obtained by taking global sections and then cohomology, it is isomorphic to $A$.
Next, we show that $A$ is isomorphic to $H^{2d+i,d+i}(X,\Z)$. To this end, consider the hypercohomology spectral sequence $E_2^{p,q}=H^{p}(X,\mathscr{H}^q) \Rightarrow H^{p+q,d+i}(X,\Z)$. Since $H^{q,d+i}=0$ for $q>d+i$, the spectral sequence is zero unless $p\le d$ and $q\le d+i$. From this we deduce that $H^{2d+i,d+i}(X,\Z) \cong H^d(X,\mathscr{H}^{d+i})\cong A$.
Finally, we show that $H^{BM}_{-i,-i}(X)$ is isomorphic to $H^{2d+i,d+i}(X,\Z)$. Suppose first that $i=0$. Then the presentation describes $CH_0(X)\cong H^{2d,d}(X,\Z)$, and by \cite{V-CH} we also have $H_{0,0}^{BM}(X)=CH_0(X)$. Thus we may assume that $i>0$.
If $\textrm{char}(k)=0$, the proof is finished by the duality calculation, which uses Motivic Duality with $d=\dim(X)$ (see \cite[16.24]{mvw} or \cite[7.1]{biv}): \[ \begin{aligned} H^{BM}_{-i,-i}(X,\Z) =& \Hom(\Z,M^c(X)(i)[i])=\Hom(\Z(d)[2d],M^c(X)(d+i)[2d+i]) \\
=& \Hom_{\mathbf{DM}}(M(X),\Z(d+i)[2d+i])=H^{2d+i,d+i}(X,\Z). \end{aligned} \]
Now suppose that $k$ is a perfect field of $\textrm{char}(k)=p>0$. As we show below in Lemma \ref{K2:p-divisible}, $K^M_i(k(x))$ and $K_{i+1}^M(k(y))$ are uniquely $p$-divisible for $i\ge1$ (when $x$ is closed in $X$ and $\textrm{trdeg}_k k(y)=1$). Thus $A$ must also be uniquely $p$-divisible.
Since $H^{2d+i,d+i}(X,\Z)\cong\!A$, the duality calculation above goes through with $\Z$ replaced by $\Z[1/p]$, using the characteristic $p$ version of Motivic Duality (see \cite[5.5.14]{Shanekelly}) and we have $H^{BM}_{-i,-i}(X,\Z[1/p])\cong\! H^{2d+i,d+i}(X,\Z[1/p])\cong\! H^{2d+i,d+i}(X,\Z)$. \end{proof}
\begin{lem}[Izhboldin]\label{K2:p-divisible} Let $E$ be a field of transcendence degree~$t$ over a perfect field $k$ of characteristic~$p$. Then $K^M_{m}(E)$ is uniquely $p$-divisible for $m>t$. \end{lem}
\begin{proof} For any field $E$ of characteristic~$p$, the group $K^M_m(E)$ has no $p$-torsion by Izhboldin's Theorem (\cite[III.7.8]{WK}), and the $d\log$ map $K^M_m(E)/p\rto\Omega^m_E$ is an injection with image $\nu(m)$; see \cite[III.7.7.2]{WK}. Since $k$ is perfect, $\Omega^1_k=0$ and $\Omega^1_E$ is $t$--dimensional, so if $m>t$ then $\Omega^m_E=0$ and hence $K^M_m(E)/p=0$. \end{proof}
\begin{exam}\label{ex:N=norm} (i) $H_{-i,-i}(\Spec E)=K^M_i(E)$ for every field $E$ over $k$, as is evident from the presentation in Lemma \ref{H-n-n}. \\ (ii) If $E$ is a finite extension of $k$, the proper pushforward from $K^M_i(E)=H_{-i,-i}(\Spec E)$ to $K^M_i(k)=H_{-i,-i}(\Spec k)$ is just the norm map $N_{E/k}$; see \cite[III.7.5.3]{WK}. \\ (iii) If $\pi:X\rto\Spec(k)$ is proper, and $x\in X$ is closed, the restriction of the pushforward \[ \pi_*:H_{-i,-i}(X)\rto H_{-i,-i}(\Spec k)=K^M_i(k) \] to $K^M_i(k(x))$ sends $[x,\alpha]$ to the norm $N_{k(x)/k}(\alpha)$. This follows from (ii) by functoriality of $H_{-i,-i}$ for the composite $\Spec k(x)\rto X \rto \Spec k$, $x\in X$ closed. From the presentation in Lemma \ref{H-n-n}, the map $N_{X/k}$ is completely determined by the formula $\pi_*[x,\alpha]=N_{k(x)/k}(\alpha)$.
In particular, the image of $\pi_*$ is the subgroup of $K^M_i(k)$ generated by the norms $N_{k(x)/k}(\alpha)$ of $\alpha\in k(x)^\times$ as $x$ ranges over the closed points of $X$. \end{exam}
\begin{lem}\label{lem:H90} Suppose that $\mu_\ell\subset k$ and $a\in k^\times$, and set $E=k(\root\ell\of{a})$, $X=\Spec(E)$. Then $\bar H_{-i,-i}(X)\cong K^M_i(E)_{\Gal(E/k)}$, and $\bar H_{-i,-i}(X)\rto K^M_i(k)$ is an injection. \end{lem}
\begin{proof} Note that $E/k$ is Galois with group $G$, so $X\times X\cong\prod_GX$ and $\bar H_{-i,-i}(X)\cong (K^M_iE)_G$ by Example \ref{ex:N=norm}(i). In this case, $(K^M_iE)_G$ is a subgroup of $K^M_i(k)$ by \eqref{eq:n=1}.
\end{proof}
\begin{exam}\label{ex:notgalois} If $E/k$ is not Galois, $\bar H_{-i,-i}(\Spec(E))\rto K^M_i(k)$ need not be an injection, even for $n=1$. One way to think of this is to realize that the classical Hilbert 90 asserts exactness of $(E\otimes E)^\times \rightrightarrows E^\times \rto k^\times$, and Hilbert 90 requires $E/k$ to be Galois. A concrete example is given by $\ell=3$, $k=\Q$, and $E=\Q(\root3\of2)$. In this case, $\Spec(E)\times\Spec(E)\cong \Spec(E\times F)$, where $F=E(\root3\of1)$, and the coequalizer $\bar H_{-1,-1}(\Spec(E))$ of $(E\times F)^\times\rightrightarrows E^\times$ does not inject into $\Q^\times$. This shows that $\pi_*$ in Theorem~\ref{thm:2->l}(a) is not always an injection. \end{exam}
\section{Norm varieties} \label{sec:prelim}
Let $\underline{a}=(a_1,\dotsc,a_n)$ be a sequence of units in a field $k$ of characteristic not equal to $\ell$.
\begin{defn} \label{def:splitnorm} A field $F$ over $k$ is said to be a {\it splitting field} for $\underline{a}$ if $\underline{a}$ vanishes in $K_n^M(F)/\ell$. We say that a variety $X$ is a {\it splitting variety} for $\underline{a}$ if $k(X)$ is a splitting field for $\underline{a}$, i.e., if $\underline{a}$ vanishes in $K_n^M(k(X))/\ell$.
Let $X$ be a splitting variety for $\underline{a}$. We say that $X$ is an {\it $\ell$-generic} splitting variety for $\underline{a}$ if any splitting field $F$ has a finite extension $E$ of degree prime to $\ell$ with $X(E)\ne\emptyset$.
A {\it norm variety} for $\underline{a}$ is a smooth projective variety $X$ of dimension $d=\ell^{n-1}-1$ which is an $\ell$-generic splitting variety for $\underline{a}$. When $\textrm{char}(k)=0$, a norm variety for $\underline{a}$ always exists (see \cite[10.16]{HW}). \end{defn}
For example, $E=k(\root\ell\of{a_1})$ is a splitting field for $\underline{a}=(a_1,\dotsc,a_n)$. Since a norm variety $X$ is $\ell$-generic, there is a finite field extension $E'/E$ of degree prime to $\ell$ and an $E'$-point of $X$. The following result, due to Rost, is proven in Chapter 10 of \cite{HW}.
\begin{thm} If $\underline{a}$ is a nonzero symbol over $k$ and $\textrm{char}(k)=0$, then there exists a norm variety $X$ for $\underline{a}$ having a closed point $x$ with $[k(x):k]=\ell$. \end{thm}
We will frequently use the following fact, proven in \cite[1.21]{SJ} (see \cite[10.13]{HW}): if $k$ has characteristic~0 and $n\ge2$, the symmetric characteristic class $s_d(X)$ of a norm variety $X$ is nonzero modulo $\ell^2$ (i.e., $X$ is a {\it $\nu_{n-1}$-variety}).
\begin{defn}
Given a norm variety $X$, let $\mathfrak{X}$ denote its 0-coskeleton, i.e., the
simplicial scheme $p\mapsto X^{p+1}$ with the projections $X^{p+1}\rto X^p$ as
face maps and the diagonal inclusions as degeneracies. \end{defn}
For simplicity, we write $\mathbb{L}$ for $\Z_{(\ell)}(1)[2]$
and $R_{\tr}(\mathfrak{X})$ for $\Z_{(\ell)\;\mathrm{tr}}(\mathfrak{X})$, and regard $X$ as a Chow motive. Recall \cite[20.1]{mvw} that Chow motives form a full subcategory of $\mathbf{DM}$.
\begin{thm}\label{thm:M} Let $X$ be a norm variety for $\underline{a}$ such that $s_d(X)$ is nonzero modulo $\ell^2$. Then there is a Chow motive $M=(X,e)$ with coefficients $\Z_{(\ell)}$, such that \begin{enumerate} \item[(i)]\enspace $M=(X,e)$ is a symmetric Chow motive, i.e., $(X,e)=(X,e^t)$; \item[(ii)]\enspace The projection $X\rto \Z_{(\ell)}$ factors as $X\rto(X,e)\rto \Z_{(\ell)}$, i.e., is zero on $(X,1-e)$;
\item[(iii)]\enspace There is a motive $D$ related to the structure map $y:M\rto R_{\tr}(\mathfrak{X})$ and its twisted dual $y^D$ by two distinguished triangles in $\mathbf{DM}$, where $b=d/(\ell-1)$: \addtocounter{equation}{-1} \begin{subequations}
\begin{align} D\otimes\mathbb{L}^b\ \rto\ &M\,\map{y}\,R_{\tr}(\mathfrak{X})\map{s} D\otimes\mathbb{L}^b[1]\ , \label{3.4.1}\\ R_{\tr}(\mathfrak{X})\otimes\mathbb{L}^d \map{y^D} &M\map{u} D\map{r} R_{\tr}(\mathfrak{X})\otimes\mathbb{L}^d[1]. \label{3.4.2} \end{align} \end{subequations} \end{enumerate} \end{thm}
\begin{proof} This is proven carefully in \cite[Ch.\,5]{HW}; the construction is due to Voevodsky \cite[pp.\,422--428]{mc/l} and appears in Section\,1 of \cite{W-Top}. Specifically, $\underline{a}$ determines a motive $A$ by (5.1), Definition 5.5 and 5.13.1 of \cite{HW}; by definition, $M=S^{\ell-1}(A)$ and $D=S^{\ell-2}(A)$. Part (i) follows from 5.19; part (ii) follows from 5.9; and part (iii) follows from 5.7 of {\it loc.\,cit.} \end{proof}
Although many of our techniques require the field $k$ to contain the $\ell$-th roots of unity, we can sometimes remove this restriction using the following observation. Given a norm variety $X$ over a field $k$, let $k_1$ denote the largest subfield of $k(\zeta)$ contained in $k(X)$. Then $X$ is also a norm variety for $\underline{a}$ over $k_1$.
\begin{lem}\label{basechange} Given a nonzero symbol $\underline{a}\in K^M_*(k)/\ell$, let $X$ be a norm variety for $\underline{a}$ over $k$. Then every component $X'$ of $X_{k(\zeta)}$ is a norm variety for $\underline{a}$ over $k(\zeta)$. \end{lem}
\begin{proof}
Clearly, $X'$ is a splitting variety for $\underline{a}$ of the right dimension. Given a
splitting field $F$ of $\underline{a}$ over $k(\zeta)$, there is a prime-to-$\ell$
extension $E$ of $F$ such that $k(\zeta)\subset E$ and such that there exists
a map $\Spec\,E\rto X$ over $k$. By basechange, there is a map
$\Spec\,E\otimes_kk(\zeta)\rto X_{k(\zeta)}$ over $k(\zeta)$. As $k(\zeta)\subset
E$, $E\otimes_kk(\zeta)$ is a $\Gal(k(\zeta)/k)$-indexed product of copies
of $E$. Since $\Gal(k(\zeta)/k)$ acts transitively on the components
of $X_{k(\zeta)}$, each component $X'$ of $X_{k(\zeta)}$ has an $E$-point.
Thus $X'$ is a norm variety over $k(\zeta)$. \end{proof}
\begin{rem}\label{rem:basechange}
$X_{k(\zeta)}$ is a $\Gal(k_1/k)$-indexed coproduct of copies of $X'=X\times_{k_1}\Spec\,k(\zeta)$. \end{rem}
\section{Reducing to Theorem~\ref{thm:2->l} over fields containing $\ell$-th roots}
We are now ready to prove Theorem~\ref{ovv3.3} assuming Theorem~\ref{thm:2->l}. Fix a field $k$ of characteristic~0, a symbol $\underline{a}$ and a norm variety $X$ for $\underline{a}$. We first observe that the statement of Theorem~\ref{ovv3.3} is equivalent to the exactness of the sequence \begin{equation}\label{eq:seq=thm} H_{-i,-i}(X)/\ell \map{\pi_*} K^M_i(k)/\ell \map{\underline{a}\cup} K^M_{i+n}(k)/\ell \map{\iota} K^M_{i+n}(k(X))/\ell . \end{equation} As observed in Example \ref{ex:i=0}, Theorem \ref{ovv3.3} for $n=1$ follows from \eqref{eq:n=1} when $\mu_\ell\subset k^\times$.
\begin{prop} Suppose that Theorem~\ref{thm:2->l} holds over $k$. Then so does Theorem~\ref{ovv3.3}. \end{prop}
\begin{proof} As the equalizer $\tilde K^M_{i+n}(k(X))/\ell$ is a subgroup of $K^M_{i+n}(k(X))/\ell$, Theorem \ref{thm:2->l} implies that there is an exact sequence \[ H_{-i,-i}(X)[e^{-1}] \map{\pi_*} K^M_i(k)[e^{-1}] \map{\underline{a}\cup} K^M_{i+n}(k)/\ell \map{\iota} K^M_{i+n}(k(X))/\ell . \] (If $\mu_\ell\subset k^\times$ then $e=1$.) Exactness of \eqref{eq:seq=thm} is immediate. \end{proof}
Thus we have reduced the proof of Theorem~\ref{ovv3.3} to Theorem~\ref{thm:2->l}. We will now show that proving Theorem~\ref{thm:2->l} over fields containing $\ell$-th roots of unity suffices.
\begin{prop}\label{prop:5=>2}
Suppose that Theorem~\ref{thm:2->l} holds for all fields of
characteristic $0$ which contain $\ell$-th roots of unity. Then
Theorem~\ref{thm:2->l} holds for all fields of characteristic $0$. \end{prop}
\begin{proof}
Let $k$ be any field of characteristic $0$ not containing an $\ell$-th root of unity, $\zeta$. Set $q = n+i$, $k'=k(\zeta)$, $k_1=k'\cap k(X)$, $e=[k':k]$ and $G = \Gal(k'/k_1)$, as in the statement of Theorem \ref{thm:2->l}(b). By Lemma \ref{basechange} and Remark \ref{rem:basechange}, the component $X'=X\times_{k_1}\Spec(k')$ of $X_{k'}$ is a norm variety for $\underline{a}$ over $k'$. The action of $G$ on $k'$ induces actions of $G$ on $X'$ and its 0-coskeleton $\mathfrak{X}'$, and induces the last map in Theorem \ref{thm:2->l}(b): \[ \tilde K_{q}^M(k(X))/\ell \map{j} (\tilde K_{q}^M(k'(X'))/\ell)^G \map{\partial} H^{q+1,q-1}(\mathfrak{X}')^G. \] Since $e$ is prime to $\ell$, inverting $e$ in the exact sequence of Theorem \ref{thm:2->l} for $k'$ yields the exact sequence forming the bottom row of the following diagram, in which the downward arrows are base change maps and the upward arrows are the norm maps. \begin{diagram-fixed}[1.9em] { H_{-i,-i}(X)[e^{-1}] & K^M_i(k)[e^{-1}] & K^M_{q}(k)/\ell &
\tilde K_{q}^M(k(X))/\ell & H^{q+1,q-1}(\mathfrak{X}')^G \\
H_{-i,-i}(X')[e^{-1}] & K^M_i(k')[e^{-1}] & K^M_{q}(k')/\ell &
\tilde K_{q}^M(k'(X'))/\ell &
H^{q+1,q-1}(\mathfrak{X}') \\};
\to{1-1}{1-2}^{\pi_*} \to{1-2}{1-3}^{\underline{a}\cup} \to{1-3}{1-4}^{\iota}
\to{1-4}{1-5}^{\partial j}
\to{2-1}{2-2}^{\pi'_*} \to{2-2}{2-3}^{\underline{a}\cup} \to{2-3}{2-4}^{\iota}
\to{2-4}{2-5}^{\partial}
\cofib{1-2}{2-2}
\cofib{1-3}{2-3}
\to{1-4}{2-4}^{j}
\to{1-5}{2-5}
\diagArrow{->,bend left}{2-1}{1-1}^N
\diagArrow{->,bend left}{2-2}{1-2}^N
\diagArrow{->,bend left}{2-3}{1-3}^N
\end{diagram-fixed}
As each $K$-group is covariantly functorial, the diagram with the downward set of arrows commutes; the diagram with the upward set of arrows commutes by naturality and the projection formula \cite[III.7.5.2]{WK}.
The downward map $K_*^M(k) \rto K^M_*(k')$, followed by the norm map, is multiplication by $e=[k':k]$. A diagram chase now shows that the top row of the diagram is exact. \end{proof}
\begin{rem} The map $j$ is also injective in the above diagram. To see this, note that (by the projection formula) the norm $K^M_q(k'(X'))/\ell\rto K^M_q(k(X))/\ell$ induces a map $\tilde N$ from $\tilde K^M_q(k'(X'))/\ell$ to $\tilde K^M_q(k(X))/\ell$, and the composition $\tilde{N}\,j$ is multiplication by $[k':k_1]$, not $e$. Note that $\tilde{N}$ does not commute with the norm $K^M_q(k')/\ell\rto K^M_q(k)/\ell$ unless $k=k_1$. \end{rem}
\section{The exact sequence} \label{sec:exact}
In this section and the next, we assume that our field $k$ contains an $\ell$-th root of unity, $\zeta$. As before, we fix a symbol $\underline{a}$ and a norm variety $X$ for $\underline{a}$, writing $\mathfrak{X}$ for the $0$-coskeleton of $X$.
Given a complex $\mathcal{F}^{\mathbf{\scriptscriptstyle\bullet}}$ of \'etale sheaves, let $\mathscr{H}^q=\mathscr{H}_{\text{nis}}^q(\mathcal{F}^{\mathbf{\scriptscriptstyle\bullet}})$ denote the Nisnevich sheaf associated to the presheaf $H_{\mathrm{et}}^q(-,\mathcal{F}^{\mathbf{\scriptscriptstyle\bullet}})$. If $\mathcal{F}$ is a locally constant \'etale sheaf (such as $\muell{i}$), $\mathscr{H}^q(\mathcal{F})$ is a Nisnevich sheaf with transfers, by \cite[6.11, 6.21 and 13.1]{mvw}.
\begin{lem}\label{H0(Hq)} If $\mathcal{F}$ is a sheaf, $H^0(\mathfrak{X},\mathscr{H}^q)$ is the equalizer of $H^0(X,\mathscr{H}^q) \rrto H^0(X\times X,\mathscr{H}^q)$. \end{lem}
\begin{proof} This is the definition of $H^0$ on a simplicial scheme; see \cite[5.2.2]{D}. Alternatively, it follows from the spectral sequence $E_1^{p,q}=H^q(X^{p+1},\mathcal{F})\Rightarrow H^{p+q}(\mathfrak{X},\mathcal{F})$ for the cohomology of a sheaf on a simplicial scheme. \end{proof}
\begin{rem}\label{rem:11.1} The Nisnevich sheaves $\mathscr{H}^q(\muell{q})$ are homotopy invariant sheaves with transfers, by \cite[24.1]{mvw}. By \cite[11.1]{mvw}, if $X$ is smooth then $H^0(X,\mathscr{H}^q(\muell{q}))$ --- and hence $H^0(\mathfrak{X},\mathscr{H}^q(\muell{q}))$ --- injects into $\mathscr{H}^q(\muell{q})(\Spec k(X))=H_{\mathrm{et}}^q(k(X),\muell{q})\cong K^M_q(k(X))/\ell$. \end{rem}
\begin{prop}\label{prop:coeff-triangle} If $\mu_\ell\subset k^\times$, there is a distinguished triangle in $\mathbf{DM}$ for each $q\ge0$:
\[ \Z/\ell(q-1) \map{\zeta} \Z/\ell(q) \rto \mathscr{H}^q(\muell{q})[-q] \rto. \] \end{prop}
\begin{proof} For any Nisnevich complex $C$ and any $q$ we have a distinguished triangle \[ \tau^{\leq q-1}C \rto \tau^{\leq q} C \rto H^q(C)[-q] \rto. \] Now let $C$ be the total direct image $R\pi_*\muell{q}$, where $\pi:\mathbf{Sm}_{\mathrm{et}}\rto\mathbf{Sm}_{\text{nis}}$, so $H_{\text{nis}}^*(X,C)=H_{\mathrm{et}}^*(X,\muell{q})$. Since $\mu_\ell\subset k^\times$, multiplication by $\zeta$ induces an isomorphism $\muell{q-1}\cong\muell{q}$. Thus we have an isomorphism $\cup\zeta: R\pi_*\muell{q-1} \map{\simeq} C$. In this case, the triangle reads: \[ \tau^{\le q-1}R\pi_*(\muell{q-1}) \map{\zeta} \tau^{\le q} R\pi_*(\muell{q}) \rto \mathscr{H}^q(\muell{q})[-q] \rto. \] By the Beilinson-Lichtenbaum conjecture (which has now been proven; see \cite[6.17]{mc/l} or \cite[Thm.\,B]{HW}), $\Z/\ell(q)\cong \tau^{\le q}C$ and
$\Z/\ell(q-1)\cong\tau^{\le q-1}R\pi_*\muell{q-1}\cong\tau^{\le q-1}C$. Combining these facts yields the distinguished triangle in question. \end{proof}
Let $\tilde\mathfrak{X}$ denote the simplicial cone of $\mathfrak{X} \rto \Spec k$. As a consequence of the Beilinson-Lichtenbaum conjectures, Voevodsky observed that
\begin{lem}\label{ovv2.2} If $X$ is smooth, the map $H^{p,q}(k,\Z/\ell)\map{} H^{p,q}(\mathfrak{X},\Z/\ell)$ is an isomorphism if $p\le q$ and an injection if $p=q+1$.
That is, $H^{p,q}(\tilde\mathfrak{X},\Z/\ell)=0$ if $p\le q+1$. \end{lem}
\begin{proof} See \cite[6.9 and 7.3]{mc/2} or \cite[1.37]{HW}. \end{proof}
\begin{prop}\label{prop:right-seq} If $\mu_\ell\subset k^\times$, there is a natural five-term exact sequence: \begin{equation*}
\makeshort{ 0 \rto H^{q,\, q-1}(\mathfrak{X},\Z/\ell)
\map{\zeta} K_q^M(k)/\ell \rto H^0(\mathfrak{X},\mathscr{H}^q(\muell{q}))
\map{\partial} H^{q+1,q-1}(\mathfrak{X},\Z/\ell).}
\end{equation*} \end{prop}
\begin{proof} Apply $H^q(\mathfrak{X},-)$ to the distinguished triangle in Proposition~\ref{prop:coeff-triangle}. Using the fact that $H^q(\mathfrak{X},C[j])=H^{q+j}(\mathfrak{X},C)$
and writing
$\mathscr{H}^q$ for $\mathscr{H}^q(\muell{q})$, we get \[ H^{-1}(\mathfrak{X},\mathscr{H}^q) \map{\partial} H^{q,q-1}(\mathfrak{X},\Z/\ell) \map{\zeta} H^{q,q}(\mathfrak{X},\Z/\ell) \rto H^0(\mathfrak{X},\mathscr{H}^q) \map{\partial} H^{q+1,q-1}(\mathfrak{X},\Z/\ell). \] The first term ($H^{-1}$) is $0$ because the coefficients are a sheaf. By Lemma \ref{ovv2.2} with $p=q$, the third term is $H^{q,q}(k,\Z/\ell)=K^M_q(k)/\ell$ \cite[Theorem 5.1]{mvw}. \end{proof}
\begin{cor}\label{2->1:n=1} Theorem \ref{thm:2->l} holds for $n=1$. \end{cor}
\begin{proof} By Proposition \ref{prop:5=>2}, we may assume $\zeta\in k$ so that $X=\Spec(E)$, $E=k(\root\ell\of a)$ and $X\times X=\coprod_GX$, where $G=\Gal(E/k)$. By Lemma \ref{H0(Hq)}, $H^0(\mathfrak{X},\mathscr{H}^q)$ is the equalizer of $H^q(X,\muell{q})\rrto \prod_G H^q(X,\muell{q})$, i.e., $H^q(X,\muell{q})^G$. Since $H^q(X,\muell{q})$ is $K^M_q(E)/\ell$, we have $H^0(\mathfrak{X},\mathscr{H}^q) \cong (K^M_q(E)/\ell)^G$. Proposition \ref{prop:right-seq} yields exactness of \[ K_q^M(k)/\ell \rto (K^M_q(E)/\ell)^G \map{\partial} H^{q+1,q-1}(\mathfrak{X},\Z/\ell). \] Now combine this with the exact sequence \eqref{eq:n=1}, using Lemma \ref{lem:H90} to identify $\bar H_{-i,-i}(X)$. \end{proof}
Our next goal, achieved in Corollary \ref{induced.map}, is to connect the first map in Proposition \ref{prop:right-seq}
to the cup product with $\underline{a}$. We assume that $n\ge2$, so that $d=\dim(X)>0$ and $s_d(X)$ is defined.
\begin{prop}\label{prop:left-seq} Let $X$ be a norm variety for $\underline{a}$ such that $s_d(X)\not\equiv0\pmod{\ell^2}$. For $i\ge0$, there is a four-term exact sequence \[\makeshort{ \bar{H}_{-i,-i}(X)_{(\ell)}\map{\pi_*} K_i^M(k)_{(\ell)} \map{r^*} H^{i+2d+1,i+d}(D,\Z_{(\ell)})\rto0.
}\] Suppose in addition that $X$ has a point of degree $\ell$. Then the following sequence is exact: \[\makeshort{ \bar{H}_{-i,-i}(X)\map{\pi_*} K_i^M(k)\map{r^*} H^{i+2d+1,i+d}(D,\Z_{(\ell)})\rto0.
}\] \end{prop}
\begin{proof} Let $M$, $D$ and $\mathbb{L}$ be as in Theorem \ref{thm:M}. Since $H^{p,q}(M[1]) = H^{p-1,q}(M)$, applying $H^{i+2d+1,i+d}(-,\Z_{(\ell)})$ to the distinguished triangle in \eqref{3.4.2} gives us the exact sequence
\[\makeshort{
H^{i+2d,i+d}(M,\Z_{(\ell)}) \map{} H^{i+2d,i+d}(\mathfrak{X}\otimes\mathbb{L}^d) \map{r^*}
H^{i+2d+1,i+d}(D,\Z_{(\ell)}) \map{u^*} H^{i+2d+1,i+d}(M,\Z_{(\ell)})
}\] where for brevity we have written $H^{p,q}(\mathfrak{X}\otimes\mathbb{L}^d)$ for $\Hom_{\mathbf{DM}}(R_{\tr}(\mathfrak{X})\otimes\mathbb{L}^d,\Z_{(\ell)}(q)[p])$. We will show that this may be rewritten as the 4-term sequence of the proposition.
The last term $H^{i+2d+1,i+d}(M,\Z_{(\ell)})$ vanishes because $M$ is a direct summand of $X$, and $H^{p,q}(X,\Z_{(\ell)})=0$ whenever $p-q>\dim(X)$; see \cite[3.6]{mvw}. Similarly, the first term, $H^{i+2d,i+d}(M,\Z_{(\ell)})$, is a summand of $H^{i+2d,i+d}(X,\Z_{(\ell)})$, which we showed to be isomorphic to $H_{-i,-i}(X,\Z_{(\ell)})$ if $i\ge0$, in the proof of Proposition \ref{H-n-n}. Therefore we may replace the first term by $H_{-i,-i}(X,\Z_{(\ell)})$.
Since $X\!\rto\!\Spec(k)$ factors through $\mathfrak{X}$, the map $\pi_*:H_{-i,-i}(X,\Z_{(\ell)})\rto H_{-i,-i}(k,\Z_{(\ell)})=K^M_i(k)_{(\ell)}$ factors through $\bar H_{-i,-i}(X,\Z_{(\ell)})$, the coequalizer of the two projections from $H_{-i,-i}(X\times X,\Z_{(\ell)})$.
We also know that
\begin{align*} H^{i+2d,i+d}(\mathfrak{X}\otimes\mathbb{L}^d) &= \Hom_{\mathbf{DM}}(\mathfrak{X}\otimes\mathbb{L}^d,\Z_{(\ell)}(i+d)[i+2d])
= \Hom_{\mathbf{DM}}(\mathfrak{X}, \Z_{(\ell)}(i)[i]) \\
& = H^{i,i}(\mathfrak{X},\Z_{(\ell)}) \cong H^{i,i}(\Spec k,\Z_{(\ell)})
\cong K_i^M(k)\otimes\Z_{(\ell)} = K_i^M(k)_{(\ell)},
\end{align*} where the last two isomorphisms follow from Lemma \ref{ovv2.2} and the Nestorenko-Suslin-Totaro Theorem \cite[5.1]{mvw}. Thus we have constructed an exact sequence \[\makeshort{
\bar H_{-i,-i}(X,\Z_{(\ell)}) \map{\pi_*} K_i^M(k)_{(\ell)} \map{r^*}
H^{i+2d+1,i+d}(D,\Z_{(\ell)}) \rto 0. }\]
When $X$ has a point $x$ of degree $\ell$ over $k$, every element $\alpha$ of $K^M_i(k)$ has $\ell\,\alpha=\pi_*([x,\alpha])$, so the cokernel of $\pi_*:H_{-i,-i}(X)\rto H_{-i,-i}(k)=K^M_i(k)$ has exponent $\ell$, and is the same as the cokernel of $\bar{H}_{-i,-i}(X,\Z_{(\ell)}) \rto K_i^M(k)_{(\ell)}$. Thus we can replace the first two terms of the exact sequence with these to get the desired sequence. \end{proof}
\begin{cor}\label{induced.map} If $\mu_\ell\subset k^\times$, there are maps $\alpha_i: H^{i+2d+1,i+d}(D,\Z_{(\ell)})\rto H^{n+i,n+i-1}(\mathfrak{X},\Z/\ell)$ for all $i$ so that $\underline{a}\cup: K^M_{i}(k)/\ell\rto K^M_{n+i}(k)/\ell$ (the cup product with $\underline{a}$) factors as \[ K^M_{i}(k)/\ell \rfib^{r^*} H^{i+2d+1,i+d}(D,\Z_{(\ell)})\map{\alpha_i} H^{n+i,n+i-1}(\mathfrak{X},\Z/\ell) \rcofib^{\zeta} K^M_{n+i}(k)/\ell. \] \end{cor}
\begin{proof} Set $q=n+i$. For each closed point $x$ of $X$, the diagram
\begin{diagram}
{ K^M_i(k(x))/\ell & K^M_i(k)/\ell \\
K^M_q(k(x))/\ell & K^M_q(k)/\ell \\};
\arrowsquare{N}{\underline{a}\cup=0}{\underline{a}\cup}{N} \end{diagram}
commutes by the projection formula \cite[III.7.5.2]{WK}. Thus the map $H_{-i,-i}(X)\rto K^M_q(k)/\ell$ is zero, since by Proposition \ref{H-n-n} it is induced by the maps \[ K^M_i(k(x))/\ell \map{N} K^M_i(k)/\ell \map{\underline{a}\cup}K^M_{q}(k)/\ell. \] By Proposition \ref{prop:left-seq}, the cup product factors through the quotient $H^{i+2d+1,i+d}(D,\Z_{(\ell)})$ of $K^M_i(k)/\ell$.
It remains to show that the image $\underline{a} K^M_i(k)$ of the cup product lands in the subgroup $H^{q,q-1}(\mathfrak{X},\Z/\ell)$ of $K^M_q(k)/\ell$. Since $H^0(X,\mathscr{H}^q(\muell{q}))$ is a subgroup of $K^M_{q}(k(X))/\ell$ (by Remark \ref{rem:11.1}), it suffices by Proposition \ref{prop:right-seq} to show that $\underline{a} K^M_i(k)$ vanishes in $K^M_{q}(k(X))/\ell$. This is so because $k(X)$ splits $\underline{a}$. \end{proof}
In Corollary \ref{cor:Q-surj}, we will show that the map $\alpha_i$ is an isomorphism. The inverse of $\alpha_i$ will be constructed using the cohomology operations $Q_i$ defined in \cite[p.\,51]{RPO}. Each $Q_i$ has bidegree $(2\ell^i-1,\ell^i-1)$; see {\it loc.\,cit.} or \cite[13.3]{HW} for a summary of their properties. Thus the composite $Q=Q_{n-1}Q_{n-2}\cdots Q_0$ has bidegree $(2b\ell-n+2,b\ell-n+1)$, where $b=d/(\ell-1)$.
\begin{defn} Define the $\Z$-graded ring $\mathbb{H}^*(k)$
by
\[ \mathbb{H}^i(-) = \bigoplus_{s\in \Z} H^{i+s,s}(-,\Z/\ell). \] In particular, $\mathbb{H}^0(k)\cong K^M_*(k)/\ell$.
The cohomology operation $Q$ maps $\mathbb{H}^i(Y)$ to $\mathbb{H}^{i+b\ell+1}(Y)$. Note that $\mathbb{H}^i(\tilde\mathfrak{X})=0$ for $i\le1$, by Lemma \ref{ovv2.2}. \end{defn}
Now the operations $Q_j$ vanish on each $K^M_p(k)/\ell=H^{p,p}(k,\Z/\ell)$, because $H^{p,q}(k,\Z/\ell)=0$ for $p>q$. Since the $Q_j$ are derivations (\cite[13.10]{HW}), this means that $\mathbb{H}^*(Y)$ is a graded $K^M_*(k)/\ell$-module for each $Y$, and each $Q_j:\mathbb{H}^i(Y)\map{}\mathbb{H}^{i+\ell^j}(Y)$ is a $K^M_*(k)/\ell$-module homomorphism. Thus $Q:\mathbb{H}^i \rto \mathbb{H}^{i+b\ell+1}$ is also a $K^M_*(k)/\ell$-module homomorphism.
\begin{lem} \label{lem:Q-inj} Let $X$ be a norm variety over a field of characteristic~0, and let $\mathfrak{X}$ be its 0-coskeleton.
Then the map
$Q:\mathbb{H}^1(\mathfrak{X})\rto\mathbb{H}^{b\ell+2}(\mathfrak{X})$
is an injection. \end{lem}
\begin{proof} Since $H^{p,q}(\Spec k,\Z/\ell) = 0$ for $p>q$, we have $\mathbb{H}^i(\Spec k)=0$ for $i>0$. This yields isomorphisms $\mathbb{H}^i(\mathfrak{X})\map{\cong}\mathbb{H}^{i+1}(\tilde\mathfrak{X})$ for all $i>0$.
In particular, $\mathbb{H}^1(\mathfrak{X}) \cong \mathbb{H}^2(\tilde\mathfrak{X})$. Thus it suffices to show that $Q$ is injective on $\mathbb{H}^2(\tilde\mathfrak{X})$. Setting $a(j)={2+\frac{\ell^j-1}{\ell-1}}$, $Q_{j-1}\cdots Q_0$ maps $\mathbb{H}^2(\tilde\mathfrak{X})$ to $\mathbb{H}^{a(j)}(\tilde\mathfrak{X})$. In particular it suffices to show that $Q_j$ is injective on $\mathbb{H}^{a(j)}(\tilde\mathfrak{X})$ for all $0\le j\le n-1$.
Because $X$ is a norm variety, we know from \cite[3.2]{mc/2} (or \cite[10.14]{HW}) and \cite[13.20]{HW} that the Margolis sequence is exact for each $Q_j$, $j<n$: \[ \mathbb{H}^{a(j)-\ell^j}(\tilde\mathfrak{X}) \map{Q_j} \mathbb{H}^{a(j)}(\tilde\mathfrak{X}) \map{Q_j} \mathbb{H}^{a(j)+\ell^j}(\tilde\mathfrak{X}). \] By Lemma \ref{ovv2.2}, the left term is zero because $a(j)-\ell^j\le1$. The result follows. \end{proof}
Since $X$ is a splitting variety, $\underline{a}$ vanishes in $K^M_n(k(X))/\ell$. By Remark \ref{rem:11.1}, $\underline{a}$ vanishes in $H^0(X,\mathscr{H}^n(\muell{n}))$. It follows from Proposition \ref{prop:right-seq} (or \cite[6.5]{mc/l}) that there is a unique element $\delta$ in $H^{n,n-1}(\mathfrak{X},\Z/\ell)$ whose image in $K^M_n(k)/\ell$ is $\underline{a}$.
In the following proposition, $\zeta$ is the map defined in Proposition \ref{prop:right-seq}, $\alpha$ is the direct sum of the maps $\alpha_i$ defined in Corollary \ref{induced.map}, and the maps $r^*$, $s^*$ are given in Theorem \ref{thm:M}.
\goodbreak \begin{prop} \label{rsQ square} If $s_d(X)\not\equiv0\pmod{\ell^2}$, the following diagram commutes up to sign, and the top composite is multiplication by $\underline{a}$.
\begin{diagram}
{ \mathbb{H}^0(\mathfrak{X}) & \mathbb{H}^1(\mathfrak{X}) & K^M_*(k)/\ell \\
\mathbb{H}^{d+1}(D) & \mathbb{H}^{b\ell+2}(\mathfrak{X}) \\};
\arrowsquare{\delta\cup}{r^*}{Q}{s^*}
\diagArrow{right hook->}{1-2}{1-3}^{\zeta}
\to{2-1}{1-2}^{\alpha}
\end{diagram}
\end{prop}
\begin{proof} Note that all maps in the diagram are (right) module maps over the ring $K^M(k)/\ell\cong\mathbb{H}^0(\mathfrak{X})$. This is clear for multiplication by $\delta$, and we have already seen that the cohomology operation $Q$ is also a $\mathbb{H}^0(\mathfrak{X})$-module map. Finally, the maps $r^*$ and $s^*$ are also $\mathbb{H}^0(\mathfrak{X})$-module maps, since they come from morphisms in $\mathbf{DM}$; see \eqref{3.4.1} and \eqref{3.4.2}.
The top row sends $x\in\mathbb{H}^0(\mathfrak{X})$ to $\zeta(\delta\cup x)=\underline{a}\cup x$; since $\zeta$ is an injection (by Proposition \ref{prop:right-seq}), and $\underline{a}\cup x=\zeta(\alpha\, r^*(x))$, the upper triangle commutes: $\delta\cup x=\alpha\, r^*(x)$.
We will show that $s^*r^*(1) = (-1)^{n-1} Q(\delta)$. By linearity, it will follow that $s^*r^*(x)=(-1)^{n-1} Q(\delta\cup x)$ for all $x\in \mathbb{H}^0(\mathfrak{X})$. Since $r^*$ is surjective by Proposition \ref{prop:left-seq}, the result will follow.
We need to recall the definition of $\phi_V(\mu)$ from \cite[p.\,413]{mc/l} and \cite[5.10]{HW}. Given an element $\mu$ in $H^{2b+1,b}(\mathfrak{X},\Z/\ell)$, form the triangle $A\rto\mathfrak{X}\map{\mu}\mathfrak{X}(b)[2b+1]$ and set $S=S^{\ell-2}A$. Then $\phi_V(\mu)$ is defined to be the element of $H^{2b\ell+2,b\ell}(\mathfrak{X},\Z/\ell)$ represented by the composition \[ R_{\tr}(\mathfrak{X}) \map{s} S(b)[2b+1] \map{r\oo1} R_{\tr}(\mathfrak{X})(b\ell)[b\ell+2]. \] When $\mu=Q_{n-2}\cdots Q_0(\delta)$, we get the distinguished triangles \eqref{3.4.1} and \eqref{3.4.2} with $D=S$. Thus
the composition $s^*\circ r^*$ in the above diagram is multiplication by the element $\phi_V(\mu)$, as observed in {\it loc.\,cit.}
By \cite[Thm.\,3.8]{mc/l} (cf.\,\cite[Cor.\,6.33]{HW}), $\phi_V$ agrees with $\beta P^b$. In addition, since $\mu$ is annihilated by the $Q_i$ with $i\le n-2$ we have $\beta P^b(\mu)=(-1)^{n-1}Q_{n-1}(\mu)$; see \cite[p.\,427]{mc/l} or \cite[5.14]{HW}. This shows that the bottom right triangle commutes in the above diagram. \end{proof}
\begin{remm} In the proof of Proposition \ref{rsQ square}, we have cited Definition 5.10, Corollary 6.33 and Lemma 5.14 from the book \cite{HW}. These are slightly improved versions of Lemma 3.2 and (5.2), Theorem 3.8 and Lemma 5.13 in Voevodsky's paper \cite{mc/l}. Note that \cite[5.13]{mc/l} is missing several minus signs. \end{remm}
\begin{cor} \label{cor:Q-surj} In Proposition \ref{rsQ square}, $Q$ and $\alpha$ are isomorphisms, and the maps $r^*$ and $\delta\cup-$ are surjections. \end{cor}
\begin{proof} From Proposition~\ref{prop:left-seq}, we see that $r^*$ is surjective. By \cite[4.16]{HW}, $s^*$ is an isomorphism (because $d+1>d$), and $Q$ is an injection by Lemma \ref{lem:Q-inj}. The result follows from a diagram chase. \end{proof}
Note that $H^{q,q-1}(\mathfrak{X})=0$ for $q<n$, because by Corollary \ref{cor:Q-surj} this is a quotient of $H^{q-n,q-n}(\mathfrak{X})$.
Recall that $\widetilde{K}^M_q(k(X))/\ell$ is the equalizer of the two maps \[ \iota_1, \iota_2: K^M_{q}(k(X))/\ell \rightrightarrows K^M_{q}(k(X\times X))/\ell. \]
The following result was proved for $n=1$ in Corollary \ref{2->1:n=1}, and will be proved for $n\ge2$ in the next section.
\begin{prop} \label{prop:4thterm} $H^0(\mathfrak{X}, \mathscr{H}^q(\mu_\ell^{\otimes q})) \cong \tilde K^M_q(k(X))/\ell.$ \end{prop}
We are now ready to prove Theorem~\ref{thm:2->l} when $n\ge2$.
\begin{proof}[Proof of Theorem~\ref{thm:2->l}]
Putting Proposition~\ref{prop:right-seq} for $q=n+i$ and Proposition~\ref{prop:left-seq} together, we get that the rows are exact in the following diagram, where $H^{p,q}(-)$ denotes $H^{p,q}(-,\Z/\ell)$. \begin{diagram}
{ & \bar{H}_{-i,-i}(X) & K_i^M(k) & H^{i+2d+1,i+d}(D,\Z_{(\ell)}) \\ H^{q+1,q-1}(\mathfrak{X}) & H^0(\mathfrak{X},\mathscr{H}^{q}(\muell{q})) &K_q^M(k)/\ell& H^{q,q-1}(\mathfrak{X}) \\};
\to{1-2}{1-3} \fib{1-3}{1-4}^{r^*}
\diagArrow{left hook->}{2-4}{2-3}^{\zeta} \to{2-3}{2-2} \to{2-2}{2-1}
\to{1-3}{2-3}^{\underline{a}\cup}
\diagArrow{densely dashed,->}{1-4}{2-4}^{\alpha}
\to{1-3}{2-4}^{\delta\cup}
\draw[densely dotted,->,rounded corners=5pt]
(m-1-2.south) -- (m-1-3.south west) -- (m-2-3.north west)
-- (m-2-2.north) -- (m-2-1.north east); \end{diagram} From Corollary \ref{cor:Q-surj} we can conclude that the five-term sequence indicated by the dotted arrow is exact:
\addtocounter{equation}{-1} \begin{subequations} \renewcommand{\theequation}{\theparentequation.\arabic{equation}} \begin{equation}\label{eq:6-term}
\bar{H}_{-i,-i}(X) \rto K_i^M(k) \map{\underline{a}\cup} K_{q}^M(k)/\ell \rto
H^0(\mathfrak{X},\mathscr{H}^{q}(\muell{q})) \rto H^{q+1,q-1}(\mathfrak{X}). \end{equation} \end{subequations} Theorem \ref{thm:2->l} now follows from Proposition~\ref{prop:4thterm}.
\end{proof}
\section{The fourth term}
Let $\iota_1,\iota_2$ be the two inclusions $k(X) \rcofib k(X\times X)$ induced by the projections $X \times X \rto X$. To finish the proof of Theorem \ref{thm:2->l}, we need to prove Proposition \ref{prop:4thterm} for $n\ge2$.
\begin{lem}\label{lem:Gersten} Fix $n\ge2$. In the commutative diagram \begin{diagram}
{
H^0(\mathfrak{X},\mathscr{H}^q) & H^0(X,\mathscr{H}^q) & H^0(X\times X, \mathscr{H}^q) \\
E_0 & K^M_q(k(X))/\ell & K^M_q(k(X\times X))/\ell \\
E_1 & \bigoplus_{x\in X^{(1)}} K^M_{q-1}(k(x))/\ell & \bigoplus_{y\in
(X\times X)^{(1)}} K^M_{q-1}(k(y))/\ell \\};
\cofib{1-1}{1-2} \cofib{2-1}{2-2} \cofib{3-1.mid east}{3-2.mid west}
\diagArrow{->,shiftup}{1-2}{1-3} \diagArrow{->,shiftdown}{1-2}{1-3}
\diagArrow{->,shiftup}{2-2}{2-3}^{p_0} \diagArrow{->,shiftdown}{2-2}{2-3}_{p_0'}
\diagArrow{->,shiftup}{3-2.mid east}{3-3.mid west}^{p_1}
\diagArrow{->,shiftdown}{3-2.mid east}{3-3.mid west}_{p_1'}
\cofib{1-1}{2-1} \cofib{1-2}{2-2} \cofib{1-3}{2-3}
\to{2-1}{3-1} \to{2-2}{3-2} \to{2-3}{3-3}
\end{diagram} all of the columns are exact, and each $E_i$ is the equalizer of the morphisms $p_i$ and $p'_i$. \end{lem}
\begin{proof} Exactness of the first row (i.e., that $H^0(\mathfrak{X},\mathscr{H}^q)$ is the equalizer) is immediate from Lemma \ref{H0(Hq)}.
The two right-hand columns are exact, as they are obtained from the Gersten resolutions for $\mathscr{H}^q$. The homomorphisms which are known to be injective are denoted $\rcofib$. By an elementary diagram chase, the left-hand column is also exact. \end{proof}
In order to prove Proposition \ref{prop:4thterm} it thus suffices to show that $E_1 \cong 0$ in Lemma \ref{lem:Gersten}.
\begin{lem} If $n\ge2$, $E_1 = \ker p_1 = \ker p_1'.$ \end{lem}
\begin{proof}
Since $n>1$, we have $\dim X = \ell^{n-1}-1 \ge 1$. For any point $x\in X^{(1)}$ the summand indexed by $x$ is mapped by $p_1$ and $p_1'$ to the summands indexed by the generic points of $x\times X$ and $X\times x$, respectively. Since these points (and hence the summands) are distinct, the images of $p_1$ and $p_1'$ intersect in 0. It follows that their equalizer is $\ker(p_1)=\ker(p_1')$, as asserted. \end{proof}
\begin{prop} If $X$ is a smooth variety of dimension $\ge1$,
then $p_1$ is injective. \end{prop}
\begin{proof} For each $x\in X^{(1)}$, let $y_x$ denote a generic point of $x\times X$; since $X$ is smooth, $x\times X$ is reduced. We will show that the composition of $p_1$ with the projection $\pi_x$ onto $K^M_{q-1}(k(y_x))/\ell$, \[ \bigoplus_{x\in X^{(1)}} K^M_{q-1}(k(x))/\ell \map{p_1} \bigoplus_{y\in(X\times X)^{(1)}} K^M_{q-1}(k(y))/\ell \map{\pi_x} K^M_{q-1}(k(y_x))/\ell, \] is an injection on the $x$-summand; since $\pi_x p_1$ is zero on all the other summands of the left term, it will follow that $p_1$ is an injection.
Fix $x$ and write $F$ for $k(X)$; as $X$ is smooth, the function field of $x\times X$ is a finite product of fields. Choosing an affine neighborhood $\Spec R$ of $x$, $x$ is given by a height~1 prime ideal $\mathfrak{p}$ of $R$: $k(x)=\rmfrac(R/\mathfrak{p})$ and $F=\rmfrac(R)$. Note that $k(x)\otimes R$ is a regular ring because $X$ is smooth over $k$. The kernel $\mathfrak{m}$ of the multiplication map \[ k(x)\otimes R \rto k(x)\otimes k(x) \map{\mu} k(x) \] is a maximal ideal of $k(x)\otimes R$, and the localization $R'=(k(x)\otimes R)_{\mathfrak{m}}$ at $\mathfrak{m}$ is a regular local ring with residue field $k(x)$ and fraction field $k(y_x)$. Choose a regular sequence $r_1,\dots,r_d$ generating the maximal ideal of $R'$; by iterated use of \cite[III.7.3]{WK}, there is a {\it specialization map} \[ K^M_*(k(y_x)) \map{\lambda} K^M_*(k(x)) \] which is a left inverse to the component $p_1^x:K^M_*(k(x))\rto K^M_*(k(y_x))$ of $p_1$. \end{proof}
Proposition \ref{prop:4thterm} now follows for $n\ge2$, since norm varieties are smooth by definition. This completes the proof of Theorem~\ref{thm:2->l}.
\end{document} |
\begin{document}
\title{Bounds on the conditional and average treatment effect with unobserved confounding factors}
\author{Steve Yadlowsky, Hongseok Namkoong, Sanjay Basu, John Duchi, Lu Tian}
\begin{center}
{\LARGE Bounds on the conditional and average treatment effect with unobserved confounding factors} \\
{\large Steve Yadlowsky$^1$, Hongseok Namkoong$^2$, Sanjay Basu$^3$, John Duchi$^4$, Lu Tian$^5$} \\
$^{1}$Google Research, Brain Team, \texttt{yadlowsky@google.com}\\
$^2$Decision, Risk, and Operations Division, Columbia Business School, \texttt{namkoong@gsb.columbia.edu} \\
$^3$School of Public Health,
Imperial College London, \texttt{s.basu@imperial.ac.uk} \\
$^4$Statistics and Electrical Engineering, Stanford University, \texttt{jduchi@stanford.edu}\\
$^5$Biomedical Data Science, Stanford University, \texttt{lutian@stanford.edu} \end{center}
\begin{abstract}
For observational studies, we study the sensitivity of causal inference when
treatment assignments may depend on unobserved confounders. We develop a
loss minimization approach for estimating bounds on the conditional average
treatment effect (CATE) when unobserved confounders have a bounded effect on
the odds ratio of treatment selection. Our approach is scalable and allows
flexible use of model classes in estimation, including nonparametric and black-box machine
learning methods. Based on these bounds for the CATE, we propose a sensitivity
analysis for the average treatment effect (ATE). Our semi-parametric
estimator extends the augmented inverse propensity weighted (AIPW)
estimator to bound the ATE under bounded unobserved confounding. By constructing
a Neyman orthogonal score, our estimator of the bound for the ATE is a regular root-$n$ estimator so
long as the nuisance parameters are estimated at the $o_p(n^{-1/4})$
rate. We complement our methodology with optimality results
showing that our proposed bounds are tight in certain cases. We demonstrate
our method on simulated and real data examples, and show accurate coverage
of our confidence intervals in practical finite sample regimes with rich
covariate information. \end{abstract}
\section{Introduction}
Consider a causal inference problem with treatment indicator $Z\in \{0, 1\}$ representing control or intervention, potential outcomes $Y(1) \in \R$ under intervention and $Y(0) \in \R$ under control, and a set of observed covariates $X \in \mathcal{X} \subseteq \R^d$. Our interest is in studying confounding bias in estimators of the \emph{conditional average treatment effect} (CATE) \begin{equation*}
\tau(x) \defeq \E[Y(1)-Y(0)\mid X=x], \end{equation*} and estimation and inference of the \emph{average
treatment effect} (ATE) \begin{equation*}
\tau \defeq \E[Y(1) - Y(0)] \end{equation*} based on $n$ i.i.d.\ observations $\{Y = Y(Z), Z, X\}$.\footnote{Together, these imply the stable unit treatment value assumption, which will be assumed throughout.} Many methods provide consistent estimators for the ATE~\cite{ImbensRu15} under the independence assumption \begin{equation}
\{Y(1), Y(0)\} \independent Z \mid X, \label{eq:indepsim} \end{equation} that all confounding factors are observed or equivalently, that observed covariates $X$ account for all dependence between the potential outcomes and treatment assignments.
Estimation of the CATE, $\tau(x),$ under the independence assumption~\eqref{eq:indepsim} has recently generated substantial interest~\cite{hill2011bayesian, athey2016recursive, kunzel2017meta,
wager2017estimation, nie2017learning}.
Confounding bias is ubiquitous in observational studies, and the assumption \eqref{eq:indepsim} is frequently too restrictive: in practice, there is almost always an unobserved confounding factor $U \in \mc{U}$ affecting both treatment selection and outcome. Consequently, we consider an unobserved confounding factor $U$ such that \begin{equation}
\{Y(1), Y(0)\} \independent Z \mid X,U. \label{eq:indep} \end{equation} This allows there to be a common cause $U$ of the treatment $Z$ and potential outcomes $\{Y(0), Y(1)\}$ that contains the relevant information about the potential outcomes that influence the treatment assignment. More abstractly, it allows the treatment assignment $Z$ to depend directly on the unobserved potential outcome; a multivariate unobserved confounder $U$ satisfying condition~\eqref{eq:indep} always exists by letting $U=(Y(1), Y(0))$. Under this assumption, neither the ATE $\tau$ nor the CATE $\tau(x)$ is identifiable, and traditional estimators can be arbitrarily biased~\cite{Rosenbaum02,imbensreview,robins2000sensitivity}. Yet it may be plausible that there is not ``too much'' confounding, so it is interesting to provide bounds on the possible range of treatment effects under such scenarios. We take this approach to propose a sensitivity analysis linking the posited strength of unobserved confounding to the range of possible values of the ATE $\tau$ and CATE $\tau(x)$.
We consider unobserved confounders that have bounded influence on the odds of treatment assignment, following Rosenbaum's ideas~\cite{Rosenbaum02}. \begin{definition}
\label{definition:bounded-selection}
A distribution $P$ over $\{Y(1), Y(0), X, U, Z\}$ satisfies the
\emph{$\Gamma$-\cornfield} condition with $1 \le \Gamma < \infty$ if
for $P$-almost all $u, \tilde{u} \in \mc{U}$ and $X \in \mc{X}$,
\begin{equation}
\frac{1}{\Gamma} \le
\frac{P(Z = 1 \mid X, U=u)}{P(Z = 0 \mid X, U=u)}
\frac{P(Z = 0 \mid X, U=\tilde{u})}{P(Z = 1 \mid X, U=\tilde{u})}
\le \Gamma.
\label{eq:cornfield}
\end{equation} \end{definition} \noindent Condition~\eqref{eq:cornfield} limits departures from the independence assumption \eqref{eq:indepsim}, and is equivalent to a regression model for the treatment selection probability~\cite[Prop.~12]{Rosenbaum02} where the log odds ratio for treatment is \begin{equation}
\label{eq:stat-model}
\log \frac{P(Z=1 \mid X, U)}{P(Z=0 \mid X, U)}
= \kappa(X) + \log(\Gamma) b(U, X), \end{equation} for some function $\kappa : \mathcal{X} \to \R$ of observed covariates $X$ and a bounded function $b: \mathcal{U}\times \mathcal{X} \to [0, 1]$ of the unobserved and observed confounders, $U$ and $X$, respectively. Such odds ratios are common, for example, in medicine, where they reflect associations between risk factors and outcomes~\cite{norton2018odds}. Practice requires choosing a realistic value of $\Gamma$ to interpret the sensitivity analysis; we discuss this in more detail in Section~\ref{section:discussion}. One common approach by practitioners is to look at the level of $\Gamma$ at which the bounds on the ATE $\tau$ cross a certain level of interest (e.g., $0$), which measures the robustness of the findings to unobserved confounding~\cite{Rosenbaum02}, and then consider how plausible that choice of $\Gamma$ would be for the data generating process.
The ATE $\tau,$ and CATE $\tau(x),$ are partially identified under the $\Gamma$-\cornfield condition~\eqref{eq:cornfield}, so we focus instead on estimating bounds for them. This perspective on sensitivity to unobserved confounding traces to \citeauthor{CornfieldHaHaLiShWy59}'s analysis demonstrating that if an unmeasured hormone can explain the observed association between smoking and lung cancer, it would need to increase the probability of smoking by nine-fold (an unrealistic amount)~\cite{CornfieldHaHaLiShWy59}. Contemporary medical informatics and epidemiological studies focusing on small effect sizes require a more nuanced approach for estimating the causal effect in the presence of unobserved confounding than the simple one used by \citet{CornfieldHaHaLiShWy59}. For example, observational data is often used for post-market drug surveillance, but \citet{bosco2010most} shows that unobserved confounding presents a particularly high risk in these data, motivating the need for sensitivity analysis to contextualize findings. \citet{coloma2012electronic} show that effect sizes are often small, as adverse events for approved drugs are relatively rare. Therefore, to draw confident and precise conclusions when there is mild confounding, it is important to avoid applying an overly conservative sensitivity analysis. Motivated to provide the most precise conclusions possible in the presence of confounding, we seek methods that provide optimal (tight) bounds on the CATE and ATE under the $\Gamma$-\cornfield{} condition~\eqref{eq:cornfield}.
\subsection{Bounding treatment effects}
In what follows, we bound the confounding bias using analogues of the plug-in treatment contrast estimator for the CATE, and the augmented inverse probability weighted (AIPW) estimator for the ATE~\cite{bang2005doublerobust}. We treat each potential outcome separately, focusing on lower bounds on $\mu_1 = \E[Y(1)]$ (other cases are symmetric). Based on observed data, all parameters necessary to estimate $\mu_1$ can be non-parametrically identified, except the conditional mean of the unobserved potential outcome, $\E[Y(1) \mid X, Z=0]$. Since this quantity is not identifiable in the presence of unobserved confounding, we develop a worst-case bound under the $\Gamma$-\cornfield{} condition~\eqref{eq:cornfield}, and develop estimators based on the observed data. Specifically, let \begin{equation} \theta_1(x) \defeq \inf\{\E_Q[Y(1) \mid X=x, Z=0] : Q \in \mathcal{Q}_x\}
\label{eq:cate-lower-bound} \end{equation} where $\mathcal{Q}_x$ is the set of all distributions for $(Y(0), Y(1), Z)$ conditional on $X=x$ satisfying the independence assumption \eqref{eq:indep} and the bound \eqref{eq:cornfield} for $X = x$, and matching the conditional distributions that are identified in the observed data $P$: $Q(Z=1 \mid X)=P(Z=1 \mid X)$ and $Q(Y(1) \in \cdot \mid Z=1, X) = P(Y(1) \in \cdot \mid Z=1, X)$. By definition, $ \theta_1(x) \le \E_P[Y(1) \mid X=x, Z=0]$ under the bounded unobserved confounding ($\Gamma$-\cornfield{} condition~\eqref{eq:cornfield}). Lower bounds on $\E[Y(1) \mid X=x]$ and $\E[Y(1)]$ follow from plugging in $\theta_1(x)$ in place of the unknown $\E_P[Y(1) \mid X=x, Z=0].$
Our first main result (Section~\ref{sec:cate-sensitivity}) shows that $\theta_1(x)$ can be expressed as the solution to the loss minimization problem with a reweighted squared loss \begin{equation*}
\minimize_{\funcparam(\cdot)}
~\half \E\left[ \hinge{Y(1) - \funcparam(X)}^2 +
\Gamma \neghinge{Y(1) - \funcparam(X)}^2 \mid \treatmentrv=1 \right], \end{equation*} where $a_+=a \ind{a>0}$ and $a_-=-a \ind{a<0}$ for $a \in \R$, and $\ind{\cdot}$ is the indicator function.
The scalable loss minimization approach allows us to use flexible model classes to estimate the lower bound, including many nonparametric and machine learning methods. Intuitively, the preceding display upweights the penalty for negative residuals, therefore increasing the impact of smaller observed outcomes on the minimizer $\theta_1(x),$ correcting for the fact that selection bias from confounding may have decreased the frequency of smaller observed outcomes.
Our second main result defines a semiparametric estimator \eqref{eq:orthogonal-estimator} for the lower bound on the expected outcome $Y(1)$ under the $\Gamma$-\cornfield condition~\eqref{eq:cornfield} \begin{equation}
\label{eq:outcome-bound}
\mu_1^- \defeq \E[Z Y(1) + (1-Z) \theta_1(X)] \le \E[Y(1)]. \end{equation} Our estimation approach (Section~\ref{sec:semiparametric}) builds out of a line of work~\cite{bang2005doublerobust, chernozhukov2018double} for statistical inference on $\tau$ when all confounders are observed~\eqref{eq:indepsim}; we adapt \citet{chernozhukov2018double}'s cross-fitting procedure to allow large model classes to estimate nuisance parameters. Our semiparametric estimator satisfies \emph{Neyman
orthogonality}~\cite{Neyman59}, and is insensitive to estimation errors in nuisance parameters.
By virtue of this orthogonality, our estimator is root-$n$ consistent and asymptotically normal so long as the nuisance parameters are estimated at a slower-than-parametric $o_p(n^{-1/4})$ rate of convergence. Our result gives asymptotically exact confidence intervals (CIs) for the lower bound $\mu_1^-$~\eqref{eq:outcome-bound}.
Coupling the asymptotic distribution for $\hat{\mu}_1^-$ with the symmetrically defined upper and lower bounds $\what{\mu}_z^\pm$ for $\E[Y(z)]$, we can construct a CI for the ATE $\tau$ under the $\Gamma$-\cornfield condition~\eqref{eq:cornfield}. In general, the boundary of our interval never shrinks to $\tau$ even in the large sample limit due to unobserved confounding. However, when there is no unmeasured confounding ($\Gamma = 1$), our method is equivalent to the AIPW estimator for the ATE $\tau$.
Our population-level bound is unimprovable for bounding each expected potential outcome and their conditional counterparts, $\E[Y(z)]$ and $\E[Y(z) \mid X=x]$, $z \in \{0, 1\}$, but may not be always optimal in bounding their difference, the ATE $\tau = \E[Y(1) - Y(0)]$. On the other hand, when the potential outcomes are symmetric in the sense that $Y(0) \overset{d}{=} C(1-Y(1))$ for some constant $C$, then our bounds on the treatment effect are also unimprovable (Section~\ref{sec:hypothesis-test}), thereby guaranteeing that our CI converges (in the large sample limit) to the smallest possible interval containing $\tau$ under the $\Gamma$-\cornfield condition~\eqref{eq:cornfield}.
Finally, we supplement our theoretical analysis with an experimental investigation of the proposed approaches in Section~\ref{sec:experiments}. On both simulated and real-world data, we show that the CIs have good coverage and reasonable length.
\subsection{Related Work}
The semiparametric literature~\cite{bang2005doublerobust,
chernozhukov2018double} has shown that the augmented inverse probability weighted (AIPW) estimator allows the use of flexible nonparametric and machine learning models to estimate the nuisance parameters: conditional means $\E[Y(z) \mid X]$, $z \in \{0, 1\}$, and the propensity score $\P(Z = 1\mid X).$ By exploiting certain orthogonality properties, \citet{chernozhukov2018double} showed how to obtain root-$n$ consistency and asymptotic normality for the estimated $\tau$ even when the estimates of the nuisance parameters converge at slower nonparametric rates. We generalize this approach under the $\Gamma$-\cornfield{} condition.
A number of authors have studied nonparametric and semiparametric models for sensitivity analysis. These works consider alternatives to our choice of model~\eqref{eq:cornfield} in characterizing the relationship between unobserved confounders, treatment, and outcomes~\cite{franks2019flexible,richardson2014nonparametric,
robins2000sensitivity, zhao2017sensitivity, shen2011sensitivity,
brumback2004sensitivity}. We focus on the model of \citet{Rosenbaum02} because of its appealing interpretation as a regression model~\eqref{eq:stat-model}.
\citet{Imbens03} derived a sensitivity analysis for the treatment effect in the presence of unobserved confounding. His approach requires specifying parametric models for the effect of an unobserved confounder on both the treatment selection and outcome. Specifically, the relationship between the unmeasured confounder and treatment assignment is modelled via a logistic regression, which is a special case of condition~\eqref{eq:cornfield}.
\citet{aronow2012interval} and \citet{miratrix2017shape} study the bias due to unknown selection probabilities in survey analysis, with an approach similar to ours. In the survey setting, only surveyed individuals provide covariates $X$, so the papers~\cite{aronow2012interval,miratrix2017shape} consider a simplified model for selection bias, \begin{equation} \label{cond:simple-gamma-selection}
\frac{1}{\Gamma} \le \frac{P(Z=1 \mid U=u)}{P(Z=0 \mid U=u)} \frac{P(Z=0 \mid U=\tilde{u})}{P(Z=1 \mid U=\tilde{u})} \le \Gamma. \end{equation} \citet{zhao2017sensitivity} and \citet{shen2011sensitivity} consider the sensitivity of inverse probability weighted estimates of the ATE $\tau$ to unobserved confounding by varying the propensity score estimates around their estimated values. \citet{zhao2017sensitivity} discuss the relationship between their model of bounded unobserved confounding---which they call the marginal sensitivity model---and that based on the $\Gamma$-\cornfield{}~\eqref{eq:cornfield}. Compared to our semiparametric estimator, the complexity of the asymptotic distribution of their estimator necessitates using a bootstrap method for inference. An interesting future direction is to extend the methods in this paper to improve statistical inference under their model.
The most common approach to sensitivity analysis for the ATE under condition~\eqref{eq:cornfield} is to use matched observations~\cite{Rosenbaum02, Rosenbaum10,
rosenbaum2011new,rosenbaum2014weighted,fogarty2016sensitivity}. Unfortunately, exactly matched pairs rarely exist in practice, even for covariate vectors of moderate dimension; when considering continuous covariates, the probability of finding exactly matched pairs is zero. \citet{abadie2006large} show that under appropriate regularity conditions on the functions $\mu_{z}(x)$ and $e_1(x)$ (defined in Eqs. \eqref{eq:true-mean-function} and \eqref{eq:prop-score}), estimators of $\tau$ using approximately matched pairs can have a bias of order $\Omega(n^{-1/d})$ for $d$-dimensional continuous covariates. For these data, the AIPW method is a more appropriate statistical analysis tool. The AIPW estimator and other semiparametric methods can provide $\sqrt{n}$-consistent estimates of the ATE without unmeasured confounding \cite{imbensreview, hahn98, scharfstein1999adjusting,
chernozhukov2018double}. The semi-parametric approach for the lower bound on the ATE that we present in Section~\ref{sec:semiparametric} is $\sqrt{n}$-consistent under analogous regularity conditions. Therefore, when analyzing an observational study using the AIPW estimator, one should perform a sensitivity analysis using the semiparametric method we provide here. When finding good matched pairs is feasible, many analysts prefer matching due to the transparency of the results and the simplicity of confounding adjustment. If analyzing an observational study using matching methods, it would be natural to also use a matching-based method for sensitivity analysis, such as the ones described above. In summary, our proposed method and matching based sensitivity analysis approaches can be coupled with different main analyses in practice, and are complementary to each other.
Most work~\cite{hill2011bayesian, athey2016recursive, kunzel2017meta,
wager2017estimation, nie2017learning} directly study estimation of the CATE $\tau(x) = \mu_1(x) - \mu_0(x)$ assuming that all confounders are observed. More recently, \citet{kallus2018confounding} present an approach to learning personalized decision policy in the presence of unobserved confounding, and a contemporaneous work with this paper~\cite{kallus2019interval} derive bounds on the CATE; their methods are based on the marginal sensitivity model of \citet{zhao2017sensitivity}.
\paragraph*{Notation} We use $\P_n$ and $\P_n(\cdot \mid Z=z)$ to represent the empirical probabilities of $\{(Y_i(Z_i), X_i, Z_i)\}_{i = 1}^n$ and $\{(Y_i(Z_i), X_i) \mid Z_i=z\}$, respectively, and $\empE[\cdot \mid Z=z]$ is the expectation with respect to $\P_n(\cdot \mid Z=z)$ for $z=0, 1.$ We let $n_z=\sum_{i=1}^n \ind{Z_i=z}$ be the count of observations with $Z_i = z$, where $\ind{\cdot}$ is the indicator function. For a distribution $P$ and function $f : \mc{X} \to \R$, we use $\norm{f}_{2,P} = (\int_{\mc{X}} f^2(x) \dif{P}(x))^{1/2}$. For functions $f : \Omega \to \R$ and $g : \Omega \to \R$ with arbitrary domain $\Omega$, we write $f \lesssim g$ if there exists constant $C < \infty$ such that $f(t) \le C g(t)$ for all $t \in \Omega$, and $f \asymp g$ if $g \lesssim f \lesssim g$.
We use $P_z$ and $\E_z$ to denote the conditional distribution $P(\cdot \mid Z=z)$ and associated expectation, respectively. We write $\E_{Q}$ for the expectation under the probability $Q$, and omit the subscript under the data-generating distribution $P$.
\section{Bounds on Conditional Average Treatment Effect} \label{sec:cate-sensitivity}
To bound the CATE $\tau(X) = \E[Y(1) - Y(0)\mid X]$, we begin by separately bounding \begin{equation}
\mu_1(X) = \E[Y(1)\mid X] ~~~\mbox{and}~~~ \mu_0(X) = \E[Y(0)\mid X].
\label{eq:true-mean-function} \end{equation} We focus on $\mu_1(\cdot)$ as these two cases are symmetric. Henceforth, our statements hold for $P$-almost every $X$ and $P_z$-almost every $Y$ (where $z$ should be inferred from context).
\subsection{Bounding the unobserved potential outcome} \label{sec:no-covariate-lower-bound}
Decompose $\mu_1(\cdot)$ into observed and unobserved components \begin{equation}
\label{eq:mu-1-decomposition}
\mu_1(X) = \E[Y(1) \mid Z = 1, X] P(Z = 1 \mid X)
+\E[Y(1) \mid Z = 0, X] P(Z = 0 \mid X). \end{equation} The mean functions and the nominal propensity score, \begin{gather}
\label{eq:mean-function}
\mu_{z,z}(X) = \E[Y(z) \mid Z=z, X],
\\
\label{eq:prop-score}
e_z(X) = P(Z=z\mid X), \end{gather} are standard regression functions estimable based on observed data~\cite{imbensreview,nie2017learning,
scharfstein1999adjusting,wager2015adaptive}. The key difficulty in estimating the CATE is that one potential outcome is always unobserved: we never observe data to directly estimate $\E[Y(1) \mid Z=0, X]$.
We begin by reformulating the worst-case lower bound~\eqref{eq:cate-lower-bound}, $\theta_1(\cdot)$, based on the likelihood ratio between the observed and unobserved potential outcomes. We take a worst case optimization approach over likelihood ratios to bound the unobserved conditional mean. Using Lemma~\ref{lem:bdd-lr-no-cov} to come, the conditional distribution $P(Y(1) \in \cdot \mid X, Z=1)$ is absolutely continuous with respect to $P(Y(1) \in \cdot \mid X, Z=0)$ under condition \eqref{eq:cornfield}, so \begin{equation}
\label{eq:lr-form}
\E[Y(1) \mid Z = 0, X] = \E\left[Y(1) L(Y(1),X) \mid Z = 1, X \right], \end{equation} where $L$ is the likelihood ratio \begin{equation}
L(y,x)=\frac{\dif{P(Y(1) \in \cdot \mid Z = 0, X=x)}}{\dif{P(Y(1) \in \cdot \mid Z=1, X=x)}}(y).
\label{eq:likratio} \end{equation} While $L$ is unknown, the $\Gamma$-\cornfield{} condition \eqref{eq:cornfield} constrains it, inducing a lower bound on the unobserved quantity~\eqref{eq:lr-form}. \begin{restatable}{lemma}{lembddlrnocov}
\label{lem:bdd-lr-no-cov}
Let $P$ satisfy the $\Gamma$-\cornfield{} condition~\eqref{eq:cornfield},
and the conditional independence~\eqref{eq:indep}. Then $P_{Y(1)|Z = 0,X=x}$
is absolutely continuous with respect to $P_{Y(1)|Z = 1,X=x}$, and the
likelihood ratio~\eqref{eq:likratio} satisfies
$0\le L(y,x) \le \Gamma L(\tilde{y},x)$ for almost every $y$, $\tilde{y}$
and $x$.
Furthermore, for any likelihood ratio $L$ satisfying
$0\le L(y,x) \le \Gamma L(\tilde{y},x)$ for almost every $y$, $\tilde{y}$
and $x$, there is a distribution $P$ satisfying the $\Gamma$-\cornfield{}
condition~\eqref{eq:cornfield}, and the independence
assumption~\eqref{eq:indep}, such that Eq.~\eqref{eq:likratio} holds. \end{restatable} See Appendix~\ref{sec:proof-bounded-likelihood} for a proof of the absolute continuity. The rest of the proof is illuminating, so we provide it here, assuming absolute continuity.
\begin{proof} For simplicity in notation and without loss
of generality, we assume there are no covariates $x$. Define the
likelihood ratio for the unobserved
$U$ by
$r(u) \defeq \frac{q_0(u)}{q_1(u)},$ where $q_z(u)$ is the probability density function for $U\mid Z=z.$ Note that by applying Bayes rule in the inequality \eqref{eq:cornfield}, for any $u,~\wt{u}$,
\begin{equation}
r(u) \le \Gamma r(\wt{u}).
\label{eq:u-bound}
\end{equation}
Then, for
any set $B \in \sigma(Y(1))$, the sigma algebra of $Y(1)$, we have
\begin{align*}
\E[\ind{B} \mid Z = 0]
= \E\left[\E[r(U) \mid Y(1), Z = 1]
\ind{B} \mid Z = 1\right],
\end{align*}
so that almost everywhere, the likelihood
ratio $L(y) = \frac{dP_{Y(1) \mid Z = 0}}{dP_{Y(1) \mid Z = 1}}(y)$
satisfies
\begin{equation}
L(y) = \E\left[r(U) \mid Y(1) = y, Z = 1\right]
\label{eqn:remember-radon}
\end{equation}
by the Radon-Nikodym theorem. Now, for an arbitrary $\epsilon > 0$, and $y,~ \wt{y}$ satisfying the equality~\eqref{eqn:remember-radon}, let $u_0$ be such that $r(u_0) \le
\inf_u r(u) + \epsilon$.
Then
\begin{equation*}
L(y) \stackrel{(i)}{=}
\E[r(U) \mid Y(1) = y, Z = 1]
= r(u_0) \E\left[\frac{r(U)}{r(u_0)} \mid Y(1) = y, Z = 1\right]
\stackrel{(ii)}{\le} \Gamma r(u_0)
\end{equation*}
where equality~$(i)$ is simply Eq.~\eqref{eqn:remember-radon} and
inequality~$(ii)$ follows from the bound \eqref{eq:u-bound}.
We also have $L(\wt{y}) \ge \inf_u r(u) \ge r(u_0) - \epsilon$
by equality~\eqref{eqn:remember-radon}. Consequently,
$L(y) \le \Gamma r(u_0) \le \Gamma (L(\wt{y}) + \epsilon)$, and
as $\epsilon$ was arbitrary, this completes the proof.
The converse follows easily as well: given a likelihood ratio satisfying the above constraint, the $\Gamma$-\cornfield~\eqref{eq:cornfield} condition and the independence $\{Y(1), Y(0)\} \,\indep\, Z \mid X, U$ are satisfied for $U \defeq (Y(1), Y(0))$, and $P(Z=1 \mid U=u)$ only depending on the $Y(1)$ component of $U$, and defined by applying Bayes rule to the likelihood ratio. \end{proof}
Lemma~\ref{lem:bdd-lr-no-cov} implies that the lower bound $\theta_1(x)$ from Eq.~\eqref{eq:cate-lower-bound} on the unobserved conditional expectation $\E[Y(1)\mid X, Z = 0]$ is: \begin{align}
\label{eq:population-theta}
\theta_1(X) &=
\inf \left\{ \E[Y(1) L(Y(1)) \mid Z = 1, X] ~:~ L \in \likeratioset\right\} \end{align} where \begin{equation*}
\likeratioset = \left\{ L :\R \to \R~\text{measurable} ~:~\begin{aligned} & 0 \le L(y) \le \Gamma L(\tilde{y})~\text{for all}~y,\tilde{y},\\&\E[L(Y(1)) \mid Z=1, X] = 1\end{aligned}\right\}. \end{equation*} The first constraint in $\likeratioset$ comes from the $\Gamma$-\cornfield{} condition (Lemma~\ref{lem:bdd-lr-no-cov}), and the second normalization constraint guarantees that $L$ is a likelihood ratio; the objectives and constraints are linear in $L$. Applying Lagrangian duality to these constraints and simplifying the resulting dual problem shows that the solution to this optimization problem is the solution to an estimating equation in terms of the function \begin{equation}
\psi_{\theta}(y) \defeq \hinge{y - \theta} - \Gamma \neghinge{y -
\theta}.
\label{eq:psi-defn} \end{equation}
\begin{restatable}{lemma}{populationduality}
\label{lemma:duality}
Let $\theta_1(X)$ be defined as in
\eqref{eq:population-theta}. If $|\theta_1(X)|<\infty$, then
$\theta_1(X)$ solves
$$\E[ \psi_{\theta_1(X)}(Y(1)) \mid Z=1, X]=0$$
whenever this solution is unique. If the solution is not unique,
\begin{equation}
\theta_1(X)
= \sup \left\{\mu \in \R
~ :~ \E[ \psi_\mu(Y(1)) \mid Z=1, X] \ge 0
\right\}.
\label{eq:dual-constrained}
\end{equation} \end{restatable}
While $\theta_1(\cdot)$ could be estimated using a local estimating equation approach (e.g., as in \cite{newey1994kernel} and \cite{athey2019generalized}) for the equations $\E[ \psi_{\theta_1(X)}(Y(1))\mid Z = 1, X]=0$ for each $X$, we go further to provide an alternative loss minimization method to estimate $\theta_1(\cdot)$. This enables the application of a broad class of computationally and statistically efficient estimators.
\begin{figure}
\caption{Loss function \eqref{eq:gamma-loss} to minimize to lower bound the conditional mean of the unobserved potential outcome under the $\Gamma$-\cornfield{} condition. Illustrated here for $\Gamma=2$. This loss penalizes negative residuals more than positive residuals, to account for the fact that confounding could already be up-weighting positive residuals.}
\label{fig:loss}
\end{figure}
The lower bound $\theta_1(\cdot)$ is the solution to the convex loss minimization problem
\begin{equation}
\label{eqn:opt}
\minimize_{\funcparam(\cdot)}
~\E[ \loss_\Gamma\left(\funcparam(X), Y(1)\right) \mid \treatmentrv=1],
\end{equation}
where $\loss_{\Gamma}$ is the weighted squared loss
\begin{equation}
\label{eq:gamma-loss}
\loss_\Gamma(\theta, \outcome)
\defeq \half \left[ \hinge{\outcome - \theta}^2 +
\Gamma \neghinge{\outcome - \theta}^2 \right], \end{equation} illustrated in Figure~\ref{fig:loss}. Noting that $\frac{\dif{}}{\dif{\theta}} \loss_\Gamma(\theta, \outcome) = -\psi_{\theta}(y)$, we have the following lemma on the uniqueness properties and structure of $\funcparam_1$ solving the optimization problem~\eqref{eqn:opt}. \begin{restatable}{lemma}{lemoptisgood}
\label{lemma:opt-is-good}
Assume $(t, x) \mapsto
\E[\loss_\Gamma(t, Y(1)) \mid X = x, Z = 1]$ is continuous
on $\R \times \mc{X}$.
If $\treatedE[\loss_\Gamma(\popfunc(\covariaterv), Y(1))] < \infty$, then
$\popfunc(\cdot)$ solves
$\E\left[ \psi_{\theta}(Y(1)) \mid X = x, Z=1\right]=0$
for almost every $x$ if and only if it
solves \eqref{eqn:opt}.
Such a minimizer $\theta_1(\cdot): \mc{X} \to \R$ exists and is unique up to
measure-0 sets. \end{restatable} \noindent See Appendix~\ref{section:proof-of-opt-is-good} for proof. Our approach allows both classical techniques, such as sieves, and flexible use of modern machine learning methods to estimate $\theta_1(x)$; in our experiments, we demonstrate how to approximately solve the loss minimization problem~\eqref{eqn:opt} using gradient boosted decision trees.
\subsection{Nonparametric Estimation with Sieves} \label{sec:nonparametric-sieves} To obtain concrete nonparametric guarantees, we consider the method of sieves~\cite{GemanHw82}, which considers an increasing sequence $\funcspace_1 \subset \funcspace_2 \subset \cdots \subset \funcspace$ of spaces of (smooth) functions, where $\funcspace$ denotes all measurable functions. Here, for a sample size $n$, we take the estimator $\what{\theta}_1(\cdot)$ solving \begin{equation}
\label{eqn:opt-emp}
\minimize_{ \funcparam \in \funcspace_{n}}
~\empE[ \loss_\Gamma\left(\funcparam (\covariaterv), Y(1)\right) \mid Z=1]. \end{equation} With appropriate choices of the function spaces $\funcspace_n$, it is possible to provide strong approximation and estimation guarantees. As the loss $\funcparam \mapsto \loss_\Gamma(\funcparam(\covariate), \outcome)$ is convex, the empirical optimization problem~\eqref{eqn:opt-emp} is convex when $\funcspace_{n}$ is a finite dimensional linear space (e.g., polynomials, splines), which facilitates efficient computation~\cite{BoydVa04}.
In Appendix~\ref{sec:sieve-method}, we adapt results for sieve estimators~\cite{Chen07} to show convergence rates for the solution $\what{\theta}_1(\cdot)$ to the empirical problem~\eqref{eqn:opt-emp}. When $\theta_1(X)$ belongs in a $\holdersmooth$-smooth H\"{o}lder space, in Theorem~\ref{thm:sieve} of the Supplementary Materials, we prove that the empirical solution $\what{\theta_1}(\cdot)$ is consistent and achieves the following convergence rate (up to logarithmic factors): \begin{equation*}
\norm{\what{\theta}_1(\cdot) - \theta_1(\cdot)}_{2, P_1} = O_P\left( \left(\frac{\log n}{n} \right)^{\frac{p}{2p+d}} \right). \end{equation*} In the interest of space, we defer a comprehensive treatment to Appendix~\ref{sec:sieve-method}.
\subsection{Bounding the CATE}
Since $\theta_1(\cdot)$ satisfies $\theta_1(X) \le \E[Y(1) \mid X, Z=0]$ under the $\Gamma$-\cornfield{} condition, altogether $\mu_1^{-}(\cdot)$ defined below provides the lower bound \begin{equation*}
\mu_1^{-}(X) = \mu_{1,1}(X) e_1(X) + \theta_1(X) e_0(X) \le \mu_1(X). \end{equation*} By symmetry, letting
$\mu_0^+(X)=\mu_{0,0}(X) e_0(X) + \theta_0(X) e_1(X)$
where \begin{align}
\label{eq:population-mu-0-upper}
\theta_0(X) & =
\sup_{L~\textup{measurable}} \E[Y(0) L(Y(0)) \mid Z = 0, X] \\
& \qquad ~
\text{s.t.} ~ 0 \le L(y) \le \Gamma L(\tilde{y})~\text{all}~y,\tilde{y},
~~~ \E[L(Y(0)) \mid Z=0, X] = 1,
\nonumber \end{align} we have the parallel conclusion that $\mu_0^+(X) \ge \mu_0(X)$ holds under $\Gamma$-\cornfield{} condition. Similar to the above, $\theta_0(\cdot)$ is a unique minimizer of $\E[\loss_{\Gamma^{-1}}(\funcparam(\covariaterv), Y(0)) \mid \treatmentrv=0]$.\footnote{Convergence results for sieve estimators of $\theta_0(\cdot)$ again fall out of our results in Section~\ref{sec:sieve-method}.}
Thus, under the $\Gamma$-\cornfield{} condition~\eqref{eq:cornfield}, a valid lower bound on the CATE is simply \begin{equation}
\tau^{-}(X) = \mu_1^{-}(X) - \mu_0^{+}(X).
\label{eq:ate-lower-bound} \end{equation} We summarize our developments in the theorem below. \begin{restatable}{thm}{lowerboundproperty}
\label{thm:lower-bound-property}
Let $\Gamma \ge 1$ and $\{Y(1), Y(0), Z, X, U\}$ satisfy
condition~\eqref{eq:cornfield} and the conditional independence
assumption~\eqref{eq:indep}. Let $\tau^{-}(X)$ in \eqref{eq:ate-lower-bound}
use $\theta_1(X)$ and $\theta_0(X)$ solving the optimization problems
\eqref{eq:population-theta} and \eqref{eq:population-mu-0-upper} with the
same $\Gamma$. When $\E[|Y(z)| \mid X]<\infty$ for $z = 0, 1$ and $0 < e_1(X)<1$,
\begin{equation*}
\tau^{-}(X) \le \E[Y(1) - Y(0) \mid X].
\end{equation*} \end{restatable}
A natural estimator for $\tau^{-}(x)$ is the difference in conditional expected potential outcomes \begin{gather*} \what{\tau}^{-}(x)= \what{\mu}_1^{-}(x)-\what{\mu}_0^+(x),\\ \what\mu_1^-(x) = \what{\mu}_{1, 1}(x)\what{e}_1(x) + \what{\theta}_1(x) \what{e}_0(x), ~~\mbox{and}~~ \what\mu_0^+(x) = \what{\mu}_{0, 0}(x) \what{e}_0(x) + \what{\theta}_0(x) \what{e}_1(x), \end{gather*} where $\what{e}_z(\cdot)$ and $\what{\mu}_{z, z}(\cdot)$ are suitable estimators for the nominal propensity score $e_z(\cdot)$ and the observed potential outcome's mean function $\mu_{z,z}(\cdot),$ respectively. A variety of classical nonparametric methods and machine learning methods can estimate these regression functions~\cite{chernozhukov2018double,wager2015adaptive,chen1999improved}. To understand convergence of $\what{\tau}^{-}(\cdot)$, consider the convergence of these regression estimates. Specifically, assume that the estimators $\what{e}_1(\cdot)$ and $\what{\mu}_{z, z}(\cdot)$ satisfy that \begin{align*} &\norm{\what{e}_1(\cdot) - e_1(\cdot)}_{2, P} = O_P(r_{n, 1}), \\
&\norm{\what{\mu}_{1, 1}(\cdot) - \mu_{1, 1}(\cdot)}_{2, P_1} = O_P(r_{n, 2}),
~~\norm{\what{\mu}_{0, 0}(\cdot) - \mu_{0, 0}(\cdot)}_{2, P_0} = O_P(r_{n, 3}), \\
& \norm{\what{\theta}_1(\cdot) - \theta_1(\cdot)}_{2, P_1} = O_p(r_{n, 4}), ~~\norm{\what{\theta}_0(\cdot) - \theta_0(\cdot)}_{2, P_0} = O_p(r_{n, 5}), \end{align*} where $r_{n,j}$ depend on the model assumptions and estimation method. We assume $0 < \epsilon \le e_1(x) \le 1-\epsilon$, so
$\|\cdot\|_{2,P_1} \asymp \|\cdot\|_{2,P_0} \asymp \|\cdot\|_{2,P}$. Then, $\what{\tau}^-(\cdot)$ is a consistent estimator, and \begin{equation*}
\norm{\what{\tau}^-(\cdot) - \tau^-(\cdot)}_{2, P} =
O_p\left(r_{n, 1} + r_{n, 2} + r_{n, 3} + r_{n, 4} + r_{n, 5}\right). \end{equation*}
Under assumptions stated in Appendix~\ref{sec:sieve-method} (\ref{assumption:holder-smooth}--\ref{assumption:lebesgue-equiv}, including that $\theta_z$ belongs to a $\holdersmooth$-smooth H\"{o}lder space), our sieve estimators~\eqref{eqn:opt-emp} for $\theta_z$ achieve the asymptotic convergence rate \begin{equation*}
\norm{\what{\theta}_z - \theta_z}_{2, P_z} = \widetilde{O}_P\left( n^{-\frac{p}{2p+d}} \right) ~~z \in \{0, 1\}, \end{equation*} where the notation $\widetilde{O}_P(\cdot)$ hides logarithmic factors. Under similar smoothness and regularity assumptions, \citet{chen1999improved} establish that sieve estimators $\what{e}_z(\cdot)$ and $\what{\mu}_{z, z}(\cdot)$ for $e_z$ and $\mu_{z,z}$ can also achieve a convergence rate of $r_{n,j}=\widetilde{O}(n^{-\frac{\holdersmooth}{2\holdersmooth + \covdim}}).$ Consequently, $$\norm{\what{\tau}^-(\cdot) - \tau^-(\cdot)}_{2, P} = \widetilde{O}_P\left(n^{-\frac{p}{2p+d}}\right),$$ where the convergence rates reflect typical behavior of (minimax optimal) non-parametric estimators of a regression function~\cite{newey1997convergence,Stone80}. These constitute the high order terms of the approximation error for estimating the CATE $\tau(x)$ without unobserved confounding~\cite{kunzel2017meta}, if the smoothness of the CATE $\tau(\cdot)$ is of a similar order to the individual parameters $\theta_z(\cdot)$, $\mu_z(\cdot)$ and $e_z(\cdot)$. Interesting future work would be to develop a method that adapts to the complexity of $\tau^{-}(\cdot)$, itself, as done by \citet{nie2017learning} and \citet{kennedy2020optimal}.
\section{Bounds on the Average Treatment Effect} \label{sec:semiparametric}
Given the bounds developed in Section~\ref{sec:cate-sensitivity} for the conditional average treatment effect $\tau(\cdot)$, we now turn to bounding the average treatment effect (ATE) $\tau$ by marginalizing over $X$ \begin{align} \tau^- & \defeq \E[\tau^-(X)] = \E\left[ \mu_1^-(X) - \mu_0^+(X) \right]. \label{eq:ate-lower-lazy} \end{align} Because $\tau^-(x) \le \tau(x)$ for any $x$, $\tau^- \le \tau$ is a lower bound of the ATE. Rewriting $\tau^-$ as \begin{equation}
\tau^- = \mu_1^- - \mu_0^+
~~ \mbox{where} ~~
\Bigg\{\begin{array}{l}
\mu_1^- = \E[ \mu_1^-(X)] = \E\left[ Z Y(1) + (1-Z)\funcparam_1(X)
\right] \\
\mu_0^+ = \E[ \mu_0^+(X)] = \E\left[ (1-Z)Y(0) +
Z\funcparam_0(X) \right],
\end{array}
\label{eq:one-side-functional} \end{equation} we estimate $\mu_1^-$ and $\mu_0^+$ separately and combine the resulting estimators.
In Section~\ref{sec:semiparametric-method}, we construct a semiparametric estimator for the bound $\tau^-$ that is conceptually similar to the AIPW estimator under unconfoundedness. We show in Section~\ref{sec:semiparametric-convergence} that it achieves $\sqrt{n}$-consistency even when (nonparametric) estimates of the nuisance parameters (e.g. $e_z(\cdot)$, $\mu_{z, z}(\cdot)$, $\funcparam_1(\cdot)$) only converge at slower rates. We focus on lower bounds for the potential outcome $Y(1)$ as other cases are symmetric. We conclude our theoretical discussion by complementing our methodological developments with optimality guarantees (Section~\ref{sec:hypothesis-test}). We show that our approach is asymptotically unimprovable for testing a null of no treatment effect and unobserved confounding satisfying the $\Gamma$-\cornfield{} condition against a positive alternative.
\subsection{Estimation procedure} \label{sec:semiparametric-method}
We construct a score $T(V; \eta)$ to estimate $\mu_1^-$ similar to the AIPW estimator of the ATE in the absence of unobserved confounding, where $V = (X, Y, Z)$ and $\eta$ represents a set of nuisance parameters defined below. The score $T(V; \eta)$ comes from calculating the semiparametric influence function for $\mu_1^-$ from representation in \eqref{eq:one-side-functional} using the method described by \citet{newey1994asymptotic}, and augmenting the representation with the influence function. To this end, by computing the pathwise derivative of the functional in \eqref{eq:one-side-functional} with respect to a parametric subfamily of the nonparametric model, and matching to the form derived by \citet{newey1994asymptotic}, we see that the remaining term in the influence function is \begin{equation*}
\alpha_1(V; \theta_1, e_1, \nu_1) = \treatmentrv \frac{\psi_{\theta_1(X)}(Y)(1-e_1(X))}{\nu_1(X)e_1(X)}, \end{equation*} which depends on the nuisance parameters $\theta_1(x)$ and $e_1(x)$, and a new nuisance parameter, \begin{equation}
\label{eqn:weight-normalization-def-a}
\nu_1(x)=P(Y \ge \funcparam_1(x) \mid Z=1, X=x) +
\Gamma P(Y < \funcparam_1(x) \mid Z=1, X=x), \end{equation} which serves as a weight normalization factor. In this, the function $\psi_\theta(y)$ refers to the one defined in Eq.~\eqref{eq:psi-defn}. Adding the term $\alpha_1(V; \eta)$ to the representation in \eqref{eq:one-side-functional} gives the augmented score \begin{equation}
T(X, Y, Z; \theta_1, e_1, \nu_1) \defeq \treatmentrv Y + (1-\treatmentrv)\funcparam_1(X) + \treatmentrv \frac{\psi_{\theta_1(X)}(Y)(1-e_1(X))}{\nu_1(X)e_1(X)},
\label{eq:aug-score} \end{equation} that we use for estimation. We have $E_P[T(X, Y, Z; \theta_1, e_1, \nu_1)] = \mu_1^-$ since $\E_P[\psi_{\theta_1(X)}(Y) \mid Z=1, X] = 0.$ By virtue of its augmented form, the score $T(\cdot; \cdot)$ is insensitive to estimates in the nuisance parameters, formalized by the Neyman orthogonality condition~\citep{Neyman59}: \begin{definition}
\label{def:neyman}
Let $Q, \eta \mapsto \E_Q[S(V; \eta)]$ be a statistical functional with $Q$ a distribution over $V$, and nuisance
parameter $\eta \in \Lambda$, where we take $\Lambda$ to be a subset of a
normed vector space containing the true nuisance parameter $\eta_0$. The
score $S$ is Neyman orthogonal at $P$ if for all $\eta \in \Lambda$,
the derivative $\frac{d}{dr} \E_P[S(V; \eta_0 + r(\eta-\eta_0))]$
exists for $r \in [0, 1)$, and is zero at $r = 0$. \end{definition} As \citet[Section 2.2.5]{chernozhukov2018double} shows, a score formed by adding the influence function adjustment $\alpha_1(v)$ from the pathwise derivative as in \citet{newey1994asymptotic} is Neyman orthogonal. Therefore, we expect Neyman orthogonality of the functional~\eqref{eq:aug-score} constructed in this way; we verify this formally in the proof of Theorem~\ref{thm:semiparametric} in the Supplementary Materials.
We construct a semiparametric plug-in estimator for the augmented functional, and show that estimation errors of the nuisance parameters multiply to reduce their influence on our final estimator. Concretely, we prove that our augmented estimator preserves $\sqrt{n}$ consistency provided that our nuisance estimates converge at a rate of $o_P(n^{-1/4})$ in $\|\cdot\|_{2,P}$ norm. This draws important connections to the classical doubly-robust AIPW estimator under no unobserved confounding. Recalling the definitions~\eqref{eq:mean-function} and \eqref{eq:prop-score} of $\mu_{z,z}(x)$ and $e_1(x)$ (respectively), the standard AIPW estimator for $\E[Y(1)]$ is
\begin{equation}
\label{eq:aipw}
\what{\mu}_{1,\text{AIPW}} = \frac{1}{n} \sum_{i=1}^{n}\left[
\what{\mu}_{1,1}(X_i) + \frac{Z_i}{\what{e}_1(X_i)}\left(Y_i -
\what{\mu}_{1,1}(X_i)\right)\right]. \end{equation} Assuming all confounding variables are observed \eqref{eq:indepsim}, the AIPW~\eqref{eq:aipw} is an asymptotically efficient estimator of $\mu_1$~\cite{HiranoImRe03}. The AIPW also satisfies the Neyman orthogonality condition, which \citet{chernozhukov2018double} used to show that the AIPW estimator~\eqref{eq:aipw} with cross-fitting (described below) enjoys the root $n$ rate so long as the nuisance parameters can be estimated at the rate $o_p(n^{-1/4})$. Our approach generalizes the AIPW estimator~\eqref{eq:aipw} under the $\Gamma$-\cornfield{} condition, and reduces to the AIPW when $\Gamma = 1$.
We use an efficient sample-splitting recipe for constructing an augmented estimator for $\mu_1^-$ by adapting \citet{chernozhukov2018double}'s cross-fitting meta-procedure for Neyman-orthogonal functionals to our augmented score $T(\cdot)$. Letting $K \in \naturals$ be the number of folds for cross-fitting, randomly split the data into $K$ folds of approximately equal size. With slight abuse of notation, let $\mathcal{I}_k$ be the indices corresponding to the observations in the $k$-th part as well as the corresponding observations themselves.
For each $k$, using the sample $\mathcal{I}_{-k}$ of observations \emph{not} in the $k$-th fold, we compute \begin{enumerate} \item an estimator of $\theta_1(x)$, denoted by $\what\funcparam_{1,k}(x),$ using
the procedure described in Section~\ref{sec:cate-sensitivity} \item an estimator of $e_1(x)$, denoted by $\what{e}_{1,k}(x)$, and
let $\what{e}_{0, k}(x)=1-\what{e}_{1, k}(x);$ \item an estimator of $\nu_1(\cdot)$, denoted by $\what\nu_{1,k}(\cdot)$, using the procedure described in Section~\ref{sec:nu-est} \end{enumerate} Estimating $\nu_{1}(\cdot)$ in the last step is more involved, as it depends on $\theta_1(\cdot)$, so we defer the construction of $\what\nu_{1,k}(\cdot)$ to Section~\ref{sec:nu-est}. Under appropriate regularity conditions---for example, sufficient smoothness of $\funcparam_1(x)$, $e_1(x)$, and $\nu_1(x)$---these estimators attain $o_P(n^{-1/4})$ convergence in $\norm{\cdot}_{2,P}$.
In the end, our proposed cross-fitting estimator of $\mu_1^-$ is \begin{align}
\label{eq:orthogonal-estimator}
\what{\mu}_1^- =
\frac{1}{n} \sum_{k=1}^K \sum_{i\in\mathcal{I}_k} \left\{\treatmentrv_i Y_i + (1-\treatmentrv_i)\what{\funcparam}_{1,k}(X_i) + \treatmentrv_i \frac{\psi_{\what{\funcparam}_{1,k}(X_i)}(Y_i)\what{e}_{0,k}(X_i)}{\what{\nu}_{1,k}(X_i)\what{e}_{1,k}(X_i)}\right\}, \end{align} with an estimator $\what{\mu}_0^+$ for $\mu_0^+$ constructed similarly. This estimator is natural; when $\Gamma = 1$, we recover the cross-fitting version of the standard doubly robust AIPW estimator~\eqref{eq:aipw}. While the estimator satisfies the orthogonality conditions of \citet{chernozhukov2018double} that imply a form of local robustness for $\widehat{\theta}_1(\cdot)$ near $\theta_1(\cdot)$, we explain below why it isn't doubly robust.
\subsection{Asymptotic properties and inference} \label{sec:semiparametric-convergence}
To establish asymptotic normality of $\what{\mu}_1^-$, we require a few assumptions. Consistency of $\what{\mu}_1^-$ follows from weak regularity conditions and the consistency of $\what{\theta}_1(\cdot)$, which we address via Assumption~\ref{assumption:bounded-variance}. Asymptotic normality requires stronger conditions (Assumptions~\ref{assumption:problem-regularity} and~\ref{assumption:nuisance-est}), in turn allowing us to establish Theorem~\ref{thm:semiparametric} on the asymptotic normality of $\what{\mu}_1^-$.
\begin{assumption}
\label{assumption:bounded-variance}
There exist $\epsilon>0$ and $0<\constlow <\constup $
such that (a) $\E[|Y(1)|] < \infty,$ (b) $\|
\what{\theta}_{1}(\cdot) - \theta_1(\cdot) \|_{1,P} \cp 0$, (c)
$e_1(X) \in [\epsilon, 1-\epsilon]$ almost surely, (d) $\P([\essinf \what{e}_1(X), \esssup
\what{e}_1(X)] \subset [\epsilon, 1-\epsilon]) \to 1,$ and (e) $\P\left(\constlow
\le \what{\nu}_1(x)
\le \constup ~ \mbox{for~all~}x\right) \to 1$. \end{assumption}
Assumption~\ref{assumption:bounded-variance}(a-c) are slightly stronger than the usual assumptions for justifying consistency of the AIPW estimator for the ATE $\tau$ in the absence of unobserved confounding
\cite{bang2005doublerobust,chernozhukov2018double}. When $\|\what{e}_1(\cdot) - e_1(\cdot)\|_{\infty,P} \cp 0$, Assumption~\ref{assumption:bounded-variance}(c) implies Assumption~\ref{assumption:bounded-variance}(d), and similarly when
$\|\what{\nu}_1(\cdot) - \nu_1(\cdot)\|_{\infty,P} \cp 0$, $\nu_1(x) \in [1,
\Gamma]$ implies Assumption~\ref{assumption:bounded-variance}(e).
Assumption~\ref{assumption:bounded-variance}(b) is necessary, and cannot be removed by alternatively assuming consistency of the other nuisance parameters. The augmentation term $\alpha_1(V; \eta)$ with the true $\theta_1(\cdot)$ plugged in has mean zero regardless of the nominal propensity score used. In this case, the proposed estimator is consistent in estimating $\mu_1^-.$ However, if an incorrect $\theta_1(\cdot)$ is plugged in to $T(V; \eta),$ straightforward computation shows that $\E[T(V; \eta)]$ depends on the $\theta_1(\cdot)$ plugged in, even with the correct $e_1(\cdot)$ and $\nu_1(\cdot)$. Therefore, $\what{\mu}_1^-$ is not globally doubly robust; the Neyman orthogonality condition only guarantees a local form of robustness.
\begin{restatable}{thm}{thmconsistency}
\label{thm:consistency}
Under Assumption~\ref{assumption:bounded-variance}, the
estimator~\eqref{eq:orthogonal-estimator} satisfies
$\what{\mu}_1^- \cp \mu_1^-$. \end{restatable} \noindent See the Supplementary Materials (Section~\ref{sec:proof-consistency}) for the proof. We now turn to stronger regularity assumptions for the weak convergence of $\widehat{\mu}_1^-.$
\begin{assumption}
\label{assumption:problem-regularity}
(a) There exist $q > 2$, and $C_q < \infty$ such that
$\E[|Y(1)|^q] \le C_q$, and (b) $Y(1)$ has a conditional density
$p_{Y(1)}(y \mid X=x, \treatmentrv=1)$ with respect to the Lebesgue measure
and $\sup_{x,y} p_{Y(1)}(y \mid \treatmentrv=1, X=x) < \infty$. \end{assumption}
\begin{assumption}
\label{assumption:nuisance-est}
$\what{\eta}_1 = (\what{\funcparam}_1, \what{\nu}_1, \what{e}_1)$ is a
consistent estimator of $\eta_1 \defeq (\funcparam_1, \nu_1, e_1)$ and (a)
$\| \what{\eta}_1(\cdot) - \eta_1(\cdot) \|_{2,P}= o_P(n^{-1/4})$,
(b)~$\| \what{\eta}_1(\cdot) - \eta_1(\cdot) \|_{\infty,P} = O_P(1)$. \end{assumption}
Assumption~\ref{assumption:problem-regularity}(a) is no stronger than the standard regularity conditions needed for existence of asymptotically normal estimators of the ATE without unobserved confounding~\cite{chernozhukov2018double}. Assumption \ref{assumption:problem-regularity}(b) ensures that the term
$\theta(\cdot) \mapsto \E[ Z\psi_{\theta(X)}\{Y(1)\} \mid X]$
is sufficiently smooth to control fluctuations due to estimating $\theta_1(\cdot)$. Inspection of the proof of Theorem~\ref{thm:semiparametric} to come shows that we may relax Assumption~\ref{assumption:problem-regularity}(b): if $\theta_1(x)$ and $\what{\theta}_1(x)$ have range ${\cal A}_1(x)$, we may replace \ref{assumption:problem-regularity}(b) with \begin{equation}
\label{eqn:condition-replacing-density}
\esssup_{X} \sup_{y \in {\cal A}_1(X)} p_{Y(1)}(y \mid
\treatmentrv=1, X) < \infty, \end{equation} which is satisfied, e.g., when the outcome $Y(z)$ is binary and $P(Y(z) = y \mid \treatmentrv = z, X)<1$ for $y \in \{0,1\}$, because $\what{\theta}_1(X) \in (0, 1)$ eventually and $p(y \mid Z=1, X) = 0$ for $y \not \in \{0,1\}$.
The convergence rate conditions for estimating nuisance parameters in Assumption~\ref{assumption:nuisance-est} are relatively standard in semi-parametric estimation \cite{newey1994asymptotic,chernozhukov2018double}, but nonetheless this theoretical requirement can be restrictive and hard to achieve in certain applications. For example, while for $e_1(\cdot),$ the conditional mean of observed random variables, a variety of methods can provide $o_P(n^{-1/4})$ consistency, they still require the data generating distribution to meet appropriate conditions and the sample size to be large relative to the dimension of the covariates \cite{wager2017estimation,chernozhukov2018double}. The estimators $\what{\theta}_1(\cdot)$ from Section~\ref{sec:cate-sensitivity} and $\what{\nu}_1(\cdot)$ from Section~\ref{sec:semiparametric-method} achieve the convergence rates in Assumption~\ref{assumption:nuisance-est} under appropriate smoothness conditions on $\theta_1(\cdot)$ and $\nu_1(\cdot).$ For instance, if Assumptions~\ref{assumption:holder-smooth}, \ref{assumption:bdd-error}, and \ref{assumption:lebesgue-equiv} hold with $p > d/2$, then Theorem~\ref{thm:sieve} shows that estimating $\funcparam_1(x)$ as in Section~\ref{sec:cate-sensitivity} with linear sieves (see Examples~\ref{example:polynomials} and \ref{example:splines}) will satisfy Assumption~\ref{assumption:nuisance-est}. Section~\ref{sec:nu-est} provides an efficient enough estimator of $\nu_1(\cdot)$ when $p > d/2$. Under these assumptions, the following theorem gives the asymptotic distribution of the estimator $\what{\mu}_1^-$ in~\eqref{eq:orthogonal-estimator}, with asymptotic variance \begin{equation*}
\sigma_1^2 \defeq \var\left[
\treatmentrv Y + (1-\treatmentrv)\funcparam_1(X) + \treatmentrv
\frac{\psi_{\funcparam_1(X)}(Y)(1-e_1(X))}{\nu_1(X)e_1(X)}
\right]. \end{equation*} We use the following consistent estimator of the asymptotic variance \begin{equation*} \what\sigma_1^2 \defeq \frac{1}{n}\sum_{k=1}^K\sum_{i\in \mathcal{I}_k}\left[ \treatmentrv_i Y_i + (1-\treatmentrv_i)\what{\funcparam}_{1,k}(X_i) + \treatmentrv_i \frac{\psi_{\what{\funcparam}_{1,k}(X_i)}(Y_i)\what{e}_{0,k}(X_i)}{\what{\nu}_{1,k}(X_i)\what e_{1,k}(X_i)} -\what{\mu}_1^-\right]^2.
\end{equation*} \begin{restatable}{thm}{semiparametricnormality}
\label{thm:semiparametric}
Let Assumptions~\ref{assumption:bounded-variance},
\ref{assumption:problem-regularity}, and \ref{assumption:nuisance-est} hold.
Then, $\what{\mu}_1^-$ given in Eq.~\eqref{eq:orthogonal-estimator} is
asymptotically normal with $ \sqrt{n}(\what{\mu}_1^- - \mu_1^-)
\overset{d}{\to} \normal(0, \sigma_1^2)$.
Furthermore, $\what{\sigma}_1^2 \cp \sigma_1^2$, and
$ \frac{\sqrt{n}}{\what{\sigma}_1} (\what{\mu}_1^- - \mu_1^-)
\overset{d}{\to} \normal(0, 1)$. \end{restatable} \noindent See Section~\ref{sec:proof-semiparametric} in the Supplementary Materials for a proof. To bound $\tau$ from below, let $$\what{\tau}^- = \what\mu_1^- - \what\mu_0^+, $$ where $\tilde{\psi}_\theta(y)=\Gamma (y-\theta)_+-(y-\theta)_-$, $$\what\mu_0^+=\frac{1}{n}\sum_{k=1}^K \sum_{i \in {\cal I}_k} \left[(1-\treatmentrv_i)Y_i + \treatmentrv_i \what\funcparam_{0,k}(X_i) + (1-\treatmentrv_i) \frac{\tilde{\psi}_{\what{\funcparam}_{0,k}(X_i)}(Y_i)\what{e}_{1,k}(X_i)}{\what{\nu}_{0,k}(X_i)\what{e}_{0,k}(X_i)}\right],$$
and $\what{\nu}_{0,k}(\cdot)$ is the nonparametric estimator of
$$\nu_0(X)=P(Y \le \funcparam_0(X) \mid Z=0, X) + \Gamma P(Y > \funcparam_0(X) \mid Z=0, X)$$
based on data in ${\cal I}_{-k}.$ A simple extension of Theorem~\ref{thm:semiparametric} shows $$\sqrt{n}(\what{\tau}^- - \tau^-) \to \normal(0, \sigma_{\tau^-}^2),$$ as $n \rightarrow \infty,$ where \begin{equation}
\sigma_{\tau^-}^2 \defeq \var \begin{aligned}[t] \Bigg[& \treatmentrv Y + (1-\treatmentrv)\funcparam_1(X) + \treatmentrv \frac{\psi_{\funcparam_1(X)}(Y)e_0(X)}{\nu_1(X)e_1(X)} \\ &-(1-\treatmentrv) Y - \treatmentrv \funcparam_0(X) - (1-\treatmentrv)\frac{\tilde{\psi}_{\funcparam_0(X)}(Y)e_1(X)}{\nu_0(X)e_0(X)} \Bigg].
\end{aligned}
\label{eq:var-est-tau-hat-minus} \end{equation}
Furthermore, a consistent estimator of the variance $\sigma_{\tau^-}^2$ is \begin{align} \label{eq:est-tau-hat-var}
\what\sigma_{\tau^-}^2
= \frac{1}{n}\sum_{k=1}^{K}\sum_{i\in \mathcal{I}_k}
\bigg[& \treatmentrv_i Y_i + (1-\treatmentrv_i)\what\funcparam_{1,k}(X_i) + \treatmentrv_i \frac{\psi_{\what\funcparam_{1,k}(X_i)}(Y_i)\what{e}_{0,k}(X_i)}{\what{\nu}_{1,k}(X_i)\what{e}_{1,k}(X_i)} \\ & - (1-\treatmentrv_i)Y_i - \treatmentrv_i \what\funcparam_{0,k}(X_i) - (1-\treatmentrv_i) \frac{\tilde{\psi}_{\what{\funcparam}_{0,k}(X_i)}(Y_i)\what{e}_{1,k}(X_i)}{\what{\nu}_{0,k}(X_i)\what{e}_{0,k}(X_i)}-\what\tau^- \bigg]^2
\nonumber \end{align}
and $[\what\tau^- - z_{1-\alpha/2}\what\sigma_{\tau^-}/\sqrt{n}, \what\tau^- + z_{1-\alpha/2}\what\sigma_{\tau^-}/\sqrt{n}] $ is a $100(1-\alpha)\%$ asymptotic confidence interval for $\tau^-.$
The proof is \emph{mutatis mutandis} identical to that of Theorem~\ref{thm:semiparametric}.
Importantly, our bounds define a confidence set for $\tau = \E[Y(1)-Y(0)]$. The same approach as in Section~\ref{sec:cate-sensitivity}, but up-weighting large values of $Y(1)$ and small values of $Y(0)$, provides an estimate $\what{\tau}^+$ of $\tau^+$ that upper bounds the ATE. The limiting distribution of $\what{\tau}^+$ is also normal. With these estimators, we may construct a confidence interval for the ATE, \begin{equation}
\what{\operatorname{CI}}_{\tau} = \left[\what\tau^- - z_{1-\alpha/2}\frac{\what\sigma_{\tau^-}}{\sqrt{n}}, \what\tau^+ + z_{1-\alpha/2}\frac{\what\sigma_{\tau^+}}{\sqrt{n}} \right],
\label{eq:ci4ate} \end{equation} where $\what{\sigma}_{\tau^+}^2$ is a consistent estimator of the variance of $\sqrt{n}(\what{\tau}^+-\tau^+).$
Because $\tau^- \le \tau \le \tau^+$, this confidence interval has appropriate asymptotic coverage: \begin{restatable}{cor}{corincludeate}
\label{cor:include-ate}
Let $P$ satisfy the $\Gamma$-\cornfield{} condition~\eqref{eq:cornfield},
conditional independence~\eqref{eq:indep}, and
Assumptions~\ref{assumption:bounded-variance}--\ref{assumption:nuisance-est}. Let
$\what{\operatorname{CI}}_{\tau}$ be defined as in \eqref{eq:ci4ate}. For
$\tau = \E[Y(1) - Y(0)]$, we have
\begin{equation*}
\liminf_{n \to \infty} P(\tau \in \what{\operatorname{CI}}_\tau) \ge 1-\alpha.
\end{equation*} \end{restatable}
\begin{remark}
It is possible to extend Theorem~\ref{thm:semiparametric} to provide
confidence intervals uniform over $\mc{P}$. In other words, the coverage
probability of the relevant confidence intervals converge to the desired
level uniformly over all the distributions in $\mc{P}.$ To do so,
Assumption~\ref{assumption:nuisance-est} must be uniform over a class of
distributions $\mc{P}$ satisfying
Assumption~\ref{assumption:problem-regularity}, for instance by assuming
there exist sequences $\Delta_n \to 0$ and $\delta_n \to 0$ such that
\begin{equation*}
\sup_{P \in \mc{P}} P\left(\|\what{\eta}_1(\cdot) - \eta_1(\cdot)\|_{2,P} > n^{-1/4}\delta_n\right) < \Delta_n.
\end{equation*}
Previous work~\cite{chen2015optimal} shows that series estimators for
the conditional regression function (example 1 in
Section~\ref{sec:cate-sensitivity}) converge uniformly; extending these
results to the estimation of $\theta_1(\cdot)$ and $\nu_1(\cdot)$ is beyond the
scope of the present work. \end{remark}
\subsection[Estimation of nu]{Construction of
$\what\nu_{1,k}(\cdot)$ and its asymptotic properties} \label{sec:nu-est}
\newcommand{\bar{\loss}_{\Gamma}}{\bar{\loss}_{\Gamma}}
The above results assumed access to a well-behaved estimate of the weighted probability $\popprob(X) = 1 + (\Gamma - 1)\P(Y(1) \ge \popfunc(X) \mid Z=1, X)$. Here, we describe a nonparametric estimator via a loss function: defining \begin{equation*}
\bar{\loss}_{\Gamma}(\probfunc, \funcparam, \outcome) \defeq \half \left[
1 + (\Gamma - 1)\indic{\outcome \ge \funcparam} -
\probfunc \right]^2, \end{equation*} $\popprob$ uniquely solves the optimization problem \begin{equation}
\label{eqn:opt-prob}
\minimize_{\probfunc(\cdot)~\textup{measurable}}
~ \E[\bar{\loss}_{\Gamma}\left\{\probfunc(X), \popfunc(X), Y(1) \right\} \mid Z=1]. \end{equation} The natural sieve estimator for $\popprob(\cdot)$ minimizes the empirical version of \eqref{eqn:opt-prob} under finite-dimensional sieves. However, this requires knowledge of $\popfunc(\cdot)$, which itself must be estimated. Therefore, consider the following (nested) cross-fitting approach: \begin{enumerate}[1.] \item Partition the sample $\mathcal{I}_{-k}$ into two
\emph{independent} sets, $\mathcal{I}_{-k,1}$ and $\mathcal{I}_{-k,2}$. \item Let $\what{\theta}_{1k}^{\nu_1}(\cdot)$ be an estimator of
$\popfunc(\cdot)$ based on the first subset $\mathcal{I}_{-k,1};$ \item For a sequence of sieve parameter spaces $\probspace_1 \subseteq
\cdots \subseteq \probspace_{n} \subseteq \cdots \subseteq \probspace,$
estimate $\what{\nu}_{1,k}$ minimizing
the plug-in version of the
population problem~\eqref{eqn:opt-prob},
\begin{equation}
\label{eqn:opt-emp-prob}
\minimize_{ \probfunc(\cdot) \in 1 + (\Gamma -1)\probspace_{n}}
~ \E_{n,2}^{(k)}\left[\bar{\loss}_{\Gamma}
\big(\probfunc(\covariaterv), \what{\theta}_{1k}^{\nu_1}(\covariaterv),
Y \big) \mid Z = 1 \right],
\end{equation} \end{enumerate} where $\E_{n,2}^{(k)}$ is the empirical expectation with respect to the second subset $\mathcal{I}_{-k,2}$.
When $\nu_1(X)$ belongs to a $\probsmooth$-smooth H\"{o}lder space, in Proposition~\ref{prop:prob-sieve} in the Supplementary Materials, we prove that the empirical solution $\what{\nu}_{1,k}(\cdot)$ to the problem~\eqref{eqn:opt-emp-prob} achieves the minimax optimal nonparametric rate (up to logarithmic factors) \begin{equation*}
\norm{\what{\nu}_{1, k}(\cdot) - \nu_1(\cdot)}_{2, P_1} = O_P\left( \left(\frac{\log n}{n} \right)^{\frac{\probsmooth}{2\probsmooth+d}} \right). \end{equation*} If $\probsmooth>d/2,$ then $\norms{\what{\nu}_{1,k} - \nu_1}_{2, P}=o_P(n^{-1/4})$, satisfying the assumptions in Theorem~\ref{thm:semiparametric}. We defer a rigorous treatment to Appendix~\ref{sec:nu-sieve} as our results heavily build on the standard theory of sieve estimation~\cite{Chen07}. In Proposition~\ref{prop:prob-sieve}, we demonstrate sufficient conditions for the convergence of $\what{\nu}_{1,k}$ needed for the lower bound estimator~\eqref{eq:orthogonal-estimator} and its asymptotic normality via Theorem~\ref{thm:semiparametric}: with sufficient smoothness of $\nu_1$, it is possible to efficiently estimate lower and upper bounds on the average treatment effect.
\subsection[Design sensitivity and optimality of the estimator for tau-]{Design sensitivity and optimality of our bound on the ATE} \label{sec:hypothesis-test}
We complement our methodological development so far with optimality results for our worst-case bounds. By construction, our approach yields a tight bound on the mean of each unobserved potential outcome. We extend these results to the ATE by constructing an instance where our bound is tight. That is, we construct a family of data generating distributions such that whenever our bounds cannot infer the sign of the ATE, the ability to test whether or not the ATE is positive is intrinsically difficult. To this end, we study a pointwise asymptotic level $\alpha$ hypothesis test for the composite null \begin{equation}
H_0(\Gamma) : \E[Y(1)] \le \E[Y(0)] ~~\mbox{and the}~
\Gamma\text{-\cornfield{} condition~\eqref{eq:cornfield} holds}
\label{eq:gaussian-null-regular} \end{equation} under Assumptions~\ref{assumption:bounded-variance}--\ref{assumption:nuisance-est}, and analyze its design sensitivity~\cite{Rosenbaum10}. Let $H_1 : Q$ be an alternative with a positive average treatment effect $\tau = \E_Q[Y(1) - Y(0)] > 0$ and no confounding ($\Gamma=1$ in Eq.~\eqref{eq:cornfield}). Let $t_n^\Gamma = t_n^\Gamma\{(Y_i, Z_i, X_i)_{i=1}^n\} \in \{0, 1\}$ be a pointwise asymptotic level $\alpha$ test for the null hypothesis~\eqref{eq:gaussian-null-regular}, where $t_n^\Gamma=1$, if the null hypothesis $\tau\le 0$ is rejected. The \emph{design sensitivity}~\cite{Rosenbaum10,rosenbaum2011new} of the sequence $\{t_n^\Gamma\}$ is the threshold $\Gamma_\design$ such that the power $Q(t_n^\Gamma = 1) \to 0$ for $\Gamma > \Gamma_{\design}$ and the power $Q(t_n^\Gamma = 1) \to 1$ for $\Gamma < {\Gamma}_\design$. In other words, if the selection bias satisfies $\Gamma > \Gamma_{\design}$, the test cannot differentiate the alternative $\tau>0$ from the null $\tau\le 0$ regardless of the sample size; if $\Gamma < \Gamma_{\design}$, the test always rejects the null under the alternative $Q$ for sufficiently large $n$ (we define $\Gamma_{\design}=\infty$ when no such threshold exists). Given the confidence interval for $\tau$ described in Section~\ref{sec:semiparametric-convergence}, a natural asymptotic level $\alpha$ test for $H_0(\Gamma)$, the hypothesis~\eqref{eq:gaussian-null-regular}, is \begin{equation}
\label{eq:the-test}
\psi_n^\Gamma\{(Y_i, Z_i, X_i)_{i=1}^n\}
\defeq \ind{\what\tau^- > z_{1-\alpha}\frac{\what\sigma_{\tau^-}}{\sqrt{n}} }. \end{equation}
We consider the design sensitivity of $\psi_n^\Gamma$ in the simplified setting without covariates, which allows us to demonstrate its optimality. In this case, $\{Y(0), Y(1)\} \independent Z \mid U$, the simplified $\Gamma$-\cornfield{} condition \eqref{cond:simple-gamma-selection} holds, $ \{Y(0), Y(1)\}\independent Z$ under the alternative $Q$ (recall Eq.~\eqref{eq:indepsim}), and $\theta_1,\theta_0 \in \R$ are constants defined in Eq.~\eqref{eq:population-theta} and Eq.~\eqref{eq:population-mu-0-upper}.
\begin{restatable}{prop}{thmdesignsensitivity}
\label{prop:design-sensitivity-model}
Let $\psi_n^\Gamma$ be defined as in Eq.~\eqref{eq:the-test}, so that
$\psi_n^\Gamma$ is asymptotically level $\alpha$ for $H_0(\Gamma)$ in
\eqref{eq:gaussian-null-regular}. For an alternative $H_1 = \{Q\}$, define
\begin{equation*}
\tau^-(\Gamma)
\defeq
\E_Q[Z Y(1) + (1 - Z) \theta_1 - (1 - Z) Y(0) - Z \theta_0],
\end{equation*}
where $\theta_1,~\theta_0$ solve
~\eqref{eq:population-theta} and ~\eqref{eq:population-mu-0-upper}, respectively, at level $\Gamma$
for the distribution $Q.$
Then, either the design sensitivity $\Gamma_\design$ of
$\psi_n^\Gamma$ is infinite or it uniquely solves the equation
$\tau^-(\Gamma) = 0$. \end{restatable}
\noindent See Section~\ref{sec:proof-design-sensitivity} in the Supplementary Materials for proof. While there is no simplified expression for $\Gamma_\design$ in general, it can be derived explicitly for some special alternatives $Q$. For instance, in Supp. Materials, Section~\ref{sec:proof-gaussian-design-sensitivity}, we prove the following result for Gaussian alternatives. \begin{restatable}{cor}{cordesignsensitivity}
\label{cor:designsensitivity}
Let $\psi_n^\Gamma$ be as in Eq.~\eqref{eq:the-test}. For the alternative $H_1(Q):$
$$\left\{Y(1) \distas \normal\left(\frac{\tau}{2}, \sigma^2\right),Y(0) \distas
\normal\left(-\frac{\tau}{2}, \sigma^2\right), Z\distas \bernoulli(\frac{1}{2}) \right\},$$
$\psi_n^\Gamma$ has design sensitivity
\begin{equation}
\label{eq:design-sensitivity-gaussian}
\Gamma_\design^\gauss
\defeq
-\frac{\int_0^\infty y \exp\left(- \tfrac{(y - \tau)^2}{2\sigma^2}\right)
\dif{y}}{
\int_{-\infty}^0 y \exp\left(- \tfrac{(y - \tau)^2}{2\sigma^2}\right)
\dif{y}}
= \frac{\phi(\frac{\tau}{\sigma}) + \frac{\tau}{\sigma} \Phi(\frac{\tau}{
\sigma})}{
\phi(\frac{\tau}{\sigma}) -
\frac{\tau}{\sigma} \Phi(\frac{\tau}{\sigma})},
\end{equation}
where $\Phi$ and $\phi$ denote the standard Gaussian
CDF and density, respectively. \end{restatable}
The next proposition shows that the test $\psi_n^\Gamma$ is optimal for alternative $H_1(Q)$ given in Corollary \ref{cor:designsensitivity}, as any asymptotic level $\alpha$ test of $H_0(\Gamma)$ has design sensitivity $\ge \Gamma^\gauss_\design$ (see Supplementary Materials Section~\ref{sec:proof-opt-design-sensitivity} for proof). \begin{restatable}{prop}{optdesignsensitivity}
\label{prop:opt-design-sensitivity}
Let $H_0(\Gamma)$ be as in \eqref{eq:gaussian-null-regular}. There exists
$a \in [1/(1+\sqrt{\Gamma}), \sqrt{\Gamma}/(1+\sqrt{\Gamma})]$ such that
for the alternative $H_1 (Q):$
$$\left\{Y(1) \distas \normal\left(\frac{\tau}{2},
\sigma^2\right),Y(0) \distas \normal\left(-\frac{\tau}{2},
\sigma^2\right), Z\distas \bernoulli(a) \right\},$$ if
$\Gamma \ge \Gamma_\design^\gauss$, there exists a probability measure
$P \in H_0(\Gamma)$ for $\{Y(1), Y(0), Z, U\},$ such that for all
$n \in \N$, all tests $t_n$, and $(Y_i, Z_i)$ i.i.d.,
\begin{equation*}
P(t_n\{(Y_i, Z_i)_{i=1}^n\} = 1) = Q(t_n\{(Y_i, Z_i)_{i=1}^n\} = 1).
\end{equation*}
\end{restatable}
\begin{remark}
Our proof uses a specific choice of $a$ to simplify the algebra;
solving a system of nonlinear equations for the
distribution of $P_{Z|U}$ allows for any marginal
$P(Z=1)$.
\end{remark}
\begin{remark}
The above optimality results for $\psi_n^\Gamma$ extend to alternatives beyond Gaussian distributions, so long as $Y(0) \overset{d}{=} C(1 - Y(1))$, for some constant
$C > 0.$ The proof relies on this symmetry in the potential outcomes to
construct a distribution under $H_0(\Gamma)$ matching $Q$ over the observed
data, $\{(Y_i(Z_i), Z_i), i=1,\cdots, n\}.$ This symmetry is
unnecessary if one is interested in the mean (or conditional mean) of a
single potential outcome $\E[Y(1)]$ (or $\E[Y(1) \mid X=x]$), in which case
the test $\psi_n^\Gamma$ achieves the optimal design sensitivity for any
alternative for which the proposed method is consistent. \end{remark}
\section{Numerical experiments} \label{sec:experiments}
To complement our theoretical analysis in Section~\ref{sec:semiparametric}, we examine the performance of the method using Monte-Carlo simulation and a real dataset from an observational study examining the effect of fish consumption on blood mercury levels. We evaluate two implementations of the methodology developed in Sections~\ref{sec:cate-sensitivity} and~\ref{sec:semiparametric}---one based on the sieve estimators studied in Section~\ref{sec:nonparametric-sieves} and the other based on gradient boosted trees fit to minimize the weighted squared loss~\eqref{eqn:opt}.
The Monte-Carlo simulations support the validity of the inference procedure in realistic settings. We find that the semiparametric approach presented in Section~\ref{sec:semiparametric} accurately bounds the average treatment effect under unobserved confounding, when our assumptions about the extent of confounding $\Gamma$ hold. We show that by using machine learning to optimize the loss function in \eqref{eq:gamma-loss}, our method can scale to reasonably high dimensional data. Additionally, we show that the bounds on the ATE are tight in practice, and empirically compare their conservativeness to that of the matching-based approach from \citet{rosenbaum2014weighted}. Finally, we confirm our findings on a real observational study, demonstrating that our semiparametric approach provides valid yet narrow bounds on the ATE $\tau$.
\subsection{Method Implementations} \label{sec:implementation}
When implementing an estimator to bound the ATE $\tau$ using the method developed in Section~\ref{sec:semiparametric}, one must choose estimators of the nuisance parameters ${e}_z(\cdot),$ ${\theta}_z(\cdot),$ and ${\nu}_z(\cdot)$, and select their hyperparameters. In the first implementation, we stay close to the estimators used in our theoretical analysis with formal convergence guarantees: we estimate the propensity score $\what{e}_1(\cdot)$ by a random forest \cite{athey2019generalized}, and $\what\theta_z(\cdot)$ (respectively, $\what{\nu}_z(\cdot)$) by the non-parametric estimator from Section~\ref{sec:cate-sensitivity} (respectively Section~\ref{sec:nu-est}) using the polynomial (power series) sieve. The sieve size and regularization were selected via 10-fold cross-validation, and then used with 10-fold cross-fitting for the semiparametric estimation. To estimate $\what\nu_z(\cdot)$, we use an iterative, instead of nested, form of cross-fitting that sacrifices some independence between folds to be more computationally efficient, described in Section~\ref{sec:practical-nu} of the Supplement. Nonparametric estimation of the propensity score $e_1(\cdot)$ leads to variability that requires weight clipping to stabilize the semiparametric estimates \cite{lee2011weight,tsiatis2007comment}. We clipped weights worth more than $1/20$ of the total weight of the samples.
In one experiment below, we use a variant of this implementation where we fit $\what{e}_{1, k}(\cdot)$ via a simple logistic regression; the logistic regression model for the propensity score is misspecified, so the lower-order statistical bias from the Neyman orthogonality will not hold; the statistical bias of the estimator will depend on the convergence rate of the nonparametric estimator of $\what{\theta}_{z}(\cdot)$, which will not converge sufficiently quickly. As a result, we expect that the statistical bias will dominate the convergence of $\what\tau^{-}$ to $\tau^-$.
In the second implementation, we use \texttt{xgboost}~\cite{ChenGu16} to fit a machine learning estimator for all of the nuisance parameters, emphasizing the generality and scalability of our methods. \texttt{xgboost} is a gradient boosted tree method that performs well with tabular data, despite having little formal theory regarding its convergence guarantees. Therefore, we used the simulations discussed below as a way to assess its appropriateness as a nuisance parameter estimator for our semiparametric method from Section~\ref{sec:semiparametric-method}. In this implementation, we fit the estimator $\what{\theta}_z(\cdot)$ to minimize the weighted squared loss~\eqref{eqn:opt}, and fit the remaining nuisance parameters to minimize the $\log$ loss for predicting a binary target (treatments or the targets $\ind{Y_i \ge \theta}$ for estimating $\nu_z(\cdot)$). As with the previous implementation, $\what\nu_{z}(\cdot)$ are fit with the iterative cross-fitting described in Section~\ref{sec:practical-nu} of the Supplement. Similarly, all tuning parameters (boosting iterations, regularization, subsampling fraction, minimum node size) are selected via 10-fold cross-validation. We found that when estimating a generic nuisance parameter $\eta(\cdot)$, representing either $\theta_z(\cdot), \nu_{z}(\cdot)$, or $e_1(\cdot)$, adding an additional intercept term as follows improved performance significantly: After fitting $\what{\eta}_z(\cdot)$ using \texttt{xgboost}, we fit $\beta_0$ in the model $\what{\eta}_z(X) + \beta_0$ using the appropriate loss function for the nuisance parameter.
\subsection{Simulations}
The purpose of the simulation study is to demonstrate the good coverage of the proposed confidence intervals for reasonable choices of sample size $n$ and covariate dimension $d,$ and to understand some of the practical properties of the proposed methods relative to existing methods for sensitivity analysis, such as matching methods \cite{rosenbaum2014weighted}. In all of the simulations, we generate the data as follows for a randomly chosen set of coefficients $\beta$ and $\mu$: draw $X \distas \uniform[0,1]^d$, and conditional on $X=x$, draw \begin{equation*}
U \distas \normal\left\{0, \left(1 + \tfrac{1}{2}\sin(2.5 x_1)\right)^2
\right\},
~~~
Y(0) = \beta^\top x + U, ~~~
Y(1) = \tau + \beta^\top x + U. \end{equation*}
We draw the treatment assignment according to \begin{equation*}
Z \distas \bernoulli\left\{\frac{\exp\left(\alpha_0+x^\top \mu +
\log(\Gamma_{\rm data}) \ind{u > 0} \right)}{1+\exp\left(\alpha_0+x^\top \mu +
\log(\Gamma_{\rm data}) \ind{u > 0} \right)}\right\}, \end{equation*} where $\alpha_0$ is a constant controlling the overall treatment assignment ratio. This model satisfies the $\Gamma_{\rm data}$-\cornfield{} condition, since \begin{equation*}
\frac{P(Z = 1 \mid X=x, U=u)}{P(Z = 0 \mid X=x, U=u)}
\frac{P(Z = 0 \mid X=x, U=\tilde{u})}{P(Z = 1 \mid X=x, U=\tilde{u})}
= \Gamma_{\rm data}^{\ind{u>0}-\ind{\tilde{u}>0}}\in [\Gamma_{\rm data}^{-1},
\Gamma_{\rm data}]. \end{equation*}
Across all experiments, we set $\tau = 1$ and \smash{$\Gamma_{\rm data} = \exp(1)$}. Unless otherwise stated, we used the same $\Gamma$ in our sensitivity analysis as the level of confounding \smash{$\Gamma_{\rm data}$} used to generate the data. Here, unobserved confounding inflates estimates that assume unconfoundedness: when $Z=1$, $U$ is more likely to be positive than when $Z=0$, which inflates the mean of treated units, i.e., $\E[Y(1) \mid Z=1, X=x] > \E[Y(1) \mid X=x]$. We expect that the upper bound from the sensitivity analysis is above the true ATE, while the lower bound is only slightly below the truth, assuming that we choose $\Gamma \ge \Gamma_{\rm data}$, but not by too much.
In the first set of simulations, we simulate data with a moderate number of observed covariates ($d=20$), where we observe the proposed sensitivity analysis procedure quickly approaches its asymptotic behavior as sample size grows.
For these simulations, we use the \texttt{xgboost} implementation, validating the performance of our semiparametric method when the nuisance parameters are estimated well, even if lacking in formal convergence guarantees.
Table~\ref{tab:coverage} summarizes the empirical performance of the \texttt{xgboost} implementation based on 500 simulations. As expected, the average lower bound estimator $\what\tau^-$ is close to the true ATE, while the average upper bound estimator $\what\tau^+$ is higher than the true ATE to account for unmeasured confounding. The estimators of the standard errors of $\what\tau^-$ and $\what\tau^+$ are fairly accurate when $n\ge 1000$. When $n$ is small, they slightly underestimate the true standard errors. The empirical coverage probability of the confidence interval of ATE is conservative because of unobserved confounding. As the unobserved confounding introduces upward bias, the lower bound $\tau^-\approx \tau,$ and we expect that the coverage probability of the confidence interval of $\tau$ is close to 97.5\% for large $n$, which is confirmed by the simulation results in Table~\ref{tab:coverage}.
\begin{table}[t]
\caption{Simulation results of the proposed method with $20$ observed
covariates. $\what{\tau}^-$, the empirical average of $\what{\tau}^-;$
$\what{\sigma}_{\tau^-}$, the empirical average of
$\what{\sigma}_{\tau^-};$ SD. of $\what\tau^-$, the empirical standard
deviation of $\what\tau^-$; $\what{\tau}^+$, the empirical average of
$\what{\tau}^+;$ $\what{\sigma}_{\tau^+}$, the empirical average of
$\what{\sigma}_{\tau^+};$ SD. of $\what\tau^+$, the empirical standard
deviation of $\what\tau^+$; and coverage, the empirical coverage
probability of the 95\% confidence intervals
$\what{\operatorname{CI}}_{\tau}.$ ($\mbox{ATE} =\tau = 1$ and
$\Gamma_{\rm data} = \exp(1)$.)} \begin{center}
\begin{tabular}{|c | c| c| c| c| c| c| c|} \hline
$n$ & $\what{\tau}^-$ & SD. of $\what\tau^-$ & $\what{\sigma}_{\tau^-}$ & $\what\tau^+$ & SD. of $\what\tau^+$ & $\what{\sigma}_{\tau^+}$ & Coverage \\
\hline
500.0 & 1.008 & 0.085 & 0.081 & 1.424 & 0.082 & 0.077 & 0.952 \\ 1000.0 & 1.000 & 0.059 & 0.057 & 1.404 & 0.058 & 0.053 & 0.978 \\ 2000.0 & 0.998 & 0.042 & 0.040 & 1.395 & 0.040 & 0.038 & 0.966 \\ 4000.0 & 0.995 & 0.029 & 0.028 & 1.387 & 0.027 & 0.027 & 0.980 \\ \hline \end{tabular} \end{center} \label{tab:coverage} \end{table}
In the second set of simulations, the dimension $d$ of the covariates, sample size $n$, and marginal treatment probability $P(Z=1)$ match those from the real observational study on fish consumption and blood mercury levels in the next subsection ($d=8$, $n = 1100$, $P(Z=1)=0.21$), so that we can validate our approach before interpreting the results on real data. We use the nonparametric sieve implementation for estimating the nuisance parameters in the real observational study, and so we use this implementation here. As estimation with sieves is challenging in this setting due to the eight covariates and a nonlinear model, in Table~\ref{tab:realistic-coverage} we observe that the variance estimates $\what{\sigma}^{\pm}$ underestimate the standard deviation of $\what{\tau}^{\pm}$ by approximately $10\%$. We also evaluate the performance when the propensity score estimator is mis-specified, as discussed in Section~\ref{sec:implementation}.
We compare our semiparametric methods to the $M$-estimator based matching method \texttt{sensitivitymw}~\cite{rosenbaum2014weighted}. Note that our simulation uses a constant treatment effect, as assumed by matching methods. The confidence intervals for the matching approach are conditional on the design (and assume exact matched pairs), whereas our intervals are unconditional. The confidence intervals for the ATE from the matching method appear conservative, coming from having a lower design sensitivity and larger standard errors (Table~\ref{tab:realistic-coverage}). The larger standard errors could potentially be reduced using covariate adjustment in matching~\citep{Rosenbaum02Cov}.
\begin{table}[t] \caption{Simulation results of the proposed method (parametric and nonparametric) and the existing matching method with eight observed covariates. $\what{\tau}^-$, the empirical average of $\what{\tau}^-;$ $\what{\sigma}_{\tau^-}$, the empirical average of $\what{\sigma}_{\tau^-};$ SD. of $\what\tau^-$, the empirical standard deviation of $\what\tau^-$; $\what{\tau}^+$, the empirical average of $\what{\tau}^+;$ $\what{\sigma}_{\tau^+}$, the empirical average of $\what{\sigma}_{\tau^+};$ SD. of $\what\tau^+$, the empirical standard deviation of $\what\tau^+$; and Coverage, the empirical coverage probability of the 95\% confidence intervals $\what{\operatorname{CI}}_{\tau}.$ ($\mbox{ATE} =\tau = 1$ and
$\Gamma_{\rm data} = \exp(1)$.)} \begin{center}
\begin{tabular}{|l | c| c| c| c| c| c| c|} \hline
Approach & $\what{\tau}^-$ & $\what{\sigma}_{\tau^-}$ & SD. of $\what\tau^-$ & $\what\tau^+$ & $\what{\sigma}_{\tau^+}$ & SD. of $\what\tau^+$ & Coverage \\
\hline Nonparametric & 0.995 & 0.073 & 0.081 & 1.775 & 0.069 & 0.076 & 0.960 \\ Misspecified & 0.988 & 0.071 & 0.081 & 1.775 & 0.068 & 0.076 & 0.970 \\ Matching & 0.869 & - & 0.097 & 2.125 & - & 0.097 & 0.996 \\ \hline \end{tabular} \end{center} \label{tab:realistic-coverage}
\end{table}
In the third set of simulations, we include only a single covariate ($d=1$), and evaluate the performance of the semiparametric method with the \texttt{xgboost} implementation, and the matching method described above over a range of sample sizes. One of the challenges with interpreting the above simulations is that the results will include a mixture of errors---statistical error from having finite observations, and population-level uncertainty on the treatment effect. With one covariate, the semiparametric and approximate matching methods should have a small statistical bias relative to their standard errors, so the average of the point estimates from simulations with a large sample size should approximate the asymptotic sensitivity bounds well. This allows us to compare the asymptotic behavior of the semiparametric method and matching methods, over a variety of values of $\Gamma$ used in analysis (while holding $\Gamma_{\rm data}$ used in the data-generation fixed). Like previous settings, Table~\ref{tab:low-dim} shows that the bounds from matching are more conservative than the semiparametric approach.
\begin{table}[t]
\caption{Simulation results of the proposed method and matching with $1$
observed covariate. For each method, $0.025$-quantile and average of the
lower bound, followed by average and $0.975$-quantile of the upper bound,
and the coverage of the confidence interval are reported. Comparing the
average bounds for each method shows that the semiparametric method has a
less conservative lower bound as $\Gamma$ varies, but is still below the
true ATE when the appropriate $\Gamma$ is used, which is $1$ in this
simulation; the coverage shows that it still covers the true ATE at the
appropriate level. Varying the sample size shows that the statistical bias
of both methods is already negligible with very small sample
sizes. ($\mbox{ATE} =\tau = 1$ and $\Gamma_{\rm data} = \exp(1)$.)} \footnotesize \hspace{-2em}
\begin{tabular}{|c|ccccc|ccccc|} \hline
& \multicolumn{5}{c|}{Semiparametric Method} & \multicolumn{5}{c|}{Matching Method}\\ \hline & \makecell{Lower\\$0.025$-\\quantile} & Lower & Upper & \makecell{Upper\\$0.975$-\\Quantile} & \makecell{Cover-\\age} & \makecell{Lower\\$0.025$-\\quantile} & Lower & Upper & \makecell{Upper\\$0.975$-\\Quantile} & \makecell{Cover-\\age}\\ \hline
$\Gamma$ & \multicolumn{10}{c|}{Fixing $n=1000$} \\ \hline $1$ & 1.08 & 1.18 & 1.18 & 1.29 & 0.06 & 1.23 & 1.42 & 1.42 & 1.65 & 0.00 \\ $\exp(0.5)$ & 1.00 & 1.09 & 1.27 & 1.37 & 0.56 & 0.93 & 1.11 & 1.73 & 1.96 & 0.61 \\ $\exp(1)$ & 0.90 & 1.00 & 1.35 & 1.46 & 0.97 & 0.58 & 0.80 & 2.05 & 2.30 & 1.00 \\ $\exp(2)$ & 0.71 & 0.81 & 1.52 & 1.64 & 1.00 & -0.13 & 0.17 & 2.69 & 3.01 & 1.00 \\ $\exp(3)$ & 0.51 & 0.63 & 1.69 & 1.82 & 1.00 & -0.90 & -0.48 & 3.35 & 3.75 & 1.00 \\ $\exp(4)$ & 0.30 & 0.46 & 1.85 & 2.01 & 1.00 & -1.67 & -1.16 & 4.02 & 4.49 & 1.00 \\
\hline n & \multicolumn{10}{c|}{Fixing $\Gamma = \exp(1)$ as in simulation} \\ \hline 100.0 & 0.65 & 1.00 & 1.37 & 1.69 & 0.97 & 0.11 & 0.82 & 2.05 & 2.83 & 0.99 \\ 1000.0 & 0.90 & 1.00 & 1.35 & 1.46 & 0.97 & 0.58 & 0.80 & 2.05 & 2.30 & 1.00 \\ 4000.0 & 0.94 & 1.00 & 1.35 & 1.41 & 0.98 & 0.68 & 0.81 & 2.05 & 2.19 & 1.00 \\ \hline \end{tabular} \label{tab:low-dim} \end{table}
\subsection{Real observational data} We apply our method to analyzing an observational study to infer the effect of fish consumption on blood mercury levels and compare our result to that of a prior analysis based on covariate matching~\cite{zhao2017sensitivity}. The data consist of observations from 2,512 adults in the United States who participated in a single cross-sectional wave of the National Health and Nutrition Examination Survey (2013-2014). All participants answered a questionnaire regarding their demographics and food consumption and had their blood mercury concentration measured (data available in the R package CrossScreening).
High fish consumption is defined as individuals who reported $>12$ servings of fish or shellfish in the previous month per their questionnaire, low fish consumption as 0 or 1 servings of fish. The outcome of interest is $\log_2$ of total blood mercury concentration (ug/L). The primary objective is to study if fish consumption causes higher mercury concentration. To match prior analysis \cite{zhao2017sensitivity}, we excluded one individual with missing education level and seven individuals with missing smoking status from the analysis, and imputed missing income data for 175 individuals using the median income. In addition, we created a supplementary binary covariate to indicate whether the income data were missing. There are a total of 234 treated individuals (those with high fish consumption), 873 control individuals (low fish consumption). The data include eight covariates (gender, age, income, whether income is missing, race, education, ever smoked, and number of cigarettes smoked last month). Our approach uses the same $\Gamma$-\cornfield{} model as the previous matched-pair analysis in \cite{zhao2017sensitivity}, so results for our proposed method and the analysis based on these 234 matched pairs are nearly comparable. However, the confidence intervals constructed for matching are conditional on the covariates and choice of matched pairs. As Table~\ref{tab:obs-study} shows (see also Fig.~\ref{fig:obs-study}), when $\Gamma>\exp(1)$, our method achieves tighter confidence intervals around the effect of fish consumption on blood mercury level: our confidence intervals are nested within those based on the matching method. For example, when $\Gamma=\exp(3)$ (representing a relatively large selection bias), the 95\% confidence interval for the increase in average $\log_2$-transformed blood mercury concentration caused by high fish consumption is [0.47, 3.29] based on our new method and [-0.24, 4.48] based on the matching method. 
While the former excludes zero, suggesting a significant association in the presence of unknown confounding, the latter includes the null association and is not statistically significant. The confidence intervals for our method are always shorter except when $\Gamma = 1$, i.e., under unconfoundedness.
\begin{table} \caption{Comparison to sensitivity results of \cite{zhao2017sensitivity} using the same data set. Because the same sensitivity model as the matched analysis was used, results can be compared directly. We demonstrate that the method can achieve tighter bounds on the average treatment effect both in point estimates and confidence intervals.}
\hspace{-3.2em}\begin{tabular}{| c| c c c c c|c c c c c|} \hline
& \multicolumn{5}{c|}{Semiparametric Method} & \multicolumn{5}{c|}{Matching Method} \\ \hline $\Gamma$ & \makecell{Lower \\95\% CI} & Lower & Upper & \makecell{Upper \\ 95\% CI} & \makecell{Length\\ of CI} & \makecell{Lower \\95\% CI} & Lower & Upper & \makecell{Upper \\ 95\% CI} & \makecell{Length\\ of CI} \\ \hline 1 & 1.51 & 1.74 & 1.74 & 1.97 & 0.46 & 1.9 & 2.08 & 2.08 & 2.25 & 0.35 \\ $\exp(0.5)$ & 1.31 & 1.53 & 2.03 & 2.26 & 0.95 & 1.57 & 1.75 & 2.41 & 2.59 & 1.02 \\ $\exp(1)$ & 1.07 & 1.27 & 2.27 & 2.47 & 1.4 & 1.25 & 1.45 & 2.74 & 2.94 & 1.89 \\ $\exp(2)$ & 0.74 & 0.91 & 2.77 & 2.89 & 2.15 & 0.58 & 0.87 & 3.36 & 3.65 & 3.07 \\ $\exp(3)$ & 0.47 & 0.6 & 3.19 & 3.29 & 2.82 & -0.23 & 0.28 & 3.97 & 4.48 & 4.71 \\ $\exp(4)$ & 0.18 & 0.29 & 3.55 & 3.63 & 3.45 & - & - & - & - & - \\ \hline \end{tabular} \label{tab:obs-study} \end{table}
\begin{figure}
\caption{Visual comparison to sensitivity results of matching method in
\cite{zhao2017sensitivity} using the same data set. See numerical
details in Table~\ref{tab:obs-study}. The filled areas represent the
estimated bounds on the average treatment effect, whereas the dotted /
dashed lines represent their confidence intervals. For
values of $\Gamma$ larger than $\exp(0.5)$, our approach
produces intervals with shorter length.}
\label{fig:obs-study}
\end{figure}
\section{Discussion} \label{section:discussion}
The $\Gamma$-\cornfield{} model \eqref{eq:cornfield} relaxes the unconfoundedness assumption \eqref{eq:indepsim} required for the identification of causal treatment effects. We propose estimators $\what{\tau}^{\pm}(\cdot)$ for upper and lower bounds on the CATE $\tau(x)$ and $\what{\tau}^{\pm}$ for the ATE $\tau$ under the $\Gamma$-\cornfield{} condition~\eqref{eq:cornfield} and derive their asymptotic properties. Our loss minimization approach is practical and scalable, allowing the use of flexible machine learning methods. Theoretically, we demonstrate the statistical advantages of our approach, replicating the advantageous $o_{p}(n^{-p/(2p+d)})$ convergence of series estimation procedures \cite{newey1997convergence} and root $n$ consistency of doubly robust semi-parametric estimates~\cite{bang2005doublerobust,chernozhukov2018double} in the absence of unobserved confounding~\eqref{eq:indepsim}. Our simulation studies and experimental evidence from real observational data confirm these advantages exist in practical finite sample regimes as well.
Our bounds demonstrate a few important phenomena for understanding the robustness of causal inference with observational data. First, as we note in Section~\ref{sec:semiparametric}, the estimator $\what{\tau}^-$ reduces to the AIPW estimator~\eqref{eq:aipw} when $\Gamma=1$. Therefore, for any $\Gamma > 1$, the confidence interval for $\tau$ estimated in \eqref{eq:ci4ate} includes the AIPW estimate~\eqref{eq:aipw}, which serves as the center of the interval bounding the ATE. Second, the estimator $\what{\theta}_1(\cdot)$ minimizes a weighted squared error loss function \eqref{eqn:opt}, while the estimator $\what{\mu}_{1,1}(\cdot)$ minimizes a unweighted mean squared error loss. When the residual noise $Y - \mu_{1,1}(X)$ is small, the difference between weighted and unweighted loss functions also tends to be small. Therefore, the effect of selection bias on the bias of the ATE $\tau$ or CATE $\tau(x)$ estimated under the no unobserved confounding assumption \eqref{eq:indepsim} depends on the magnitude of these residuals; when these residuals are close to zero, the risk of unobserved confounding is mitigated.
Our bounds on the ATE $\tau$ and CATE $\tau(x)$ depend on bounding the conditional mean of the potential outcomes $\mu_1(x) = \E[Y(1) \mid X=x]$ and $\mu_0(x) = \E[Y(0)\mid X=x]$. The proposed $\what{\tau}^-$ and $\what{\tau}^-(x)$ employ a worst case re-weighting scheme (such as in \eqref{eq:population-theta} and \eqref{eqn:opt}) to bound them separately. Section~\ref{sec:hypothesis-test} establishes the optimality of this approach under a specific symmetry condition on the distributions of the potential outcomes. In general, our approach may not be optimal; an optimal estimator may require worst case treatment assignments that depend on both potential outcomes simultaneously, consistent with the independence assumption \eqref{eq:indep} and $\Gamma$-\cornfield{} condition \eqref{eq:cornfield}. Such joint consideration of $\mu_1(x)$ and $\mu_0(x)$ complicates the estimation procedure but is an important direction of future research.
In practice, choosing an appropriate level of $\Gamma$ in the sensitivity analysis is important. \citet[Chp.~6]{Rosenbaum02} discusses using known relationships between a treatment and an auxiliary measured outcome to detect the presence and magnitude of hidden bias. For example, suppose a drug is approved with an unbiased estimate of its effect on a primary outcome based on a randomized clinical trial, and drug surveillance investigates the potential adverse events associated with the drug use in real world. The difference between the estimated treatment effect on the primary outcome based on observational data and that based on a randomized clinical trial can serve as an indication of the magnitude of $\Gamma$, the hidden bias in the observational data. It may then be appropriate to perform a sensitivity analysis for adverse events with the same level of $\Gamma$. However, in many settings, there is no such surrogate for estimating $\Gamma$. In discussions with clinicians who often conduct biomedical studies, we find it helpful to provide results for a number of different values of $\Gamma$ to help contextualize the strength of evidence, rather than present a single bound with undue certainty. While our result is valid for each fixed $\Gamma$, providing uniform inference results over a set of $\Gamma$ would allow estimation of the smallest value of $\Gamma$ consistent with zero treatment effect in the data (a \emph{sensitivity value} analogous to the \textit{E value} for risk ratios from \citet{vanderweele2017sensitivity}).
\setlength{\bibsep}{2pt}
\appendix
\section{Proofs for bounds on the CATE} \label{sec:proofs-no-cov}
\subsection{Proof of absolute continuity in Lemma~\ref{lem:bdd-lr-no-cov}} \label{sec:proof-bounded-likelihood}
\begin{proof}
Here, we only prove the absolute continuity result. The rest of the proof is in Section~\ref{sec:cate-sensitivity} after the statement of Lemma~\ref{lem:bdd-lr-no-cov}. Let $U \in \mc{U}$ be the unobserved confounder satisfying
$Y(1) \indep Z \mid X, U$ and~\eqref{eq:cornfield}. Then for any set
$A \subset \mc{U}$
\begin{align}
\frac{P(U \in A \mid Z = 0, X = x)}{P(U \in A \mid Z = 1, X = x)}
& = \frac{P(Z = 0 \mid X = x, U \in A)}{P(Z = 1 \mid X = x, U \in A)}
\cdot \frac{P(Z = 1 \mid X = x)}{P(Z = 0 \mid X = x)}
\in [\Gamma^{-1}, \Gamma]
\label{eqn:remember-cornfield}
\end{align}
by condition~\eqref{eq:cornfield} and the quasi-convexity of the ratio
mapping
$(a, b) \mapsto a/b$. Letting $q_z$ denote the density of $U$ (with
respect to a base measure $\mu$) conditional on $Z = z$, we then have
$q_0(u \mid x) / q_1(u \mid x) \in [\Gamma^{-1}, \Gamma]$, and for
any measurable set $A \subset \R$
\begin{align*}
\frac{P(Y(1) \in A \mid Z = 0, X = x)}{P(Y(1) \in A \mid Z = 1, X = x)}
& = \frac{\int P(Y(1) \in A \mid Z = 0, U = u, X = x) q_0(u \mid x) d\mu(u)}{
\int P(Y(1) \in A \mid Z = 1, U = u, X = x) q_1(u \mid x) d\mu(u)} \\
& \stackrel{(i)}{=}
\frac{\int P(Y(1) \in A \mid U = u, X = x) q_0(u \mid x) d\mu(u)}{
\int P(Y(1) \in A \mid U = u, X = x) q_1(u \mid x) d\mu(u)}
\stackrel{(ii)}{\in} [\Gamma^{-1}, \Gamma]
\end{align*}
where equality~$(i)$ is a consequence of $Y(1) \indep Z \mid X, U$, and
inequality~$(ii)$ follows again from the quasi-convexity of the ratio. This
yields the absolute continuity claim. \end{proof}
\subsection{Proof of Lemma~\ref{lemma:duality}} \label{sec:proof-duality}
\begin{proof}
As everything is conditional on $x$, we suppress it without
loss of generality, letting $\E_1[\cdot] = \E[\cdot \mid Z = 1]$ for
shorthand.
We first develop a simple duality argument. The set
\begin{equation*}
\mc{L}_\Gamma \defeq \{L : \mc{Y} \to \R_+, L ~ \mbox{measurable}, ~
L(y) \le \Gamma L(\wt{y}) ~ \mbox{for~all~} y, \wt{y} \}
\end{equation*}
is convex, contains the constant function $L \equiv 1$ in its interior,
and for $L \equiv 1$ we have $\E_1[L(Y(1))] = 1$. Thus,
strong duality~\cite[Thm.~8.6.1 and Problem 8.7]{Luenberger69}
implies
\begin{equation}
\inf_{L \in \mc{L}_\Gamma}
\left\{\E_1[Y(1) L(Y(1))] \mid \E_1[L(Y(1))] = 1 \right\}
= \sup_{\mu \in \R}
\inf_{L \in \mc{L}_\Gamma} \left\{\E_1[(Y(1) - \mu) L(Y(1))]
+ \mu \right\}
\label{eq:lagrange-ver}.
\end{equation}
Now, we show that for each $\mu \in \R$,
\begin{equation}
L^\ast(y) \propto \Gamma \ind{y - \mu \le 0} + \ind{y - \mu > 0}
\label{eq:best-lr}
\end{equation}
attains the minimum value of $\inf_{L \in \mc{L}_\Gamma} \E_1[(Y(1) - \mu)
L(Y(1))]$. That is, the minimizer takes on only the values $L^*(y) \in
\{c, c\Gamma\}$ for some $c \ge 0$. The constraint
$L \in \mc{L}_\Gamma$ guarantees that $L^*(y) \in [c, c \Gamma]$ for some
$c \ge 0$.
Assume that $c \le L(y) \le c \Gamma$,
but $L(y) \not\in \{c, c \Gamma\}$. Then
letting $L\opt(y) = c$ if $(y - \mu) > 0$ and $L\opt(y) = c \Gamma$
if $(y - \mu) \le 0$, we have
$(y - \mu) L\opt(y) \le (y - \mu) L(y)$, with strict inequality
if $y \neq \mu$. Thus, any function $L \in \mc{L}_\Gamma$ can be modified
to be of the form~\eqref{eq:best-lr} without increasing the objective
$\E_1[(Y(1) - \mu) L(Y(1))]$.
Substituting the minimizer~\eqref{eq:best-lr} into
the right objective~\eqref{eq:lagrange-ver}, we recall
that $\psi_t(y) = \hinge{y - t} - \Gamma \neghinge{y-t}$ to obtain
\begin{equation*}
\theta_1(x) =
\sup_{\mu} \inf_{c \ge 0}
\left\{\E_1\left[ c \psi_\mu(Y(1)) \mid X=x\right] + \mu\right\}.
\end{equation*}
This gives the final result~\eqref{eq:dual-constrained}, as
\begin{equation*}
\inf_{c \ge 0}
\E_1\left[ c \psi_\mu(Y(1)) \mid X=x\right]
= \begin{cases} -\infty & \mbox{if~} \E_1[\psi_\mu(Y(1)) \mid X = x]
< 0 \\
0 & \mbox{otherwise}.
\end{cases}
\end{equation*}
Since $\theta \mapsto \E[ \psi_\theta\left(Y(1)\right)\mid Z = 1, X]$ is a decreasing function, $\theta_1(X)$ is the only zero crossing of the function for almost every $X$. \end{proof}
\subsection{Proof of Lemma~\ref{lemma:opt-is-good}} \label{section:proof-of-opt-is-good}
\lemoptisgood*
\begin{proof}
Let $\wb{\R} = \R \cup \{+\infty\}$.
Normal integrand theory~\cite[Section 14.D]{RockafellarWe98} allows
swapping integrals and infimum over measurable mappings. A map
$f: \R \times \covspace \to \wb{\R}$ is a normal integrand if its
epigraphical mapping $x \mapsto S_f(x) \defeq
\epi f(\cdot; \covariate) = \{ (t,
\alpha) \in \R \times \R: f(t; \covariate) \le \alpha\}$ is closed-valued
and
measurable, that is, for $\sigalg$ the Borel sigma-algebra on $\covspace$,
$S_f^{-1}(O) \in \sigalg$ for all open $O \subset \R^2$. We have
\begin{lemma}[{\citet[Theorem 14.60]{RockafellarWe98}}]
\label{lemma:inf-int-interchange}
If $f: \R \times \covspace \to \wb{\R}$ is a normal integrand, and
$\int_{\covspace} f(\funcparam_1(\covariate); \covariate) ~
dP(\covariate) < \infty$ for some measurable $\funcparam_1$, then
\begin{equation*}
\inf_{\funcparam} \left\{
\int_{\covspace} f(\funcparam(\covariate); \covariate) ~d P(\covariate)
\mid
\funcparam: \covspace \to \R~\mbox{measurable}
\right\}
= \int_{\covspace} \inf_{\funcscalar \in \R} f(\funcscalar; \covariate) ~ dP(\covariate).
\end{equation*}
If this common value is not $-\infty$, a measurable function $
\funcparam^*: \covspace \to \R$ attains the minimum of the left-hand side
iff $ \funcparam^*(\covariate) \in \argmin_{\funcscalar \in \R}
f(\funcscalar; \covariate)$ for $P$-almost every $\covariate
\in \covspace$.
\end{lemma}
\noindent
Let $f(t, \covariate) \defeq \half \treatedE[ \hinge{Y(1) - t}^2 + \Gamma
\neghinge{Y(1) - t}^2 \mid \covariaterv = \covariate]$. Since
$(t, x) \mapsto f(t, x)$ is
continuous by assumption, $f$ is a normal integrand~\cite[Example
14.31]{RockafellarWe98}. Rewrite the minimization
problem~\eqref{eqn:opt} using the tower property
\begin{equation*}
\inf_{\funcparam} \left\{
\treatedE\left[ \treatedE[
\loss_\Gamma(\funcparam; (\covariaterv, Y(1))) \big| \covariaterv]
\right] = \treatedE[f\left\{\funcparam(\covariaterv), \covariaterv\right\}]
\mid
\funcparam: \covspace \to \R~\mbox{measurable}
\right\}.
\end{equation*}
Apply Lemma~\ref{lemma:inf-int-interchange} to obtain
$\funcparam_1(\covariate)
= \argmin_{\funcscalar \in \R} f(\funcscalar; \covariate)$. Since $t
\mapsto f(t, \covariate)$ is convex, the first order condition
$\frac{d}{dt} f(t; \covariate)=0$ shows that $\funcparam_1(x)$
solves $\E_1[\psi_{\theta_1(x)}(Y(1)) \mid X=x]=0$. The uniqueness (up
to measure-zero transformations) of $\theta_1$ is immediate by
the strong convexity of $t \mapsto \loss_\Gamma(t, y)$. \end{proof}
\section{Sieve estimation} \label{sec:sieve-method}
\subsection{Convergence rates for $\what{\theta_1}$, the empirical
minimizer~\eqref{eqn:opt-emp}}
In this section, we establish asymptotic convergence rates for minimizers $\what{\theta}_1(\cdot)$ of \eqref{eqn:opt-emp}. We consider two examples to make this concrete.
\begin{example}[Polynomials]
\label{example:polynomials}
Let $\pol{\comp}$ be the space of $\comp$-th order polynomials on
$[0, 1]$,
\begin{equation*}
\pol{\comp} \defeq
\left\{ [0,1] \ni x \mapsto \sum_{k=0}^{\comp} a_k x^k
: a_k \in \R \right\}.
\end{equation*} Define the sieve $\funcspace_n \defeq \left\{ x \mapsto \prod_{k=1}^d f_k(x_k) \mid f_k \in \pol{J_n}, k = 1,\dots,d \right\},$ for $J_n \to \infty$. \end{example}
\begin{example}[Splines]
\label{example:splines}
Let $0 = t_0 < \ldots < t_{\comp+1} = 1$ be knots that satisfy
\begin{equation*}
\frac{\max_{0 \le j \le \comp} (t_{j+1} - t_j)}
{\min_{0 \le j \le \comp} (t_{j+1} - t_j)}
\le c
\end{equation*}
for some $c > 0$. Then, the space of $r$-th order splines with $\comp$
knots is
\begin{equation*}
\spl{r}{\comp} \defeq
\left\{ x \mapsto
\sum_{k=0}^{r-1} a_k x^k + \sum_{j=1}^\comp b_j \hinge{x-t_j}^{r-1}, x\in [0, 1]:
a_k, b_j \in \R
\right\}.
\end{equation*}
Define the sieves $\funcspace_n \defeq \left\{ x \mapsto
f_1(x_1)f_2(x_2)\dots f_d(x_d) \mid f_k \in \spl{r}{J_n}, k = 1,\dots,d
\right\}$ for some integer $r \ge \floor{\holdersmooth} + 1$ and $J_n \to
\infty.$ \end{example}
We require (standard) regularity conditions. Let $\holderball{\holdersmooth}{\holderradius}$ denote the H\"{o}lder class of $\holdersmooth$-smooth functions, defined for $\holdersmooth_1 = \ceil{\holdersmooth} - 1$ and $\holdersmooth_2 = \holdersmooth - \holdersmooth_1$ by \begin{equation*}
\holderball{\holdersmooth}{\holderradius}
\defeq \left\{ h \in C^{\holdersmooth_1}(\covspace):
\sup_{\tiny \begin{array}{c}x\in \mc{X} \\ \sum_{l=1}^d \alpha_l<p_1 \end{array}}|D^\alpha h(x)| +
\sup_{\tiny \begin{array}{c} x \neq x' \in \mc{X} \\ \sum_{l=1}^d \beta_l=p_1\end{array}}\frac{|D^\beta h(x) - D^\beta h(x')|}{\norm{x - x'}^{\holdersmooth_2}}
\le \holderradius
\right\}, \end{equation*} where $C^{\holdersmooth_1}(\covspace)$ denotes the space of $\holdersmooth_1$-times continuously differentiable functions on $\covspace,$ and
$D^{\alpha} = \frac{\partial^{|\alpha|}}{\partial x_1^{\alpha_1} \cdots \partial x_\covdim^{\alpha_\covdim}}$, where $|\alpha| = \alpha_1 + \cdots + \alpha_\covdim$,
for any $\covdim$-tuple of nonnegative integers $\alpha = (\alpha_1, \ldots, \alpha_\covdim)$. We make a few concrete assumptions on smoothness and other properties of parameters of interest. \begin{assumption}
\label{assumption:holder-smooth}
Let $\covspace = \covspace_1 \times \cdots \times \covspace_\covdim$ be the Cartesian product of compact intervals $\covspace_1, \ldots, \covspace_\covdim$, and assume
$\popfunc \in \holderball{\holdersmooth}{\holderradius} \eqdef \funcspace$
for some $\holderradius > 0$. \end{assumption}
\newcommand{\sigma^2_{\rm shift}}{\sigma^2_{\rm shift}}
\begin{assumption}
\label{assumption:bdd-error}
There exists $\sigma^2_{\rm shift} < \infty$ such that for all $\covariate
\in \covspace$, $\E[\{Y(1) - \popfunc(\covariaterv)\}^2 \mid Z=1, X = x] \le
\sigma^2_{\rm shift}$. \end{assumption}
\begin{assumption}
\label{assumption:lebesgue-equiv}
$P_{\covariaterv | \treatmentrv = 1}$ has a density
$\treatedp(x)$ with respect to the Lebesgue measure and $0 < \inf_{\covariate \in
\covspace} \treatedp(\covariate) \le \sup_{\covariate \in \covspace}
\treatedp(\covariate) < \infty$. \end{assumption}
Assumption~\ref{assumption:holder-smooth} assumes that $\popfunc(\cdot)$ is in a $\holdersmooth$-smooth H\"{o}lder space.
Sufficient conditions for satisfying this assumption include when the conditional mean function $\mu_z(x) = \E[Y(z) \mid X=x]$ is in a $\holdersmooth$-smooth H\"older space, and the residuals $Y(1) - \mu_1(x)$ are homoskedastic or $Y(1)$ is binary: in these cases, $\theta_1(x)$ is a simple affine transformation of $\mu_1(x)$, preserving its smoothness. Assumption~\ref{assumption:holder-smooth} allows for more general models where the residuals may be heteroskedastic but $\theta_1$ is still smooth.
Assumption~\ref{assumption:bdd-error} is a standard condition to ensure convergence of the empirical loss function by bounding the second moment. Finally, Assumption~\ref{assumption:lebesgue-equiv} asserts that $P_{\covariaterv | \treatmentrv = 1}$ has upper- and lower-bounded density, so that it is equivalent to the Lebesgue measure on $\mc{X}$. This assumption, along with the symmetric assumption on $P_{\covariaterv | \treatmentrv = 0}$ needed to estimate $\theta_0(\cdot)$, implies strong ignorability, as well as bounds on the marginal density of $P_{\covariaterv}$ under the Lebesgue measure. Assumption~\ref{assumption:lebesgue-equiv}
allows us to relate the $L^2(P)$ norm, $\|\cdot\|_{2,P},$ to the supremum norm, $\|\cdot\|_{\infty, P},$ of $\what{\theta}_1 - \theta_1 \in \holderball{\holdersmooth}{\holderradius}$,
which is important for proving the convergence of sieve estimators~\cite{Chen07}. Although outside the scope of this paper, adapting other nonparametric estimators such as the partitioning estimates described in \citet{GyorfiKoKrWa02} may admit good $\|\cdot\|_{2,P}$ convergence rates without this assumption.
The tradeoff between the random estimation error and approximation precision of the sieve space $\funcspace_n$ (see Lemma~\ref{lemma:sieve} in the Appendix) dictates the accuracy of $\what\theta_1(\cdot)$. The following theorem guarantees that finite dimensional linear sieves considered yield standard non-parametric rates for estimating $\popfunc(\cdot)$ by balancing different sources of error. \begin{restatable}{thm}{thmsieve}
\label{thm:sieve}
For $\covspace = [0, 1]^d$, let $\funcspace_n$ be given by the finite
dimensional linear sieves in Example~\ref{example:polynomials}
or~\ref{example:splines} with $\comp_n \asymp n^{\frac{1}{2 \holdersmooth
+ \covdim}}$. Define $\epsilon_n = (\frac{\log
n}{n})^\frac{\holdersmooth}{2\holdersmooth + \covdim}$. Let
Assumptions~\ref{assumption:holder-smooth}, \ref{assumption:bdd-error},
and~\ref{assumption:lebesgue-equiv} hold, and let $\empfunc$ satisfy
\begin{equation*}
\empE\left[
\loss_\Gamma\big(\empfunc (\covariaterv), Y(1)\big) \mid Z=1 \right]
\le \inf_{ \funcparam \in \funcspace_n}
\empE\left[
\loss_\Gamma\left(\funcparam(\covariaterv), Y(1)\right) \mid Z=1\right]
+ O_P(\epsilon_n^2).
\end{equation*}
Then
$\norms{\empfunc - \popfunc}_{2, P_1} = O_{P}(\epsilon_n)$
and $\norms{\empfunc - \popfunc}_{\infty,P_1} =
O_{P}(\epsilon_n^\frac{2 \holdersmooth}{2 \holdersmooth + \covdim})$. \end{restatable} \noindent See the Supplementary Materials Section~\ref{section:proof-of-sieve} for proof. The key property of the function spaces $\funcspace_n$ in Examples~\ref{example:polynomials} and~\ref{example:splines} is that $\inf_{\theta \in \funcspace_n} \linf{\theta -
\popfunc} = O(\comp_n^{-\holdersmooth})$ (cf.~\cite[Sec.~5.3.1]{Timan63} or \cite[Thm.~12.8]{Schumaker07}), which allows appropriate balance between approximation and estimation error. Similar guarantees hold for wavelet bases and other finite-dimensional sieves~\cite{Daubechies92, Chen07}, allowing generalization of Theorem~\ref{thm:sieve} beyond the explicit examples provided.
\subsection{Convergence rates for $\what{\nu}_{1, k}$, the empirical
minimizer~\eqref{eqn:opt-emp-prob}} \label{sec:nu-sieve}
To show convergence of the empirical minimizer~\eqref{eqn:opt-emp-prob}, $\what{\nu}_{1, k}$, we need two assumptions. \begin{assumption}
\label{assumption:prob-smooth}
There exist $\probsmooth, \probradius > 0$, and a set
$S \subset \holderball{\holdersmooth}{\holderradius}$ with
$\popfunc \in S$
such that
(a) $P(\what{\theta}_{1k}^{\nu_1} \in S \mid \treatmentrv=1) \to 1$ as $n \to \infty$,
(b) for all $\funcparam \in S$,
$x \mapsto P(Y(1) \ge \funcparam(x) \mid \treatmentrv=1, X=x)$
belongs to
$ \probspace \defeq \holderball{\probsmooth}{\probradius} \cap \left\{
\probfunc: \covspace \to [0, 1] \right\}$. \end{assumption} \begin{assumption}
\label{assumption:lip-cdf}
Let $S$ be as in Assumption~\ref{assumption:prob-smooth}.
There is a constant $L_{\probfunc} <\infty$
such that for $f, g \in S$,
\begin{equation*}
\int \left[P(Y(1) \ge f(x) \mid Z=1, \covariaterv=x)-
P(Y(1) \ge g(x) \mid Z=1, \covariaterv=x)\right]^2 \dif{P_1}(x)
\le L_{\probfunc}^2 \|f - g\|_{2,P_1}^2.
\end{equation*} \end{assumption} \noindent Assumption \ref{assumption:prob-smooth} ensures that the map $x \mapsto P(Y(1) \ge \funcparam(x) \mid \treatmentrv=1, X=x)$ is sufficiently smooth for functions $\funcparam(\cdot)$ close to
$\popfunc(\cdot).$ In the current case, since $\|\what{\theta}_{1k}^{\nu_1} - \popfunc\|_{2,P_1} = o_p(1)$, $S$ in Assumption \ref{assumption:prob-smooth} can be a
$\|\cdot\|_{2,P_1}$-neighborhood of $\popfunc \in \holderball{\holdersmooth}{\holderradius}.$ This condition is necessary, as $\hat{\nu}_{1,k}(x)$ solves an empirical version of the optimization problem~\eqref{eqn:opt-prob} using the estimator $\what{\theta}_{1k}^{\nu_1}(\cdot)$ instead of the true $\funcparam_1(\cdot).$ Assumption \ref{assumption:lip-cdf} guarantees that the map
$\funcparam \mapsto P\left(Y(1) \ge \funcparam(x) \mid \treatmentrv=1, X=x\right)$
is also sufficiently smooth. A simple sufficient condition for Assumption \ref{assumption:lip-cdf} is that $Y(1) \mid \treatmentrv = 1, X=x$ has a bounded density for almost every $x\in \mathcal{X}.$ If the density in certain regions is not bounded, but we know a priori that $\theta(x) \not= y$ in these regions, then we choose $S$ so that $\what{\theta}_{1k}^{\nu_1} \not= y$ in these regions, as well. For instance, if $Y(1) \in \{0, 1\}$, then unless it is deterministic, $\theta_1(x) \in (0,1)$, so $\theta_1 \in S = \holderball{\probsmooth}{\probradius} \cap \{ f: \covspace \to (0,1) \}$, and $P_{Y(1) \mid X=x, Z=1}$ has a density $p_{Y(1) \mid X=x, Z=1}(\theta_1(x)) = 0$, which implies Assumption~\ref{assumption:lip-cdf}.
Under these additional assumptions, the following proposition gives the convergence rate of the proposed sieve estimator. See the proof in the Supplementary Materials Section~\ref{section:proof-of-prob-sieve}. \begin{restatable}{prop}{propprobsieve}
\label{prop:prob-sieve}
For $\covspace = [0, 1]^d$, let $\probspace_n$ be the finite dimensional
linear sieves considered in Examples~\ref{example:polynomials} or
\ref{example:splines}. Let
$\epsilon_n = (\frac{\log n}{n})^\frac{q}{2q + d}$ and
$\comp_n \asymp \epsilon_n^{-1/q}$. Let
Assumptions~\ref{assumption:lebesgue-equiv},~\ref{assumption:prob-smooth},
and~\ref{assumption:lip-cdf} hold. Assume that
$\norms{\what\theta_{1k}^{\nu_1} - \popfunc}_{2,P_1}=O_p(\epsilon_n)$, and
let $\what{\nu}_{1,k}$
satisfy
\begin{equation*}
\mathbb{E}_{n,2}^{(k)} \left[ \bar{\loss}_{\Gamma}\left(\what{\nu}_{1,k}(\covariaterv), \what{\theta}_{1k}^{\nu_1}(\covariaterv), Y(1)\right)\right]
\le \inf_{ \probfunc \in 1 + (\Gamma-1)\probspace_n}
\mathbb{E}_{n,2}^{(k)}\left[ \bar{\loss}_{\Gamma}\left(\probfunc(\covariaterv), \what{\theta}_{1k}^{\nu_1}(\covariaterv), Y(1)\right)\right]
+ O_p(\epsilon_n^2).
\end{equation*}
Then $\norms{\what{\nu}_{1,k} - \popprob}_{2, P} = O_p(\epsilon_n)$. \end{restatable}
\section{A Practical Procedure for Estimating \lowercase{$\nu_{z}(\cdot)$}} \label{sec:practical-nu}
The nested cross-fitting procedure proposed in Section~\ref{sec:nu-est} enjoys strong theoretical properties, but it may be computationally expensive in practice. For instance, when using $10$-fold cross-fitting, this requires fitting the nonparametric sieve estimator $100$ times on different subsamples. To reduce the computational complexity, we use an iterative cross-fitting procedure. We describe our algorithm for the treated units $z=1$, as the case for control units $z=0$ is symmetric. \begin{enumerate} \item Select hyperparameters for estimating $\theta_{1}(\cdot)$
using cross-validation on the weighted squared
loss~\eqref{eq:gamma-loss}. \item Fit models $\what\theta_{1,k}(\cdot),~k=1,\dots,K$ using cross-fitting
with the selected hyperparameters. \item Compute the binary targets $V_i = 1\{Y_i \le \what\theta_{1,k}(X_i)\}$
for each observation $i$ based on the corresponding $k$ such that
$i \in \mathcal{I}_k$. \item Using the targets $V_i$, choose hyperparameters for estimating
$\nu_1(\cdot)$ using cross-validation; use the loss described in
Section~\ref{sec:nu-est} with observations $(V_i, X_i)$. \item With the selected hyperparameters, fit final estimators
$\what\nu_{1,k}(\cdot)$ using the observations $(V_i, X_i)$ for
$i \in \mathcal{I}_{-k}$. \item Using these nuisance parameter estimates alongside estimates of the
propensity score, $\what{e}_{1,k}(\cdot)$, calculate the cross-fitted
semiparametric estimate $\what\tau^{-}$. \end{enumerate} Due to the cross-fitting construction, the $\what\theta_{1,k}(\cdot)$ used to construct the targets $V_i$ are independent of the $i$-th observation, capturing the key property of the nested cross-fitting needed for estimation of $\nu_1(\cdot)$ as described in Section~\ref{sec:nu-est}. Additionally, $\what{\theta}_{1,k}$ is independent of the observations $(X_i, Y_i, Z_i)$ for $i \in I_k$ when plugged in to the cross-fit estimate $\what\tau^{-}$.
However, the iterative construction does not guarantee the independence of $\what\nu_1(\cdot)$ and the observations $(X_i, Y_i, Z_i)$ for $i \in I_k$. To see this, let $k$ and $k'$ be two distinct fold indices between $1$ and $K$, and let $i$ be an observation in $\mathcal{I}_k$ and $i'$ be an observation in $\mathcal{I}_{k'}$. Observation $i$ is used to fit $\what\theta_{1, k'}(\cdot)$, which is then used to compute $V_{i'}$. Therefore, the $i$-th observation is not independent of $V_{i'}$, which is used to estimate $\what\nu_{1,k}(\cdot)$. In summary, the $i$-th observation will generally not be independent of $\what\nu_{1,k}(\cdot)$. In all of our numerical experiments, this dependence does not have a noticeable effect on the distribution of $\what{\tau}^{-}$.
\section{Proofs for Sieve Estimation} \subsection{Proof of Theorem~\ref{thm:sieve}} \label{section:proof-of-sieve}
We require a few notions of complexity to give this proof. Let $\mc{V}$ be a vector space with (semi)norm $\norm{\cdot}$ on $\mc{V}$, and let $V \subset \mc{V}$. A collection $\{v_1, \ldots, v_\covnum\} \subset V$ is an \emph{$\epsilon$-cover} of $V$ if for each $v \in V$, there exists $v_i$ such that $\norm{v - v_i} \le \epsilon$. The \emph{covering number} of $V$ with respect to $\norm{\cdot}$ is then $\covnum(V, \epsilon, \norm{\cdot}) \defeq \inf\left\{\covnum \in \N : ~ \mbox{there~is~an~} \epsilon \mbox{-cover~of~} V \mbox{~of~size~} \covnum ~ \mbox{with~respect~to~} \norm{\cdot} \right\}$. For some fixed $b > 0$, define the sequence \begin{equation}
\label{eqn:covering-mod}
\covmod_n \defeq
\inf\left\{ \delta \in (0, 1):
\frac{1}{\sqrt{n} \delta^2}
\int_{b\delta^2}^{\delta} \sqrt{\log \covnum\left(
\epsilon^{1 + \covdim / 2\holdersmooth }, \funcspace_n, \norm{\cdot}_{2, \treatedP}
\right)} d\epsilon
\le 1 \right\}. \end{equation} The following convergence result is a consequence of general results on sieve estimators~\cite{ChenSh98, Huang98, Chen07} adapted for the optimization problem~\eqref{eqn:opt}. \begin{restatable}{lemma}{lemsieve}
\label{lemma:sieve}
Let
Assumptions~\ref{assumption:holder-smooth}, \ref{assumption:bdd-error},
and~\ref{assumption:lebesgue-equiv}
hold, and let $\empfunc$ minimize the empirical risk~\eqref{eqn:opt-emp}
to accuracy
\begin{equation*}
\E_{1,n}\left[ \loss_\Gamma\left\{\empfunc (\covariaterv), Y(1)\right\}\right]
\le \inf_{ \funcparam \in \funcspace_n}
\E_{1,n}\left[ \loss_\Gamma\left\{\funcparam (\covariaterv), Y(1)\right\} \right]
+ O_p\left(\epsilon_n^2\right)
\end{equation*}
where
$\epsilon_n \defeq \max\{ \covmod_n, \inf_{\funcparam \in \funcspace_n}
\norm{\popfunc - \funcparam}_{2, \treatedP}\}$. Then,
$\norms{\empfunc - \popfunc}_{2, \treatedP} = O_p( \epsilon_n)$. \end{restatable} \begin{proof}
To prove the result, it is sufficient to verify the assumptions of the
following general result for sieve estimation due to~\citet{ChenSh98}
(see also~\cite{Chen07, Huang98}). \begin{lemma}[{\citet[Theorem 3.2]{Chen07}}]
\label{lemma:sieve-chen}
Let $\popfunc \in \holderball{\holdersmooth}{\holderradius}$ for some
$\holdersmooth > 0, \holderradius < \infty$, and for $\funcparam$ in some
neighborhood of $\popfunc$ assume that
\begin{equation*}
\treatedE[ \loss_\Gamma(\funcparam(\covariaterv), Y(1))] - \treatedE[
\loss_\Gamma(\popfunc(\covariaterv), Y(1))] \asymp \norm{\funcparam -
\popfunc}_{2, \treatedP}^2.
\end{equation*}
For $\delta$ small enough, assume there exists
a function $M : \mc{X} \times \R \to \R_+$ such that
\begin{align}
& \sup_{\funcparam \in \funcspace_n:
\norm{\funcparam - \popfunc}_{2, \treatedP} \le \delta}
\var_{\treatedP} \left(
\loss_\Gamma(\funcparam(\covariaterv), Y(1))
- \loss_\Gamma(\popfunc(\covariaterv), Y(1))
\right) \lesssim \delta^2 \label{eqn:var-modulus} \\
& \sup_{\funcparam \in \funcspace_n:
\norm{\funcparam - \popfunc}_{2, \treatedP} \le \delta}
\left|
\loss_\Gamma(\funcparam(\covariaterv), Y(1))
- \loss_\Gamma(\popfunc(\covariaterv), Y(1))
\right|
\le \delta^s M(\covariaterv, Y(1))
\label{eqn:loss-modulus}
\end{align}
for some $s \in (0, 2)$ where
$\treatedE[M(\covariaterv, Y(1))^2] < \infty$. Then
$\norms{\empfunc - \popfunc}_{2, \treatedP} = O_p( \epsilon_n)$. \end{lemma}
To verify these assumptions, first we check that \begin{equation}
\treatedE\left[ \loss_\Gamma \left\{\funcparam (\covariaterv), Y(1)\right\}\right] - \treatedE \left[
\loss_\Gamma\left\{\popfunc(\covariaterv), Y(1)\right\} \right] \asymp \norm{\funcparam -
\popfunc}_{2, \treatedP}^2.
\label{eqn:loss-differences-like-ltwo} \end{equation} Indeed, $\loss_\Gamma(\cdot, \outcome)$ is $1$-strongly convex and has $\Gamma$-Lipschitz derivative, so for any $t, t' \in \R$, \begin{equation}
\half (t - t')^2
+ \psi_{t'}(\outcome) (t - t')
\le \loss_\Gamma(t, \outcome) - \loss_\Gamma(t', \outcome)
\le \psi_{t'}(\outcome) (t - t')
+ \frac{\Gamma}{2} (t - t')^2,
\label{eqn:loss-upper-lower} \end{equation} where we have used that $\psi_{\funcscalar}(\outcome) =\hinge{\outcome -
\funcscalar} - \Gamma \neghinge{\outcome-\funcscalar} = \frac{\partial}{\partial \funcscalar}\loss_\Gamma(\funcscalar, y)$. Recalling that $\treatedE[ \psi_{\popfunc(\covariaterv)}(Y(1)) \mid
\covariaterv] = 0$ almost surely, taking expectations yields~\eqref{eqn:loss-differences-like-ltwo} as \begin{equation*}
\half \ltwotp{\funcparam - \popfunc}^2
\le \treatedE\left[ \loss_\Gamma\left\{\funcparam (\covariaterv), Y(1)\right\}\right] - \treatedE \left[
\loss_\Gamma\left\{\popfunc(\covariaterv), Y(1)\right\} \right]
\le \frac{\Gamma}{2} \ltwotp{\funcparam - \popfunc}^2. \end{equation*}
Next, we verify~\eqref{eqn:var-modulus} and \eqref{eqn:loss-modulus}. By substituting in inequality~\eqref{eqn:loss-upper-lower}, we have \begin{equation}
\label{eqn:modulus-bound}
|\loss_\Gamma(\funcparam(\covariate), \outcome)
- \loss_\Gamma(\popfunc(\covariate), \outcome) |
\le \Gamma |\outcome - \popfunc(\covariate)| |\funcparam(\covariate) - \popfunc(\covariate)|
+ \Gamma |\funcparam(\covariate) - \popfunc(\covariate)|^2. \end{equation} The following lemma~\cite{ChenSh98, Gabushin67} connects the $L^2(\lambda)$-norm of $\funcparam \in \holderball{\holdersmooth}{\holderradius}$ to its supremum norm (where $\lambda$ denotes the Lebesgue measure). \begin{lemma}[{\citet[Lemma 2]{ChenSh98}}]
\label{lemma:two-sup-norm}
For $\funcparam \in \holderball{\holdersmooth}{\holderradius}$, we have
$\linf{\funcparam} \le 2 c^{1-\frac{2\holdersmooth}{2\holdersmooth + \covdim}} \norm{\funcparam}_{2,
\lambda}^{\frac{2\holdersmooth}{2\holdersmooth + \covdim}}$. \end{lemma} \noindent Note that $\norm{\cdot}_{2, \lambda} \asymp \norm{\cdot}_{2, \treatedP}$ by Assumption~\ref{assumption:lebesgue-equiv}, and so $\linf{\funcparam} \lesssim \norm{\funcparam}_{2, \treatedP}^{\frac{2\holdersmooth}{2\holdersmooth + \covdim}}$.
Taking squares on both sides in the inequality~\eqref{eqn:modulus-bound} and using convexity of $t \mapsto t^{2}$ gives \begin{equation*}
|\loss_\Gamma(\funcparam(\covariate), \outcome)
- \loss_\Gamma(\popfunc(\covariate), \outcome) |^2
\le 2\Gamma^2 |\outcome - \popfunc(\covariate)|^2 |\funcparam(\covariate) -
\popfunc(\covariate)|^2
+ 2\Gamma^2 |\funcparam(\covariate) - \popfunc(\covariate)|^4. \end{equation*} Recalling from Assumption~\ref{assumption:bdd-error} that $\treatedE[(Y(1) -
\popfunc(\covariaterv))^2 \mid \covariaterv] \le \sigma^2_{\rm shift}$ for some $\sigma^2_{\rm shift} < \infty$, Lemma~\ref{lemma:two-sup-norm} implies that \begin{align*}
\sup_{\funcparam \in \funcspace_n:
\norm{\funcparam - \popfunc}_{2, \treatedP} \le \delta}
\var_{\treatedP} \left(
\loss_\Gamma(\funcparam(\covariaterv), Y(1))
- \loss_\Gamma(\popfunc(\covariaterv), Y(1))
\right) \lesssim \Gamma^2 \sigma^2_{\rm shift} \delta^2 +
\Gamma^2 \delta^{2+ \frac{4\holdersmooth}{2\holdersmooth + \covdim}}
\lesssim \delta^2 \end{align*} whenever $\delta \in (0, 1)$. This verifies the condition~\eqref{eqn:var-modulus}. Similarly, for $\delta$ small \begin{equation*}
\sup_{\funcparam \in \funcspace_n:
\norm{\funcparam - \popfunc}_{2, \treatedP} \le \delta}
\left|
\loss_\Gamma(\funcparam(\covariaterv), Y(1))
- \loss_\Gamma(\popfunc(\covariaterv), Y(1))
\right|
\lesssim \Gamma \delta^{\frac{2\holdersmooth}{2\holdersmooth + \covdim}} c^{1-\frac{2\holdersmooth}{2\holdersmooth+\covdim}}
\left(|Y(1) - \popfunc(X)| + c^{1-\frac{2\holdersmooth}{2\holdersmooth+\covdim}}\right). \end{equation*} Noting $\treatedE (Y(1) - \popfunc(X))^2 <\infty$ verifies the condition~\eqref{eqn:loss-modulus} with $s = 2\holdersmooth / (2\holdersmooth + \covdim).$ \end{proof}
It now suffices to bound $\covmod_n$ and the approximation error
$\inf_{\funcparam \in \funcspace_n} \norm{\popfunc - \funcparam}_{2,
\treatedP}$ in Lemma~\ref{lemma:sieve}. First, note
from~\citet{ChenSh98} and \citet{vandeGeer00} that \begin{equation*}
\log\covnum\left(
\epsilon, \funcspace_n, \ltwotp{\cdot}
\right)
\lesssim \mbox{dim}(\funcspace_n) \log \frac{1}{\epsilon}, \end{equation*} where $\mbox{dim}(\funcspace_n) = \comp_n^d$. Then \begin{equation*}
\frac{1}{\sqrt{n} \delta^2}
\int_{b\delta^2}^{\delta} \sqrt{\log \covnum\left(
\epsilon^{1 + \covdim / 2\holdersmooth }, \funcspace_n, \norm{\cdot}_{2, \treatedP}
\right)} d\epsilon
\lesssim \frac{1}{\delta} \sqrt{\frac{\mbox{dim}(\funcspace_n)}{n}\log \frac{1}{\delta}}, \end{equation*} which implies that $$\covmod_n \asymp \sqrt{\frac{\mbox{dim}(\funcspace_n) \log n}{n}} = \sqrt{\frac{\comp_n^d \log n}{n}}.$$
For $\funcspace_n$ defined as in Examples~\ref{example:polynomials} or \ref{example:splines} with $\comp=\comp_n$, standard function approximation results yield $\inf_{\funcparam \in \funcspace_n} \linf{\funcparam -
\popfunc} = O(\comp_n^{-\holdersmooth})$. (See \citet[Section 5.3.1]{Timan63} and \citet[Theorem
12.8]{Schumaker07}, respectively.)
Therefore, for any of these choices of approximating functions, \begin{equation*}
\inf_{\funcparam \in \funcspace_n} \norm{\popfunc - \funcparam}_{2,
\treatedP} = O(\comp_n^{-\holdersmooth}). \end{equation*}
Set $\comp_n \asymp n^{\frac{1}{2\holdersmooth + \covdim}} (\log n)^{-\frac{1}{2\holdersmooth + \covdim}}$ in Lemma~\ref{lemma:sieve}, so that $\|\what\theta_1-\theta_1\|_{2,P_1}=O_p((\frac{\log n}{n} )^{\frac{\holdersmooth}{2\holdersmooth + \covdim}})$.
Finally, Lemma~\ref{lemma:two-sup-norm} gives the comparison between $\norm{\cdot}_{2, \treatedP}$ and $\norm{\cdot}_{\infty, \treatedP}$.
\input{nu-proof}
\section{Proofs for properties of bounds on the ATE} \subsection{Proof of the consistency of the ATE estimator} \label{sec:proof-consistency}
\thmconsistency* \begin{proof}
To establish the convergence of $\what{\mu}_1^-$, we use the cross-fitting
construction to split $\what{\mu}_1^-$ into $K$ estimators and establish
the convergence of each separately. Because $K$ is fixed, $\what{\mu}_1^-$
will converge if each does. For the estimator in the $k$-th fold, the
samples used to estimate $(\what{\theta}_{1,k}, \what{\nu}_{1,k},
\what{e}_{1,k})$ are independent of the samples in $\mc{I}_k$, and
hence we consider the sum
\begin{equation*}
\what{\mu}_{1,k}^- \defeq
\frac{1}{|\mathcal{I}_k|} \sum_{i \in \mathcal{I}_k} Z_i Y_i
+ (1-Z_i)\what{\theta}_{1,k}(X_i) +
Z_i \frac{\psi_{\what{\theta}_{1,k}(X_i)}(Y_i)(1-\what{e}_{1,k}(X_i))}{
\what{\nu}_{1,k}(X_i)\what{e}_{1,k}(X_i)}.
\end{equation*}
For notational simplicity, let $m = |\mathcal{I}_k|$. By
construction, $m \approx n/K$, so establishing convergence as $m \to
\infty$ among the samples in $\mathcal{I}_k$ is equivalent to establishing
the convergence as $n \to \infty$.
The argument for establishing consistency is relatively
standard~\cite{chernozhukov2018double}. The only challenge is that
$\what{\nu}_{1,k}$ (recall the
definition~\eqref{eqn:weight-normalization-def-a} and
estimator~\eqref{eqn:opt-emp-prob}) changes as a function of $m$, so that
for $i\in \mathcal{I}_k$
\begin{equation*}
Z_i Y_i + (1-Z_i)\what{\theta}_{1,k}(X_i) + Z_i \frac{\psi_{\what{\theta}_{1,k}(X_i)}(Y_i)(1-\what{e}_{1,k}(X_i))}{\what{\nu}_{1,k}(X_i)\what{e}_{1,k}(X_i)}
\end{equation*}
are only i.i.d.\ conditional on $\what{\nu}_{1,k}$. Define the
$\sigma$-algebra $\mc{F}_{\infty, k}$ generated by the samples in the set
$\mc{I}_{-k}$ as $n \to \infty$, so that the elements in the preceding
display are i.i.d.\ conditional on $\mc{I}_{-k}$.
Because $\P(|\what{\mu}_1^- - \mu_1^-|> \epsilon \mid \mc{F}_{\infty, k})
\cas 0$ implies $\P(|\what{\mu}_1^- - \mu_1^-|> \epsilon ) \to 0$ by
dominated convergence, it suffices to show that $\what{\mu}_1^- \cp
\mu_1^-$ conditionally almost surely on $\mc{F}_{\infty, k}$. To check
convergence conditional on $\mc{F}_{\infty, k}$, note that the elements in
the preceding display form a triangular array for which the weak law of
large numbers still holds. Then, we apply the following weak
law for triangular arrays.
\begin{thm}[Dembo~\cite{Dembo16}, Corollary 2.1.14]
\label{thm:wlln-tri}
Suppose that for each $m$, the random variables
$\xi_{m,i},\,i=1,\dots,m$ are pairwise independent, identically
distributed for each $m$, and $\E[|\xi_{m,1}|] < \infty$. Then setting
$S_m = \sum_{i = 1}^m \xi_{m,i}$ and $a_m = \sum_{i=1}^m
\E\overline{\xi}_{m,i}$, where $\overline{\xi}_{m,i} \defeq \xi_{m,i} \ind{|\xi_{m,i}| \le m}$,
\begin{equation*}
m^{-1}(S_m - a_m) \cp 0~\mbox{as}~m \to \infty.
\end{equation*}
\end{thm}
\newcommand{\E_{\infty,k}}{\E_{\infty,k}}
For the constant $\constlow > 0$ in
Assumption~\ref{assumption:bounded-variance}, define the event
\begin{equation*}
A \defeq \left\{ \inf_{x} \what{e}_{1,k}(x) \ge \epsilon,
\inf_{x} \what{\nu}_{1,k}(x) \ge \constlow,
~\mbox{and}~
\norms{\theta_1 - \what{\theta}_{1,k}}_{1,P} \le 1
\right\},
\end{equation*}
which is $\mc{F}_{\infty,k}$-measurable. Let
\begin{equation*}
\xi_{m,i} \defeq \ind{A} \left( Z_i Y_i + (1-Z_i)\what{\theta}_{1,k}(X_i)
+ Z_i \frac{\psi_{\what{\theta}_{1,k}(X_i)}(Y_i)(1-\what{e}_{1,k}(X_i))}{
\what{\nu}_{1,k}(X_i)\what{e}_{1,k}(X_i)}\right),
\end{equation*}
so that $\what{\mu}_{1,k}^- \ind{A} = \frac{1}{m} \sum_{i=1}^m \xi_{m,i}$.
Assume without loss of generality that $\mathcal{I}_k = \{1,\dots,m\}$.
Defining the conditional expectation $\E_{\infty,k}[\cdot] \defeq \E[\cdot
\mid \mc{F}_{\infty, k}]$ for shorthand, we have
\begin{align*}
\E_{\infty,k}[|\xi_{m,i}|]
& = \ind{A}\E_{\infty,k}\left[\bigg| Z Y(1) + (1-Z)\what{\theta}_{1,k}(X)
+ Z \frac{\psi_{\what{\theta}_{1,k}(X)}(Y(1))(1-\what{e}_{1,k}(X))}{
\what{\nu}_{1,k}(X)\what{e}_{1,k}(X)}
\bigg|\right] \\
& \le \ind{A}\left(\E_{\infty,k}\left| Y(1) \right| +
\E_{\infty,k}\left|\what{\theta}_{1,k}(X)\right| + \E_{\infty,k}\left|
\frac{\psi_{\what{\theta}_{1,k}(X)}(Y(1))(1-\what{e}_{1,k}(X))}{\what{\nu}_{1,k}(X)\what{e}_{1,k}(X)}
\right| \right) \\
& \le \ind{A} \left( \E_{\infty,k}\left| Y(1) \right| +
\E_{\infty,k}\left|\what{\theta}_{1,k}(X)\right| + \frac{1}{\epsilon}\E_{\infty,k}\left|
\psi_{\what{\theta}_{1,k}(X)}(Y(1)) \right|\right) \\
& \le \ind{A} \left(
\E_{\infty,k}\left| Y(1) \right| + \E_{\infty,k}\left|\what{\theta}_{1,k}(X)\right| +
\frac{\Gamma}{\epsilon}\E_{\infty,k}\left| Y(1) \right| +
\frac{\Gamma}{\epsilon}\E_{\infty,k}\left|\what{\theta}_{1,k}(X)\right|\right) \\
& < \infty,
\end{align*}
because $\indic{A} \E_{\infty,k}|\what{\theta}_{1,k}(X)| \le \indic{A}
(\E|\theta_1(X)| + 1) < \infty$ and $\E_{\infty,k}|\theta_1(X)| \le \Gamma
\E_{\infty,k}[|Y(1)|]
= \E[|Y(1)|] < \infty$, all
conditionally almost surely on $\mc{F}_{\infty,k}$.
Adopt the notation $S_m = \sum_{i \in \mathcal{I}_k} \xi_{m,i}$ and
$a_m = \E_{\infty,k}[S_m]$.
Theorem~\ref{thm:wlln-tri} implies that $\frac{1}{m}(S_m - a_m) \cp 0$
(conditionally a.s.). Next, we show that $\frac{1}{m}a_m -
\E_{\infty,k}[\xi_{m,1}] = \E_{\infty,k}[\overline{\xi}_{m,1}] -
\E_{\infty,k}[\xi_{m,1}] \cp 0$. Recall that $|\xi_{m,1}|\ind{|\xi_{m,1}| >
m} \to 0$ a.s.\ and
$\E_{\infty,k}[|\xi_{m,1}|\ind{|\xi_{m,1}| > m}] \le
\E_{\infty,k}[|\xi_{m,1}|] < \infty$, so dominated convergence
implies $\E_{\infty,k}[|\xi_{m,1}|\ind{|\xi_{m,1}| > m}] \to
0$. Together, these imply that
\begin{align*}
\lefteqn{\ind{A} \what{\mu}_{1,k}^- - \E_{\infty,k}[\xi_{m,i}]} \\
& = \ind{A}\frac{1}{m} \sum_{i=1}^m
\left(Z_i Y_i + (1-Z_i)\what{\theta}_{1,k}(X_i) + Z_i \frac{\psi_{\what{\theta}_{1,k}(X_i)}(Y_i)(1-\what{e}_{1,k}(X_i))}{\what{\nu}_{1,k}(X_i)\what{e}_{1,k}(X_i)}
\right)
- \E_{\infty,k}[\xi_{m,i}] \cp 0
\end{align*}
conditionally almost surely, and
$\ind{A} \what{\mu}_{1,k}^- - \what{\mu}_{1,k} \cp 0$ conditionally
almost surely, as $\P(A) \to 1$.
Jensen's inequality,
Assumption~\ref{assumption:bounded-variance}(d,e), and that
$\E[\psi_{\theta(X)}(Y(1)) \mid Z = 1, X] = 0$ almost surely
allow us to bound the error
\begin{align*}
\lefteqn{|\E_{\infty,k}[\xi_{m,i}] - \mu_1^-|} \\
& = \left|\ind{A}
\E_{\infty,k}\left[(1 - Z) \left(\what{\theta}_{1,k}(X)
- \theta_1(X)\right) + Z \frac{\psi_{\what{\theta}_{1,k}}(Y(1))
(1 - \what{e}_{1,k}(X))}{\what{\nu}_{1,k}(X) \what{e}_{1,k}(X)}\right]
\right| + o_P(1) \\
&=
\begin{aligned}[t]
&\Bigg|\ind{A}\E_{\infty,k}\Bigg[ (1-Z)\left(\what{\theta}_{1,k}(X)
- \theta_{1}(X)\right) + Z \frac{\left(\psi_{\what{\theta}_{1,k}(X)}(Y(1))-\psi_{{\theta}_{1}(X)}(Y(1))\right)(1-\what{e}_{1,k}(X))}{\what{\nu}_{1,k}(X)\what{e}_{1,k}(X)} \\
&- Z \frac{\psi_{{\theta}_{1}(X)}(Y(1))(1-\what{e}_{1,k}(X))}{\what{\nu}_{1,k}(X)\what{e}_{1,k}(X)} \Bigg] \Bigg| + o_P(1)
\end{aligned} \\
& \le
\E_{\infty,k}\left[
\left|\what{\theta}_{1,k}(X)- \theta_1(X)\right| \right]
+ \E_{\infty,k}\left[ \frac{\Gamma}{c \epsilon}\left|\psi_{\what{\theta}_{1,k}(X)}(Y(1))-\psi_{{\theta}_{1}(X)}(Y(1))\right| \right]
+ o_P(1),
\end{align*}
where the $o_P(1)$ term comes from the fact that
Assumption~\ref{assumption:bounded-variance}(b,d,e) imply $\P(A) \to
1$. Finally, the Lipschitz continuity $|\psi_t(y) - \psi_s(y)| \le
\Gamma|s - t|$
and Assumption~\ref{assumption:bounded-variance}(b)
that $\norms{\what{\theta}_{1,k} - \theta_1}_{1,P} \cp 0$ give that
$|\E_{\infty,k}[\xi_{m,i}] - \mu_1^-| \cp 0$. \end{proof}
\subsection{Proof of Theorem~\ref{thm:semiparametric}} \label{sec:proof-semiparametric}
%% NOTE(review): two \newcommand declarations here were garbled in extraction
%% (`\newcommand{_\star}{_\star}` is not valid LaTeX); the intended macros
%% appear already expanded as $_\star$ and $\dot{\ell}$ throughout the text below.
\semiparametricnormality*
The proof depends heavily on Theorems 3.1 and 3.2 and Corollary 3.1 of \citet{chernozhukov2018double}; primarily, we check their Assumptions 3.1 and 3.2 for our proposed estimator. Stating their results requires a bit of notation, which we introduce here briefly. We begin with their general assumptions about a score function. Their result allows multi-dimensional estimates, so we allow a $d$-dimensional score in the assumptions. \begin{assumption}[\citet{chernozhukov2018double}, Assumption 3.1]
\label{assumption:dml-score}
Let $\mc{W}$ be a measurable space and
$\mc{T}$ be a collection of (nuisance) functions mapping
$\mc{W} \to \R^{d_0}$. Let $d \in \N$ and
$\Theta \subset \R^d$, and let
$\dot{\ell} : \mc{W} \times \Theta \times \mc{T} \to \R^d$ be a score function.
Let $\theta_\star \in \Theta$ be the true parameter of interest
and $\eta_\star \in \mc{T}$ be the true nuisance.
There exist $0 < c_0 \le c_1 < \infty$ such that for all $n \ge 3$, the
following conditions hold.
\begin{enumerate}[(a)]
\item \label{item:mean-true-param}
The true parameter $\theta_\star$ obeys
\begin{equation*}
\E\left[\dot{\ell}(W, \theta_\star, \eta_\star) \right] = 0.
\end{equation*}
\item \label{item:linearization-score}
There exist functions $\dot{\ell}^a$ and $\dot{\ell}^b$ such that
the score $\dot{\ell}$ is linear in $\theta$, satisfying
\begin{equation*}
\dot{\ell}(w, \theta, \eta) = \dot{\ell}^a(w; \eta)\theta + \dot{\ell}^b(w; \eta),
\end{equation*}
for all $w \in \mathcal{W}, \theta \in \Theta, \eta \in \mathcal{T}$.
\item \label{item:twice-gateaux}
The map $\eta \mapsto \E[\dot{\ell}(W, \theta, \eta)]$ is twice
continuously Gateaux-differentiable on $\mathcal{T}$.
\item \label{item:neyman-orthogonality}
The score function $\dot{\ell}$ obeys the Neyman orthogonality
condition~\cite[Def.~2.1]{chernozhukov2018double}, i.e.\ the
derivative $\frac{\dif{}}{\dif{r}} \E[ \dot{\ell}(W, \theta_\star, \eta_\star
+ r(\eta - \eta_\star))]$ exists for all $\eta \in \mathcal{T}$ and
$r$ near 0, and
\begin{equation}
\label{eqn:neyman-orthogonality}
\frac{\dif{}}{\dif{r}} \E\left[ \dot{\ell}(W, \theta_\star, \eta_\star +
r(\eta - \eta_\star)) \right]
\bigg|_{r=0} = 0.
\end{equation}
\item\label{item:linear-score-matrix}
Let $J_0 \defeq \E[\dot{\ell}^a(W; \eta_\star)]$. The singular values of
$J_0$ lie in $[c_0, c_1]$.
\end{enumerate} \end{assumption} \noindent We also require assumptions on properties of the nuisance variables. \begin{assumption}[\citet{chernozhukov2018double}, Assumption 3.2]
\label{assumption:dml-nuisance}
Let the notation of Assumption~\ref{assumption:dml-score} hold. Let
$\delta_n$ and $\Delta_n$ be sequences with $\delta_n \ge n^{-1/2}$ and
$\lim_{n\to\infty} \delta_n = \lim_{n\to\infty} \Delta_n = 0$. There
exist $0 < \epsilon$, $0 < c_0, c_1 < \infty$ and a set $\mc{T}_n$ with
the following properties.
\begin{enumerate}[(a)]
\item \label{item:def-of-tn}
Given a random subset $I$ of $[n]$ of size $ n / K$, the nuisance
parameter $\what{\eta}$
estimated on $\{W_i\}_{i \in I^c}$ belongs to $\mathcal{T}_n$
with probability at least $1-\Delta_n$. $\mc{T}_n$ contains
$\eta_\star$.
\item \label{item:moment-conditions}
The following moment conditions hold:
\begin{align*}
m_n & = \sup_{\eta \in \mathcal{T}_n}\left( \E[\|\dot{\ell}(W, \theta_\star,
\eta)\|^q]
\right)^{1/q} \le c_1
~~ \mbox{and} ~~
m_n' = \sup_{\eta \in \mathcal{T}_n}\left( \E[\|\dot{\ell}^a(W; \eta)\|^q]
\right)^{1/q} \le c_1.
\end{align*}
\item \label{item:score-rates}
Define the rates
\begin{align*}
r_n & \defeq \sup_{\eta \in \mathcal{T}_n} \E[\|\dot{\ell}^a(W; \eta)
- \dot{\ell}^a(W; \eta_\star)\|], ~~
r_n' \defeq \sup_{\eta \in \mathcal{T}_n} \E[\|\dot{\ell}(W, \theta_\star, \eta)
- \dot{\ell}(W, \theta_\star, \eta_\star)\|^2]^{\half}
\\
\lambda_n' & \defeq \sup_{0 < r < 1, \eta \in \mathcal{T}_n}
\left\|\E\left[\frac{d^2}{dr^2}
\dot{\ell}(W, \theta_\star, \eta_\star + r(\eta -\eta_\star)) \right]
\right\|.
\end{align*}
Then $r_n \le \delta_n$, $r_n' \le \delta_n$, and $\lambda_n'
\le \delta_n / \sqrt{n}$.
\item \label{item:score-variance}
The score has positive variance:
$\lambda_{\min}(\E[ \dot{\ell}(W, \theta_\star, \eta_\star)
\dot{\ell}(W, \theta_\star, \eta_\star)^\top]) \ge c_0$.
\end{enumerate} \end{assumption}
It is possible to relax Assumption~\ref{assumption:dml-score}(d), the equality~\eqref{eqn:neyman-orthogonality}, to require only that
$|\frac{\partial}{\partial r} \E[\dot{\ell}(W, \theta_\star, \eta_\star + r(\eta -
\eta_\star))]|_{r = 0}| \le \lambda_n$ for $\lambda_n \to 0$ sufficiently quickly; we shall not need such generality. With these assumptions in place, we have the following theorem.
\begin{thm}[Chernozhukov et al.~\cite{chernozhukov2018double}, Theorems 3.1
and 3.2]
\label{thm:dml-score}
\newcommand{\what{\theta}_{\textup{DML}}}{\what{\theta}_{\textup{DML}}} Let
Assumptions~\ref{assumption:dml-score} and~\ref{assumption:dml-nuisance}
hold. In addition, suppose that $\delta_n \ge n^{-1/2}$ for all $n \ge
1$. Let the sets $\{\mc{I}_k\}_{k=1}^K$ equally (at random) partition
$[n]$. Define the empirical estimators $\what{\eta}_k$ based on $i \not
\in \mc{I}_k$ and the double-robust-double-machine-learning (DMLR2)
estimator $\what{\theta}_{\textup{DML}}$ as the root of
\begin{equation*}
\frac{1}{n} \sum_{k = 1}^K
\sum_{i \in \mc{I}_k} \dot{\ell}(W_i, \what{\theta}_{\textup{DML}}, \what{\eta}_k)
= 0.
\end{equation*}
Then
$\what{\theta}_{\textup{DML}}$ concentrates in a $1/\sqrt{n}$ neighborhood of
$\theta_\star$ and is approximately linear and centered Gaussian:
\begin{equation*}
\sqrt{n}(\what{\theta}_{\textup{DML}} - \theta_\star)
= \frac{1}{\sqrt{n}} \sum_{i=1}^n \dot{\ell}(W_i, \theta_\star, \eta_\star)
+ O_P(\rho_n) \cd \normal(0, \Sigma),
\end{equation*}
where the
remainder term $\rho_n = n^{-1/2} + r_n + r_n' + n^{1/2}\lambda_n +
n^{1/2}\lambda_n'$ and the variance $\Sigma = J_0^{-1}\E[ \dot{\ell}(W,
\theta_\star, \eta_\star) \dot{\ell}(W, \theta_\star,
\eta_\star)^\top] J_0^{-1 \top}$, where $J_0$ is as in
Assumption~\ref{assumption:dml-score}(\ref{item:linear-score-matrix}).
If additionally $\delta_n \ge n^{-(1 - 2/q) \wedge 1/2}$
and
\begin{equation*}
\what{\Sigma}
= \frac{1}{n}
\sum_{k = 1}^K \sum_{i \in \mc{I}_k} \what{J}_0^{-1}
\dot{\ell}(W_i, \what{\theta}_{\textup{DML}}, \what{\eta}_k) \dot{\ell}(W_i,
\what{\theta}_{\textup{DML}}, \what{\eta}_k)^\top \what{J}_0^{-\top}
~~ \mbox{and} ~~
\what{J}_0 = \frac{1}{n} \sum_{k = 1}^K \sum_{i \in \mc{I}_k}
\dot{\ell}^a(W_i; \what{\eta}_k)
\end{equation*}
then
\begin{equation}
\sqrt{n} \what{\Sigma}^{-\half}
(\what{\theta}_{\textup{DML}} - \theta_\star)
= -\frac{1}{\sqrt{n}} \sum_{i = 1}^n
\Sigma^{-\half} J_0^{-1} \dot{\ell}(W_i, \theta_\star, \eta_\star)
+ O_P(\rho_n) \cd \normal(0, I_d)
\end{equation}
where $\rho_n = n^{-(1 - 2/q)\wedge 1/2} + r_n + r_n'
+ n^{1/2} \lambda_n + n^{1/2} \lambda_n'$. \end{thm}
We now construct a score function for our estimator $\what{\mu}_1^-$ and demonstrate that it satisfies the conditions necessary to apply Theorem~\ref{thm:dml-score}. Let $Y = Y(Z)$ be the observed potential outcome. Define the triple $W_i = (Y_i, X_i, \treatmentrv_i)$ as the $d+2$ dimensional random vector containing all the observed random variables. Similarly let $w = (y, x, \treatment)$ for a fixed variable; we define our score \begin{align}
\label{eq:orthogonal-score}
\dot{\ell}(w, \mu, \eta) \defeq z y + (1-z)\funcparam(x) - \mu
+ z\frac{\hinge{y-\funcparam(x)} - \Gamma\neghinge{y - \funcparam(x)}}{
\nu(x)}\frac{1-e(x)}{e(x)}, \end{align} where the nuisance parameter $\eta \defeq (\funcparam, \nu, e)$. First, it is clear by definition~\eqref{eq:orthogonal-estimator} of $\what{\mu}_1^-$ that it is the root (in $\mu$) of $\sum_{k = 1}^K \sum_{i \in
\mc{I}_k} \dot{\ell}(W_i, \mu, \what{\eta}_{1,k}) = 0$, so that the definitional part of Theorem~\ref{thm:dml-score} holds. Additionally, by construction $\what{\eta}_{1,k}$ is estimated based on $[n] \setminus \mc{I}_k$.
We now turn to verifying that Assumptions~\ref{assumption:dml-score} and \ref{assumption:dml-nuisance} hold for the estimator $\what{\mu}_1^-$. To that end, define the space \begin{equation}
\label{eqn:funcparam-space}
\mc{T} \defeq \{ \eta = (\theta, \nu, e) \mid
\eta~\text{measurable},~1 \le \nu(x) \le \Gamma,
\epsilon \le
e(x) \le 1-\epsilon ~ \mbox{for~} x \in \mc{X}\}, \end{equation} so that $\eta_1 \defeq (\theta_1, \nu_1, e_1) \in \mathcal{T}$ (recall that $\theta_1(x) = \E[Y(1) \mid X = x, Z = 1]$, $e_1(x) = \P(Z = 1 \mid X = x)$, and $\nu_1(x) = P(Y(1) \ge \theta_1(x) \mid Z = 1, X= x) + \Gamma P(Y(1) < \theta_1(x) \mid Z = 1, X=x)$). We verify the assumptions in Sections~\ref{sec:verify-dml-score} and \ref{sec:verify-dml-nuisance}, respectively. Once we verify these, the proof of Theorem~\ref{thm:semiparametric} is complete, as the confidence interval statements are immediate.
\subsubsection{Verifying Assumption~\ref{assumption:dml-score}} \label{sec:verify-dml-score}
By construction, the score satisfies \begin{align*}
\E[\dot{\ell}(W, \mulower, \eta_1)] = & \E[ZY(1) + (1-Z)\funcparam_1(X)] - \mu_1^- \\
& + \E\left[\frac{\E[Z(\hinges{Y(1) - \funcparam_1(X)}-
\Gamma \neghinges{Y(1) - \funcparam_1(X)}) \mid X]}{\nu_1(X)}
\frac{e_0(X)}{e_1(X)}\right] = 0 \end{align*} because $\E[Z(\hinges{Y(1) - \funcparam_1(X)} - \Gamma \neghinges{Y(1) -
\funcparam_1(X)})\mid X] = \E[Z \mid X] \E[\psi_{\funcparam_1(X)}(Y(1)) \mid X = x, Z = 1] = 0$ almost everywhere by Lemma~\ref{lemma:opt-is-good}, $\nu_1(x) \ge 1$, and $e_1(x) > \epsilon$. Thus Assumption \ref{assumption:dml-score}\eqref{item:mean-true-param} holds. The score~\eqref{eq:orthogonal-score} is linear in $\mu$, so Assumption~\ref{assumption:dml-score}\eqref{item:linearization-score} holds with $\dot{\ell}^a\equiv - 1$.
We turn to verifying the twice Gateaux differentiability with respect to $\funcparam$ in Assumption~\ref{assumption:dml-score}\eqref{item:twice-gateaux}. Because
$\psi_t(Y(1)) \le \Gamma Y(1)$, and $\E[|Y(1)|^q] < \infty$, dominated convergence will hold for all interchanges of $\E$ and differentiation in what follows. First note that $\nu(x)$ and $e(x)$ are bounded below and above by definition~\eqref{eqn:funcparam-space} of $\mc{T}$. Therefore, it suffices to prove that for an arbitrary measurable function $g$ with values in $[-\epsilon^{-1}, \epsilon^{-1}]$, \begin{equation*}
\funcparam \mapsto \left[
\E\left[ g(x)Z\psi_{\theta(x)}(Y(1)) \mid X=x \right]\right]_{x \in \mc{X}} \end{equation*}
is twice Gateaux differentiable for $\funcparam \in \mc{T}$. When $Y(1) \mid Z=1, X=x$ has a density $p_{Y(1)}(y \mid X=x, Z=1)$ for almost every $x$, so that $P(Y(1)=y \mid X=x, Z=1)=0$ for every $y$, \begin{equation}
\label{eqn:first-psi-derivative}
\frac{\dif}{\dif{t}} \E[ Z\psi_t(Y) \mid Z=1, X=x] = -P_1(Y(1) > t \mid X=x) - \Gamma P_1(Y(1) < t \mid X=x), \end{equation} and so \begin{align*}
\frac{\dif{}^2}{\dif{t}^2} \E[ Z\psi_t(Y) \mid Z=1, X=x ]
& =
(1-\Gamma)p_{Y(1)}(t \mid X=x,Z=1). \end{align*} Coupled with the boundedness of $g(\cdot)$, this implies twice Gateaux differentiability, satisfying Assumption~\ref{assumption:dml-score}\eqref{item:twice-gateaux} for $\theta_1(x)$. As $(\nu, e) \in \mc{T}$ are uniformly bounded, twice Gateaux differentiability with respect to the remaining terms is immediate by~\citet[Proof of Thms.~5.1 \& 5.2, step 1]{chernozhukov2018double}.
Next, we verify Neyman orthogonality, Assumption~\ref{assumption:dml-score}\eqref{item:neyman-orthogonality}.
Note that if $y \neq \theta_1(x)$, then \begin{align}
\lefteqn{\left.\frac{\partial}{\partial r}
\dot{\ell}(w, \mu, \eta_1 + r(\eta - \eta_1))\right|_{r = 0}} \nonumber \\
& = (1 - z) (\theta(x) - \theta_1(x))
- z \frac{\indic{y > \theta_1(x)}
+ \Gamma \indic{y < \theta_1(x)}}{\nu_1(x)}
\cdot (\theta(x) - \theta_1(x))
\cdot \frac{1 - e_1(x)}{e_1(x)} \nonumber \\
& \qquad + z \frac{\psi_{\theta_1(x)}(y)}{
\nu_1(x)^2 e_1(x)^2}(1 - e_1(x))
\left.\frac{\partial}{\partial r}
(\nu_1(x) + r (\nu(x) - \nu_1(x)))
(e_1(x) + r (e(x) - e_1(x)))\right|_{r = 0} \nonumber \\
& \qquad ~ - z \frac{\psi_{\theta_1(x)}(y)}{\nu_1(x) e_1(x)}
(e(x) - e_1(x)) \nonumber \\
& = (1 - z) (\theta(x) - \theta_1(x))
- z \frac{\indic{y > \theta_1(x)}
+ \Gamma \indic{y < \theta_1(x)}}{\nu_1(x)}
\cdot (\theta(x) - \theta_1(x))
\cdot \frac{1 - e_1(x)}{e_1(x)} \nonumber \\
& \qquad + z \frac{\psi_{\theta_1(x)}(y)}{
\nu_1(x)^2 e_1(x)^2}(1 - e_1(x))
\left[e_1(x) (\nu(x) - \nu_1(x))
+ \nu_1(x) (e(x) - e_1(x))\right]
\label{eqn:big-gateaux-expansion}
\\
& \qquad ~ - z \frac{\psi_{\theta_1(x)}(y)}{\nu_1(x) e_1(x)}
(e(x) - e_1(x)).
\nonumber \end{align} As $\nu, e \in \mc{T}$ are uniformly bounded, $\psi_t(\cdot)$ is Lipschitz, and $Y(1)$ has $q > 2$ moments, the dominated convergence theorem implies \begin{equation*}
\left.\frac{\partial}{\partial r}
\E[\dot{\ell}(W, \mu, \eta_1 + r (\eta - \eta_1))]\right|_{r = 0}
= \E\left[\left.\frac{\partial}{\partial r}
\dot{\ell}(W, \mu, \eta_1 + r (\eta - \eta_1))\right|_{r = 0}
\right]. \end{equation*} Now, note that by definition~\eqref{eqn:weight-normalization-def-a}, we have \begin{align*}
\lefteqn{\E\left[Z \left(\indic{Y(1) > \funcparam_1(X)}
+ \Gamma \indic{Y(1) < \funcparam_1(X)} \right) \mid X \right]} \\
& =
\E\left[Z \left(P(Y(1) > \funcparam_1(X) \mid X, Z = 1)
+ \Gamma P(Y(1) < \funcparam_1(X) \mid X, Z = 1)\right)
\mid X \right]
= \E[Z \mid X] \nu_1(X). \end{align*} Substituting Eq.~\eqref{eqn:big-gateaux-expansion} for the derivative of the score and applying iterated expectations with $\E[\cdot \mid X = x]$, we thus obtain \begin{align*}
\lefteqn{\E\left[\left.\frac{\partial}{\partial r}
\dot{\ell}(W, \mu, \eta_1 + r (\eta - \eta_1))\right|_{r = 0}
\right]} \\
& = \E\left[e_0(X) (\theta(X) - \theta_1(X))\right]
- \E\left[\frac{e_1(X) \nu_1(X) (\theta(X) - \theta_1(X))}{\nu_1(X)}
\frac{e_0(X)}{e_1(X)}\right] \\
& \quad
+ \E\left[\frac{\E[Z \psi_{\funcparam_1(X)}(Y(1)) \mid X]}{
\nu_1(X)^2 e_1(X)^2}
e_0(X)
\left[e_1(X) (\nu(X) - \nu_1(X))
+ \nu_1(X) (e(X) - e_1(X))\right]\right] \\
& \quad
- \E\left[\frac{\E[Z \psi_{\funcparam_1(X)}(Y(1))
\mid X]}{e_1(X) \nu_1(X)} (e(X) - e_1(X))\right] \\
& = \E\left[e_0(X) (\theta(X) - \theta_1(X))\right]
- \E\left[e_0(X) (\theta(X) - \theta_1(X))\right]
= 0, \end{align*} where we have used that $\E[Z \psi_{\funcparam_1(X)}(Y(1)) \mid X] = 0$ with probability 1 by Lemma~\ref{lemma:opt-is-good} and that $Y(1) = \theta_1(X)$ with probability 0.
Finally, Assumption~\ref{assumption:dml-score}\eqref{item:linear-score-matrix} clearly holds, as $J_0 = \dot{\ell}^a = -1$.
\subsubsection{Verifying Assumption~\ref{assumption:dml-nuisance}} \label{sec:verify-dml-nuisance}
Assumption~\ref{assumption:nuisance-est} establishes that
$\what\funcparam$, $\what{\nu}$ and $\what{e}$ satisfy $\|
\what\funcparam(\cdot) - \funcparam_1 (\cdot)\|_{2,P} = o_P(n^{-1/4})$, $\|
\what\nu(\cdot) - \nu_1(\cdot) \|_{2,P} = o_P(n^{-1/4})$, and $\|
\what{e}(\cdot) - e_1(\cdot) \|_{2,P} = o_P(n^{-1/4})$. Therefore, there exists sequences $a_n \to 0$ and $\Delta_n' \to 0$ such that for each $n$, \begin{equation*}
\| \what{\funcparam}(\cdot) - \funcparam_1(\cdot) \|_{2,P} \le a_n n^{-1/4},
~~
\| \what{\nu}(\cdot) - \nu_1(\cdot) \|_{2,P} \le a_n n^{-1/4},
~~ \mbox{and} ~~
\| \what{e}(\cdot) - e_1(\cdot) \|_{2,P} \le a_n n^{-1/4} \end{equation*} with probability $1-\Delta_n'/2$. We may choose $a_n$ so that these hold when $\what{\eta}$ is estimated using only $(1- \frac{1}{K})n$ (as opposed to $n$) samples. Similarly, Assumption~\ref{assumption:nuisance-est} implies that there exists a constant $C_1$ such that \begin{equation*}
\| \what{\funcparam}(\cdot) - \funcparam_1(\cdot) \|_{\infty,P} \le C_1,
~~
\| \what{\nu}(\cdot) - \nu_1(\cdot) \|_{\infty,P} \le C_1,
~~ \mbox{and} ~~
\| \what{e}(\cdot) - e_1(\cdot) \|_{\infty,P} \le C_1 \end{equation*} with probability $1-\Delta_n'/2$. Now, for constants $\constlow$ and $\constup$ to be chosen, we define the relevant set $\mc{T}_n$ by \begin{equation}
\label{eqn:def-Tn}
\begin{split}
\mathcal{T}_n
\defeq
\big\{ \eta = (\funcparam, e, \nu) & :
\norm{\eta - \eta_1}_{2,P} \le a_n n^{-1/4},
~ \norm{\eta - \eta_1}_{\infty,P} \le C_1,
\\
&
~~ \epsilon \le e \le 1 - \epsilon,
~ \constlow \le \nu \le \constup \big\}.
\end{split} \end{equation} By Assumption~\ref{assumption:bounded-variance}, there exists $\Delta_n''$ such that $P(\epsilon \le \inf_x \what{e}(x) \le \sup_x \what{e}(x) \le 1-\epsilon) \ge 1- \Delta_n'' / 2$, that $P(\constlow \le \inf_x \what{\nu}(x) \le \sup_x \what{\nu}(x) \le \constup) \ge 1- \Delta_n''/2$, and $\Delta_n'' \to 0$. Then $P(\what\eta \in \mathcal{T}_n)\ge 1-\Delta_n'/2-\Delta_n'/2-\Delta_n''=1-\Delta_n$, for $\Delta_n = \Delta_n' + \Delta_n''$ by Assumptions~\ref{assumption:bounded-variance} and \ref{assumption:nuisance-est}, so Assumption~\ref{assumption:dml-nuisance}\eqref{item:def-of-tn} holds.
To bound the moments in Assumption~\ref{assumption:dml-nuisance}\eqref{item:moment-conditions}, first we bound the score at the true nuisance parameter $\eta_1$, applying the triangle inequality to bound the difference. By the equality~\eqref{eq:population-theta} defining $\funcparam_1$, for any $x$ and $\delta > 0$, there exists $L^\delta_x(y)$ such that \begin{equation*}
\theta_1(x) \in \left[ \E[L^\delta_x(Y(1)) Y(1) \mid X=x] - \delta,
\E[L^\delta_x(Y(1)) Y(1) \mid X = x]\right] \end{equation*} where $\E[L^\delta_x(Y(1))] = 1$, and $0\le L_x^\delta(y) \le \Gamma L_x^\delta(\wt{y})$ for all $y,\wt{y}$. Together, these imply
$\Gamma^{-1} \le L^\delta_x(y) \le \Gamma$. Therefore, Assumption~\ref{assumption:problem-regularity}(b) and H\"{o}lder's inequality imply that for $C_q = \E[|Y(1)|^q]$, \begin{align}
\nonumber
2^{1 - q}
\E\left[ |\theta_1(X)|^q \right]
& \le \E\left[ \left| \E\left[ L^\delta_X(Y(1)) Y(1) | X\right] \right|^q + \delta^q \right]
\le
\E\left[ \Gamma^q \E\left[ \left|Y(1)\right|^q \mid X\right] + \delta^q \right]
\\
& = \Gamma^q \E\left[ \left|Y(1)\right|^q \right] + \delta^q
= \Gamma^q C_q + \delta^q.
\label{eqn:bound-theta-1-q-moment} \end{align} To bound the moments of the score, we therefore have \begin{align*}
\lefteqn{4^{1 - q} \E\left[|\dot{\ell}(W, \mulower, \eta_1)|^q\right]} \\
& \le
|\mulower|^q
+ \E\left[|ZY(1)|^q + (1-Z)|\funcparam_1(X)|^q +Z\biggm|\frac{\psi_{\funcparam_1(X)}(Y(1))( 1 - e_1(X))}{\nu_1(X) e_1(X)}\biggm|^q \right] \\
&\le
|\mu_1^-|^q + (1-\epsilon)\E_1[|Y(1)|^q] + (1 - \epsilon)\E_0\left[|\funcparam_1(X)|^q\right] + (1-\epsilon) \E_1\left[ |\psi_{\funcparam_1(X)}(Y(1))|^q \right] \\
&\le
|\mu_1^-|^q + (1-\epsilon)\E_1[|Y(1)|^q] + (1 - \epsilon)\E_0\left[|\funcparam_1(X)|^q\right] + (1-\epsilon)2^{q-1} \Gamma^q \left( \E_1\left[ |Y(1)|^q \right] + \E_1\left[ |\funcparam(X)|^q \right] \right) \\
& \le \wt{C}_1
\end{align*} for a finite $\wt{C}_1$. For $\eta \in \mc{T}_n$, so that $\norm{\eta - \eta_1}_{q, P} \le C_1$ by definition~\eqref{eqn:def-Tn}, we may bound the difference \begin{align*}
\lefteqn{2^{1-q}
\E\left[|\dot{\ell}(W, \mulower, \eta_1)-\dot{\ell}(W, \mulower, \eta)|^q\right]} \\
&\le
\E\left[(1-Z)|\funcparam_1(X) - \funcparam(X)|^q +Z\left|\frac{\psi_{\funcparam_1(X)}(Y(1))(1 - e_1(X))}{\nu_1(X) e_1(X)} - \frac{\psi_{\funcparam(X)}(Y(1))(1 - e(X))}{\nu(X) e(X)}\right|^q \right] \\
& \le C_1^q + \E\left[ Z\left| \frac{\psi_{\theta_1(X)}(Y(1))(1 - e_1(X))\nu(X)e(X)-\psi_{\funcparam(X)}(Y(1))(1-e(X))\nu_1(X)e_1(X)}{\nu_1(X)\nu(X)e_1(X)e(X)} \right|^q\right] \\
& \stackrel{(i)}{\le}
C_1^q + \frac{1}{\constlow^2\epsilon^2}
\E\left[Z \left| \psi_{\funcparam_1(X)}(Y(1))(1 - e_1(X))\nu(X) e(X)
- \psi_{\funcparam(X)}(Y(1))(1 - e(X)) \nu_1(X) e_1(X) \right|^q \right] \\
& \stackrel{(ii)}{\le}
C_1^q + \frac{2^{q-1}}{\constlow^2 \epsilon^2}
\E\left[Z \left| \left\{\psi_{\funcparam_1(X)}(Y(1))(1 - e_1(X))
- \psi_{ \funcparam(X)}(Y(1))(1 - e(X))\right\} \nu(X) e(X) \right|^q \right]\\
& \qquad ~ + \frac{2^{q-1}}{\constlow^2 \epsilon^2}
\E\left[Z\bigg| \psi_{\funcparam(X)}(Y(1))(1 - e(X))
\left(\nu(X) e(X) - \nu_1(X) e_1(X)\right) \bigg|^q\right]\\
& \le \wt{C}_2 \end{align*} for some constant $\wt{C}_2$, uniformly over $\eta \in \mc{T}_n$. Above, inequality~$(i)$ follows by the lower bound conditions~\eqref{eqn:def-Tn} on $e, \nu \in \mc{T}_n$, inequality $(ii)$ is convexity and the triangle inequality, and the final inequality follows as
$\sup_x |\nu(x)| < \infty$, $e(\cdot) \subset [\epsilon, 1 - \epsilon]$ for $\nu, e \in \mc{T}_n$, and \begin{align*}
\E[Z|\psi_{\theta(X)}(Y(1))|^q]
& = \E[Z \hinge{Y(1) - \theta(X)}^q]
+ \Gamma^q \E[Z \neghinge{Y(1) - \theta(X)}^q] \\
& \le 2^q \left((1 + \Gamma^q) \E[|Y(1)|^q]
+ (1 + \Gamma^q) \E[|\theta(X)|^q]\right)
\le \wt{C}_3 \end{align*} for a finite $\wt{C}_3$ because $\norm{\theta - \theta_1}_{q,P} \le C_1$ by definition~\eqref{eqn:def-Tn} of $\mc{T}_n$ and inequality~\eqref{eqn:bound-theta-1-q-moment}. Assumption~\ref{assumption:dml-nuisance}\eqref{item:moment-conditions} follows by the triangle inequality.
We turn to verifying Assumption~\ref{assumption:dml-nuisance}\eqref{item:score-rates}. Because $\dot{\ell}^a \equiv -1$, the rate $r_n = 0$. The construction~\eqref{eqn:def-Tn} of $\mathcal{T}_n$ implies that \begin{align*}
\lefteqn{\sup_{\eta \in \mathcal{T}_n} \E\left[ \left\{\dot{\ell}(W, \mulower, \eta) - \dot{\ell}(W, \mulower, \eta_1)\right\}^2 \right]^\half} \\
& \le
\sup_{\eta \in \mathcal{T}_n}
\| \funcparam(\cdot) - \funcparam_1(\cdot) \|_{2,P} +
\frac{\Gamma(1-\epsilon)}{\epsilon} \sup_{\eta \in \mathcal{T}_n} \| \funcparam(\cdot) - \funcparam_1(\cdot) \|_{2,P} \\
& \quad + 2\frac{\Gamma^2(1-\epsilon)}{\epsilon}\left(\|\funcparam(\cdot)\|_{2,P} + \E[Y(1)^2]\right)
\cdot \left(\left\| e(\cdot) - e_1(\cdot) \right\|_{2,P} + \left\| \nu(\cdot) - \nu_1(\cdot) \right\|_{2,P}\right) \end{align*} Therefore, $r_n'\le \tilde{C}_4 a_n n^{-1/4}$ for a constant $\tilde{C}_4 < \infty$. Bounding the second derivative moments $\lambda_n'$ is more involved; the next lemma controls this term. We defer the proof to section~\ref{sec:proof-second-deriv-rate}.
\begin{restatable}{lemma}{lemsecondderiv}
\label{lem:second-deriv-rate}
Assume the conditions of Theorem~\ref{thm:semiparametric}. Let
$\mc{T}_n$ be as defined~\eqref{eqn:def-Tn}.
Then
\begin{equation}
\sup_{r \in (0,1),\eta \in \mathcal{T}_n} \left| \frac{d^2}{dr^2} \E\left[ \dot{\ell}(W, \mulower, \eta_1 + r(\eta - \eta_1)) \right] \right| \le C a_n^2 n^{-1/2}.
\label{eq:rate-cond} \end{equation} \end{restatable}
\noindent In summary, $\lambda_n' \le C a_n^2/\sqrt{n}$. Let $\delta_n = \max\{\tilde{C}_4 a_n, C a_n^2, n^{-1/2}\}$. The sequences $\{a_n\}$, $\{a_n^2\}$, and $\{n^{-1/2}\}$ all converge to 0, and $\delta_n$ satisfies the conditions of Assumption~\ref{assumption:dml-nuisance}. Assumption \ref{assumption:dml-nuisance}\eqref{item:score-rates} follows.
For Assumption~\ref{assumption:dml-nuisance}\eqref{item:score-variance}, note that \begin{align*}
\E\left[ \dot{\ell}(W, \mulower, \eta_1)^2 \mid Z=1, X=x \right] &= \var\left[ Y(1) + \frac{\psi_{\funcparam_1(X)}(Y(1))}{\nu_1(X)}\frac{1-e_1(X)}{e_1(X)} \biggm| Z=1, X=x \right] \\
&\ge \var(Y(1)\mid Z=1, X=x), \end{align*} because $\cov(Y(1), \psi_{\funcparam_1(X)}(Y(1))) \ge 0$. Taking expectations over $X$ completes the proof.
\subsubsection{Proof of Lemma~\ref{lem:second-deriv-rate}} \label{sec:proof-second-deriv-rate}
\lemsecondderiv* \begin{proof}
For notational convenience, and with some abuse,
throughout this proof for any function
$f : \mc{X} \times \R \to \R$ of $x \in \mc{X}$ and a scalar
$r \in \R$, we let $f'(x, r) = \frac{\partial}{\partial r} f(x, r)$
and $f''(x, r) = \frac{\partial^2}{\partial r^2} f(x, r)$.
Let
\begin{equation}
\label{eqn:define-shitty-h}
h(x,r) = \E\left[ Z \psi_{\funcparam_1(X) + r\left\{\funcparam(X) -
\funcparam_1(X)\right\}}(Y(1)) \mid X=x \right].
\end{equation}
Note that $\sup_{r \in (0, 1)} h(x, r)$ is integrable because
$t \mapsto \psi_t(Y(1))$ is Lipschitz and $\funcparam \in \mc{T}_n$.
Differentiate once to get
\begin{align*}
h'(x, r)
&= \left\{\funcparam(x) - \funcparam_1(x)\right\}\frac{d}{dt} \E\left[ Z \psi_t(Y(1)) \mid X=x \right] \bigg|_{t = \funcparam_1(x)+r\left\{\funcparam(x) - \funcparam_1(x)\right\}} \\
&= -\left\{\funcparam(x) - \funcparam_1(x)\right\} \left[1 + (\Gamma-1)P_1\left(Y(1) < \funcparam_1(x)+r\left\{\funcparam(x) - \funcparam_1(x)\right\} \mid X=x\right) \right] e_1(x),
\end{align*}
and again to get
\begin{align*}
h''(x, r)
&=
-(\Gamma-1)e_1(x) \left\{\funcparam(x) - \funcparam_1(x)\right\}^2 p_{Y(1)}(\funcparam_1(x)+r\{\funcparam(x) - \funcparam_1(x)\} \mid Z=1, X=x).
\end{align*}
Assumption~\ref{assumption:problem-regularity}
(or the relaxation~\eqref{eqn:condition-replacing-density})
guarantee that
the density term $p_{Y(1)}$ above is uniformly bounded.
Additionally, because $\theta \in \mc{T}_n$, we may use the Lipschitz
continuity of $t \mapsto \psi_t(Y(1))$ to obtain
\begin{equation}
\label{eqn:get-h-bounded-ish}
h(x, r) \in h(x, 0)
\pm \Gamma r |\theta(x) - \theta_1(x)|,
\end{equation}
where we have used that $h(x, 0) = 0$ for almost all $x$ (recall
Lemma~\ref{lemma:opt-is-good}).
Let $f(x, r) = h(x, r)\left\{1 - e_1(x) - r(e(x) - e_1(x))\right\}.$ Then
\begin{equation}
\label{eqn:fp-calc}
f'(x,r) = \left[1 - e_1(x) - r\{e(x) - e_1(x)\}\right]
h'(x,r) - \{e(x) - e_1(x)\} h(x,r),
\end{equation}
and
\begin{align*}
f''(x, r)
&= -2\{e(x) - e_1(x)\} h'(x,r)
+ \left[1 - e_1(x) - r\{e(x) - e_1(x)\}\right]
h''(x,r).
\end{align*}
Because $e(x) \in (\epsilon, 1-\epsilon)$ by definition~\eqref{eqn:def-Tn}
of $\mc{T}_n$, we have
\begin{equation}
\label{eqn:get-fp-bounded}
\left|f'(x,r)\right| \le (1-\epsilon)\Gamma
|\funcparam(x) - \funcparam_1(x)| + |e(x) - e_1(x)| |h(x,r)|
\le \Gamma |\funcparam(x) - \funcparam_1(x)|(1 + |e(x) - e_1(x)|)
\end{equation}
by inequality~\eqref{eqn:get-h-bounded-ish},
and
\begin{align*}
\left|f''(x,r)\right|
& \le 2\Gamma|e(x) - e_1(x)||\funcparam(x) - \funcparam_1(x)| \\
& \quad ~
+ (1-\epsilon)(\Gamma-1) \left(\funcparam(x) - \funcparam_1(x)\right)^2 p_{Y(1)}(\funcparam_1(x)+r(\funcparam(x) - \funcparam_1(x)) \mid Z=1, X=x).
\end{align*}
As we note above, there is $R < \infty$ such
that $\sup_t p_{Y(1)}(t \mid Z = 1, X = x) \le R$,
giving the bound
\begin{align}
\left| f''(x,r)\right|
& \le 2\Gamma|e(x) - e_1(x)||\funcparam(x) - \funcparam_1(x)| +
\Gamma R \left\{\funcparam(x) - \funcparam_1(x)\right\}^2.
\label{eqn:uniform-bound-on-fpp}
\end{align}
Now, let
\begin{equation*}
g(x, r) = \left[\nu_1(x) + r\{\nu(x) - \nu_1(x)\} \right]\left[e_1(x) + r\{e(x) - e_1(x)\} \right],
\end{equation*}
where we recall that
$\sup_x |\nu(x) - \nu_1(x)| < \infty$ and
$|e(x) - e_1(x)| \le 2 - 2 \epsilon$ by definition~\eqref{eqn:def-Tn}
of $\mc{T}_n$.
Differentiating, we have
\begin{align}
\nonumber g'(x, r)
& =
\{\nu(x) - \nu_1(x)\} \left[e_1(x) + r\{e(x) - e_1(x)\} \right]
+
\left[\nu_1(x) + r\{\nu(x) - \nu_1(x)\} \right]\{e(x) - e_1(x)\} \\
g''(x, r) & = 2\{\nu(x) - \nu_1(x)\}\{e(x) - e_1(x)\}.
\label{eqn:gp-gpp}
\end{align}
Therefore,
\begin{align}
\label{eqn:gp-super-bound}
\sup_{x \in \mc{X}, r \in (0, 1)}
|g'(x, r)|
& \le \sup_{x \in \mc{X}} \left\{3|\nu(x) - \nu_1(x)| +
3(\Gamma+1) |e(x) - e_1(x)|\right\}
\le C
\end{align}
where $C < \infty$ by the boundedness conditions on $\nu$ and $e$
that $\mc{T}_n$ guarantees~\eqref{eqn:def-Tn}.
The following lemma abstracts the technical challenge in bounding the
second derivatives via dominated convergence. We
defer its proof temporarily to
Sec.~\ref{sec:proof-second-deriv}.
\begin{restatable}{lemma}{lemquotient}
\label{lem:second-deriv}
Let $f, g$ be as above.
Then
$\frac{\partial^2}{\partial r^2} \E[\frac{f(X, r)}{g(X, r)}]
= \E[\frac{\partial^2}{\partial r^2} \frac{f(X, r)}{g(X, r)}]$ and
there exists a finite $C < \infty$ such that for all $r \in (0, 1)$,
\begin{equation*}
\left|\frac{\partial^2}{\partial r^2} \E\left[
\frac{f(X,r)}{g(X,r)} \right] \right|
\le C\E\left[ \left| f''(X, r)\right| +
|f(X, r)|\left( \left|g''(X, r)\right|
+ g'(X, r)^2 \right) + \left| f'(X, r) g'(X, r)\right|
\right].
\end{equation*}
\end{restatable}
As a consequence of Lemma~\ref{lem:second-deriv} and
Young's inequality that $ab \le \half a^2 + \half b^2$,
we apply (variously) inequality~\eqref{eqn:uniform-bound-on-fpp}
to $f''$,
inequality~\eqref{eqn:get-h-bounded-ish}
to obtain $|f(x, r)| \le \Gamma |\theta(x) - \theta_1(x)|
\le \Gamma C_1$ by definition~\eqref{eqn:def-Tn} of $\mc{T}_n$,
Eq.~\eqref{eqn:gp-gpp} to get
$|g''(x, r)| \le (\nu(x) - \nu_1(x))^2 + (e(x) - e_1(x))^2$,
again Eq.~\eqref{eqn:gp-gpp}
to get $|g'(x, r)|
\le C |\nu(x) - \nu_1(x)|
+ C|e(x) - e_1(x)|$, and
inequality~\eqref{eqn:get-fp-bounded} to bound
$|f'(x, r)| \le C \Gamma |\funcparam(x) - \funcparam_1(x)|$,
yielding
\begin{align*}
\left|\frac{\partial^2}{\partial r^2} \E\left[\frac{f(X, r)}{g(X, r)}
\right]\right|
& \le C \E\left[
|e(X) - e_1(X)|^2 + |\funcparam(X) - \funcparam_1(X)|^2
+ |\nu(X) - \nu_1(X)|^2\right].
\end{align*}
By construction~\eqref{eqn:def-Tn} of $\mc{T}_n$,
$\norm{\eta - \eta_1}_{2,P} \le a_n n^{-1/4}$,
so that noting
that
$\frac{\partial^2}{\partial r^2}
\E[\dot{\ell}(W, \mu_1^-, \eta_1 + r(\eta - \eta_1))]
= \frac{\partial^2}{\partial r^2}
\E[\frac{f(X, r)}{g(X, r)}]$
gives the result.
\end{proof}
\subsubsection{Proof of Lemma~\ref{lem:second-deriv}} \label{sec:proof-second-deriv}
By applying the quotient rule, we have \begin{align*}
\frac{\partial^2}{\partial r^2}
\frac{f(x, r)}{g(x, r)}
& = \frac{f''(x, r)}{g(x, r)}
- 2 \frac{f'(x, r) g'(x, r)}{g(x, r)^2}
- \frac{f(x, r) g''(x, r)}{g(x, r)^2}
+ \frac{2 f(x, r) (g'(x, r))^2}{g(x, r)^3}. \end{align*} If we can exhibit a function $R(x)$ such that $R(x) \ge
|\frac{\partial^2}{\partial r^2} \frac{f(x, r)}{g(x, r)}|$ for all $r \in (0,1)$ with $\E[R(X)] < \infty$, this is sufficient for interchanging expectation and differentiation via the dominated convergence theorem.
Because $g(x, r) = (\nu_1(x) + r(\nu(x) - \nu_1(x))) (e_1(x) + r(e(x) - e_1(x)))$, the definition~\eqref{eqn:def-Tn} of $\mc{T}_n$ guarantees that $\inf_{x,r\in[0,1]} g(x, r) > 0$. Therefore, for some $C < \infty$ whose value may change from line to line but which is independent of $x$ and $r$ for $r\in(0,1)$, \begin{align*}
\left|\frac{\partial^2}{\partial r^2}
\frac{f(x, r)}{g(x, r)}\right|
\le C \left[
|f''(x, r)| + |f'(x, r)| |g'(x, r)|
+ |f(x, r) g''(x, r)| + |f(x, r)| (g'(x, r))^2\right]. \end{align*} We bound each of the terms in the inequality in turn.
Assume $r \in (0,1)$. Inequality~\eqref{eqn:uniform-bound-on-fpp} guarantees the existence of an integrable $R_1$ such that $R_1(x) \ge |f''(x, r)|$. For the $f' g'$ term, we have $|f'(x, r)| \le C (|h(x, r)| + |h'(x, r)|)$ for some $C < \infty$ by Eq.~\eqref{eqn:fp-calc}, while Eq.~\eqref{eqn:gp-super-bound} gives $\linf{g'} \le C$. Thus, as
$\funcparam$ is integrable by assumption~\eqref{eqn:def-Tn}, there is some integrable $R_2$ such that $R_2(x) \ge |f'(x, r)| |g'(x, r)|$. Now, we consider the term $|f(x, r) g''(x, r)|$. By Eq.~\eqref{eqn:gp-gpp} we have $|g''(x, r)| \le C$ because $\nu, e \in \mc{T}_n$. By the discussion after the definition~\eqref{eqn:define-shitty-h} of $h$, there exists integrable $R_3$
such that $R_3(x) \ge |f(x, r)| C \ge |f(x, r)| |g''(x, r)|$. Finally, we consider the final term. As $\linf{g'} \le C$, we may again use the function $R_3$, and so we have \begin{equation*}
\left|\frac{\partial^2}{\partial r^2}
\frac{f(x, r)}{g(x, r)}\right|
\le C (R_1(x) + R_2(x) + R_3(x)), \end{equation*} which is integrable. This gives the lemma.
\subsection{Design sensitivity proofs} \label{sec:proof-design-sensitivity}
In this section, we prove Proposition~\ref{prop:design-sensitivity-model}. We begin with a technical lemma, returning to prove the proposition in Sec.~\ref{sec:proof-design-sensitivity-model}. \begin{restatable}{lemma}{lemmamonotonects}
\label{lem:monotone-continuous}
Let $\theta_1^\Gamma(x)$ be the optimum~\eqref{eq:population-theta}
for a fixed $\Gamma \ge 1$. If $\theta_1^\Gamma(x)$ is finite
for some $\Gamma$, then $\Gamma \mapsto \theta_1^\Gamma(x)$ is
continuous and monotone decreasing. \end{restatable} \begin{proof}
To check that $\Gamma \mapsto \theta_1^\Gamma(x)$ is strictly monotone, we
use the choice of $L$ that attains the minimum in
equation~\eqref{eq:best-lr} to write
\begin{equation}
\label{eqn:get-theta-via-infimum}
\theta_1^\Gamma(x) =
\inf_\mu \E \left[\frac{\ind{Y(1) \ge \mu} + \Gamma\ind{Y(1) < \mu}}{
C_\Gamma(\mu)} Y(1) \mid Z=1, X=x \right],
\end{equation}
where $C_\Gamma(\mu) = P(Y(1) \ge \mu \mid Z = 1, X = x) + \Gamma P(Y(1) < \mu
\mid Z = 1, X = x)$ normalizes $\ind{Y(1) \ge \mu} + \Gamma\ind{Y(1) < \mu}$ so
that it is a valid likelihood ratio. Lemma~\ref{lemma:duality} implies
that $\theta_1^\Gamma(x)$ itself achieves the
infimum~\eqref{eqn:get-theta-via-infimum}. Then, for $\widetilde{\Gamma} >
\Gamma$, if $\var(Y(1) \mid X = x) > 0$, then
\begin{align}
\lefteqn{\theta_1^{\widetilde{\Gamma}}(x) - \theta_1^{\Gamma}(x)} \nonumber \\
& = \inf_\mu \E \left[ \frac{\ind{Y(1) \ge \mu}
+ \widetilde{\Gamma}\ind{Y(1) < \mu}}{C_{\wt{\Gamma}}(\mu)}
Y(1) \mid Z=1, X=x \right]
- \theta_1^{\Gamma}(x) \nonumber \\
&\le \E \left[ \frac{\ind{Y(1) \ge \theta_1^{\Gamma}(x)}
+ \widetilde{\Gamma}
\ind{Y(1) < \theta_1^{\Gamma}(x)}}{C_{\wt{\Gamma}}(\theta_1^{\Gamma}(x))}
(Y(1)-\theta_1^{\Gamma}(x)) \mid Z=1, X=x \right]
\label{eq:before-weight-change} \\
& < \E \left[ \frac{\ind{Y(1) \ge \theta_1^{\Gamma}(x)}
+ \Gamma\ind{Y(1) < \theta_1^{\Gamma}(x)}}{
C_{\wt{\Gamma}}(\theta_1^{\Gamma}(x))}
(Y(1)-\theta_1^{\Gamma}(x)) \mid Z=1, X=x \right]
\label{eq:after-weight-change} \\
& = 0. \nonumber
\end{align}
The strict inequality~\eqref{eq:after-weight-change} follows by
considering the signs of $Y(1) - \theta_1^\Gamma(x)$ in
expression~\eqref{eq:before-weight-change}, that $\wt{\Gamma} > \Gamma$,
and that $\var(Y(1) \mid X = x) > 0$. The final equality is simply the
definition of $\theta_1^\Gamma$ via the
expectation~\eqref{eq:population-theta}.
The function $t \mapsto f_\Gamma(t) \defeq \E[(Y(1) - t)_+ - \Gamma(Y(1) - t)_-
\mid Z=1, X=x]$ is strictly monotone with slope $ \le -1$. Therefore,
for $1 \le \Gamma \le \widetilde{\Gamma} < \infty$, using that
$f_\Gamma(\theta_1^{\Gamma}) = 0$ and
$f_{\widetilde{\Gamma}}(\theta_1^{\widetilde{\Gamma}}) = 0$,
\begin{align*}
|\theta_1^{\Gamma}(x) - \theta_1^{\widetilde{\Gamma}}(x)| &\le f_{\widetilde{\Gamma}}(\theta_1^\Gamma(x)) - f_{\widetilde{\Gamma}}(\theta_1^{\widetilde{\Gamma}}(x))
= f_{\widetilde{\Gamma}}(\theta_1^{\Gamma}(x)) - f_{\Gamma}(\theta_1^{\Gamma}(x))
\le (\widetilde{\Gamma} - \Gamma)\E\left[ (Y(1) - \theta_1^{\Gamma}(x))_- \mid Z=1, X=x \right].
\end{align*}
When $\theta^{\Gamma}_1(x)$ is finite, this implies $\Gamma \mapsto
\theta_1^\Gamma(x)$ is continuous. \end{proof}
\subsubsection{Proof of Proposition~\ref{prop:design-sensitivity-model}} \label{sec:proof-design-sensitivity-model}
\thmdesignsensitivity* \begin{proof}
That $\tau^-$ takes the form claimed in the proposition is a consequence
of Eq.~\eqref{eqn:get-theta-via-infimum}. Let us consider the power of
the tests $\psi_n^\Gamma$. As $n \to \infty$, we have
$\what{\tau}^--z_{1-\alpha}\what{\sigma}_{\tau^-}/\sqrt{n} \cp \tau^{-}$,
so that $\lim_{n \to \infty} Q(\psi_n^\Gamma = 0) = 0$ if $\tau^- > 0$,
and otherwise $Q(\psi^\Gamma_n = 0) \to 1$. By
Lemma~\ref{lem:monotone-continuous}, $\tau^-(\Gamma)$ is strictly
decreasing in $\Gamma$, so the design sensitivity $\Gamma_{\design}$ for
this test is the choice of $\Gamma$ such that $\tau^-(\Gamma) = 0$. If
this equation has no roots, then no choice of $\Gamma$ makes $\tau^-(\Gamma)$
negative, so we set $\Gamma_{\design}=\infty$. \end{proof}
\subsubsection{Proof of Corollary~\ref{cor:designsensitivity}} \label{sec:proof-gaussian-design-sensitivity}
\cordesignsensitivity* \begin{proof}
For notational convenience, we use $\tilde{\Gamma} \equiv
\Gamma_\design$ in the proof.
Proposition~\ref{prop:design-sensitivity-model} shows that the design
sensitivity $\tilde{\Gamma}$ satisfies
\begin{align*}
0 & = \E_Q[Y(1)] + \theta_1 - \E_Q[Y(0)] - \theta_0
\stackrel{(i)}{=} \tau + 2 \theta_1,
\end{align*}
for $\theta_1$ solving $\E_Q[\hinge{Y(1) - \theta_1} - \tilde{\Gamma}
\neghinge{Y(1) - \theta_1}] = 0$ (recall
Lemmas~\ref{lemma:duality} and~\ref{lemma:opt-is-good})
where equality~$(i)$
uses that $Y(1)
\overset{d}= -Y(0)$ under $Q$. The design sensitivity
$\tilde{\Gamma}$ thus solves
\begin{equation*}
\E_Q\left[\hinge{Y(1) + \frac{\tau}{2}} - \tilde{\Gamma}
\neghinge{Y(1) + \frac{\tau}{2}} \right] = 0.
\end{equation*}
Substituting the density of $Y(1)$ under $Q$ gives
\begin{align*}
0 & = \int_{-\infty}^\infty
\left(\indic{y \ge -\tau/2} + \tilde{\Gamma} \indic{y < -\tau/2}\right)
\frac{y+\tau/2}{\sqrt{2 \pi \sigma^2}}
\exp\left(-\frac{1}{2 \sigma^2}(y - \tau/2)^2\right)\dif{y}\\
&= \int_{-\infty}^\infty
\left(\indic{t \ge 0} + \tilde{\Gamma} \indic{t < 0}\right)
\frac{t}{\sqrt{2 \pi \sigma^2}}
\exp\left(-\frac{1}{2 \sigma^2}(t - \tau)^2\right) \dif{t}
\end{align*}
by a change of variables.
This immediately implies that
\begin{equation*}
\tilde{\Gamma} \int_{-\infty}^0 t \exp\left(-\frac{1}{2 \sigma^2}
(t - \tau)^2 \right) \dif{t}
= -\int_0^\infty t \exp\left(-\frac{1}{2 \sigma^2} (t - \tau)^2\right) \dif{t},
\end{equation*}
which gives the first equality in the corollary.
The second equality is just a change of variables and computation of
the integral. \end{proof}
\subsection{Proof of Proposition~\ref{prop:opt-design-sensitivity}} \label{sec:proof-opt-design-sensitivity}
\optdesignsensitivity* \begin{proof}
We assume for simplicity that $\sigma = 1$; replacing $\tau$ by
$\tau/\sigma$ gives an equivalent problem and does not
change the quantity~\eqref{eq:design-sensitivity-gaussian}.
We show that for $\Gamma \ge \Gamma_\design^\gauss$, there
exists $P \in H_0(\Gamma)$ such that $\tvnorms{P_{Y(Z), Z} - Q_{Y(Z), Z}} = 0$,
where the notation indicates that we only observe pairs $Y(Z), Z$.
Our choice of $P$ will be independent of the sample size $n$, so
that it gives both asymptotic and finite sample results.
We proceed in three steps: (1) we construct $P$, (2) we verify the constructed $P$ belongs
to $H_0(\Gamma)$, and (3) we show the variation distance is zero.
\paragraph{Step 1: Constructing $P$}
We construct $P$ using the distribution $Q$, augmenting with an unobserved
confounding variable $U$ to have the Markov structure
\begin{equation*}
Z \longleftarrow U \longrightarrow (Y(1), Y(0)).
\end{equation*}
Let
\begin{equation*}
t^* = t^*(\Gamma)
\defeq \argmin_t \E_Q\left[\frac{\ind{Y(1)\ge t}
+ \Gamma\ind{Y(1) < t}}{Q(Y(1)\ge t) + \Gamma Q(Y(1) < t)}Y(1)\right],
\end{equation*}
where Lemma~\ref{lemma:duality} shows that $t^\ast=\theta_1$ attains this
minimum. Denote the densities of $Y(1)$ and $Y(0)$ under $Q$ by $q_1$ and
$q_0$, respectively, where $q_0(t) = q_1(-t)$.
Under $P$, let $Y(1)$ have density
\begin{subequations}
\label{eqn:lr-treated-p}
\begin{equation}
\label{eq:lr-treated-p1}
p_1(t) \propto \left( \ind{t > t^\ast} + \sqrt{\Gamma} \ind{t \le t^\ast}\right) q_1(t),
\end{equation}
and set $Y(0) = -Y(1)$, so that marginally $Y(0)$ has density
\begin{equation}
p_0(t) \propto \left( \ind{t < -t^\ast} + \sqrt{\Gamma} \ind{t \ge -t^\ast}\right) q_0(t).
\label{eq:lr-treated-p0}
\end{equation}
\end{subequations}
Define the unobserved confounding variable
\begin{equation*}
U = \ind{Y(1) > t^\ast} = \ind{Y(0) < -t^\ast}
\end{equation*}
and let $Z$ be a random variable based on the conditional probabilities
\begin{equation*}
P(Z = z \mid U = u) = \begin{cases}
\sqrt{\Gamma} / (1 + \sqrt{\Gamma}) & \mbox{if~} z = u \\
1 / (1 + \sqrt{\Gamma}) & \mbox{if~} z = 1 - u,
\end{cases}
\end{equation*}
so $Z$ is independent of $Y(1)$ and $Y(0)$ conditional on $U$.
Define $a$ by the marginal distribution of $Z$ under this conditional
distribution,
\begin{align*}
a \defeq P(Z=1)&=P(Z=1\mid U=1)P(U=1)+P(Z=1 \mid U=0)P(U=0)
\in \left[\frac{1}{1 + \sqrt{\Gamma}}, \frac{\Gamma}{1 + \sqrt{\Gamma}}
\right].
\end{align*}
\paragraph{Step 2: Verifying that $P \in H_0(\Gamma)$}
The $\Gamma$-\cornfield{} condition~\eqref{eq:cornfield} holds, as
\begin{align*}
\frac{P(Z = 1 \mid U = 1)}{P(Z = 0 \mid U = 1)}\frac{P(Z = 0 \mid U = 0)}{P(Z = 1 \mid U = 0)} &= \frac{\frac{\sqrt{\Gamma}}{1 + \sqrt{\Gamma}}}{\frac{1}{1 + \sqrt{\Gamma}}}\frac{\frac{\sqrt{\Gamma}}{1 + \sqrt{\Gamma}}}{\frac{1}{1 + \sqrt{\Gamma}}} = \Gamma.
\end{align*}
Therefore, $P \in H_0(\Gamma)$ if $\E_P[Y(1) - Y(0)] \le 0$. To verify
this condition, we first calculate the conditional likelihood ratios using
Bayes rule:
\begin{align*}
\frac{p_{Y(1) | Z=1}(t)}{p_{Y(1)}(t)} &= \frac{P(Z=1 \mid Y(1) = t)}{P(Z=1)}
\\
&=
\frac{P(Z=1 \mid U = 1)P(U=1 \mid Y(1) = t) + P(Z=1 \mid U = 0)P(U=0 \mid Y(1) = t)}{P(Z=1)}
\\
&\propto
\sqrt{\Gamma}\ind{t > t^\ast} + \ind{t \le t^\ast},
\end{align*}
and similarly
$$
\frac{p_{Y(0) | Z=0}(t)}{p_{Y(0)}(t)} = \frac{P(Z=0 \mid Y(0) = t)}{P(Z=0)}\propto
\ind{t < -t^\ast} + \sqrt{\Gamma}\ind{t \ge -t^\ast}.
$$
Thus, the definitions~\eqref{eqn:lr-treated-p} of
$p_z(t)$ yield
\begin{align}
\label{eq:equal-marginal-y-1}
\frac{p_{Y(1) | Z=1}(t)}{q_1(t)} &= \frac{p_{Y(1) | Z=1}(t)}{p_{Y(1)}(t)} \frac{p_{Y(1)}(t)}{q_1(t)} = 1, \\
\frac{p_{Y(0) | Z=0}(t)}{q_0(t)} &= 1,
\label{eq:equal-marginal-y-0}
\end{align}
and
\begin{align*}
\frac{p_{Y(1) | Z=0}(t)}{q_1(t)} &\propto \ind{t > t^\ast} + \Gamma \ind{t \le t^\ast} \\
\frac{p_{Y(0) | Z=1}(t)}{q_0(t)} &\propto \ind{t < -t^\ast} + \Gamma \ind{t \ge -t^\ast}.
\end{align*}
These imply that
\begin{align*}
\E_P\left[Y(1) - Y(0) \right] &= a\E_P\left[Y(1) - Y(0) \mid Z=1 \right] + (1-a)\E_P\left[Y(1) - Y(0) \mid Z=0 \right]
\\
& \stackrel{(i)}{=}
\frac{\tau}{2} + \E_Q\left[\frac{\ind{Y(1) \ge t^\ast} + \Gamma \ind{Y(1) < t^\ast}}{Q(Y(1) \ge t^\ast) + \Gamma Q(Y(1) < t^\ast)}Y(1) \right] \\
& \stackrel{(ii)}{\le}
\frac{\tau}{2} + \frac{1}{C_0} \int_{-\frac{\tau}{2}}^{\infty} y q_1(y) \dif{y} + \frac{\Gamma}{C_0} \int_{-\infty}^{-\frac{\tau}{2}} y q_1(y) \dif{y}
\\
&=
\frac{1}{C_0} \int_{0}^{\infty} y\, q_1\!\left(y-\frac{\tau}{2}\right) \dif{y} + \frac{\Gamma}{C_0} \int_{-\infty}^{0} y \, q_1\!\left(y-\frac{\tau}{2}\right) \dif{y}
\\
& \stackrel{(iii)}{\le} 0,
\end{align*}
where inequality~$(i)$ follows by~\eqref{eqn:get-theta-via-infimum}
defining the threshold and in inequality~$(ii)$ we replace the
threshold $t^*$ by $\tau/2$, which only increases the objective,
and the inequality~$(iii)$ holds when $\Gamma \ge \Gamma_\design^\gauss$
(and $C_0 = Q(Y(1) \ge -\tau/2) + \Gamma Q(Y(1) < -\tau/2)$).
\paragraph{Step 3: Proving Zero Variation Distance}
Now, we verify that the observed quantity $(Y(Z), Z)$ has the same
distribution under $Q$ and $P$. If $Q(Z=1) = a$, then $P(Z=1) = a =
Q(Z=1)$. Furthermore, the conditional distribution $Y(Z) \mid Z$ under $Q$ equals that under $P$
by Eqs.~\eqref{eq:equal-marginal-y-1} and
\eqref{eq:equal-marginal-y-0}. Therefore, the marginal distributions of $(Y(Z),Z)$ under $P$ and $Q$ are identical, and
$\tvnorms{P_{Y(Z), Z} - Q_{Y(Z), Z}} = 0$.
Consequently, for any test $t_n^\Gamma,$ we have
$P(t_n^\Gamma = 1) = Q(t_n^\Gamma = 1)$. \end{proof}
\end{document} |
\begin{document}
\maketitle \begin{abstract}
The stationary horizon (SH) is a stochastic process of coupled Brownian motions indexed by their real-valued drifts. It was first introduced by the first author as the diffusive scaling limit of the Busemann process of exponential last-passage percolation. It was independently discovered as the Busemann process of Brownian last-passage percolation by the second and third authors. We show that SH is the unique invariant distribution and an attractor of the KPZ fixed point under conditions on the asymptotic spatial slopes. It follows that SH describes the Busemann process of the directed landscape. This gives control of semi-infinite geodesics simultaneously across all initial points and directions. The countable dense set $\Xi$ of directions of discontinuity of the Busemann process is the set of directions in which not all geodesics coalesce and in which there exist at least two distinct geodesics from each initial point. This creates two distinct families of coalescing geodesics in each $\Xi$ direction. In $\Xi$ directions, the Busemann difference profile is distributed like Brownian local time. We describe the point process of directions $\xi\in\Xi$ and spatial locations where the $\xi\pm$ Busemann functions separate. \end{abstract}
\tableofcontents
\section{Introduction} \subsection{KPZ fixed point and directed landscape}
The study of the Kardar-Parisi-Zhang (KPZ) class of 1+1 dimensional stochastic models of growth and interacting particles has advanced to the point where the first conjectured universal scaling limits have been rigorously constructed. These two interrelated objects are the {\it KPZ fixed point}, initially derived as the limit of the totally asymmetric simple exclusion process (TASEP)~\cite{KPZfixed}, and the {\it directed landscape} (DL), initially derived as the limit of Brownian last-passage percolation (BLPP)~\cite{Directed_Landscape}.
The KPZ fixed point describes the height of a growing interface, while the directed landscape describes the random environment through which growth propagates. These two objects are related by a variational formula, recorded in \eqref{eqn:KPZ_DL_rep} below. Evidence for the universality claim comes from rigorous scaling limits of exactly solvable models~\cite{reflected_KPZfixed,heat_and_landscape,KPZ_equation_convergence,Dauvergne-Virag-21}.
Our paper studies the global geometry of the directed landscape, through the analytic and probabilistic properties of its Busemann process. Our construction of the Busemann process begins with the recent construction of individual Busemann functions by Rahman and Vir\'ag~\cite{Rahman-Virag-21}. The remainder of this introduction describes the context and gives previews of some results. The organization of the paper is in Section \ref{sec:org}.
\subsection{Semi-infinite geodesics and Busemann functions} In growth models of first- and last-passage type, \textit{semi-infinite geodesics} trace the paths of infection all the way to infinity and hence are central to the large-scale structure of the evolution. Their study was initiated by Licea and Newman in first-passage percolation in the 1990s~\cite{licea1996,Newman} with the first results on existence, uniqueness and coalescence. Since the work of Hoffman~\cite{Hoffman-2005,hoffman2008}, Busemann functions have been a key tool for studying semi-infinite geodesics (see, for example~\cite{Damron_Hanson2012,Hanson-2018,Georgiou-Rassoul-Seppalainen-17a,Timo_Coalescence,Seppalainen-Sorensen-21a,Seppalainen-Sorensen-21b,Rahman-Virag-21,Ganguly-Zhang-2022a}, and Chapter 5 of~\cite{50years}).
Closer to the present work, the study of semi-infinite geodesics began in directed last-passage percolation with the application of the Licea-Newman techniques to the exactly solvable exponential model by Ferrari and Pimentel~\cite{ferr-pime-05}. Georgiou, Rassoul-Agha, and the second author~\cite{Georgiou-Rassoul-Seppalainen-17a,Georgiou-Rassoul-Seppalainen-17b} showed the existence of semi-infinite geodesics in directed last-passage percolation with general weights under mild moment conditions. Using this, Janjigian, Rassoul-Agha, and the second author~\cite{Janjigian-Rassoul-Seppalainen-19} showed that geometric properties of the semi-infinite geodesics can be found by studying analytic properties of the Busemann process. In the special case of exponential weights, the distribution of the Busemann process from~\cite{Fan-Seppalainen-20}
was used to show that all geodesics in a given direction coalesce if and only if that direction is not a discontinuity of the Busemann process.
In~\cite{Seppalainen-Sorensen-21b} the second and third author extended this work to the semi-discrete setting, by deriving the distribution of the Busemann process and analogous results for semi-infinite geodesics in BLPP. Again, all semi-infinite geodesics in a given direction coalesce if and only if that direction is not a discontinuity of the Busemann process. In each direction of discontinuity there are two coalescing families of semi-infinite geodesics and from each initial point \textit{at least} two semi-infinite geodesics. Compared to LPP on the discrete lattice, the semi-discrete setting of BLPP gives rise to additional non-uniqueness. In particular, \cite{Seppalainen-Sorensen-21b} developed a new coalescence proof to handle the non-discrete setting.
In the directed landscape, Rahman and Vir\'ag~\cite{Rahman-Virag-21} showed the existence of semi-infinite geodesics, almost surely in a fixed direction across all initial points, as well as almost surely from a fixed initial point across all directions. Furthermore, all semi-infinite geodesics in a fixed direction coalesce almost surely. This allowed \cite{Rahman-Virag-21} to construct a Busemann function for a fixed direction. After the first version of our present paper was posted, Ganguly and Zhang~\cite{Ganguly-Zhang-2022a} gave an independent construction of a Busemann function and semi-infinite geodesics, again for a fixed direction. They defined a notion of ``geodesic local time'' which was key to understanding the global fractal geometry of geodesics in DL. Later in~\cite{Ganguly-Zhang-2022b}, the same authors showed that the discrete analogue of geodesic local time in exponential LPP converges to geodesic local time for the DL.
Starting from the definition in~\cite{Rahman-Virag-21}, we construct the full Busemann process across all directions. Through the properties of this process, we establish a classification of uniqueness and coalescence of semi-infinite geodesics in the directed landscape. Similar constructions of the Busemann process and classifications for discrete and semi-discrete models have previously been achieved \cite{Sepp_lecture_notes,Janjigian-Rassoul-2020b,Janjigian-Rassoul-Seppalainen-19,Seppalainen-Sorensen-21b}, but the procedure in the directed landscape is more delicate. One reason is that the space is fully continuous. Another difficulty is that Busemann functions in DL possess monotonicity only in horizontal directions, while discrete and semi-discrete models exhibit monotonicity in both horizontal and vertical directions. A new perspective is needed to construct the Busemann process for arbitrary initial points.
The full Busemann process is necessary for a complete understanding of the geometry of semi-infinite geodesics. In particular, countable dense sets of initial points or directions cannot capture non-uniqueness of geodesics or the singularities of the Busemann process.
\subsection{Stationary horizon as the Busemann process of the directed landscape}
The {\it stationary horizon} (SH)
is a cadlag process indexed by the real line whose states are Brownian motions with drift (Definition~\ref{def:SH} in Appendix~\ref{sec:stat_horiz}). SH was first introduced by the first author~\cite{Busani-2021} as the diffusive scaling limit of the Busemann process of exponential last-passage percolation from~\cite{Fan-Seppalainen-20}, and was conjectured to be the universal scaling limit of the Busemann process of models in the KPZ universality class. Shortly afterwards, the paper~\cite{Seppalainen-Sorensen-21b} of the last two authors was posted. To derive the aforementioned results about semi-infinite geodesics, they constructed the Busemann process in BLPP and made several explicit distributional calculations. Remarkably, after discussions with the first author, the second and third authors discovered that the Busemann process of BLPP has the same distribution as the SH, restricted to nonnegative drifts. Furthermore, due to a rescaling property of the stationary horizon, when the direction is perturbed on order $n^{-1/3}$ from the diagonal, this process also converges to the SH, in the sense of finite-dimensional distributions. These results were added to the second version of~\cite{Seppalainen-Sorensen-21b}.
The convergence of the full Busemann process of exponential LPP to SH under the KPZ scaling, proven in~\cite{Busani-2021}, is currently the only example of what we expect to be a universal phenomenon: namely, that SH is the universal limit of the Busemann processes of models in the KPZ class. The present paper takes a step towards this universality, by establishing that the stationary horizon is the Busemann process of the directed landscape, which itself is the conjectured universal scaling limit of metric-like objects in the KPZ class. This is the central result that gives access to properties of the Busemann process. In addition to giving strong evidence towards the universality of SH conjectured by \cite{Busani-2021}, it provides us with computational tools for studying the geometric features of DL.
The characterization of the Busemann process of DL comes from a combination of two results. (i) The Busemann process evolves as a backward KPZ fixed point. (ii) The stationary horizon is the unique invariant distribution of the KPZ fixed point, subject to an asymptotic slope condition satisfied by the Busemann process (Theorem~\ref{thm:invariance_of_SH}).
Our invariance result is an infinite-dimensional extension of the previously proved invariance of Brownian motion with drift \cite{KPZfixed,Pimentel-21a,Pimentel-21b}.
For the invariance of a single Brownian motion, we have a strengthened uniqueness statement (Remark~\ref{rmk:k = 1 uniqueness}).
Furthermore, under asymptotic slope conditions on the initial data, the stationary horizon is an attractor.
This is analogous to the results of~\cite{Bakhtin-Cator-Konstantin-2014,Bakhtin-Li-19} for stationary solutions of the Burgers equation with random Poisson and kick forcing.
\subsection{Non-uniqueness of geodesics and random fractals} Among the key questions is the uniqueness of semi-infinite geodesics in the directed landscape. We show the existence of a countably infinite, dense random set $\Xi$ of directions $\xi$ such that, from each initial point in $\mathbb{R}^2$, two semi-infinite geodesics in direction $\xi$ emanate, separate immediately or after some time, and never return back together. It is interesting to relate this result and its proof to earlier work on disjoint finite geodesics.
The set of exceptional pairs of points between which there is a non-unique geodesic in DL was studied in~\cite{Bates-Ganguly-Hammond-22}. Their approach relied on~\cite{Basu-Ganguly-Hammond-21} which studied the random nondecreasing function $z \mapsto \mathcal{L}(y,s;z,t) - \mathcal{L}(x,s;z,t)$ for fixed $x < y$ and $s < t$. This process is locally constant except on an exceptional set of Hausdorff dimension $\frac{1}{2}$. From here~\cite{Bates-Ganguly-Hammond-22} showed that for fixed $s < t$ and $x < y$, the set of $z \in \mathbb{R}$ such that there exist disjoint geodesics from $(x,s)$ to $(z,t)$ and from $(y,s)$ to $(z,t)$ is exactly the set of local variation of the function $z \mapsto \mathcal{L}(x,s;z,t) - \mathcal{L}(y,s;z,t)$, and therefore has Hausdorff dimension $\frac{1}{2}$. Going further, they showed that for fixed $s < t$, the set of pairs $(x,y) \in \mathbb{R}^2$ such that there exist two disjoint geodesics from $(x,s)$ to $(y,t)$ also has Hausdorff dimension $\frac{1}{2}$, almost surely. Later, this exceptional set in the time direction was studied in~\cite{Ganguly-Zhang-2022a}, and was shown to have Hausdorff dimension $2/3$. Across the entire plane, this set has Hausdorff dimension $\frac{5}{3}$. In a similar spirit, Dauvergne~\cite{Dauvergne-23} recently posted a paper detailing all the possible configurations of non-unique point-to-point geodesics, along with the Hausdorff dimensions of the sets of points with those configurations.
Our focus is on the limit of the measure studied in~\cite{Basu-Ganguly-Hammond-21}, namely, the nondecreasing function $\xi \mapsto W_{\xi}(y,s;x,s)= \lim_{t \to \infty}[\mathcal{L}(y,s;t\xi,t) - \mathcal{L}(x,s;t\xi,t)]$, which is exactly the Busemann function in direction $\xi$.
The support of its Lebesgue-Stieltjes measure corresponds to the existence of disjoint geodesics (Theorem~\ref{thm:Buse_pm_equiv}), but in contrast to~\cite{Bates-Ganguly-Hammond-22}, the measure is supported on a countable discrete set instead of on a set of Hausdorff dimension $\frac{1}{2}$ (Theorem~\ref{thm:DLBusedc_description}\ref{itm:DL_Buse_no_limit_pts} and Remark~\ref{rmk:shock_measure}).
We encounter a Hausdorff dimension $\frac{1}{2}$ set if we look along a fixed time level $s$ for those space-time points $(x,s)$ out of which there are disjoint semi-infinite geodesics in a {\it random, exceptional} direction (Theorem~\ref{thm:Split_pts}\ref{itm:Hasudorff1/2}). Up to the removal of an at-most countable set, this Hausdorff dimension $\frac{1}{2}$ set is the support of the random measure defined by the function \[ x \mapsto f_{s,\xi}(x) = W_{\xi +}(x,s;0,s) - W_{\xi -}(x,s;0,s), \] where $W_{\xi\pm}$ are the left and right-continuous Busemann processes (Theorem~\ref{thm:random_supp}). This is a semi-infinite analogue of the result in~\cite{Bates-Ganguly-Hammond-22}.
The distribution of $f_{s,\xi}$ is delicate. The set of directions $\xi$ such that $W_{\xi -} \neq W_{\xi +}$, or equivalently such that $\tau_\xi=\inf\{x>0: f_{s,\xi}(x)>0\}<\infty $, is the set $\Xi$ mentioned above. A fixed direction $\xi$ lies in $\Xi$ with probability $0$. Theorem~\ref{thm:BusePalm} shows that the law of $f_{s,\xi}(\tau_\xi+\aabullet)$ on $\mathbb{R}_{\ge0}$, conditioned on $\xi \in \Xi$ in the appropriate Palm sense, is exactly that of the running maximum of a Brownian motion, or equivalently, that of Brownian local time. This complements the fact that the function $z \mapsto \mathcal{L}(y,s;z,t) - \mathcal{L}(x,s;z,t)$ is locally absolutely continuous with respect to Brownian local time \cite{Ganguly-Hegde-2021}. Furthermore, the point process $\{(\tau_\xi, \xi):\xi\in\Xi\}$ has an explicit mean measure (Lemma~\ref{lm:ac} in Section \ref{sec:Palm}).
Since the first version of the present article has appeared, Bhatia~\cite{Bhatia-22,Bhatia-23} has posted two papers that use our results as inputs. The first,~\cite{Bhatia-22} studies the Hausdorff dimension of the set of splitting points of geodesics, along a geodesic itself. The second,~\cite{Bhatia-23} answers an open problem presented in this paper. Namely, the sets $\operatorname{NU}_0^{\xi \sig}$ and $\operatorname{NU}_1^{\xi \sig}$ defined in~\eqref{NU0}--\eqref{NU1} are almost surely equal, and for $\xi \notin \Xi$, this set has Hausdorff dimension $\frac{4}{3}$ in the plane.
\subsection{Inputs} We summarize the inputs to this paper, besides the basic \cite{Directed_Landscape, KPZfixed,reflected_KPZfixed}. Four ingredients go into the invariance of SH under the KPZ fixed point: (i) The invariance of the Busemann process of the exponential corner growth model under the LPP dynamics \cite{Fan-Seppalainen-20}. (ii) Convergence of this Busemann process to SH \cite{Busani-2021}. Here, the emergence of SH as a scaling limit in the KPZ universality class plays a fundamental role. (iii) Exit point bounds for stationary exponential LPP \cite{bala-busa-sepp-20, Balazs-Cator-Seppalainen-2006, Emrah-Janjigian-Seppalainen-20,Seppalainen-Shen-2020, Sepp_lecture_notes}.
(iv) Convergence of exponential LPP to DL \cite{Dauvergne-Virag-21}. For the uniqueness, we use Lemma~\ref{lem:DL_crossing_facts}\ref{itm:KPZ_crossing_lemma}, originally from~\cite{Pimentel-21b}.
To construct the global Busemann process, we start from the results in~\cite{Rahman-Virag-21}, summarized in Section~\ref{sec:RV_summ}. After the first version of our paper appeared,~\cite{Ganguly-Zhang-2022a} gave an independent construction of the Busemann function in a fixed direction. Our results do not rely on~\cite{Ganguly-Zhang-2022a}. After characterizing the distribution of the Busemann process, we use the regularity of SH from~\cite{Busani-2021,Seppalainen-Sorensen-21b} to prove results about the regularity of the Busemann process and semi-infinite geodesics.
To describe the size of the exceptional sets of points with non-unique geodesics (Theorems~\ref{thm:Split_pts} and ~\ref{thm:DLNU}\ref{itm:DL_NU_count}), we use results about point-to-point geodesics from~\cite{Bates-Ganguly-Hammond-22} and~\cite{Dauvergne-Sarkar-Virag-2020}.
A result from~\cite{Dauvergne-22} implies Lemma~\ref{lm:horiz_shift_mix} and the mixing in Theorem~\ref{thm:Buse_dist_intro}\ref{itm:stationarity}.
Our techniques are probabilistic rather than integrable, but some results we use come from integrable inputs. The results of~\cite{Directed_Landscape,Dauvergne-Virag-21,Dauvergne-Nica-Virag-2021,Busani-2021,Dauvergne-Zhang-2021} utilize the continuous RSK correspondence
\cite{O'Connell-2003,rep_non_colliding}. The results on point-to-point geodesics in~\cite{Bates-Ganguly-Hammond-22,Dauvergne-Sarkar-Virag-2020} rely on \cite{Hammond2}, who studied the number of disjoint geodesics in BLPP using integrable inputs.
\subsection{Organization of the paper}\label{sec:org} Section~\ref{sec:model_main_results} defines the models and states three results accessible without further definitions: Theorem~\ref{thm:invariance_of_SH} (proved in Section~\ref{sec:invariance}) on the unique invariance and attractiveness of SH under the KPZ fixed point, Theorem~\ref{thm:DLSIG_main} (proved in Section~\ref{sec:Buseextraproofs}) on the global structure of semi-infinite geodesics in DL, and Theorem~\ref{thm:Split_pts} (proved in Section~\ref{sec:last_proofs}) on the fractal properties of the set of initial points with disjoint semi-infinite geodesics in the same direction. Section~\ref{sec:invariance} proves Theorem~\ref{thm:invariance_of_SH}. Section~\ref{sec:RV_summ} summarizes the results of~\cite{Rahman-Virag-21} that we use as the starting point for constructing the Busemann process.
The remainder of the paper covers finer results on the Busemann process and semi-infinite geodesics. Sections~\ref{sec:Buse_geod_results}--\ref{sec:meas_supp} each start with several theorems that are then proved later in the paper. The theorems can be read independently of the proofs. Each section depends on the sections that came before. Section~\ref{sec:Buse_geod_results} describes the construction of the Busemann process and infinite geodesics in all directions. Section~\ref{sec:LR_sig} gives a detailed discussion of non-uniqueness of geodesics. Section~\ref{sec:geometry_sec} is concerned with coalescence and connects the regularity of the Busemann process to the geometry of geodesics. This culminates in the proof of Theorem~\ref{thm:DLSIG_main}. Section~\ref{sec:meas_supp} develops the theory of random measures for the Busemann process, culminating in the proof of Theorem~\ref{thm:Split_pts}. Section~\ref{sec:op} collects open problems. The appendices contain material from the literature. Details of the results in the appendices and other routine proofs appear in our arXiv version~\cite{Busa-Sepp-Sore-22arXiv}.
\section{Model and main theorems} \label{sec:model_main_results} \subsection{Notation} \label{sec:notat}
\begin{enumerate} [label={\rm(\roman*)}, ref={\rm\roman*}] \itemsep=2pt
\item $\mathbb{Z}$, $\mathbb{Q}$ and $\mathbb{R}$ are restricted by subscripts, as in for example $\mathbb{Z}_{> 0}=\{1,2,3,\dotsc\}$.
\item $\mathbf e_1= (1,0)$ and $\mathbf e_2 = (0,1)$ denote the standard basis vectors in $\mathbb{R}^2$.
\item Equality in distribution is $\hspace{0.9pt}\deq\hspace{0.9pt}$ and convergence in distribution $\hspace{0.9pt}\Longrightarrow$.
\item $X \sim \operatorname{Exp}(\rho)$ means that
$\mathbb P(X>t)=e^{-\rho t}$ for $t>0$.
\item The increments of a function $f:\mathbb{R} \to \mathbb{R}$ are denoted by $f(x,y) = f(y) - f(x)$.
\item Increment ordering of $f,g:\mathbb{R} \to \mathbb{R}$: $f \;{\le}_{\rm inc}\; g$ means that $f(x,y) \le g(x,y)$ for all $x < y$.
\item For $s \in \mathbb{R}$, $\mathcal{H}_s=\{(x,s): x \in \mathbb{R}\}$ is the set of space-time points at time level $s$.
\item A two-sided standard Brownian motion is a continuous random process $\{B(x): x \in \mathbb{R}\}$ such that $B(0) = 0$ almost surely and $\{B(x):x \ge 0\}$ and $\{B(-x):x \ge 0\}$ are two independent standard Brownian motions on $[0,\infty)$.
\item\label{def:2BMcmu} If $B$ is a two-sided standard Brownian motion, then
$\{c B(x) + \mu x: x \in \mathbb{R}\}$ is a two-sided Brownian motion with diffusivity $c>0$ and drift $\mu\in\mathbb{R}$.
\item The parameter domain of the directed landscape is $\Rup = \{(x,s;y,t) \in \mathbb{R}^4: s < t\}$.
\item The Hausdorff dimension of a set $A$ is denoted by $\dim_H(A)$. \end{enumerate} \subsection{Geodesics in the directed landscape} \label{sec:DL_geod}
The directed landscape, originally constructed in~\cite{Directed_Landscape}, is a random continuous function $\mathcal{L}:\Rup \to \mathbb{R}$ that arises as the scaling limit of a large class of models in the KPZ universality class, and is expected to be a universal limit of such models. We cite the theorem for convergence of exponential last-passage percolation in Theorem~\ref{thm:conv_to_DL} in Appendix~\ref{sec:LPP}, and summarize some key points from~\cite{Directed_Landscape} here. The directed landscape satisfies the metric composition law: for $(x,s;y,u) \in \Rup$ and $t \in (s,u)$, \begin{equation} \label{eqn:metric_comp} \mathcal{L}(x,s;y,u) = \sup_{z \in \mathbb{R}}\{\mathcal{L}(x,s;z,t) + \mathcal{L}(z,t;y,u)\}. \end{equation} This implies the reverse triangle inequality: for $s < t < u$ and $(x,y,z) \in \mathbb{R}^3$, $\mathcal{L}(x,s;z,t) + \mathcal{L}(z,t;y,u) \le \mathcal{L}(x,s;y,u)$. Furthermore, over disjoint time intervals $(s_i,t_i)$, $1 \le i \le n$, the processes $(x,y) \mapsto \mathcal{L}(x,s_i;y,t_i)$ are independent.
Under the directed landscape, the length of a continuous path $g:[s,t] \to \mathbb{R}$ is \[ \mathcal{L}(g) = \inf_{k \in \mathbb{Z}_{>0}} \; \inf_{s = t_0 < t_1 < \cdots < t_k = t} \sum_{i = 1}^k \mathcal{L}(g(t_{i - 1}),t_{i - 1};g(t_i),t_i), \] where the second infimum is over all partitions $s = t_0 < t_1 < \cdots < t_k = t$. By the reverse triangle inequality, $\mathcal{L}(g) \le \mathcal{L}(g(s),s;g(t),t)$. We call $g$ a \textit{geodesic} if equality holds. When this occurs, every partition $s = t_0 < t_1 < \cdots < t_k = t$ satisfies \[ \mathcal{L}(g(s),s;g(t),t) = \sum_{i = 1}^k \mathcal{L}(g(t_{i - 1}),t_{i - 1};g(t_i),t_i). \]
For fixed $(x,s;y,t) \in \Rup$, there exists almost surely a unique geodesic between $ (x,s)$ and $(y,t)$ \cite[Sect.~12--13]{Directed_Landscape}. Across all points, there exist leftmost and rightmost geodesics. The leftmost geodesic $g$ is such that, for each $u \in (s,t)$, $g(u)$ is the leftmost maximizer of $\mathcal{L}(x,s;z,u) + \mathcal{L}(z,u;y,t)$ over $z \in \mathbb{R}$. The analogous fact holds for the rightmost geodesic. Geodesics in the directed landscape have H\"older regularity $\frac{2}{3} - \varepsilon$ but not $\frac{2}{3}$ \cite{Directed_Landscape,Dauvergne-Sarkar-Virag-2020}.
A \textit{semi-infinite geodesic} from $(x,s) \in \mathbb{R}^2$ is a continuous path $g:[s,\infty) \to \mathbb{R}$ such that $g(s) = x$ and the restriction of $g$ to each domain $[s,t]\subseteq[s,\infty)$ is a geodesic between $(x,s)$ and $(g(t),t)$. Such an infinite path $g$ has {\it direction} $\xi \in \mathbb{R}$ if $\lim_{t \to \infty} g(t)/t=\xi$. Two semi-infinite geodesics $g_1$ and $g_2$ \textit{coalesce} if there exists $t$ such that $g_1(u) = g_2(u)$ for all $u\ge t$. If $t$ is the minimal such time, then $(g_1(t),t)$ is the \textit{coalescence point}. Two semi-infinite geodesics $g_1,g_2:[s,\infty) \to \mathbb{R}$ are \textit{distinct} if $g_1(t) \neq g_2(t)$ for at least some $t\in(s,\infty)$ and \textit{disjoint} if $g_1(t) \neq g_2(t)$ for all $t\in(s,\infty)$.
\subsection{KPZ fixed point} The KPZ fixed point $h_t(\aabullet;\mathfrak h)$ started from initial state $\mathfrak h$ is a Markov process on the space of upper semi-continuous functions. More precisely, its state space is defined as \begin{equation} \label{UCdef} \begin{aligned}
\operatorname{UC} &= \{\text{ upper semi-continuous functions }\mathfrak h:\mathbb{R} \to \mathbb{R} \cup \{-\infty\}: \\ &\quad \text{ there exist }a,b > 0 \text{ such that } \quad \mathfrak h(x) \le a + b|x| \text{ for all }x \in \mathbb{R}, \\ &\quad \text{ and }\mathfrak h(x) > -\infty \text{ for some }x \in \mathbb{R}\}. \end{aligned} \end{equation} The topology on this space is that of convergence of hypographs. When restricted to continuous functions, this convergence is equivalent to uniform convergence on compact sets (Section 3.1 in~\cite{KPZfixed}). This subspace of continuous functions is preserved under the KPZ fixed point (\cite{KPZfixed}, Lemma~\ref{lem:max_restrict}). The process $\{h_t(\aabullet;\mathfrak h)\}_{t \ge 0}$ can be represented as \cite{reflected_KPZfixed} \begin{equation} \label{eqn:KPZ_DL_rep} h_t(y;\mathfrak h) = \sup_{x \in \mathbb{R}}\{\mathfrak h(x) + \mathcal{L}(x,0;y,t)\}, \quad y\in\mathbb{R}, \end{equation} where $\mathcal{L}$ is the directed landscape.
If $\mathfrak h$ is a two-sided Brownian motion with diffusivity $\sqrt 2$ and arbitrary drift, then $ h_t(\aabullet;\mathfrak h) - h_t(0;\mathfrak h) \deq \mathfrak h(\aabullet)$ for each $t > 0$ \cite{KPZfixed,Pimentel-21a,Pimentel-21b}.
\subsection{Stationary horizon} \label{sec:SHintro} The stationary horizon (SH) is a process $G = \{G_\xi\}_{\xi \in \mathbb{R}}$ with values $G_\xi$ in the space $C(\mathbb{R})$ of continuous $\mathbb{R}\to\mathbb{R}$ functions. $C(\mathbb{R})$ has its Polish topology of uniform convergence on compact sets. The paths $\xi\mapsto G_\xi$ lie in the Skorokhod space $D(\mathbb{R},C(\mathbb{R}))$ of cadlag functions $\mathbb{R} \to C(\mathbb{R})$. This means that for each $\xi \in \mathbb{R}$, $\lim_{\beta \searrow \xi} G_\beta = G_\xi$, where convergence holds uniformly on compact sets. The limit $\lim_{\alpha \nearrow \xi} G_\alpha$ also exists in the same sense, but is not necessarily equal to $G_\xi$. We use $G_{\xi -}$ to denote this limit. For each $\xi \in \mathbb{R}$, $G_{\xi}$ is a two-sided Brownian motion with diffusivity $\sqrt 2$ and drift $2\xi$. The distribution of a $k$-tuple $(G_{\xi_1},\dotsc,G_{\xi_k})$ can be realized as an image of $k$ independent Brownian motions with drift, given in Definition~\ref{def:SH}. See Appendix~\ref{sec:stat_horiz} for further properties of SH.
For a compact set $K \subseteq \mathbb{R}$, the process $\xi \mapsto G_\xi |_K$ of functions restricted to $K$ is a jump process. Figure~\ref{fig:SH_sim} shows a simulation of $G_\xi$. Each pair of trajectories remains together in a neighborhood of the origin before separating for good, both forward and backward on $\mathbb{R}$. \begin{figure}
\caption{The stationary horizon. Each color represents a different parameter $\xi \in \{0,\pm 1,\pm 2,\pm 3,\pm 5,\pm 10\}$}
\label{fig:SH_sim}
\end{figure}
Our first result is the unique invariance and attractiveness of SH under the KPZ fixed point. This generalizes the invariance of a single Brownian motion with drift and provides a new uniqueness statement (Remark~\ref{rmk:k = 1 uniqueness} below). Attractiveness is proved under these assumptions on the asymptotic drift $\xi \in \mathbb{R}$ of the initial function $\mathfrak h \in \operatorname{UC}$: \begin{equation} \label{eqn:drift_assumptions}
\begin{aligned}
&\text{If } \xi = 0, \quad &\limsup_{x \to +\infty} \frac{\mathfrak h(x)}{x} \in [-\infty,0] \qquad &\text{and}\quad &\liminf_{x \to -\infty} \frac{\mathfrak h(x)}{x} \in [0,+\infty], \\
&\text{if } \xi > 0,\quad &\lim_{x \to +\infty} \frac{\mathfrak h(x)}{x} = 2\xi\qquad&\text{and}\quad &\liminf_{x \to -\infty} \frac{\mathfrak h(x)}{x} \in (-2\xi,+\infty], \\
&\text{and if } \xi < 0,\quad &\lim_{x \to -\infty} \frac{\mathfrak h(x)}{x} = 2 \xi\qquad&\text{and}\quad &\limsup_{x \to +\infty} \frac{\mathfrak h(x)}{x} \in [-\infty, -2\xi).
\end{aligned} \end{equation} As spelled out in the theorem below, these conditions describe the basins of attraction for the KPZ fixed point. When $\xi > 0$ and $x>0$ is large, this condition forces $\mathfrak h(x)$ to be approximated by $2\xi x$. The directed landscape $\mathcal{L}(x,s;y,t)$ can be approximated by $-\frac{(x - y)^2}{t - s}$ (Lemma~\ref{lem:Landscape_global_bound}), so that $\mathfrak h(x) + \mathcal{L}(x,0;y,t) \approx 2\xi x - \frac{(y - x)^2}{t}$, which has its maximum at $x = y + \xi t$. Once we can control the maximizers, Lemma~\ref{lem:DL_crossing_facts} allows us to compare the KPZ fixed point from different initial conditions. This, of course, must be made precise. In the $\xi > 0$ case, in the proof of Lemma~\ref{lem:unq} (contained in the arXiv version of the present paper), the $\liminf$ condition as $x \to -\infty$ forces the maximizer to be positive, and an analogous statement holds for $\xi < 0$, although the condition is different. These drift conditions are analogous to the conditions on the drift studied in~\cite{Bakhtin-Cator-Konstantin-2014} for stationary solutions of the Burgers equation with random Poisson forcing.
\begin{theorem} \label{thm:invariance_of_SH}
Let $(\Omega,\mathcal{F},\mathbb P)$ be a probability space on which the stationary horizon $G=\{G_\xi\}_{\xi \in \mathbb{R}}$ and directed landscape $\mathcal{L}$ are defined, and such that the processes $\{\mathcal{L}(x,0;y,t):x,y \in \mathbb{R}, t > 0\}$ and $G$ are independent. For each $\xi \in \mathbb{R}$, let $G_\xi$ evolve under the KPZ fixed point in the same environment $\mathcal{L}$, i.e., for each $\xi \in \mathbb{R}$, \[ h_t(y;G_\xi) = \sup_{x \in \mathbb{R}}\{G_\xi(x) + \mathcal{L}(x,0;y,t)\},\qquad\text{for all } y\in\mathbb{R} \text{ and } t > 0. \]
{\rm(Invariance)} For each $t > 0$, the equality in distribution $\{h_t(\aabullet;G_\xi) - h_t(0;G_\xi)\}_{\xi \in \mathbb{R}}$ $\deq G$ holds between random elements of $D(\mathbb{R},C(\mathbb{R}))$.
{\rm(Attractiveness)} Let $k \in \mathbb{Z}_{>0}$ and $\xi_1 < \cdots < \xi_k$ in $\mathbb{R}$. Let $(\mathfrak h^1,\ldots,\mathfrak h^k)$ be a $k$-tuple of functions in $\operatorname{UC}$, coupled with $(G,\mathcal{L})$ {\rm arbitrarily}, which almost surely satisfy \eqref{eqn:drift_assumptions} for $(\mathfrak h, \xi) = (\mathfrak h^i, \xi_i)$ for each $i\in\{1,\dotsc,k\}$. Then if $(\mathfrak h^1,\ldots,\mathfrak h^k)$ evolves in the same environment $\mathcal{L}$, for any $a > 0$, \[ \lim_{t \to \infty} \mathbb P\bigl\{ h_t(x;\mathfrak h^i)-h_t(0;\mathfrak h^i)= h_t(x;G_{\xi_i})-h_t(0;G_{\xi_i}) \ \, \forall x \in [-a,a],1 \le i \le k\bigr\} = 1. \] Consequently, as $t \to \infty$, the distributional limit \[ \bigl(h_t(\aabullet;\mathfrak h^1) -h_t(0;\mathfrak h^1) ,\ldots,h_t(\aabullet;\mathfrak h^k) - h_t(0;\mathfrak h^k)\bigr) \Longrightarrow \bigl(G_{\xi_1}(\aabullet),\ldots,G_{\xi_k}(\aabullet)\bigr) \] holds in $\operatorname{UC}^k$ (or in $\mathcal{C}(\mathbb{R})^k$ if the $\mathfrak h^i$ are continuous).
{\rm(Uniqueness)} In particular, on the space $\operatorname{UC}^k$, $\bigl(G_{\xi_1}, \dotsc, G_{\xi_k})$ is the unique invariant distribution of the KPZ fixed point such that for each $i\in\{1,\dotsc,k\}$ the condition~\eqref{eqn:drift_assumptions} holds for $(\mathfrak h, \xi) = (\mathfrak h^i, \xi_i)$. \end{theorem} \begin{remark} Theorem~\ref{thm:DL_Buse_summ}\ref{itm:global_attract} in Section \ref{sec:Buse_geod_results} states that the Busemann process is a global attractor of the backward KPZ fixed point. Namely, start the KPZ fixed point at time $t$ with initial data $\mathfrak h$ satisfying \eqref{eqn:drift_assumptions} and run it backward in time to a fixed final time $s$. Then, in a given compact set, for large enough $t$ the increments of the backwards KPZ fixed point at time $s$, started from initial data $\mathfrak h$ at time $t$, match those of the Busemann function in direction $\xi$. \end{remark} \begin{remark}
The process $t \mapsto \{h_t(\aabullet;\mathfrak h^\xi) - h_t(0;\mathfrak h^\xi) \}_{\xi \in \mathbb{R}}$ is a well-defined Markov process on a state space which is a Borel subset of $D(\mathbb{R},C(\mathbb{R}))$ (Lemma~\ref{lem:KPZ_preserve_Y}). By the uniqueness result for finite-dimensional distributions, $G$ is the unique invariant distribution on this space of $C(\mathbb{R})$-valued cadlag paths. \end{remark} \begin{remark} \label{rmk:k = 1 uniqueness} In the above strength, the attractiveness result was previously unknown even in the case $k = 1$ (a single initial function). Pimentel \cite{Pimentel-21a,Pimentel-21b} proved attractiveness for $k =1$ and $\xi = 0$ under the following condition on the initial data $\mathfrak h$: there exist $\gamma_0 > 0$ and $\psi(r)$ such that for all $\gamma > \gamma_0$ and $r \ge 1$, \begin{equation} \label{eqn:ergodicity_assumption}
\mathbb P(\gamma^{-1}\mathfrak h(\gamma^2 x) \le r|x| \, \forall x \ge 1) \ge 1 - \psi(r), \ \text{ where } \lim_{r \to \infty} \psi(r) = 0. \end{equation} \end{remark}
\subsection{Semi-infinite geodesics}
A significant consequence of Theorem~\ref{thm:invariance_of_SH} is that the stationary horizon characterizes the distribution of the Busemann process of the directed landscape (Theorem \ref{thm:Buse_dist_intro}). The Busemann process in turn is used to construct semi-infinite geodesics called \textit{Busemann geodesics} simultaneously from all initial points and in all directions (Theorem~\ref{thm:DL_SIG_cons_intro}). The definition of Busemann geodesics, along with a detailed study, comes in Section \ref{sec:Buse_geod_results}.
The next theorem states our conclusions for general semi-infinite geodesics. The random countably infinite dense set $\Xi$ of directions is later characterized in \eqref{eqn:DLBuseDC_def} as the discontinuity set of the Busemann process, and its properties stated in Theorem~\ref{thm:DLBusedc_description}.
We assume the probability space $(\Omega,\mathcal{F},\mathbb P)$ of the directed landscape $\mathcal{L}$ to be complete. All statements about semi-infinite geodesics are with respect to $\mathcal{L}$. Two geodesics are {\it disjoint} if they do not share any space-time points, except possibly their common initial and/or final point.
\begin{theorem} \label{thm:DLSIG_main} The following statements hold on a single event of full probability. There exists a random countably infinite dense subset $\Xi$ of $\mathbb{R}$ such that parts \ref{itm:good_dir_coal}--\ref{itm:bad_dir_split} below hold. \begin{enumerate} [label=\rm(\roman{*}), ref=\rm(\roman{*})] \itemsep=3pt
\item \label{itm:all_dir} Every semi-infinite geodesic has a direction $\xi \in \mathbb{R}$. From each initial point $p \in \mathbb{R}^2$ and in each direction $\xi \in \mathbb{R}$, there exists at least one semi-infinite geodesic from $p$ in direction $\xi$.
\item \label{itm:good_dir_coal} When $\xi \notin \Xi$, all semi-infinite geodesics in direction $\xi$ coalesce. There exists a random set of initial points, of zero planar Lebesgue measure, outside of which the semi-infinite geodesic in each direction $\xi \notin \Xi$ is unique.
\item \label{itm:bad_dir_split} When $\xi \in \Xi$, there exist at least two families of semi-infinite geodesics in direction $\xi$, called the $\xi -$ and $\xi +$ geodesics. From every initial point $p \in \mathbb{R}^2$ there exists both a $\xi -$ geodesic and a $\xi +$ geodesic which eventually separate and never come back together. All $\xi -$ geodesics coalesce, and all $\xi +$ geodesics coalesce.
\end{enumerate}
\end{theorem}
\begin{remark}[Busemann geodesics and general geodesics] Theorem \ref{thm:DLSIG_main} is proved by controlling all semi-infinite geodesics with Busemann geodesics. Namely, from each initial point $p$ and in each direction $\xi$, all semi-infinite geodesics lie between the leftmost and rightmost Busemann geodesics (Theorem~\ref{thm:all_SIG_thm_intro}\ref{itm:DL_LRmost_SIG}). Furthermore, for all $p$ outside a random set of Lebesgue measure zero and all $\xi \notin \Xi$, the two extreme Busemann geodesics coincide and thereby imply the uniqueness of the semi-infinite geodesic from $p$ in direction $\xi$ (Theorem~\ref{thm:DLSIG_main}\ref{itm:good_dir_coal}).
Even more generally, whenever $\xi \notin \Xi$, all semi-infinite geodesics in direction $\xi$ are Busemann geodesics (Theorem~\ref{thm:DL_good_dir_classification}\ref{itm:DL_allBuse}). This is presently unknown for $\xi \in \Xi$, but may be expected by virtue of what is known about exponential LPP \cite{Janjigian-Rassoul-Seppalainen-19}.
Our work therefore gives a nearly complete description of the global behavior of semi-infinite geodesics in the directed landscape. The conjecture that all semi-infinite geodesics are Busemann geodesics is equivalent to the following statement: In Item~\ref{itm:bad_dir_split}, for $\xi \in \Xi$, there are \textit{exactly} two families of coalescing semi-infinite geodesics in direction $\xi$. That is, each $\xi$-directed semi-infinite geodesic coalesces either with the $\xi-$ or the $\xi +$ geodesics. \end{remark}
\begin{remark}[Non-uniqueness of geodesics] The non-uniqueness of geodesics from initial points in a Lebesgue null set in Theorem~\ref{thm:DLSIG_main}\ref{itm:good_dir_coal} is temporary in the sense that these geodesics eventually coalesce. This forms a ``bubble''. The first point of intersection after the split is the coalescence point (Theorem~\ref{thm:DL_all_coal}\ref{itm:DL_split_return}). Hence, these particular geodesics form at most one bubble. This contrasts with the non-uniqueness of Theorem~\ref{thm:DLSIG_main}\ref{itm:bad_dir_split}, where geodesics do not return together (Figure~\ref{fig:non_unique_comp}). Non-uniqueness is discussed in detail in Section~\ref{sec:LR_sig}. \end{remark}
\begin{remark} The authors of~\cite{Rahman-Virag-21} alluded to non-uniqueness of geodesics. They showed that for a fixed initial point, with probability one, there are at most countably many directions with a non-unique geodesic. On page 23 of~\cite{Rahman-Virag-21}, they note that the set of directions with a non-unique geodesic ``should be dense over the real line.'' Our result is that this set is dense and, furthermore, it is the set $\Xi$ of discontinuities of the Busemann process. \end{remark}
\begin{figure}
\caption{\small On the left, a depiction of the non-uniqueness in Theorem~\ref{thm:DLSIG_main}\ref{itm:good_dir_coal}: geodesics separate and coalesce back together, forming a bubble. After the first version of the present article was posted, Bhatia~\cite{Bhatia-23} proved that this is the only possible configuration for this type of non-uniqueness---that is, geodesics which split and later coalesce can only split at the initial point. On the right, $\xi\in \Xi$. The blue/thin paths depict the $\xi -$ geodesics, while the red/thick paths depict the $\xi +$ geodesics. From each point, the $\xi -$ and $\xi +$ geodesics separate at points of $\mathfrak S$. The $\xi -$ and $\xi +$ families each have a coalescing structure.}
\label{fig:non_unique_comp}
\end{figure}
The last theorem of this section describes the set of initial points with disjoint geodesics in the same direction. Let $\Xi$ be the random set from Theorem~\ref{thm:DLSIG_main} (precisely characterized in~\eqref{eqn:DLBuseDC_def}). Define the following random sets of splitting points.
\begin{align}
\mathfrak S_{s,\xi} &:= \{x \in \mathbb{R}: \exists \text{
\textbf{disjoint}} \text{ }\text{semi-infinite geodesics from }(x,s) \text{ in direction }\xi\} \label{Split_sdir} \\
\mathfrak S &:= \bigcup_{s \hspace{0.9pt}\in\hspace{0.9pt} \mathbb{R}, \, \xi\hspace{0.9pt} \in\hspace{0.9pt} \Xi} \mathfrak S_{s,\xi} \times \{s\}. \label{eqn:gen_split_set}
\end{align}
\begin{remark}
From Theorem~\ref{thm:DLSIG_main}\ref{itm:good_dir_coal}, $\mathfrak S_{s,\xi} = \varnothing$ whenever $\xi \notin \Xi$.
\end{remark}
\begin{theorem} \label{thm:Split_pts} The following hold.
\begin{enumerate} [label=\rm(\roman{*}), ref=\rm(\roman{*})] \itemsep=3pt
\item \label{itm:split_dense}
On a single event of full probability, the set $\mathfrak S$ is dense in $\mathbb{R}^2$.
\item \label{itm:splitp0} For each fixed $p \in \mathbb{R}^2$, $\mathbb P(p \in \mathfrak S) = 0$.
\item \label{itm:Hasudorff1/2} For each $s \in \mathbb{R}$, on an $s$-dependent full-probability event, for every $\xi \in \Xi$, the set $\mathfrak S_{s,\xi}$ has Hausdorff dimension $\frac{1}{2}$.
\item \label{itm:nonempty} On a single event of full probability, simultaneously for every $s \in \mathbb{R}$ and $\xi \in \Xi$, the set $\mathfrak S_{s,\xi}$ is nonempty and unbounded in both directions. \end{enumerate} \end{theorem}
\begin{remark} \label{rmk:supports} For each $s \in \mathbb{R}$ and $\xi \in \Xi$, the set $\mathfrak S_{s,\xi}$ has an interpretation as the support of a random measure, up to the removal of a countable set. Thus, since $\Xi$ is countable, for each $s \in \mathbb{R}$, the set $\{x \in \mathbb{R}: (x,s) \in \mathfrak S\}$ is the countable union of supports of random measures, up to the removal of an at most countable set. By Item~\ref{itm:Hasudorff1/2}, this set also has Hausdorff dimension $\frac{1}{2}$. Conditioning in the appropriate Palm sense on $\xi \in \Xi$, the random measure whose support is ``almost'' $\mathfrak S_{s,\xi}$ is equal to the local time of a Brownian motion (Theorems~\ref{thm:random_supp}, \ref{thm:BusePalm}, and~\ref{thm:indep_loc}). We expect that, simultaneously for all $s\in\mathbb{R}$, the set $\mathfrak S_{s,\xi}$ has Hausdorff dimension $\frac{1}{2}$, but currently lack a global result stronger than Item~\ref{itm:nonempty}. \end{remark}
\section{ Invariance and uniqueness of the stationary horizon under the KPZ fixed point} \label{sec:invariance} In this section, we prove Theorem~\ref{thm:invariance_of_SH}. Take $\{G_\xi\}_{\xi \in \mathbb{R}}$ as the initial data of the KPZ fixed point, where $G$ is the stationary horizon, independent of $\{\mathcal{L}(x,0;y,t):x,y \in \mathbb{R}, t > 0\}$. For $\xi \in \mathbb{R}$, set \[ h_t(y;G_\xi) = \sup_{x \in \mathbb{R}}\{G_\xi(x) + \mathcal{L}(x,0;y,t)\},\qquad\text{for all }y\in\mathbb{R} \text{ and } t > 0. \] Define the following state space: \begin{equation}\label{Y} \begin{aligned} \mathcal{Y} &:= \bigl\{\{\mathfrak h^\xi\}_{\xi \in \mathbb{R}} \in D(\mathbb{R},C(\mathbb{R})): \mathfrak h^{\xi_1} \;{\le}_{\rm inc}\; \mathfrak h^{\xi_2} \text{ for }\xi_1 < \xi_2,\\ &\qquad\qquad \text{ and for all }\xi \in \mathbb{R}, \; \mathfrak h^\xi(0) = 0 \text{ and } \mathfrak h^\xi \text{ satisfies condition~\eqref{eqn:drift_assumptions}} \\ &\qquad\qquad\qquad \text{ with all $\limsup$ and $\liminf$ terms finite}\bigr\}. \end{aligned} \end{equation}
\begin{lemma} \label{lem:KPZ_preserve_Y}
The space $\mathcal{Y}$ defined in~\eqref{Y} is a Borel subset of $D(\mathbb{R},C(\mathbb{R}))$. Let $\mathcal{L}$ be the directed landscape, $\{\mathfrak h^\xi\}_{\xi \in \mathbb{R}} \in \mathcal{Y}$, $h_0(\aabullet;\mathfrak h^\xi)=\mathfrak h^\xi$, and
\[ h_t(y;\mathfrak h^\xi) = \sup_{x \in \mathbb{R}}\{\mathfrak h^\xi(x) + \mathcal{L}(x,0;y,t)\} \qquad \text{for } \ t>0, \ y\in\mathbb{R} \text{ and } \xi \in \mathbb{R}. \] Then
$t \mapsto \{h_t(\aabullet;\mathfrak h^\xi) - h_t(0;\mathfrak h^\xi) \}_{\xi \in \mathbb{R}}$ is a Markov process on $\mathcal{Y}$. Specifically, on the event of full probability from Lemma~\ref{lem:Landscape_global_bound}, $\{h_t(\aabullet;\mathfrak h^\xi) - h_t(0;\mathfrak h^\xi) \}_{\xi \in \mathbb{R}} \in \mathcal{Y}$ for each $t > 0$. \end{lemma} \begin{proof} Borel measurability of $\mathcal{Y}$ is standard and left to the reader.
We show that $\{h_t(\aabullet;\mathfrak h^\xi)-h_t(0;\mathfrak h^\xi)\}_{\xi \in \mathbb{R}} \in \mathcal{Y}$ for all $t > 0$. Lemma~\ref{lem:DL_crossing_facts}\ref{itm:KPZ_crossing_lemma} shows the preservation of the ordering of functions, Lemma~\ref{lem:KPZ_preserve_lim} shows the preservation of limits, and Lemma~\ref{lem:max_restrict}\ref{itm:KPZcont} shows that $h_t(\aabullet;\mathfrak h^\xi) \in C(\mathbb{R})$ for all $\xi$. It remains to show that $\{h_t(\aabullet;\mathfrak h^\xi)\}_{\xi \in \mathbb{R}} \in D(\mathbb{R},C(\mathbb{R}))$ for each $t > 0$. Since $\mathfrak h^{\xi_1} \;{\le}_{\rm inc}\; \mathfrak h^{\xi_2}$, Lemma~\ref{lemma:max_monotonicity} and the global bounds of Lemma~\ref{lem:Landscape_global_bound} imply that, for each compact $K \subseteq \mathbb{R}$ and $\xi \in \mathbb{R}$, there exists a random $M = M(\xi,t,K) > 0$ such that for all $y \in K$, $\alpha \in (\xi - 1,\xi + 1)$, \[ \sup_{x \in \mathbb{R}}\{\mathfrak h^\alpha(x) + \mathcal{L}(x,0;y,t)\} = \sup_{x \in [-M,M]}\{\mathfrak h^\alpha(x) + \mathcal{L}(x,0;y,t)\}. \] Then, it follows that $\{h_t(\aabullet;\mathfrak h^\xi)\}_{\xi \in \mathbb{R}}$, as an $\mathbb{R} \to C(\mathbb{R})$ function of $\xi$, is right-continuous with left limits because this is true of $\{\mathfrak h^\xi\}_{\xi \in \mathbb{R}}$.
By the metric composition~\eqref{eqn:metric_comp} of the directed landscape $\mathcal{L}$, for $0 < s < t$, \begin{align*}
h_t(y;\mathfrak h^\xi) - h_t(0;\mathfrak h^\xi)
&=\sup_{x \in \mathbb{R}}\{h_s(x;\mathfrak h^\xi) -h_s(0;\mathfrak h^\xi)+ \mathcal{L}(x,s;y,t)\}\\ &\qquad\qquad - \sup_{x \in \mathbb{R}}\{h_s(x;\mathfrak h^\xi)- h_s(0;\mathfrak h^\xi) + \mathcal{L}(x,s;0,t)\}.
\end{align*} The process $t \mapsto \{h_t(\aabullet;\mathfrak h^\xi) - h_t(0;\mathfrak h^\xi)\}_{\xi \in \mathbb{R}}$ is Markovian by the independent temporal increments of $\mathcal{L}$. \end{proof}
\begin{proof}[Proof of Theorem~\ref{thm:invariance_of_SH}]
\textbf{Invariance:}
For the invariance of SH $G$, it suffices to prove the invariance of a finite-dimensional marginal $(G_{ \xi_1},\ldots,G_{ \xi_k})$ for given $-\infty < \xi_1 < \cdots < \xi_k < \infty$. So for
\begin{equation} \label{605} h_t(y;G_{\xi_i}) = \sup_{x \in \mathbb{R}}\{G_{\xi_i}(x) + \mathcal{L}(x,0;y,t)\}, \quad 1 \le i \le k, \end{equation} the goal is to show that for each $t > 0$, \begin{equation} \label{606} \bigl(h_t(\aabullet;G_{\xi_1})-h_t(0;G_{\xi_1}),\ldots, h_t(\aabullet;G_{\xi_k})-h_t(0;G_{\xi_k})\bigr) \deq (G_{\xi_1},\ldots,G_{\xi_k}). \end{equation}
We prove \eqref{606} via a limit using stability of discrete queues. For $N \in \mathbb{Z}_{>0}$ and $1 \le i \le k$, set $\rho_i = \frac{1}{2} - 2^{-4/3}\xi_i N^{-1/3}$ and $\boldsymbol \rho^k = (\rho_1,\ldots,\rho_k)$. Let $\mu^{\boldsymbol \rho^k}$ be the probability distribution on $(\mathbb{R}_{>0}^\mathbb{Z})^k$ defined in~\eqref{mu_def} in Appendix \ref{sec:cgm-bus}. It is the joint distribution of $k$ horizontal Busemann functions of the exponential corner growth model by Theorem \ref{thm:exp_Buse_dist}. Let $(I^{N,1},\ldots,I^{N,k})$ be a $\mu^{\boldsymbol \rho^k}$-distributed $k$-tuple of random, positive bi-infinite sequences $I^{N,i}=(I^{N,i}_j)_{j\in\mathbb{Z}}$.
For $1 \le i \le k$, let $F^N_{i}:\mathbb{R} \to \mathbb{R}$ be the linear interpolation of the function defined by
\[ F^N_i(0)=0 \ \ \text{ and } \ \ F^N_i(m)-F^N_i(\ell)= \textstyle\sum_{j = \ell+1}^m I^{N,i}_j \quad\text{ for integers } \ell<m.
\]
Its scaled and centered version is defined by \begin{equation} \label{eqn:GN} G_{i}^N(x) = 2^{-4/3}N^{-1/3}\bigl[F_{i}^N(2^{5/3} N^{2/3}x ) - 2^{8/3}N^{2/3} x\bigr] \qquad\text{for } \ x\in\mathbb{R}. \end{equation} Theorems~\ref{thm:exp_Buse_dist} and~\ref{thm:conv_to_SH} give the distributional limit \begin{equation} \label{eqn:conv_of_finite_GN} (G_{1}^N,\ldots,G_{k}^N) \Longrightarrow (G_{\xi_1},\ldots,G_{\xi_k}), \end{equation} on the space $C(\mathbb{R},\mathbb{R}^k)$, under the Polish topology of uniform convergence of functions on compact sets.
For $N \in \mathbb{N}$ sufficiently large and $1 \le i \le k$, we consider discrete LPP with initial data $F_i^N$ and exponential weights, as in \eqref{eqn:LPP_bd} in Appendix~\ref{sec:LPP}. For $m \in \mathbb{Z}$ and $n \in \mathbb{Z}_{>0}$ let \[ d_i^N(m,n) = \sup_{\ell\,: \,\ell \le m}\{F_i^N(\ell) + d((\ell,1),(m,n))\}. \] The scaled and centered version is given by $H_{i,0}^N = G_i^N$ and for $t > 0$ by letting $H_{i,t}^N:\mathbb{R} \to \mathbb{R}$ be the linear interpolation of \begin{equation} \label{600} H_{i,t}^N(y) = 2^{-4/3}N^{-1/3}\bigl[d^N_i(tN + 2^{5/3}N^{2/3} y,tN) - 4Nt - 2^{8/3}N^{2/3} y \bigr]. \end{equation}
By Lemma~\ref{lem:D_and_LPP_bd} and Theorem~\ref{thm:mu_invariant}, $\forall N \in \mathbb{Z}_{>0}$ and $t > 0$ such that $tN \in \mathbb{Z}$, \[ \bigl(H_{1,t}^N(\aabullet) - H_{1,t}^N(0),\ldots, H_{k,t}^N(\aabullet) - H_{k,t}^N(0)\bigr) \deq (G_1^N,\ldots,G_k^N). \] Then, using~\eqref{eqn:conv_of_finite_GN}, the proof of~\eqref{606} is completed by the following lemma. \begin{lemma} Let $(G_{\xi_1},\ldots,G_{\xi_k})$ be independent of $\{\mathcal{L}(x,0;y,t):x,y \in \mathbb{R}, t > 0\}$ and $h_t(y;G_{\xi_i})$ defined by \eqref{605}. Then for $t > 0$, as $N \to \infty$, in the topology of uniform convergence on compact sets of functions $\mathbb{R} \to \mathbb{R}^k$, we have the distributional limit \begin{equation}\label{goal46} (H_{1,t}^N(\aabullet),\ldots, H_{k,t}^N(\aabullet)) \Longrightarrow (h_t(\aabullet;G_{\xi_1}),\ldots,h_t(\aabullet;G_{\xi_k})). \end{equation}
\end{lemma} \begin{proof} Replace the integer $\ell$ with a continuous variable $x$: \begin{align}
\label{601} &H_{i,t}^N(y) \; = \sup_{-\infty < \;\ell \;\le tN + 2^{5/3} N^{2/3}y} \;
2^{-4/3}N^{-1/3}\Bigl[F_i^N(\ell) \\
\nonumber &\qquad\qquad\qquad + \; d\bigl((\ell,1),(tN + 2^{5/3} N^{2/3}y,tN)\bigr) - 4Nt - 2^{8/3} N^{2/3} y\Bigr] \\
&= \sup_{-\infty \hspace{0.9pt}<\hspace{0.9pt} 2^{5/3}N^{2/3} x \;\le\; tN + 2^{5/3}N^{2/3}y}
\; 2^{-4/3}N^{-1/3}\Bigl[F_i^N(2^{5/3} xN^{2/3}) - 2^{8/3} N^{2/3}x\nonumber \\
&\qquad + d\bigl((2^{5/3} xN^{2/3},1),(tN + 2^{5/3} N^{2/3}y,tN)\bigr) - 4Nt - 2^{8/3} N^{2/3} (y - x)\Bigr] \nonumber \\
&= \; \sup_{x \in \mathbb{R}} \bigl\{G_i^N(x) + \mathcal{L}_N(x,0;y,t)\bigr\}, \label{602} \end{align} where $G_i^N$ is defined in~\eqref{eqn:GN} and \[ \mathcal{L}_N(x,0;y,t) = \frac{d((2^{5/3} xN^{2/3},1),(tN + 2^{5/3} N^{2/3}y,tN)) - 4Nt - 2^{8/3} N^{2/3} (y - x)}{2^{4/3}N^{1/3}} \] when $\hspace{0.9pt} x \le y + 2^{-5/3} N^{1/3} t\,$ and $-\infty$ otherwise.
Let $Z_i^N(y)$ denote the largest maximizer of~\eqref{601}. It is precisely the exit point defined in Equation~\eqref{eqn:exit_pt}. These satisfy $Z_i^N(x) \le Z_i^N(y)$ for $x < y$. If there exists some $M > 0$ such that $|Z_i^N(y)| \le M 2^{5/3}N^{2/3}$, then
\[ \text{\rm line \eqref{602}} = \sup_{x \in [-M,M]}\{G_i^N(x) + \mathcal{L}_N(x,0;y,t)\}. \]
By the weak limit \eqref{eqn:conv_of_finite_GN}, Theorem~\ref{thm:conv_to_DL}, and independence, Skorokhod representation (\cite[Thm.~11.7.2]{dudl}, \cite[Thm.~3.1.8]{ethi-kurt}) gives a coupling of copies of $\{(G_i^N)_{1 \le i \le k},\mathcal{L}_N\}$ and $\{(G_{\xi_i})_{1 \le i \le k},\mathcal{L}\}$ such that $G_i^N \to G_{\xi_i}$ for $1 \le i \le k$ and $\mathcal{L}_N \to \mathcal{L}$, almost surely and uniformly on compacts. Then, for $a<b$, $M > 0$ and $\varepsilon > 0$, in this coupling we have \begin{align}
&\widehat{\mathbb P}\Big(\max_{1 \le i \le k} \sup_{y \in [a,b]} |H_{i,t}^N(y) - h_t(y;G_{\xi_i})| > \varepsilon \Big) \nonumber \\
\label{605a} &\le \ \widehat{\mathbb P}\Big(\max_{1 \le i \le k} \sup_{y \in [a,b]} |\sup_{x \in [-M,M]}\{G_i^N(x) + \mathcal{L}_N(x,0;y,t)\}\\
&\qquad\qquad\qquad - \sup_{x \in [-M,M]}\{G_{\xi_i}(x) + \mathcal{L}(x,0;y,t)\}| > \varepsilon \Big) \nonumber \\
\label{606a} &\qquad + \; \widehat{\mathbb P}\Bigl(\;\sup_{x \in \mathbb{R}}\{G_{\xi_i}(x) + \mathcal{L}(x,0;y,t)\} > \sup_{x \in [-M,M]}\{G_{\xi_i}(x) + \mathcal{L}(x,0;y,t)\} \Bigr) \\
&\qquad + \; \sum_{i = 1}^k \bigl[ \, \widehat{\mathbb P}(Z_i^N(a) < -M2^{5/3} N^{2/3}) + \widehat{\mathbb P}(Z_i^N(b) > M 2^{5/3}N^{2/3}) \,\bigr] . \label{607a} \end{align} Above, \eqref{605a} vanishes as $N\to\infty$ by the coupling. \eqref{606a} vanishes as $M\to\infty$ by Lemma~\ref{lem:Landscape_global_bound} because $G_{\xi_i}$ is a Brownian motion with drift, independent of $\{\mathcal{L}(x,0;y,t): x,y\in\mathbb{R}, t > 0\}$. Lemma~\ref{lemma:line_exit_pt} controls \eqref{607a}. This combination verifies the goal \eqref{goal46}. \end{proof}
\noindent \textbf{Attractiveness and uniqueness:} The proof idea is similar to that of Theorem 3.3 in~\cite{Bakhtin-Cator-Konstantin-2014}.
Let $k\in\mathbb{N}$ and let $\bar{\xi} = (\xi_1,\ldots,\xi_k) \in \mathbb{R}^k$ be a strictly increasing vector. Let $\bar{\mathfrak h}=(\mathfrak h^1,...,\mathfrak h^k)$ be a vector of functions in $\operatorname{UC}$ and satisfying~\eqref{eqn:drift_assumptions} with $\mathfrak h = \mathfrak h^i$ and $\xi = \xi_i$ for $1 \le i \le k$.
Let $\varepsilon > 0$. By Theorem~\ref{thm:SH10}\ref{itm:SH_j}, there exists $\delta > 0$ such that
\[
\mathbb P\bigl\{G_{\xi_i \pm \delta}(x) = G_{\xi_i}(x) \ \,\forall x \in [-a,a], \, 1 \le i \le k\bigr\} \ge 1 - {\varepsilon}/{2}.
\]
Then, by invariance of the stationary horizon under the KPZ fixed point, for all $t > 0$,
\begin{equation}\label{lb1}\begin{aligned}
\mathbb P\bigl\{h_t(x;G_{\xi_i\pm \delta})-h_t(0;G_{\xi_i\pm\delta})&= h_t(x;G_{\xi_i})-h_t(0;G_{\xi_i}) \\ &\qquad \forall x\in[-a,a], \, 1 \le i \le k\bigr\} \ge 1 - {\varepsilon}/{2}. \end{aligned}\end{equation} Recall the sets $Z^{a,0,t}_f$ of exit points from~\eqref{exitpt}. By Theorem~\ref{thm:RV-SIG-thm}\ref{itm:pd_fixed}, Theorem~\ref{thm:RV-Buse}\ref{itm:DL_Buse_var}, because $G_{\xi_i\pm\delta}$ satisfies~\eqref{eqn:drift_assumptions} with drift $\xi_i\pm\delta$, and by the temporal reflection symmetry of Lemma~\ref{lm:landscape_symm}, Lemma~\ref{lem:unq} implies that for all $t$ sufficiently large, \begin{equation} \label{largp} \mathbb P\bigl(Z^{a,0,t}_{G_{\xi_i-\delta}} \leq Z^{a,0,t}_{\mathfrak h^i}\leq Z^{a,0,t}_{G_{\xi_i+\delta}} \ \, \forall \hspace{0.7pt} 1 \le i \le k\bigr) > 1 - {\varepsilon}/{2}, \end{equation} where for $A,B\subseteq \mathbb{R}$ we wrote $A\leq B$ if $\sup A\leq \inf B$. By Lemma~\ref{lem:DL_crossing_facts}\ref{itm:KPZ_crossing_lemma}, on the event in~\eqref{largp} the following holds for all $x \in [0,a]$ and $1 \le i \le k$: \begin{equation}\label{lb2}
h_t(x;G_{\xi_i-\delta})-h_t(0;G_{\xi_i-\delta})\leq h_t(x;\mathfrak h^i)-h_t(0;\mathfrak h^i)\leq h_t(x;G_{\xi_i+\delta})-h_t(0;G_{\xi_i+\delta}). \end{equation} The reverse inequalities hold for $x \in [-a,0]$.
Combining \eqref{lb1}--\eqref{lb2}, we have that for sufficiently large $t$, \[
\mathbb P\bigl\{h_t(x;G_{\xi_i})-h_t(0;G_{\xi_i})= h_t(x;\mathfrak h^i)-h_t(0;\mathfrak h^i)\ \, \forall x\in[-a,a],1 \le i \le k\bigr\} \ge 1 - \varepsilon. \] The proof of Theorem~\ref{thm:invariance_of_SH} is complete. \end{proof}
\section{Summary of the Rahman--Vir\'ag results} \label{sec:RV_summ} The paper~\cite{Rahman-Virag-21} shows existence of the Busemann function for a fixed direction. Below is a summary of their results that we use.
\begin{theorem}[\cite{Rahman-Virag-21}]\label{thm:RV-SIG-thm} The following hold. \begin{enumerate} [label=\rm(\roman{*}), ref=\rm(\roman{*})] \itemsep=3pt
\item \label{itm:p_fixed} For fixed initial point $p$, there exist almost surely leftmost and rightmost semi-infinite geodesics $g_p^{\xi,\ell}$ and $g_p^{\xi,r}$ from $p$ in every direction $\xi$ simultaneously. There are at most countably many directions $\xi$ such that $g_p^{\xi,\ell}\neq g_p^{\xi,r}$.
\item \label{itm:d_fixed} For fixed direction $\xi$, there exist almost surely leftmost and rightmost geodesics $g_p^{\xi,\ell}$ and $g_p^{\xi,r}$ in direction $\xi$ from every initial point $p$.
\item \label{itm:pd_fixed} For fixed $p =(x,s) \in \mathbb{R}^2$ and $\xi \in \mathbb{R}$, $g := g_p^{\xi,\ell}= g_p^{\xi,r}$ with probability one.
\item \label{itm:fixed_coal} Given $\xi\in\mathbb{R}$, all semi-infinite geodesics in direction $\xi$ coalesce with probability one. \end{enumerate} \end{theorem} \begin{remark}
Article \cite{Rahman-Virag-21} used $-$ and $+$ in place of the superscripts $\ell$ and $r$ used above. We replaced $-/+$ with $\ell/r$ to avoid confusion with our $\pm$ notation that links with the left- and right-continuous Busemann processes. As demonstrated in Section~\ref{sec:LR_sig}, non-uniqueness of geodesics is properly characterized by two parameters $\sigg \in \{-,+\}$ and $S \in \{L,R\}$. \end{remark}
For fixed direction $\xi$,~\cite{Rahman-Virag-21} defines $\kappa^\xi(p,q)$ as the coalescence point of the rightmost geodesics in direction $\xi$ from initial points $p$ and $q$. Then, they define the Busemann function \begin{equation}\label{RVW-def} W_\xi(p;q) = \mathcal{L}(p;\kappa^\xi(p,q)) - \mathcal{L}(q;\kappa^\xi(p,q)). \end{equation}
\begin{theorem}[\cite{Rahman-Virag-21}, Corollary 3.3, Theorem 3.5, Remark 3.1] \label{thm:RV-Buse} $ $ \begin{enumerate}[label=\rm(\roman{*}), ref=\rm(\roman{*})] \itemsep=3pt
\item \label{itm:DL_Buse_BM} For each $t \in \mathbb{R}$, the process $x \mapsto W_\xi(x,t;0,t)$ is a two-sided Brownian motion with diffusivity $\sqrt 2$ and drift $2\xi$. \end{enumerate} Given a direction $\xi$, the following hold on a $\xi$-dependent event of probability one. \begin{enumerate} [resume, label=\rm(\roman{*}), ref=\rm(\roman{*})] \itemsep=3pt \item \label{itm:fixed_additive} Additivity: $W_\xi(p;q) + W_\xi(q;r) = W_\xi(p;r)$ for all $p,q,r \in \mathbb{R}^2$.
\item \label{itm:DL_Buse_var} For all $s < t$ and $x,y \in \mathbb{R}$,
\[
W_\xi(x,s;y,t) = \sup_{z \in \mathbb{R}}\{\mathcal{L}(x,s;z,t) + W_\xi(z,t;y,t)\}.
\]
The supremum is attained exactly at those $z$ such that $(z,t)$ lies on a semi-infinite geodesic from $(x,s)$ in direction $\xi$.
\item \label{itm:DL_Buse_cont} The function $W_\xi:\mathbb{R}^4 \to \mathbb{R}$ is continuous. \end{enumerate} Moreover: \begin{enumerate} [resume, label=\rm(\roman{*}), ref=\rm(\roman{*})] \itemsep=3pt \item \label{itm:DL_Buse_mont} For a pair of fixed directions $\xi_1 < \xi_2$, with probability one, for every $t \in \mathbb{R}$ and
$x < y$, $W_{\xi_1}(y,t;x,t) \le W_{\xi_2}(y,t;x,t)$.
\end{enumerate} \end{theorem}
\section{Busemann process and Busemann geodesics} \label{sec:Buse_geod_results} With the intention of being accessible to a large audience, in this section, we first present a list of theorems regarding the Busemann process in Section~\ref{sec:Buse_results}. Section~\ref{sec:DL_SIG_intro} defines Busemann geodesics and states their main properties. The proofs are found in Section~\ref{sec:DL_Buse_cons}, except for the proofs of Theorem~\ref{thm:DL_Buse_summ}\ref{itm:BuseLim1}-\ref{itm:global_attract} and the mixing in Theorem~\ref{thm:Buse_dist_intro}\ref{itm:stationarity}, which are proved in Section~\ref{sec:Buseextraproofs}, and Theorem~\ref{thm:DLBusedc_description}\ref{itm:Busedc_t}, which is proved in Section~\ref{sec:last_proofs}. \subsection{The Busemann process} \label{sec:Buse_results}
The Busemann process $\{W_{\xi \sig}(p;q)\}$ is indexed by points $p,q \in \mathbb{R}^2$, a direction $\xi \in \mathbb{R}$, and a sign $\sigg \in \{-,+\}$. The following theorems describe this global process. The parameter $\sigg \in \{-,+\}$ denotes the left- and right-continuous versions of this process as a function of $\xi$.
\begin{theorem} \label{thm:DL_Buse_summ}
On $(\Omega,\mathcal{F},\mathbb P)$, there exists a process \[ \{W_{\xi \sig}(p;q): \xi \in \mathbb{R}, \, \sigg \in \{-,+\}, \, p,q \in \mathbb{R}^2\} \] satisfying the following properties. All the properties below hold on a single event of probability one, simultaneously for all directions $\xi \in \mathbb{R}$, signs $\sigg \in \{-,+\}$, and points $p,q \in \mathbb{R}^2$, unless otherwise specified. Below, for $p,q \in \mathbb{R}^2$, we define the sets \begin{equation} \label{eqn:DLBuseDC_def} \Xi(p;q) = \{\xi \in \mathbb{R}: W_{\xi -}(p;q) \neq W_{\xi +}(p;q)\}\qquad\text{and}\qquad\Xi = \textstyle\bigcup_{p,q \,\in\, \mathbb{R}^2} \Xi(p;q). \end{equation} \begin{enumerate} [label=\rm(\roman{*}), ref=\rm(\roman{*})] \itemsep=3pt \item{\rm(Continuity)} \label{itm:general_cts} As an $\mathbb{R}^4 \to \mathbb{R}$ function, $(x,s;y,t) \mapsto W_{\xi \sig}(x,s;y,t)$ is continuous.
\item {\rm(Additivity)} \label{itm:DL_Buse_add} For all $p,q,r \in \mathbb{R}^2$,
$W_{\xi \sig}(p;q) + W_{\xi \sig}(q;r) = W_{\xi \sig}(p;r)$. In particular, $W_{\xi \sig}(p;q) = -W_{\xi \sig}(q;p)$ and $W_{\xi \sig}(p;p) = 0$.
\item {\rm(Monotonicity along a horizontal line)}
\label{itm:DL_Buse_gen_mont} Whenever $\xi_1< \xi_2$, $x < y$, and $t \in \mathbb{R}$,
\[
W_{\xi_1 -}(y,t;x,t) \le W_{\xi_1 +}(y,t;x,t) \le W_{\xi_2 -}(y,t;x,t) \le W_{\xi_2 +}(y,t;x,t).
\]
\item {\rm(Backwards evolution as the KPZ fixed point)}\label{itm:Buse_KPZ_description} For
all $x,y \in \mathbb{R}$ and $s < t$,
\begin{equation}\label{W_var}
W_{\xi \sig}(x,s;y,t) = \sup_{z \in \mathbb{R}}\{\mathcal{L}(x,s;z,t) + W_{\xi \sig}(z,t;y,t)\}.
\end{equation}
\item {\rm(Regularity in the direction parameter)}
\label{itm:DL_unif_Buse_stick}
The process $\xi\mapsto W_{\xi +}$ is right-continuous in the sense of uniform convergence on compact sets of functions $\mathbb{R}^4 \to \mathbb{R}$, and $\xi\mapsto W_{\xi -}$ is left-continuous in the same sense. The restrictions to compact sets are locally constant in the parameter $\xi$: for each $\xi \in \mathbb{R}$ and compact set $K \subseteq \mathbb{R}^4$ there exists a random $\varepsilon =\varepsilon(\xi,K)>0$ such that, whenever $\xi - \varepsilon < \alpha < \xi < \beta < \xi + \varepsilon$ and $\sigg \in \{-,+\}$, we have these equalities for all $(x,s;y,t) \in K$:
\begin{equation} \label{208}
W_{\alpha \sig}(x,s;y,t) = W_{\xi -}(x,s;y,t)\qquad\text{and}\qquad W_{\beta \sig}(x,s;y,t) = W_{\xi +}(x,s;y,t).
\end{equation}
\item {\rm(Busemann limits I)} \label{itm:BuseLim1} If $\xi \notin \Xi$, then, for any compact set $K \subseteq \mathbb{R}^2$ and any net $r_t= (z_t,u_t)_{t \in \mathbb{R}_{\ge 0}}$ with $u_t \to \infty$ and $z_t/u_t \to \xi$ as $t \to \infty$, there exists $R \in \mathbb{R}_{>0}$ such that, for all $p,q \in K$ and $t \ge R$,
\[
W_{\xi}(p;q) = \mathcal{L}(p;r_t) - \mathcal{L}(q;r_t).
\]
\item {\rm(Busemann limits II)} \label{itm:BuseLim2} For all $\xi \in \mathbb{R}$, $s \in \mathbb{R}$, $x < y \in \mathbb{R}$, and any net $(z_t,u_t)_{t \in \mathbb{R}_{\ge 0}}$ in $\mathbb{R}^2$ such that $u_t \to \infty$ and $z_t/u_t \to \xi$ as $t \to \infty$,
\begin{align*}
W_{\xi -}(y,s;x,s) &\le \liminf_{t \to \infty} \mathcal{L}(y,s;z_t,u_t) - \mathcal{L}(x,s;z_t,u_t) \\ &\le \limsup_{t \to \infty} \mathcal{L}(y,s;z_t,u_t) - \mathcal{L}(x,s;z_t,u_t) \le W_{\xi +}(y,s;x,s).
\end{align*}
\item {\rm(Global attractiveness)} \label{itm:global_attract} Assume that $\xi \notin \Xi$, and let $\mathfrak h \in \operatorname{UC}$ satisfy condition~\eqref{eqn:drift_assumptions} for the parameter $\xi$. For $s < t$, let
\[
h_{s,t}(x;\mathfrak h) = \sup_{z \in \mathbb{R}}\{\mathcal{L}(x,s;z,t) + \mathfrak h(z)\}.
\]
Then, for any $s \in \mathbb{R}$ and $a > 0$, there exists a random $t_0 = t_0(a,\xi,s)<\infty$ such that for all $t > t_0$ and $x \in [-a,a]$, $h_{s,t}(x;\mathfrak h) - h_{s,t}(0;\mathfrak h) = W_{\xi}(x,s;0,s)$.
\end{enumerate}
\end{theorem} \begin{remark}
Item~\ref{itm:BuseLim1} is novel in that it shows the limits simultaneously for all $\xi \notin \Xi$, uniformly over compact subsets of $\mathbb{R}^2$. The existence of Busemann limits in fixed directions
is shown in~\cite{Rahman-Virag-21} and~\cite{Ganguly-Zhang-2022a}. Item~\ref{itm:global_attract} is analogous to Theorem 3.3 in~\cite{Bakhtin-Cator-Konstantin-2014} and Theorem 3.3 in~\cite{Bakhtin-Li-19} on the global solutions of the Burgers equation with random forcing. When comparing with~\cite{Bakhtin-Cator-Konstantin-2014,Bakhtin-Li-19}, note that our geodesics travel north while theirs head south.
\end{remark}
We describe the distribution of the Busemann process. The key to Item~\ref{itm:SH_Buse_process} is Theorem~\ref{thm:invariance_of_SH}. \begin{theorem} \label{thm:Buse_dist_intro} The following hold. \begin{enumerate} [label=\rm(\roman{*}), ref=\rm(\roman{*})] \itemsep=3pt \item {\rm(Independence)} \label{itm:indep_of_landscape} For each $T \in \mathbb{R}$, these processes are independent:
\begin{align*}
&\{W_{\xi \sig}(x,s;y,t): \xi \in \mathbb{R}, \,\sigg \in \{-,+\}, \, x,y \in \mathbb{R}, \, s,t \ge T \} \\[4pt]
&\qquad\qquad\qquad \text{and } \ \
\{\mathcal{L}(x,s;y,t): x,y \in \mathbb{R},\, s < t \le T\}.
\end{align*}
\item {\rm(Stationarity and mixing)} \label{itm:stationarity} The process
\begin{equation} \label{eqn:stat}
\{\mathcal{L}(v),W_{\xi \sig}(p;q):v \in \Rup, \, p,q \in \mathbb{R}^2, \,\xi \in \mathbb{R}, \,\sigg \in \{-,+\} \}
\end{equation}
is stationary and mixing under shifts in any space-time direction. More precisely, let $a,b \in \mathbb{R}$ not both $0$, and $z > 0$. Set $r_z = (az,bz)$. Then, the process~\eqref{eqn:stat} is stationary and mixing {\rm(}for fixed $a,b$ as $z \to +\infty${\rm)} under the transformation
\begin{align*}
\bigl\{\mathcal{L}(v), W_{\xi \sig}(p;q )\bigr\} \mapsto T_{z;a,b}\{\mathcal{L},W \} := \{\mathcal{L}(v + (r_z;r_z)), W_{\xi \sig}(p + r_z;q +r_z)\},
\end{align*}
where, on each side, the process is understood as a function of $(v,(p,q))\in\Rup \times \mathbb{R}^4$. Mixing means that, for all $k\in\mathbb{Z}_{>0}$, $\xi_1,\dotsc,\xi_k\in\mathbb{R}$, and Borel subsets $A,B \subseteq C(\Rup,\mathbb{R})\times C(\mathbb{R}^4,\mathbb{R})^k$,
\begin{align*} &\lim_{z \to \infty}\mathbb P\Bigl(\{\mathcal{L}, W_{\xi_{1:k}}\} \in A, \{T_{z;a,b} \mathcal{L}, T_{z;a,b}W_{\xi_{1:k}}\} \in B\Bigr) \\
&\qquad\qquad\qquad =\mathbb P\bigl( \{\mathcal{L}, W_{\xi_{1:k}}\} \in A\bigr) \mathbb P\bigl(\{\mathcal{L}, W_{\xi_{1:k}}\} \in B \bigr) .
\end{align*}
Above $W_{\xi_{1:k}}=(W_{\xi_{1}},\dotsc,W_{\xi_{k}})\in C(\mathbb{R}^4,\mathbb{R})^k$.
\item {\rm(Distribution along a time level)}\label{itm:SH_Buse_process} For each $t \in \mathbb{R}$, the following equality in distribution holds between random elements of the Skorokhod space $D(\mathbb{R},C(\mathbb{R}))$: \[ \{W_{\xi +}(\aabullet,t;0,t)\}_{\xi \in \mathbb{R}} \deq \bigl\{G_{\xi}(\aabullet) \bigr\}_{\xi \in \mathbb{R}}, \] where $G$ is the stationary horizon in Section \ref{sec:SHintro}, with diffusivity $\sqrt 2$ and drifts $2\xi$. \end{enumerate} \end{theorem} \begin{remark}
Combining Items \ref{itm:indep_of_landscape} and \ref{itm:SH_Buse_process} with Theorem~\ref{thm:DL_Buse_summ}\ref{itm:Buse_KPZ_description} gives a description of the Busemann process on the full plane $\mathbb{R}^2$. \end{remark}
We describe the random sets of Busemann discontinuities defined in~\eqref{eqn:DLBuseDC_def}.
\begin{theorem} \label{thm:DLBusedc_description} The following hold on a single event of probability one.
\begin{enumerate} [label=\rm(\roman{*}), ref=\rm(\roman{*})] \itemsep=3pt
\item \label{itm:Busedc_horiz_mont} For each $t \in \mathbb{R}$, the set $\Xi(x,t;-x,t)$ is nondecreasing as a function of $x \in \mathbb{R}_{\ge 0}$.
\item \label{itm:Busedc_t} For $s,\xi \in \mathbb{R}$, define the function
\begin{equation} \label{fsdir} x \mapsto f_{s,\xi}(x) := W_{\xi +}(x,s;0,s) - W_{\xi -}(x,s;0,s). \end{equation} Then, $\xi \in \Xi$ if and only if, for all $s \in \mathbb{R}$, \begin{equation} \label{bad_ub} \lim_{x \to \pm \infty} f_{s,\xi}(x) = \pm \infty.
\end{equation} In particular, simultaneously for all $s,x \in \mathbb{R}$ and all sequences $|x_k|\to\infty$,
\begin{equation} \label{eqn:dcset_union1}
\Xi = \bigcup_k \hspace{0.9pt}\Xi(x_k,s;x,s).
\end{equation}
\item \label{itm:DL_dc_set_count} The set $\Xi$ is countably infinite and dense in $\mathbb{R}$, while for each fixed $\xi \in \mathbb{R}$, $\mathbb P(\xi \in \Xi) = 0$. In particular, the full-probability event of the theorem can be chosen so that $\Xi$ contains no directions $\xi \in \mathbb{Q}$.
\item \label{itm:DL_Buse_no_limit_pts} For each $p \neq q$ in $\mathbb{R}^2$, the set $\Xi(p;q)$ is discrete, that is, has no limit points in $\mathbb{R}$. The function $\xi \mapsto W_{\xi -}(p;q) = W_{\xi +}(p;q)$ is constant on each open interval $I \subseteq (\mathbb{R} \setminus \Xi(p;q))$. For $t \in \mathbb{R}$ and $x < y$, $\Xi(y,t;x,t)$ is infinite and unbounded, both from above and below. \end{enumerate} Furthermore, \begin{enumerate} [resume, label=\rm(\roman{*}), ref=\rm(\roman{*})] \itemsep=3pt \item \label{itm:DLBusedcinvar} For $x,t,\nu \in \mathbb{R}$ and $c > 0$, the sets $\Xi(x,t;-x,t)$ satisfy the following distributional invariances: \[ \Xi(x,t;-x,t) \deq \Xi(x,0;-x,0) \deq -\Xi(x,0;-x,0) \deq c\Xi(c^{-2}x,0;-c^{-2}x,0) + c\nu. \] \end{enumerate} \end{theorem} \begin{remark} \label{rmk:shock_measure} Item~\ref{itm:Busedc_t} states that all discontinuities of the Busemann process are present on each horizontal ray. By Item~\ref{itm:DL_Buse_no_limit_pts} $\xi \mapsto W_{\xi \pm}(p;q)$ are the left- and right-continuous versions of a jump process. This function defines a random signed measure supported on a discrete set. When $p$ and $q$ lie on the same horizontal line, this function is monotone (Theorem~\ref{thm:DL_Buse_summ}\ref{itm:DL_Buse_gen_mont}) and the support of the measure is exactly the set of directions at which the coalescence point of semi-infinite geodesics jumps (Definition~\ref{def:coal_pt} and Theorems~\ref{thm:DL_eq_Buse_cpt_paths}--\ref{thm:Buse_pm_equiv}).
The discreteness of Item~\ref{itm:DL_Buse_no_limit_pts} allows us to view the sets $\Xi(x,t;-x,t)$ as well-defined point processes, and gives the statements in Item~\ref{itm:DLBusedcinvar} meaning. The set $\Xi$ itself is dense and it is not easy, a priori, to interpret it as a random object. However, by Items~\ref{itm:Busedc_horiz_mont} and~\ref{itm:Busedc_t}, $\Xi$ is the increasing union of the sets $\Xi(x,0;-x,0)$. \end{remark}
\subsection{Busemann geodesics} \label{sec:DL_SIG_intro} The study of Busemann geodesics starts with this definition.
\begin{definition} \label{def:LR_maxes} For $\xi \in \mathbb{R}$, $\sigg \in \{-,+\}$, $(x,s) \in \mathbb{R}^2$ and $t\in[s,\infty)$, let $g_{(x,s)}^{\xi \sig,L}(t)$ and $g_{(x,s)}^{\xi \sig,R}(t)$ denote, respectively, the leftmost and rightmost maximizer of $\mathcal{L}(x,s;y,t) + W_{\xi \sig}(y,t;0,t)$ over $y \in \mathbb{R}$. \end{definition} \begin{remark} The modulus of continuity bounds of the directed landscape recorded in Lemma~\ref{lem:Landscape_global_bound} imply that $\lim_{t \searrow s} g_{(x,s)}^{\xi \sig,L/R}(t) = x$, so we define $g_{(x,s)}^{\xi \sig,L/R}(s) = x$. \end{remark}
As noted earlier, Rahman and Vir\'ag \cite{Rahman-Virag-21} showed the existence of semi-infinite geodesics, almost surely for a fixed initial point across all directions and almost surely for a fixed direction across all initial points. We extend this simultaneously across both all initial points and directions. Theorem~\ref{thm:RV-Buse}\ref{itm:DL_Buse_var}, quoted from~\cite{Rahman-Virag-21}, states that for a {\it fixed} direction $\xi$, with probability one at times $t > s$, the maximizers $z$ of the function $\mathcal{L}(x,s;z,t) + W_{\xi}(z,t;0,t)$ are exactly the points on semi-infinite $\xi$-directed geodesics from $(x,s)$. Theorem~\ref{thm:DL_SIG_cons_intro} clarifies this on a global scale: across all directions, initial points and signs, one can construct semi-infinite geodesics from the Busemann process. Furthermore, $g_{(x,s)}^{\xi \sig,L}$ and $g_{(x,s)}^{\xi \sig,R}$ both define semi-infinite geodesics in direction $\xi$ and give the leftmost (or rightmost) geodesic between any two of their points. We use this heavily in the present paper.
\begin{theorem} \label{thm:DL_SIG_cons_intro}
The following hold on a single event of probability one across all initial points $(x,s) \in \mathbb{R}^2$, times $t>s$, directions $\xi \in \mathbb{R}$, and signs $\sigg \in \{-,+\}$.
\begin{enumerate} [label=\rm(\roman{*}), ref=\rm(\roman{*})] \itemsep=3pt
\item \label{itm:intro_SIG_bd}
All maximizers of $z\mapsto\mathcal{L}(x,s;z,t) + W_{\xi \sig}(z,t;0,t)$ are finite. Furthermore, as $x,s,t$ vary over a compact set $K\subseteq \mathbb{R}$ with $s \le t$, the set of all maximizers is bounded.
\item \label{itm:arb_geod_cons}
Let $s = t_0 < t_1 < t_2 < \cdots$ be an arbitrary increasing sequence with $t_n \to \infty$. Set $g(t_0) = x$, and for each $i \ge 1$, let $g(t_i)$ be \textit{any} maximizer of $\mathcal{L}(g(t_{i - 1}),t_{i - 1};z,t_i) + W_{\xi \sig}(z,t_i;0,t_i)$ over $z \in \mathbb{R}$. Then, pick \textit{any} geodesic of $\mathcal{L}$ from $(g(t_{i - 1}),t_{i - 1})$ to $(g(t_i),t_i)$, and for $t_{i - 1} < t < t_i$, let $g(t)$ be the location of this geodesic at time $t$. Then, regardless of the choices made at each step, the following hold.
\begin{enumerate} [label=\rm(\alph{*}), ref=\rm(\alph{*})]
\item \label{itm:g_is_geod} The path $g:[s,\infty)\to \mathbb{R}$ is a semi-infinite geodesic.
\item \label{itm:weight_of_geod} For all $ t < u$ in $[s,\infty)$,
\begin{equation} \label{eqn:SIG_weight}
\mathcal{L}(g(t),t;g(u),u) = W_{\xi \sig}(g(t),t;g(u),u).
\end{equation}
\item \label{itm:maxes} For all $ t < u$ in $[s,\infty)$, $g(u)$ maximizes $\mathcal{L}(g(t),t;z,u) + W_{\xi \sig}(z,u;0,u)$ over $z \in \mathbb{R}$.
\item \label{itm:geo_dir} The geodesic $g$ has direction $\xi$, i.e., $g(t)/t \to \xi$ as $t \to \infty$.
\end{enumerate}
\item \label{itm:DL_all_SIG} For
$S \in \{L,R\}$, $g_{(x,s)}^{\xi \sig,S}:[s,\infty) \to \mathbb{R}$ is a semi-infinite geodesic from $(x,s)$ in direction $\xi$. Moreover, for any $s \le t < u$, we have that
\[
\mathcal{L}\bigl(g_{(x,s)}^{\xi \sig,S}(t),t;g_{(x,s)}^{\xi \sig,S}(u),u\bigr) = W_{\xi \sig}\bigl(g_{(x,s)}^{\xi \sig,S}(t),t;g_{(x,s)}^{\xi \sig,S}(u),u\bigr),
\]
and $g_{(x,s)}^{\xi \sig,S}(u)$ is the leftmost/rightmost {\rm(}depending on $S${\rm)} maximizer of \\$\mathcal{L}(g_{(x,s)}^{\xi \sig,S}(t),t;z,u) + W_{\xi \sig}(z,u;0,u)$ over $z \in \mathbb{R}$.
\item \label{itm:DL_LRmost_geod}
The path $g_{(x,s)}^{\xi \sig,L}$ is the leftmost geodesic between any two of its points, and $g_{(x,s)}^{\xi \sig,R}$ is the rightmost geodesic between any two of its points.
\end{enumerate}
\end{theorem} \begin{definition} We refer to the geodesics constructed in Theorem~\ref{thm:DL_SIG_cons_intro}\ref{itm:arb_geod_cons} as $\xi \sig$ \textit{Busemann geodesics}, or simply {\it $\xi \sig$ geodesics}. \end{definition} \begin{remark} The geodesics $g_{(x,s)}^{\xi \sig,L}$ and $g_{(x,s)}^{\xi \sig,R}$ are special Busemann geodesics. By Theorem~\ref{thm:DL_SIG_cons_intro}\ref{itm:DL_all_SIG}--\ref{itm:DL_LRmost_geod}, for any sequence $s=t_0 < t_1< t_2 < \cdots$ with $t_n \to \infty$, the path $g = g_{(x,s)}^{\xi \sig,L}$ can be constructed by choosing $g(t_i)$ as the leftmost maximizer of $\mathcal{L}(g(t_{i - 1}),t_{i - 1};z,t_i) + W_{\xi \sig}(z,t_i;0,t_i)$ over $z \in \mathbb{R}$, and for $t \in (t_{i - 1},t_i)$, taking $g(t)$ to be the leftmost geodesic from $(g(t_{i - 1}),t_{i - 1})$ to $(g(t_i),t_i)$. The analogous statement holds for $L$ replaced with $R$ and ``leftmost'' replaced with ``rightmost''. \end{remark}
\subsection{Construction and proofs for the Busemann process and Busemann geodesics} \label{sec:DL_Buse_cons} This section proves the results of Sections~\ref{sec:Buse_results} and~\ref{sec:DL_SIG_intro}. The order in which the items are proved is somewhat delicate, so we outline that here. After proving some lemmas, we prove Theorem~\ref{thm:DL_Buse_summ}\ref{itm:general_cts}--\ref{itm:Buse_KPZ_description} and Theorem~\ref{thm:Buse_dist_intro}. We then skip ahead to constructing the semi-infinite geodesics, culminating in the proof of Theorem~\ref{thm:DL_SIG_cons_intro}. Afterward, we turn to the proof of the regularity in Theorem~\ref{thm:DL_Buse_summ}\ref{itm:DL_unif_Buse_stick}, then prove Theorem~\ref{thm:DLBusedc_description}, except for Item~\ref{itm:Busedc_t}, which is proved in Section~\ref{sec:last_proofs}.
We construct a full-probability event $\Omega_1$. Later in~\eqref{omega2} and~\eqref{omega3} follow full-probability events $\Omega_3 \subseteq \Omega_2 \subseteq \Omega_1$. For the rest of the proofs, we work almost exclusively on these events. Once the events are constructed and shown to have full probability, the remaining proofs are deterministic statements that hold on those events. \begin{equation} \label{omega1} \text{We define $\Omega_1 \subseteq \Omega$ to be the event of probability one on which the following hold.} \end{equation} \begin{enumerate} [label=\rm(\roman{*}), ref=\rm(\roman{*})] \itemsep=3pt
\item \label{om1lrf} Simultaneously for all $(x,s;y,t) \in \Rup$ there exist leftmost and rightmost geodesics {\rm(}possibly in agreement{\rm)} between $(x,s)$ and $(y,t)$ (see Section~\ref{sec:DL_geod}).
\item \label{om1rsi} For each rational direction $\xi \in \mathbb{Q}$ and each point $p \in \mathbb{R}^2$, there exist leftmost and rightmost semi-infinite geodesics {\rm(}possibly in agreement{\rm)} from $p$ in direction $\xi$, and all semi-infinite geodesics in direction $\xi$ coalesce {\rm(}see Theorem~\ref{thm:RV-SIG-thm}, Items~\ref{itm:d_fixed} and~\ref{itm:fixed_coal}{\rm)}.
\item \label{om1pdf} For each rational direction $\xi \in \mathbb{Q}$ and each rational point $p \in \mathbb{Q}^2$, there is a unique semi-infinite geodesic from $p$ in direction $\xi$ (see Theorem~\ref{thm:RV-SIG-thm}\ref{itm:pd_fixed}).
\item \label{om1dirrat} For each rational direction $\xi\in \mathbb{Q}$, the Busemann process defined by~\eqref{RVW-def} satisfies conditions~\ref{itm:fixed_additive}--\ref{itm:DL_Buse_cont} of Theorem~\ref{thm:RV-Buse}. For any pair $\xi_1 < \xi_2$ of rational directions, Item~\ref{itm:DL_Buse_mont} of Theorem~\ref{thm:RV-Buse} holds.
\item \label{om1agree} For each $(x,t,y,\xi) \in \mathbb{Q}^4$,
$
\lim_{\mathbb{Q} \ni \alpha \to \xi} W_\alpha(y,t;x,t) = W_{\xi}(y,t;x,t).
$
\item \label{om1asym} For every rational time $t \in \mathbb{Q}$ and rational direction $\xi \in \mathbb{Q}$,
\begin{equation} \label{eqn:rat_asymp}
\lim_{x \to \pm \infty} x^{-1} {W_\xi(x,t;0,t)} = 2\xi.
\end{equation}
This holds with probability one by properties of Brownian motion and Theorem~\ref{thm:RV-Buse}\ref{itm:DL_Buse_BM}.
\item \label{om1appB} The conclusions of Lemmas~\ref{lem:Landscape_global_bound},~\ref{lm:BGH_disj}, and~\ref{lem:geod_pp} hold for $\mathcal{L}$. Note that then Lemma~\ref{lem:Landscape_global_bound} holds also for the reflected version
$
\{\mathcal{L}(y,-t;x,-s):(x,s;y,t) \in \Rup\}.
$ \end{enumerate}
To justify $\mathbb P(\Omega_1)=1$, it remains only to check Item~\ref{om1agree}. By Theorem~\ref{thm:RV-Buse}\ref{itm:DL_Buse_mont}, for $y \ge x$, \begin{equation} \label{lrlimit} \lim_{\mathbb{Q} \ni \alpha \nearrow \xi} W_{\alpha}(y,t;x,t) \le W_{\xi}(y,t;x,t) \le \lim_{\mathbb{Q} \ni \alpha \searrow \xi} W_{\alpha}(y,t;x,t). \end{equation} By Theorem~\ref{thm:RV-Buse}\ref{itm:DL_Buse_BM}, $W_{\alpha}(y,t;x,t) \sim \mathcal N(2\alpha(y - x),2(y - x))$. Hence, all terms in~\eqref{lrlimit} have the same distribution and are almost surely equal.
Now, on the full-probability event $\Omega_1$, we have defined the process
\begin{equation} \label{rat_process}
\{W_{\alpha}(p;q):p,q \in \mathbb{R}^2,\alpha \in \mathbb{Q} \}.
\end{equation}
On this event, for an arbitrary direction $\xi$, and $t,x,y \in \mathbb{R}$, define \begin{equation} \label{eqn:Buse_def} W_{\xi -}(y,t;x,t) = \lim_{\mathbb{Q} \ni \alpha \nearrow \xi}W_{\alpha }(y,t;x,t)\;\;\text{and}\;\; W_{\xi+}(y,t;x,t) = \lim_{\mathbb{Q} \ni \alpha \searrow \xi} W_{\alpha }(y,t;x,t). \end{equation} By Theorem~\ref{thm:RV-Buse}\ref{itm:DL_Buse_mont}, these limits exist for all $t \in \mathbb{R}$. Complete the definition by setting, \begin{equation} \label{eqn:gen_Buse_var}\begin{aligned} \text{ for $s < t$, } \ W_{\xi \sig}(x,s;y,t) &= \sup_{z \in \mathbb{R}}\{\mathcal{L}(x,s;z,t) + W_{\xi \sig}(z,t;y,t)\},\\ \text{ and finally for $s > t$, } \ W_{\xi \sig}(x,s;y,t) &= -W_{\xi \sig}(y,t;x,s). \end{aligned} \end{equation} With this construction in place, we prove an intermediate lemma. \begin{lemma} \label{lem:DL_horiz_Buse} The following hold on the event $\Omega_1$, across all points, directions and signs. \begin{enumerate} [label=\rm(\roman{*}), ref=\rm(\roman{*})] \itemsep=3pt \item \label{itm:DL_agree_horiz} For all $x,y,t \in \mathbb{R}$, and $\xi \in \mathbb{Q}$, $W_{\xi -}(y,t;x,t) = W_{\xi +}(y,t;x,t) = W_\xi(y,t;x,t)$, where $W_\xi$ is the originally defined Busemann function from~\eqref{rat_process}.
\item \label{itm:DL_h_add} Horizontal Busemann functions are additive: $\forall\,x,y,z,t \in \mathbb{R}$, $\xi \in \mathbb{R}$, and $\sigg \in \{-,+\}$,
\[
W_{\xi \sig}(x,t;y,t) + W_{\xi \sig}(y,t;z,t) = W_{\xi \sig}(x,t;z,t).
\]
\item \label{itm:DL_h_unif_conv} For every $t,\xi \in \mathbb{R}$, the limits \eqref{eqn:Buse_def} hold uniformly over $(x,y)$ on compact sets. Further, for each $t,\xi \in \mathbb{R}$ and $\sigg \in \{-,+\}$,
these limits hold in the same sense:
\begin{equation} \label{itm:horiz_lim}
\lim_{\alpha \nearrow \xi} W_{\alpha \sig}(y,t;x,t) = W_{\xi -}(y,t;x,t)\ \text{ and }\ \lim_{\alpha \searrow \xi}W_{\alpha \sig}(y,t;x,t) = W_{\xi +}(y,t;x,t).
\end{equation}
\item \label{itm:DL_lim} For every $\xi \in \mathbb{R}$, $\sigg \in \{-,+\}$, $(p,q) \mapsto W_{\xi \sig}(p;q)$ is continuous, and for each $t \in \mathbb{R}$,
\begin{equation} \label{eqn:horiz_asymptotics}
\lim_{x \to \pm \infty} x^{-1} {W_{\xi \sig}(x,t;0,t)} = 2\xi.
\end{equation}
\end{enumerate}
\end{lemma} \begin{proof} We prove Item~\ref{itm:DL_agree_horiz} last.
\noindent \textbf{Item~\ref{itm:DL_h_add}} follows from the same property in rational directions (Theorem~\ref{thm:RV-Buse}\ref{itm:fixed_additive}).
\noindent \textbf{Item~\ref{itm:DL_h_unif_conv}:} The monotonicity of the horizontal Busemann process from Theorem~\ref{thm:RV-Buse}\ref{itm:DL_Buse_mont} extends to all directions by limits. That is, for any two rational directions $\xi_1 < \xi_2$ and any real $x < y$, and $t$,
\begin{equation} \label{eqn:Buse_mont}
W_{\xi_1 -}(y,t;x,t) \le W_{\xi_1}(y,t;x,t) \le W_{\xi_1 +}(y,t;x,t) \le W_{\xi_2 -}(y,t;x,t),
\end{equation}
and when $\xi_1 \notin \mathbb{Q}$, the same monotonicity holds, removing the middle term that does not distinguish between $\pm$. Hence, the limits as $\alpha \nearrow \xi$ and $\alpha \searrow \xi$ exist and agree with the limits from rational directions (without the $\sigg$). Without loss of generality, we take the compact set to be $[a,b]^2$. Then, by~\eqref{eqn:Buse_mont} and Lemma~\ref{lem:ext_mont}, for $\alpha < \xi$, $\sigg \in \{-,+\}$, and $a \le x \le y \le b$,
\begin{equation} \label{156} 0 \le W_{\xi-}(y,t;x,t)- W_{\alpha \sig }(y,t;x,t) \le W_{\xi-}(b,t;a,t)- W_{\alpha \sig }(b,t;a,t), \end{equation} and for general $(x,y) \in [a,b]^2$, \[
|W_{\xi-}(y,t;x,t)- W_{\alpha \sig }(y,t;x,t)| \le |W_{\xi-}(b,t;a,t)- W_{\alpha \sig }(b,t;a,t)|, \] so the limit as $\alpha \nearrow \xi$ is uniform on compacts. An analogous argument applies to $\alpha \searrow\xi$.
\noindent \textbf{Item~\ref{itm:DL_lim}:} For $t,\xi \in \mathbb{R}$ and $\sigg \in \{-,+\}$, the continuity of $ (x,y) \mapsto W_{\xi \sig}(y,t;x,t) $ follows from Item~\ref{itm:DL_h_unif_conv} and the continuity for rational $\xi$ in Theorem~\ref{thm:RV-Buse}\ref{itm:DL_Buse_cont}. Before showing the general continuity, we show the limits~\eqref{eqn:horiz_asymptotics}. For $\xi, t \in \mathbb{Q}$, \eqref{eqn:rat_asymp} holds by definition of $\Omega_1$. Keeping $\xi \in \mathbb{Q}$, let $s \in \mathbb{R}$, and let $t > s$ be rational. By Theorem~\ref{thm:RV-Buse}\ref{itm:fixed_additive}--\ref{itm:DL_Buse_var}, \begin{align*} W_{\xi}(x,s;0,s) &= W_{\xi}(x,s;0,t) + W_{\xi}(0,t;0,s) \\ &= \sup_{z \in \mathbb{R}}\{\mathcal{L}(x,s;z,t) + W_{\xi}(z,t;0,t)\}+ W_{\xi}(0,t;0,s). \end{align*}
Then, by Lemma~\ref{lem:KPZ_preserve_lim} (for the temporally reflected $\mathcal{L}$),
$
\lim_{x \to \pm \infty} x^{-1} {W_{\xi}(x,s;0,s)} = 2\xi.
$
Now, let $\xi \in \mathbb{R}$, $\sigg \in \{-,+\}$, and $t \in \mathbb{R}$ be arbitrary. Then, the monotonicity of~\eqref{eqn:Buse_mont} implies that for $\alpha < \xi < \beta$ with $\alpha,\beta \in \mathbb{Q}$,
\[
\alpha \le \liminf_{x \to \infty} x^{-1}{W_{\xi \sig}(x,t;0,t)} \le \limsup_{x \to \infty} x^{-1} {W_{\xi \sig}(x,t;0,t)} \le \beta.
\]
Sending $\mathbb{Q} \ni \alpha \nearrow \xi$ and $\mathbb{Q} \ni \beta \searrow \xi$ implies~\eqref{eqn:horiz_asymptotics} for $+\infty$. The case $x \to -\infty$ follows a symmetric argument.
Lastly, the continuity of $(x,y) \mapsto W_{\xi \sig}(y,t;x,t)$ and~\eqref{eqn:horiz_asymptotics} imply that $W_{\xi \sig}(x,t;0,t) \le a + b|x|$ for some constants $a,b$. The general continuity follows from~\eqref{eqn:gen_Buse_var} and Lemma~\ref{lem:max_restrict}\ref{itm:KPZcont}.
\noindent \textbf{Item~\ref{itm:DL_agree_horiz}:} The statement holds for all $x,y,t,\xi \in \mathbb{Q}$ by Item~\ref{om1agree} of $\Omega_1$. The continuity proved in Item~\ref{itm:DL_lim} extends this to all $x,y,t \in \mathbb{R}$. \end{proof}
\noindent Recall Definition~\ref{def:LR_maxes} of the extreme maximizers $g_{(x,s)}^{\xi \sig,L/R}(t)$.
\begin{lemma} \label{lem:bounded_maxes} For each $\omega \in \Omega_1$, $(x,s;y,t) \in \Rup$, $\xi \in \mathbb{R}$, and $\sigg \in \{-,+\}$,
\begin{equation} \label{eqn:zto_pminfty}
\lim_{z \to \pm \infty} \mathcal{L}(x,s;z,t) + W_{\xi \sig}(z,t;y,t) = -\infty
\end{equation}
so that $g_{(x,s)}^{\xi \sig,L/R}$ are well-defined.
Let $K \subseteq \mathbb{R}$ be a compact set, $\xi \in \mathbb{R}$ and $\sigg \in \{-,+\}$. Then, there exists a random $Z = Z(\xi \sig,K) \in (0,\infty)$ such that for all $x,s,t \in K$ with $s < t$ and $S \in \{L,R\}$, $|g_{(x,s)}^{\xi \sig,S}(t)| \le Z$. \end{lemma} \begin{proof}
By the continuity and asymptotics of Lemma~\ref{lem:DL_horiz_Buse}\ref{itm:DL_lim}, $\forall t \in \mathbb{R}$ $\exists a,b > 0$ such that $|W_{\xi \sig}(x,t;0,t)| \le a + b|x|$ $\forall x \in \mathbb{R}$. Lemma~\ref{lem:Landscape_global_bound} implies $\mathcal{L}(x,s;z,t) \sim -\frac{(z - x)^2}{t - s}$, which gives \eqref{eqn:zto_pminfty}. Next we observe that \begin{equation} \label{107} \begin{aligned} &\inf_{x,s,t \in K, s < t}\sup_{z \in \mathbb{R}}\{\mathcal{L}(x,s;z,t) + W_{\xi \sig}(z,t;0,t)\} \\[-3pt]
&\qquad\qquad\qquad \ge \inf_{x,s,t \in K, s < t} \mathcal{L}(x,s;x,t) + W_{\xi \sig}(x,t;0,t) > -\infty. \end{aligned} \end{equation} The last inequality is justified as follows. Since $W_{\xi \sig}(x,t;0,t)$ evolves backwards in time as the KPZ fixed point~\eqref{eqn:gen_Buse_var}, Lemma~\ref{lem:max_restrict}\ref{itm:KPZ_unif_line} implies that $a$ and $b$ can be chosen uniformly for $t \in K$. Lemma~\ref{lem:Landscape_global_bound} states that $\forall x,s,t \in \mathbb{R}$ with $s < t$, there is a constant $C$ such that \[ \mathcal{L}(x,s;x,t) \ge - C(t - s)^{1/3}\log^2\bigl(\tfrac{2\sqrt{2x^2 + s^2 + t^2} + 4}{(t - s)\wedge 1}\bigr). \] Taking the infimum over $x,s,t \in K$ with $s < t$ yields the last inequality in~\eqref{107}.
To prove the last statement of the lemma by contradiction, assume that there exist maximizers $z_n$ of $\mathcal{L}(x_n,s_n;z,t_n) + W_{\xi \sig}(z,t_n;0,t_n)$ over $z \in \mathbb{R}$ such that $x_n,s_n,t_n \in K$ but $|z_n| \to \infty$. Then, by~\eqref{107}, \begin{equation} \label{9} \liminf_{n \to \infty} \mathcal{L}(x_n,s_n;z_n,t_n) + W_{\xi \sig}(z_n,t_n;0,t_n) > -\infty,
\end{equation} but since $|z_n| \to \infty$ and $x_n,s_n,t_n \in K$ for all $n$, $\mathcal{L}(x_n,s_n;z_n,t_n) \sim -\frac{(z_n - x_n)^2}{t_n - s_n}$ by Lemma~\ref{lem:Landscape_global_bound}. By the bound $|W_{\xi \sig}(x,t;0,t)| \le a + b|x|$ that holds uniformly for $t \in K$ and $x \in \mathbb{R}$, the inequality~\eqref{9} cannot hold. \end{proof}
\begin{proof}[Proof of Theorem~\ref{thm:DL_Buse_summ}, Items~\ref{itm:general_cts}--\ref{itm:Buse_KPZ_description}] The full-probability event of these items is $\Omega_1$. The remaining items are proved later.
\noindent \textbf{Item~\ref{itm:general_cts} (Continuity):} This was proved in Lemma~\ref{lem:DL_horiz_Buse}\ref{itm:DL_lim}.
\noindent \textbf{Item~\ref{itm:DL_Buse_add} (Additivity):} First, we show that on $\Omega_1$ for $s < t$, $x \in \mathbb{R}$, $\xi_1 < \xi_2$, and $S \in \{L,R\}$,
\begin{equation} \label{eqn:mont_maxes}
-\infty < g_{(x,s)}^{\xi_1 -,S}(t) \le g_{(x,s)}^{\xi_1 +,S}(t) \le g_{(x,s)}^{\xi_2 -,S}(t) \le g_{(x,s)}^{\xi_2 +,S}(t) < \infty.
\end{equation}
The finiteness of the maximizers comes from Lemma~\ref{lem:bounded_maxes}. The rest of~\eqref{eqn:mont_maxes} follows from the monotonicity of~\eqref{eqn:Buse_mont} and Lemma~\ref{lemma:max_monotonicity}. Next, we show that for $(x,s;y,t) \in \mathbb{R}^4$ and $\xi \in \mathbb{R}$, $W_{\alpha }(x,s;y,t)$ converges pointwise to $W_{\xi -}(x,s;y,t)$ as $\mathbb{Q} \ni \alpha \nearrow \xi$. The same holds for limits from the right, with $\xi -$ replaced by $\xi +$ (later we prove that the convergence is locally uniform). By~\eqref{eqn:gen_Buse_var}, it suffices to assume $s < t$. By~\eqref{eqn:mont_maxes} and the additivity of Lemma~\ref{lem:DL_horiz_Buse}\ref{itm:DL_h_add} when $s = t$, for all $\alpha \in [\xi - 1,\xi + 1] \cap \mathbb{Q}$ and $\sigg \in \{-,+\}$, \begin{align*} W_{\alpha }(x,s;y,t) &= \sup_{z \in \mathbb{R}}\{\mathcal{L}(x,s;z,t) + W_{\alpha }(z,t;y,t)\} \\ &= \sup_{z \in \mathbb{R}}\{\mathcal{L}(x,s;z,t) + W_{\alpha}(z,t;0,t)\} + W_{\alpha}(0,t;y,t) \\ &= \sup_{z \in [g_{(x,s)}^{(\xi - 1)-,L}(t),g_{(x,s)}^{(\xi + 1)+,R}(t)]}\{\mathcal{L}(x,s;z,t) + W_\alpha(z,t;0,t)\} + W_{\alpha}(0,t;y,t). \end{align*} By Lemma~\ref{lem:DL_horiz_Buse}\ref{itm:DL_h_unif_conv}, $W_{\alpha}(z,t;y,t)$ converges uniformly on compact sets to $W_{\xi -}(z,t;y,t)$ as $\mathbb{Q} \ni \alpha \nearrow \xi$ and to $W_{\xi +}(z,t;y,t)$ as $\mathbb{Q} \ni \alpha \searrow \xi$. This implies the desired pointwise convergence. The additivity follows from the additivity for rational $\xi$ (Theorem~\ref{thm:RV-Buse}\ref{itm:fixed_additive}).
\noindent \textbf{Item~\ref{itm:DL_Buse_gen_mont} (Monotonicity along a horizontal line):} This was previously proven as Equation~\eqref{eqn:Buse_mont}.
\noindent \textbf{Item~\ref{itm:Buse_KPZ_description} (Backwards evolution as the KPZ fixed point):} This follows directly from the construction~\eqref{eqn:gen_Buse_var}.
\noindent We postpone the proofs of Items~\ref{itm:DL_unif_Buse_stick}--\ref{itm:global_attract}. Item~\ref{itm:DL_unif_Buse_stick} is proved after the proof of Theorem~\ref{thm:Buse_dist_intro}, and Items~\ref{itm:BuseLim2}--\ref{itm:global_attract} are proved after the proof of Theorem~\ref{thm:DL_good_dir_classification}. No subsequent results depend on Items~\ref{itm:BuseLim2}--\ref{itm:global_attract}, except the mixing in Theorem~\ref{thm:Buse_dist_intro}\ref{itm:stationarity}, which is proven later. \end{proof}
\begin{proof}[Proof of Theorem~\ref{thm:Buse_dist_intro} (Distributional properties of Busemann process)]
\noindent \textbf{Item~\ref{itm:indep_of_landscape} (Independence):} We know that $\{\mathcal{L}(x,s;y,t):s,y \in \mathbb{R}, s < t \le T\}$ is independent of $\{\mathcal{L}(x,s;y,t):s,y \in \mathbb{R}, T \le s < t\}$ for $T \in \mathbb{R}$. From the definition of the Busemann process from geodesics and the extension \eqref{eqn:Buse_def}--\eqref{eqn:gen_Buse_var}, the process \[ \{W_{\xi \sig}(x,s;y,t): \xi \in \mathbb{R}, \,\sigg \in \{-,+\}, \, x,y \in \mathbb{R}, \, s,t \ge T \} \] is a function of
$\{\mathcal{L}(x,s;y,t):s,y \in \mathbb{R}, T \le s < t\}$, and independence follows.
\noindent \textbf{Item~\ref{itm:stationarity} (Stationarity):} As in the previous item, the stationarity of the process follows from the stationarity of the directed landscape from Lemma~\ref{lm:landscape_symm}\ref{itm:time_stat}. The mixing properties will be proven in Section~\ref{sec:Buseextraproofs}, along with Items~\ref{itm:BuseLim2}--\ref{itm:global_attract} of Theorem~\ref{thm:DL_Buse_summ}.
\noindent \textbf{Item~\ref{itm:SH_Buse_process} (Distribution along a time level):} By the additivity of Theorem~\ref{thm:DL_Buse_summ}\ref{itm:DL_Buse_add} and the variational definition~\eqref{eqn:gen_Buse_var}, for $x \in \mathbb{R}$, $s < t$, and $\sigg \in \{-,+\}$, on the full-probability event $\Omega_1$, \begin{align*} &W_{\xi \sig}(x,s;0,s) = W_{\xi \sig}(x,s;0,t) - W_{\xi \sig}(0,s;0,t) \\ &\qquad =\sup_{y \in \mathbb{R}}\{\mathcal{L}(x,s;y,t) + W_{\xi \sig}(y,t;0,t)\} - \sup_{y \in \mathbb{R}} \{\mathcal{L}(0,s;y,t) + W_{\xi \sig}(y,t;0,t)\}. \end{align*} By Item~\ref{itm:indep_of_landscape}, Theorem~\ref{thm:DL_Buse_summ}\ref{itm:DL_Buse_gen_mont}, and Items~\ref{itm:DL_h_unif_conv} and~\ref{itm:DL_lim} of Lemma~\ref{lem:DL_horiz_Buse}, $\{W_{\xi +}(\abullet,t;0,t):\xi \in \mathbb{R}\}_{t \in \mathbb{R}}$ is a reverse-time Markov process that almost surely lies in the state space $\mathcal{Y}$ defined in~\eqref{Y}. By the stationarity of Item~\ref{itm:stationarity}, the law of $\{W_{\xi +}(\abullet,t;0,t):\xi \in \mathbb{R}\}$ must be invariant for this process. By the temporal reflection invariance of the directed landscape (Lemma~\ref{lm:landscape_symm}\ref{itm:DL_reflect}), $\{W_{\xi +}(\abullet,t;0,t):\xi \in \mathbb{R}\}$ is also invariant for the KPZ fixed point, forward in time. The uniqueness part of Theorem~\ref{thm:invariance_of_SH} completes the proof. \end{proof}
\begin{lemma} \label{lem:L_and_Buse_ineq} For every $\omega \in \Omega_1$ and $(x,s;y,t) \in \Rup$, $\mathcal{L}(x,s;y,t) \le W_{\xi \sig}(x,s;y,t)$, and equality occurs if and only if $y$ maximizes $\mathcal{L}(x,s;z,t) + W_{\xi \sig}(z,t;0,t)$ over $z \in \mathbb{R}$. \end{lemma} \begin{proof} For $s < t$, Theorem~\ref{thm:DL_Buse_summ}\ref{itm:DL_Buse_add},\ref{itm:Buse_KPZ_description} gives \begin{align} \label{106} W_{\xi \sig}(x,s;y,t) &= \sup_{z \in \mathbb{R}}\{\mathcal{L}(x,s;z,t) + W_{\xi \sig}(z,t;y,t)\} \nonumber \\ &= \sup_{z \in \mathbb{R}}\{\mathcal{L}(x,s;z,t) + W_{\xi \sig}(z,t;0,t)\} + W_{\xi \sig}(0,t;y,t). \end{align} Setting $z = y$ on the right-hand side of~\eqref{106}, it follows that $W_{\xi \sig}(x,s;y,t) \ge \mathcal{L}(x,s;y,t)$, and equality holds if and only if $y$ is a maximizer. \end{proof}
\begin{proof}[Proof of Theorem~\ref{thm:DL_SIG_cons_intro} (Construction of the Busemann geodesics)] The full-probability event of this theorem is $\Omega_1$~\eqref{omega1}.
\noindent \textbf{Item~\ref{itm:intro_SIG_bd} (Finiteness of the maximizers):} This follows immediately from Lemma~\ref{lem:bounded_maxes}.
\noindent
We prove \textbf{Items~\ref{itm:arb_geod_cons}--\ref{itm:DL_LRmost_geod}} together. By Lemma~\ref{lem:L_and_Buse_ineq}, for any such construction of a path from the sequence of times $s = t_0 < t_1 < \cdots$ and any $i \ge 1$, \[ \mathcal{L}(g(t_{i - 1}),t_{i - 1};g(t_i),t_i) = W_{\xi \sig}(g(t_{i - 1}),t_{i - 1};g(t_i),t_i). \] Furthermore, for any $t_{i - 1} \le t < u \le t_i$, it must hold that \[ \mathcal{L}(g(t),t;g(u),u) = W_{\xi \sig}(g(t),t;g(u),u), \] for otherwise, by additivity of the Busemann functions (Theorem~\ref{thm:DL_Buse_summ}\ref{itm:DL_Buse_add}), \begin{align*} &\quad \mathcal{L}(g(t_{i - 1}),t_{i - 1};g(t_i),t_i)\\ &= \mathcal{L}(g(t_{i - 1}),t_{i - 1};g(t),t) +\mathcal{L}(g(t),t;g(u),u) + \mathcal{L}(g(u),u;g(t_i),t_i)\\ &< W_{\xi \sig}(g(t_{i - 1}),t_{i - 1};g(t),t) +W_{\xi \sig}(g(t),t;g(u),u) + W_{\xi \sig}(g(u),u;g(t_i),t_i) \\ &= W_{\xi \sig}(g(t_{i - 1}),t_{i - 1};g(t_i),t_i), \end{align*} a contradiction. Additivity extends~\eqref{eqn:SIG_weight} to all $s \le t < u$. Therefore, the path is a semi-infinite geodesic because the weight of the path in between any two points is optimal by Lemma~\ref{lem:L_and_Buse_ineq}. From the equality~\eqref{eqn:SIG_weight} and Lemma~\ref{lem:L_and_Buse_ineq}, for \textit{every} $t \ge s$, $g(t)$ maximizes $\mathcal{L}(x,s;z,t) + W_{\xi \sig}(z,t;0,t)$ over $z \in \mathbb{R}$.
\begin{figure}
\caption{\small Illustration of the proof of Lemma~\ref{lem:RM_geod_SIG}. Here, the red/thick path denotes the path $\hat \gamma$ in the case $w_t < g(t)$, which is to the right of the rightmost geodesic between $(x,s)$ and $(g(u),u)$, which passes through $(w_t,t)$ by assumption. This gives the contradiction. }
\label{fig:RM_geod}
\end{figure}
Before proving the global directedness of all geodesics, we show that $g_{(x,s)}^{\xi \sig,S}$ are semi-infinite geodesics and the leftmost/rightmost geodesics between any two of their points. Take $S = R$; the result for $S = L$ follows similarly. Omit $x,s,\xi$, and $\sigg$ from the notation temporarily, and write $g(t) = g_{(x,s)}^{\xi \sig,R}(t)$.
By what was just proved, it is sufficient to prove the following lemma. \begin{lemma} \label{lem:RM_geod_SIG} Let $g$ be as defined above. For $s < t < u$, let $z_u$ be the rightmost maximizer of $\mathcal{L}(g(t),t;z,u) + W_{\xi \sig}(z,u;0,u)$ over $z \in \mathbb{R}$, and let $w_t$ be the rightmost maximizer of $\mathcal{L}(x,s;w,t) + \mathcal{L}(w,t;g(u),u)$ over $w \in \mathbb{R}$ {\rm(}Equivalently, $(w_t,t)$ is the point at level $t$ on the rightmost geodesic between $(x,s)$ and $(g(u),u)${\rm)}. Then, $g(t) = w_t$ and $g(u) = z_u$. \end{lemma} \begin{proof}
By Lemma~\ref{lem:L_and_Buse_ineq} and Items~\ref{itm:arb_geod_cons}\ref{itm:weight_of_geod}--\ref{itm:maxes}, $w_t$ maximizes $\mathcal{L}(x,s;z,t)+ W_{\xi \sig}(z,t;0,t)$ over $z \in \mathbb{R}$, and $z_u$ maximizes $\mathcal{L}(x,s;z,u) + W_{\xi \sig}(z,u;0,u)$ over $z \in \mathbb{R}$. By definition of $g(u)$ and $g(t)$ as the rightmost maximizers, we have $w_t \le g(t)$ and $z_u \le g(u)$ in general. Assume, to the contrary, that $g(t) \neq w_t$ or $g(u) \neq z_u$. We first prove a contradiction in the case $w_t < g(t)$. For the proof, refer to Figure~\ref{fig:RM_geod} for clarity. Let $\gamma_1:[s,u]\to \mathbb{R}$ be the rightmost geodesic from $(x,s)$ to $(g(u),u)$ (which passes through $(w_t,t)$), and let $\gamma_2$ be the concatenation of the rightmost geodesic from $(x,s)$ to $(g(t),t)$ followed by the rightmost geodesic from $(g(t),t)$ to $(z_u,u)$. By Item~\ref{itm:arb_geod_cons}\ref{itm:weight_of_geod} for $i = 1,2$, the weight of the portion of any part of $\gamma_i$ is equal to the Busemann function between the points. Since $w_t < g(t)$ and $z_u \le g(u)$, $\gamma_1$ and $\gamma_2$ must split before time $t$, and then meet again before or at time $u$. Let $(y,v)$ be a crossing point, where $t < v \le u$. Let $\hat \gamma: [s,u] \to \mathbb{R}$ be defined by $\hat \gamma(r) = \gamma_2(r)$ for $r \in [s,v]$ and $\hat \gamma(r) = \gamma_1(r)$ from $(y,v)$ to $(g(u),u)$. Then, by the additivity of Busemann functions, the weight $\mathcal{L}$ of any portion of the path $\hat \gamma$ is equal to the Busemann function between the two points. By Lemma~\ref{lem:L_and_Buse_ineq}, $\widehat \gamma$ is then a geodesic between $(x,s)$ and $(g(u),u)$, which is to the right of $\gamma_1$, which was defined to be the rightmost geodesic between the points, a contradiction.
Now, we consider the case $z_u < g(u)$. Define $\gamma_1$ and $\gamma_2$ as in the previous case. Since $z_u < g(u)$, there is some point $(y,v)$ with $t \le v < u$ such that $\gamma_1$ splits from or crosses $\gamma_2$ at $(y,v)$. Then, define $\hat \gamma$ as in the previous case. Again, the weight $\mathcal{L}$ of any portion of the path $\hat \gamma$ is equal to the Busemann function between the two points. Specifically, $\mathcal{L}(g(t),t;g(u),u) = W_{\xi \sig}(g(t),t;g(u),u)$, and by Lemma~\ref{lem:L_and_Buse_ineq}, $g(u)$ maximizes $\mathcal{L}(g(t),t;z,u) + W_{\xi \sig}(z,u;0,u)$ over $z \in \mathbb{R}$. This contradicts the definition of $z_u$ as the rightmost such maximizer. \end{proof}
Returning to the proof of Theorem~\ref{thm:DL_SIG_cons_intro}, we show the global directedness of all Busemann geodesics constructed in the manner described in Item~\ref{itm:arb_geod_cons}. By~\eqref{eqn:mont_maxes}, for $t \ge s$ and $\alpha < \xi < \beta$ with $\alpha,\beta \in \mathbb{Q}$, \begin{equation} \label{sig_sand} g_{(x,s)}^{\alpha,L}(t) \le g_{(x,s)}^{\xi \sig,L}(t) \le g(t) \le g_{(x,s)}^{\xi \sig,R}(t) \le g_{(x,s)}^{\beta,R}(t). \end{equation} Note that on $\Omega_1$ the $\pm$ distinction is absent for $\alpha,\beta \in \mathbb{Q}$ (Lemma~\ref{lem:DL_horiz_Buse}\ref{itm:DL_agree_horiz}). By definition~\eqref{omega1} of the event $\Omega_1$ and Theorem~\ref{thm:RV-Buse}\ref{itm:DL_Buse_var}, $\forall\alpha\in\mathbb{Q}$, the maximizers of $\mathcal{L}(x,s;z,t) + W_{\alpha}(z,t;0,t)$ over $z \in \mathbb{R}$ are exactly the locations $z$ where an $\alpha$-directed geodesic goes through $(z,t)$. Therefore, $g_{(x,s)}^{\alpha,L}(t)/t \to \alpha$ and $g_{(x,s)}^{\beta,R}(t)/t \to \beta$ when $\alpha, \beta\in\mathbb{Q}$. By~\eqref{sig_sand}, \[ \alpha \le \liminf_{t \to \infty} t^{-1}{g(t)} \le \limsup_{t \to \infty} t^{-1}{g(t)} \le \beta. \] Sending $\mathbb{Q} \ni \alpha \nearrow \xi$ and $\mathbb{Q} \ni \beta \searrow \xi$ completes the proof of Theorem~\ref{thm:DL_SIG_cons_intro}. \end{proof}
We now define the next full-probability event. \begin{equation} \label{omega2} \text{Let $\Omega_2$ be the subset of $\Omega_1$ on which the following hold.} \end{equation} \begin{enumerate} [label=\rm(\roman{*}), ref=\rm(\roman{*})] \itemsep=3pt
\item \label{itm:stickT} For each integer $T \in \mathbb{Z}$ and each compact set $K \subseteq \mathbb{R}^2$, there exists $\varepsilon =\varepsilon(\xi,T,K) > 0$ such that for $\xi - \varepsilon < \alpha < \xi < \beta < \xi + \varepsilon$ and $(x,y) \in K$,
\begin{equation} \label{465} W_{\alpha \sig}(y,T;x,T) = W_{\xi -}(y,T;x,T)\ \ \text{ and }\ \ W_{\beta \sig}(y,T;x,T) = W_{\xi +}(y,T;x,T). \end{equation} \item For each integer $T \in \mathbb{Z}$, the set \begin{equation} \label{infinit_dense} \{\xi \in \mathbb{R}: W_{\xi -}(x,T;0,T) \neq W_{\xi +}(x,T;0,T) \text{ for some }x \in \mathbb{R}\} \end{equation} is countably infinite and dense in $\mathbb{R}$. \item For each $s < t \in \mathbb{R}$, $x,\xi \in \mathbb{R}$, $\sigg \in \{-,+\}$, and $S \in \{L,R\}$, \begin{equation} \label{eqn:dirtoinf}
\lim_{\xi \to \pm \infty} g_{(x,s)}^{\xi \sig,S}(t) = \pm \infty. \end{equation} \end{enumerate}
\begin{lemma} $\mathbb P(\Omega_2) = 1.$ \end{lemma} \begin{proof} The fact that~\ref{itm:stickT} holds with probability one is a direct consequence of Theorems~\ref{thm:Buse_dist_intro}\ref{itm:SH_Buse_process} and~\ref{thm:SH10}\ref{itm:SH_j}. The set~\eqref{infinit_dense} is countably infinite and dense for all $T \in \mathbb{Z}$ by the distributional equality $ \{W_{\xi+}(\abullet,T;0,T)\}_{\xi \in \mathbb{R}} \deq \{G_\xi\}_{\xi \in \mathbb{R}} $ from Theorem~\ref{thm:Buse_dist_intro}\ref{itm:SH_Buse_process} and the properties of $G$ from Theorem~\ref{thm:SH10}\ref{itm:SH_j},\ref{itm:bad_dir_contained}.
Now, we prove that~\eqref{eqn:dirtoinf} holds with probability one. By the monotonicity of~\eqref{eqn:mont_maxes}, the limits $\lim_{\xi \to \infty} g_{(x,s)}^{\xi \sig,S}(t)$ and $\lim_{\xi \to -\infty} g_{(x,s)}^{\xi \sig,S}(t)$ exist in $\mathbb{R} \cup \{-\infty,\infty\}$. Furthermore, by this monotonicity, it is sufficient to show that \begin{equation} \label{678} \lim_{\xi \to \infty} g_{(x,s)}^{\xi -,L}(t) = \sup_{\xi \in \mathbb{R}}g_{(x,s)}^{\xi -,L}(t) = \infty\ \ \text{ and }\ \ \lim_{\xi \to -\infty} g_{(x,s)}^{\xi +,R}(t) = \inf_{\xi \in \mathbb{R}} g_{(x,s)}^{\xi +,R}(t) = -\infty. \end{equation} First, we show that~\eqref{678} holds with probability one for a fixed initial point $(x,s)$ and fixed $t > s$. It is therefore sufficient to take $(x,s) = (0,0)$ and then $t > 0$. By the monotonicity, it suffices to take limits over $\xi \in \mathbb{Q}$ so that by Theorem~\ref{thm:RV-SIG-thm}\ref{itm:pd_fixed}, the $\pm$ and $L/R$ distinctions are unnecessary. $W_{\xi \sig}(z,t;0,t)$ is a two-sided Brownian motion with drift $2\xi$ and diffusivity $\sqrt 2$, independent of the random function $(x,y) \mapsto \mathcal{L}(x,0;y,t)$ (Theorem~\ref{thm:Buse_dist_intro}\ref{itm:indep_of_landscape}). Let $B$ be a standard Brownian motion, independent of $\mathcal{L}$. Using skew stationarity with $c = -\xi$ in the third equality below and time stationarity in the fifth equality (Lemma~\ref{lm:landscape_symm}), we obtain, for $\xi \in \mathbb{Q}$, \begin{align*}
g_{(x,s)}^\xi(t) &= \argmax_{z \in \mathbb{R}} \{\mathcal{L}(x,s;z,t) + W_{\xi}(z,t;0,t) \} \\
&\deq \argmax_{z \in \mathbb{R}} \{\mathcal{L}(x,s;z,t) + \sqrt 2 B(z) + 2\xi z\} \\
&\deq \argmax_{z \in \mathbb{R}} \{\mathcal{L}(x - \xi s,s;z - \xi t,t) + 2\xi(x -z) + (t - s)\xi^2 + \sqrt 2 B(z) +2\xi z\}\\
&= \argmax_{z \in \mathbb{R}} \{\mathcal{L}(x - \xi s,s;z - \xi t,t) + \sqrt 2(B(z) - B(\xi (t-s))) \} \\
&\deq \argmax_{z \in \mathbb{R}} \{\mathcal{L}(x,s;z - \xi (t-s),t) + \sqrt 2 B(z - \xi (t - s)) \} \\
&= \argmax_{z \in \mathbb{R}}\{\mathcal{L}(x,s;z,t) + \sqrt 2 B(z) \} + \xi (t-s) \deq g_{(x,s)}^0(t) + \xi(t-s). \end{align*}
Therefore, $\forall\xi \in \mathbb{Q}$, the distribution of $g_{(x,s)}^{\xi}(t)$ is that of a fixed, almost surely finite, random variable plus $\xi (t-s)$. Since we know $\lim_{\mathbb{Q} \ni \xi \to \pm \infty} g_{(x,s)}^{\xi}(t)$ exists, the limit must be $\pm \infty$ a.s.
Now, consider the intersection of $\Omega_1$ with the event of probability one on which for each triple $(w,q_1,q_2) \in \mathbb{Q}^3$ with $q_1 < q_2$, \begin{equation} \label{692} \lim_{\xi \to +\infty} g_{(w,q_1)}^{\xi-,L}(q_2) = + \infty\qquad\text{and}\qquad \lim_{\xi \to -\infty} g_{(w,q_1)}^{\xi+,R}(q_2) = - \infty. \end{equation} On this event, let $(x,s,t) \in \mathbb{R}^3$ with $s < t$ be arbitrary. Assume, by way of contradiction, that \begin{equation} \label{792} z := \sup_{\xi \in \mathbb{R}} g_{(x,s)}^{\xi-,L}(t) < \infty,
\end{equation} and let $g:[s,t] \to \mathbb{R}$ denote the leftmost geodesic from $(x,s)$ to $(z,t)$. For this proof, refer to Figure~\ref{fig:fanning_proof} for clarity. By the assumption~\eqref{792} and the fact that $g_{(x,s)}^{\xi-,L}$ is the leftmost geodesic between any two of its points (Theorem~\ref{thm:DL_SIG_cons_intro}\ref{itm:DL_LRmost_geod}), $g_{(x,s)}^{\xi-,L}(t) \le g(t)$ for all $\xi \in \mathbb{R}$ and $t > s$. Let $q_1 \in (s,t)$ be rational. Choose $w \in \mathbb{Q}$ such that $w < g(q_1)$. By continuity of geodesics, we may choose $q_2 \in (q_1,t) \cap \mathbb{Q}$ to be sufficiently close to $t$ so that $|g(q_2) - z| < 1$. Next, by~\eqref{692}, we may choose positive $\xi$ sufficiently large so that \begin{equation} \label{892} g_{(w,q_1)}^{\xi-,L}(q_2) > z + 1 > g(q_2) \ge g_{(x,s)}^{\xi -,L}(q_2). \end{equation} Since $w < g(q_1)$, $g_{(w,q_1)}^{\xi-,L}$ and $g_{(x,s)}^{\xi -,L}$ cross at some $(\hat z,\hat t)$ with $\hat t \in (q_1,q_2)$. By Theorem~\ref{thm:DL_SIG_cons_intro}\ref{itm:DL_all_SIG}, both $g_{(w,q_1)}^{\xi-,L}(q_2)$ and $g_{(x,s)}^{\xi -,L}(q_2)$ equal the leftmost maximizer of $\mathcal{L}(\hat z,\hat t; y,q_2) + W_{\xi -}(y,q_2;0,q_2)$ over $y \in \mathbb{R}$. This contradicts~\eqref{892}. The proof for $\xi \to -\infty$ is analogous. \end{proof} \begin{figure}
\caption{\small The blue/thin path represents $g_{(w,q_1)}^{\xi -,L}$ and the red/thick path represents $g$.}
\label{fig:fanning_proof}
\end{figure}
\begin{proof}[Proof of Theorem~\ref{thm:DL_Buse_summ}\ref{itm:DL_unif_Buse_stick} (Regularity of the Busemann process)]
By definition of the event $\Omega_2$~\eqref{omega2}, for each $\xi \in \mathbb{R}$, each integer $T$, and each compact set $K\subseteq \mathbb{R}^2$, there is an $\varepsilon > 0$ so that~\eqref{465} holds for all $(x,y) \in K$.
Now, let $\xi \in \mathbb{R}$, let $K$ be a compact subset of $\mathbb{R}^4$, and let $T$ be an integer greater than $\sup\{t \vee s: (x,s;y,t) \in K\}$. Let \begin{align*} A &:= \inf\{g_{(x,s)}^{(\xi - 1)-,L}(T)\wedge g_{(y,t)}^{(\xi - 1)-,L}(T) : (x,s;y,t) \in K\}, \qquad\text{and} \\ B &:= \sup\{g_{(x,s)}^{(\xi + 1)+,R}(T)\vee g_{(y,t)}^{(\xi + 1)+,R}(T) : (x,s;y,t) \in K\}. \end{align*} By~\eqref{eqn:mont_maxes} and Lemma~\ref{lem:bounded_maxes}, $-\infty < A < B < \infty$. By~\eqref{eqn:mont_maxes} and the additivity of Theorem~\ref{thm:DL_Buse_summ}\ref{itm:DL_Buse_add}, for all $(x,s;y,t) \in K$ and $\alpha \in (\xi - 1,\xi + 1)$, \begin{equation} \label{Tdiff} \begin{aligned} &W_{\alpha \sig}(x,s;y,t) = W_{\alpha \sig}(x,s;0,T) - W_{\alpha \sig}(y,t;0,T) \\ & = \sup_{z \in \mathbb{R}}\{\mathcal{L}(x,s;z,T) + W_{\alpha\sig}(z,T;0,T)\} - \sup_{z \in \mathbb{R}}\{\mathcal{L}(y,t;z,T) + W_{\alpha \sig}(z,T;0,T)\} \\ &= \sup_{z \in [A,B]}\{\mathcal{L}(x,s;z,T) + W_{\alpha \sig}(z,T;0,T)\} - \sup_{z \in [A,B]}\{\mathcal{L}(y,t;z,T) + W_{\alpha \sig}(z,T;0,T)\}. \end{aligned} \end{equation} By~\eqref{465}, the conclusion follows. \end{proof}
\begin{comment} Before proving Theorem~\ref{thm:DLBusedc_description}, we prove a lemma that states that all discontinuities of the Busemann process are present simultaneously along each horizontal line. \begin{lemma} \label{lem:discontinuities_each_level} When $\omega \in \Omega_1$, for each $t \in \mathbb{R}$, $\xi \notin \Xi$ if and only if $W_{\xi -}(y,t;x,t) = W_{\xi +}(y,t;x,t)$ for all $x < y$. \end{lemma} \begin{proof} By the additivity of Theorem~\ref{thm:DL_Buse_summ}\ref{itm:DL_Buse_add}, it suffices to show that $\xi \notin \Xi$ if and only if $W_{\xi -}(x,t;0,t) = W_{\xi +}(x,t;0,t)$ for all $x \in \mathbb{R}$. By the general construction of the Busemann process from~\eqref{eqn:gen_Buse_var}, $\xi \in \Xi$ if and only if $W_{\xi -}(x,t;0,t) \neq W_{\xi +}(x,t;0,t)$ for some $(x,t) \in \mathbb{R}^2$. Thus, it suffices to show that, for $s < t$, $W_{\xi -}(x,s;0,s) = W_{\xi +}(x,s;0,s)$ for all $x \in \mathbb{R}$ if and only if $W_{\xi -}(x,t;0,t) = W_{\xi +}(x,t;0,t)$ for all $x \in \mathbb{R}$.
By the monotonicity of Theorem~\ref{thm:DL_Buse_summ}\ref{itm:DL_Buse_gen_mont} and Lemma~\ref{lem:ext_mont}, for $0 < z < Z$, \begin{equation} \label{576} 0 \le W_{\xi +}(z,t;0,t) - W_{\xi -}(z,t;0,t) \le W_{\xi +}(Z,t;0,t) - W_{\xi -}(Z,t;0,t), \end{equation} while for $Z < z < 0$, \begin{equation} \label{846} 0 \ge W_{\xi +}(z,t;0,t) - W_{\xi -}(z,t;0,t) \ge W_{\xi +}(Z,t;0,t) - W_{\xi -}(Z,t;0,t). \end{equation}
By the additivity of Theorem~\ref{thm:DL_Buse_summ}\ref{itm:DL_h_add}, we know that for $\xi \in \mathbb{R}$ and $\sigg \in \{-,+\}$, \begin{equation} \label{365} W_{\xi \sig}(x,s;0,s) = \sup_{z \in \mathbb{R}}\{\mathcal{L}(x,s;z,t) + W_{\xi \sig}(z,t;0,t)\} - \sup_{z \in \mathbb{R}}\{\mathcal{L}(0,s;z,t) + W_{\xi \sig}(z,t;0,t)\}. \end{equation} Therefore, if $W_{\xi -}(z,t;0,t) = W_{\xi +}(z,t;0,t)$ for all $z \in \mathbb{R}$, then $W_{\xi -}(x,s;0,s) = W_{\xi +}(x,s;0,s)$ for all $x \in \mathbb{R}$.
Conversely, assume that $W_{\xi -}(w,t;0,t) \neq W_{\xi +}(w,t;0,t)$ for some $w \in \mathbb{R}$. Assume that $w > 0$, and the case $w < 0$ follows by symmetry. By way of contradiction, assume that for all $x \in \mathbb{R}$, $ W_{\xi -}(x,s;0,s) = W_{\xi +}(x,s;0,s). $ Then, by~\eqref{365}, the function \[ x \mapsto f(x) := \sup_{z \in \mathbb{R}}\{\mathcal{L}(x,s;z,t) + W_{\xi +}(z,t;0,t)\} - \sup_{z \in \mathbb{R}}\{\mathcal{L}(x,s;z,t) + W_{\xi -}(z,t;0,t)\} \] is constant in $x$. By~\eqref{576}, for all $Z \ge w$, \[ 0 < W_{\xi +}(w,t;0,t) - W_{\xi -}(w,t;0,t) \le W_{\xi +}(Z,t;0,t) - W_{\xi -}(Z,t;0,t). \] By Lemma~\ref{lem:max_restrict}, for sufficiently large $x$ and $\sigg \in \{-,+\}$, \[ \sup_{z \in \mathbb{R}}\{\mathcal{L}(x,s;z,t) + W_{\xi \sig}(z,t;0,t)\} = \sup_{z \ge w }\{\mathcal{L}(x,s;z,t) + W_{\xi \sig}(z,t;0,t)\}. \]
Then, for such large $x$, $f(x) > 0$. Conversely, by another application of Lemma~\ref{lem:max_restrict}, if $x < 0$ with $|x|$ sufficiently large, \[ \sup_{z \in \mathbb{R}}\{\mathcal{L}(x,s;z,t) + W_{\xi \sig}(z,t;0,t)\} = \sup_{z \le 0}\{\mathcal{L}(x,s;z,t) + W_{\xi \sig}(z,t;0,t)\}, \] and so by~\eqref{846}, $f(x) \le 0$ for such $x$, a contradiction to the fact that $f$ is constant. \end{proof} \end{comment}
\begin{proof}[Proof of Theorem~\ref{thm:DLBusedc_description} (Description of the discontinuity set)] The full-probability event of this theorem is $\Omega_2$, except for Item~\ref{itm:Busedc_t} whose proof is postponed until Section~\ref{sec:last_proofs}. Proofs of results that rely on Item~\ref{itm:Busedc_t} come afterwards.
\noindent \textbf{Item~\ref{itm:Busedc_horiz_mont} (Monotonicity):}
By the monotonicity of Theorem~\ref{thm:DL_Buse_summ}\ref{itm:DL_Buse_mont}, and by Lemma~\ref{lem:ext_mont}, for $a \le x \le y \le b$,
\begin{equation} \label{801}
0 \le W_{\xi +}(y,t;x,t) - W_{\xi -}(y,t;x,t) \le W_{\xi +}(b,t;a,t) - W_{\xi -}(b,t;a,t).
\end{equation} Thus, discontinuities of $\xi \mapsto W_{\xi \sig}(y,t;x,t)$ are also discontinuities for $\xi \mapsto W_{\xi \sig}(b,t;a,t)$.
\noindent \textbf{Item~\ref{itm:DL_dc_set_count} ($\Xi$ is a countable dense set):} Similarly as in~\eqref{Tdiff}, if $(x,s;y,t) \in \mathbb{R}^4$, then for $\xi \in \mathbb{R}$, $\sigg \in \{-,+\}$, and any integer $T > s\vee t$, \begin{equation} \label{880}\begin{aligned} W_{\xi \sig}(x,s;y,t) &= \sup_{z \in \mathbb{R}}\{\mathcal{L}(x,s;z,T) + W_{\xi \sig}(z,T;0,T)\}\\ &\qquad - \sup_{z \in \mathbb{R}} \{\mathcal{L}(y,t;z,T) + W_{\xi \sig}(z,T;0,T)\}. \end{aligned} \end{equation} So if $W_{\xi -}(z,T;0,T) = W_{\xi+}(z,T;0,T) \; \forall z \in \mathbb{R}$, then $W_{\xi -}(x,s;y,t) = W_{\xi +}(x,s;y,t)$, and \begin{equation} \label{881} \Xi = \bigcup_{T \in \mathbb{Z}} \{\xi \in \mathbb{R}: W_{\xi - }(x,T;0,T) \neq W_{\xi +}(x,T;0,T) \text{ for some }x \in \mathbb{R}\}. \end{equation} On $\Omega_2$, $\Xi$ is countably infinite and dense by \eqref{omega2}. Lemma~\ref{lem:DL_horiz_Buse}\ref{itm:DL_agree_horiz}, along with~\eqref{881} imply that $\Xi$ contains no rational directions $\xi$. For an arbitrary $\xi \in \mathbb{R}$, $W_{\xi -}(\abullet,T;0,T)$ and $W_{\xi +}(\abullet,T;0,T)$ are both Brownian motions with the same diffusivity and drift, and $W_{\xi -}(y,T;x,T) \le W_{\xi +}(y,T;x,T)$ for $x < y$ by Theorem~\ref{thm:DL_Buse_summ}\ref{itm:DL_Buse_gen_mont}. By~\eqref{881} and continuity, \[ \mathbb P(\xi \in \Xi) \le \sum_{T \in \mathbb{Z}, x \in \mathbb{Q}} \mathbb P(W_{\xi - }(x,T;0,T) \neq W_{\xi +}(x,T;0,T)) = 0, \] where $\mathbb P(W_{\xi - }(x,T;0,T) \neq W_{\xi +}(x,T;0,T)) = 0$ because the two random variables have the same law and are ordered.
\noindent \textbf{Item~\ref{itm:DL_Buse_no_limit_pts} ($\Xi(p;q)$ is discrete):} This is a direct consequence of the regularity of
the Busemann process from Theorem~\ref{thm:DL_Buse_summ}\ref{itm:DL_unif_Buse_stick}. Since the Busemann function along a horizontal line is a Brownian motion with drift $2\xi$, $\mathbb{E}[W_{\xi \sig}(y,t;x,t)] = 2\xi(y - x)$. For $x < y$ and $t \in \mathbb{R}$, $\xi \mapsto W_{\xi \sig}(y,t;x,t)$ is monotone by Theorem~\ref{thm:DL_Buse_summ}\ref{itm:DL_Buse_mont}. Hence, the limits $\lim_{\xi \to \pm \infty} W_{\xi \sig}(y,t;x,t)$ exist and must be $\pm \infty$. Since the jumps are discrete, $\Xi(y,t;x,t)$ is infinite and unbounded.
\noindent \textbf{Item~\ref{itm:DLBusedcinvar} (Distributional invariances of $\Xi$:)} The discreteness of Item~\ref{itm:DL_Buse_no_limit_pts} allows us to view the sets $\Xi(x,t;-x,t)$ as well-defined point processes.
We recall that $\xi \in \Xi$ if and only if $W_{\xi -}(x,t;-x,t) \neq W_{\xi +}(x,t;-x,t)$ for some $(x,t) \in \mathbb{R}^2$. Start with the distributional equality $\{W_{\xi +}(\abullet,t;0,t)\}_{\xi \in \mathbb{R}} \deq \{G_\xi\}_{\xi \in \mathbb{R}}$, which holds for all $t$ (Theorem~\ref{thm:Buse_dist_intro}\ref{itm:SH_Buse_process}). Furthermore, the additivity of the Busemann process (Theorem~\ref{thm:DL_Buse_summ}\ref{itm:DL_Buse_add}) implies \[ \{W_{\xi +}(x,t;-x,t):x \in \mathbb{R}\}_{\xi \in \mathbb{R}} \deq \{G_{\xi}(x) - G_{\xi}(-x):x \in \mathbb{R}\}_{\xi \in \mathbb{R}}. \] This gives the first distributional equality $\Xi(x,t;-x,t) \deq \Xi(x,0;-x,0)$. The invariance $\Xi(x,0;-x,0) \deq -\Xi(x,0;-x,0)$ follows from the reflection invariance of $G$ (Corollary~\ref{cor:SH_reflect}). The invariance $\Xi(x,0;-x,0) \deq c \Xi(c^{-2}x,0;-c^{-2}x,0) + c\nu$ follows from the corresponding invariance for $G$ in Theorem~\ref{thm:SH10}\ref{itm:SH_sc}. \end{proof}
\section{Non-uniqueness of semi-infinite geodesics} \label{sec:LR_sig} Theorem~\ref{thm:DL_SIG_cons_intro} established global existence of semi-infinite geodesics from each initial point and into each direction.
We know from Theorem 3.3 of~\cite{Rahman-Virag-21}, recorded earlier in Theorem~\ref{thm:RV-SIG-thm}\ref{itm:pd_fixed}, that for a fixed initial point and a fixed direction, there almost surely is a unique semi-infinite geodesic. However, this uniqueness does not extend globally to all initial points and directions simultaneously.
In fact, two qualitatively different types of non-uniqueness of Busemann geodesics from a given point into a given direction arise. One is denoted by the $L/R$ distinction and the other by the $\pm$ distinction.
All semi-infinite geodesics from $p$ in direction $\xi$ lie between the leftmost Busemann geodesic $g_{p}^{\xi -,L}$ and the rightmost Busemann geodesic $g_p^{\xi +,R}$. See Theorem~\ref{thm:all_SIG_thm_intro}\ref{itm:DL_LRmost_SIG}. We refer the reader back to Figure~\ref{fig:non_unique_comp} for the two types of non-uniqueness. The $L/R$ non-uniqueness is depicted on the left, where geodesics split and return to coalesce, while the $\pm$ non-uniqueness is depicted on the right in the figure, where geodesics split and stay apart, all the way to $\infty$.
The $L/R$ non-uniqueness is a feature of continuous space. Only the $\pm$ non-uniqueness appears in the discrete corner growth model with exponential weights, while both $L/R$ and $\pm$ non-uniqueness are present in semi-discrete BLPP~\cite{Seppalainen-Sorensen-21a,Seppalainen-Sorensen-21b}.
To capture $L/R$ non-uniqueness, we introduce the following random sets of initial points. For $\xi \in \mathbb{R}$ and $\sigg \in \{-,+\}$, let $\operatorname{NU}_0^{\xi \sig}$ be the set of points $p \in \mathbb{R}^2$ such that the $\xi \sigg$ geodesic from $p$ is not unique. Let $\operatorname{NU}_1^{\xi \sig}$ be the subset of $\operatorname{NU}_0^{\xi \sig}$ of those initial points at which two $\xi \sig$ geodesics separate immediately. In notational terms, \begin{align} \operatorname{NU}_0^{\xi \sig} &= \{(x,s) \in \mathbb{R}^2: g_{(x,s)}^{\xi \sig,L}(t) < g_{(x,s)}^{\xi \sig,R}(t) \text{ for some } t > s\}, \label{NU0}\qquad\text{and}\\ \label{NU1} \operatorname{NU}_1^{\xi \sig} &= \{(x,s) \in \operatorname{NU}_0^{\xi \sig}: \exists\varepsilon > 0 \text{ such that }
g_{(x,s)}^{\xi \sig,L}(t) < g_{(x,s)}^{\xi \sig,R}(t) \ \forall t \in (s,s+\varepsilon)\}. \end{align} For $i = 0,1$, let \begin{equation} \label{NU0_global} \operatorname{NU}_i = \textstyle\bigcup_{\xi \hspace{0.9pt}\in\hspace{0.9pt} \mathbb{R},\,\sig \hspace{0.9pt}\in\hspace{0.9pt} \{-,+\}} \operatorname{NU}_i^{\xi \sig}. \end{equation} Figure~\ref{fig:NU} illustrates $\operatorname{NU}_0$ and $\operatorname{NU}_1$.
\begin{figure}
\caption{\small In this figure, $(x,s) \in \operatorname{NU}_0 \setminus \operatorname{NU}_1$ and $(y,t) \in \operatorname{NU}_1 \subseteq \operatorname{NU}_0$. It has since been shown by Bhatia~\cite{Bhatia-23} and Dauvergne~\cite{Dauvergne-23} that no such points $(x,s)$ exist. }
\label{fig:NU}
\end{figure}
Theorem~\ref{thm:DLNU}\ref{itm:DL_NU_count} establishes that, with probability one, for each $\xi \in \mathbb{R}$ and $\sigg \in \{-,+\}$, the restriction of $\operatorname{NU}_0^{\xi\sig}$ to each time level $s$ is countably infinite. By Theorem~\ref{thm:DL_all_coal}\ref{itm:DL_allsigns_coal}, on a single event of probability one, for each direction $\xi$ and sign $\sigg \in \{-,+\}$, all $\xi \sig$ geodesics coalesce. Therefore, from each $p \in \operatorname{NU}_0^{\xi \sig}$, two $\xi \sig$ geodesics separate but eventually come back together. In particular, the set of points $(x,s) \in \mathbb{R}^2$ such that $g_{(x,s)}^{\xi \sig,L}(t) < g_{(x,s)}^{\xi \sig,R}(t)$ for all $t\in(s,\infty)$ is empty and the $\varepsilon > 0$ in the definition \eqref{NU1} of $\operatorname{NU}_1^{\xi\sig}$ is essential.
By definition $\operatorname{NU}_1^{\xi \sig} \subseteq \operatorname{NU}_0^{\xi \sig}$. When this paper was first posted, we did not know whether $\operatorname{NU}_1^{\xi \sig}$ is a strict subset of $\operatorname{NU}_0^{\xi \sig}$. Afterward, Bhatia~\cite{Bhatia-23} proved that in fact, $\operatorname{NU}_0^{\xi \sig} = \operatorname{NU}_1^{\xi \sig}$. In fact, something stronger is true: With probability one, there are no pairs of points $(x,s;y,t) \in \Rup$ and pairs of distinct geodesics $g_1,g_2$ from $(x,s)$ to $(y,t)$ satisfying, for some $\varepsilon > 0$, $g_1(u) = g_2(u)$ for all $u \in (s,s + \varepsilon)\cup (t - \varepsilon,t)$ (\cite[Theorem 1]{Bhatia-23}).
In BLPP, the set $\operatorname{NU}_1$ plays a significant role as the set of points from which the leftmost and rightmost competition interfaces have different directions (Theorem 4.32(ii) in~\cite{Seppalainen-Sorensen-21b}). Presently, we do not have an analogous characterization in DL.
Since $\operatorname{NU}_0^{\xi -} \cup \operatorname{NU}_0^{\xi +}$ captures only the $L/R$ distinction and not the $\pm$ distinction, it does \textit{not} in general contain all the initial points from which the $\xi$-directed semi-infinite geodesic is not unique. However, when the $\xi\pm$ distinction is absent, Theorem~\ref{thm:all_SIG_thm_intro}\ref{itm:DL_LRmost_SIG} implies that $\operatorname{NU}_0^{\xi}=\operatorname{NU}_0^{\xi\pm}$ is exactly the set of points $p \in \mathbb{R}^2$ such that the semi-infinite geodesic from $p$ in direction $\xi$ is not unique. This happens under two scenarios: when $\xi \notin \Xi$, and when we restrict attention to the $\xi$-dependent event of full probability on which $g_{p}^{\xi -,S} = g_p^{\xi +,S}$ for all $p \in \mathbb{R}^2$ and $S \in \{L,R\}$.
The failure to capture the $\pm$ non-uniqueness is also evident from the size of $\operatorname{NU}_0$. Whenever $\xi \in \Xi$, there are at least two semi-infinite geodesics with direction $\xi$ from {\it every} initial point. But along a fixed time level $\operatorname{NU}_0$ is countable, and thereby a strict subset of $\mathbb{R}^2$ (Theorem~\ref{thm:DLNU}\ref{itm:DL_NU_count} below).
Recall that $\mathcal{H}_s=\{(x,s): x \in \mathbb{R}\}$ is the set of space-time points at time level $s$. Theorem~\ref{thm:DLBusedc_description}\ref{itm:DL_dc_set_count} states that on a single event of full probability, $\Xi \subseteq \mathbb{R} \setminus \mathbb{Q}$, so for $\xi \in \mathbb{Q}$, we can drop the $\pm$ distinction and write $\operatorname{NU}_i^\xi =\operatorname{NU}_i^{\xi -} = \operatorname{NU}_i^{\xi +}$. \begin{theorem} \label{thm:DLNU} On a single event of probability one, for $i = 0,1$, the set $\operatorname{NU}_i$ satisfies
\begin{equation} \label{109}
\operatorname{NU}_i = \textstyle\bigcup_{\xi \in \mathbb{Q}}\operatorname{NU}_i^{\xi}.
\end{equation} In particular, the following hold. \begin{enumerate} [label=\rm(\roman{*}), ref=\rm(\roman{*})] \itemsep=3pt
\item \label{itm:DL_NU_p0}
For each $p \in \mathbb{R}^2$, $\mathbb P(p \in \operatorname{NU}_0) = 0$, and the full-probability event of the theorem can be chosen so that $\operatorname{NU}_0$ contains no points of $\mathbb{Q}^2$.
\item \label{itm:DL_NU_count} On a single event of full probability, simultaneously for every $s \in \mathbb{R}$, $\xi \in \mathbb{R}$ and $\sigg \in \{-,+\}$, the set $\operatorname{NU}_0^{\xi \sig} \cap\, \mathcal{H}_s$ is countably infinite and unbounded in both directions. Specifically, for each $s \in \mathbb{R}$, there exist sequences $x_n \to -\infty$ and $y_n \to +\infty$ such that $(x_n,s),(y_n,s) \in \operatorname{NU}_0^{\xi \sig}$.
By~\eqref{109}, $\operatorname{NU}_0 \cap\, \mathcal{H}_s$ is also countably infinite. \end{enumerate} \end{theorem} \begin{remark} The set $\mathbb{Q}$ can be replaced by any countable dense subset of $\mathbb{R}$, by adjusting the full-probability event. In all applications in this paper, we use the set $\mathbb{Q}$. \end{remark}
The next theorem states properties of Busemann geodesics that involve the $L/R$ and $\pm$ distinctions.
\begin{theorem} \label{thm:g_basic_prop} The following hold on a single event of full probability. \begin{enumerate} [label=\rm(\roman{*}), ref=\rm(\roman{*})] \itemsep=3pt
\item \label{itm:DL_mont_dir} For $s < t$, $x \in \mathbb{R}$, $\xi_1 < \xi_2$, and $S \in \{L,R\}$,
\[
g_{(x,s)}^{\xi_1 -,S}(t) \le g_{(x,s)}^{\xi_1 +,S}(t) \le g_{(x,s)}^{\xi_2 -,S}(t) \le g_{(x,s)}^{\xi_2 +,S}(t).
\]
\item \label{itm:DL_SIG_unif} Let $\xi \in \mathbb{R}$, let $K \subseteq \mathbb{R}$ be a compact set, and let $T > \max K$. Then, there exists a random $\varepsilon = \varepsilon(\xi,T,K)>0$ such that, whenever $\xi - \varepsilon < \alpha < \xi < \beta < \xi + \varepsilon$, $\sigg \in \{-,+\}$, $S \in \{L,R\}$, and $x,s \in K$,
\[
g_{(x,s)}^{\alpha \sig,S}(t) = g_{(x,s)}^{\xi -,S}(t)\qquad\text{and}\qquad g_{(x,s)}^{\beta \sig,S}(t) = g_{(x,s)}^{\xi+,S}(t)\qquad\text{for all }t \in [s,T].
\]
\item \label{itm:limits_to_inf} For each $(x,s) \in \mathbb{R}^2$, $t > s$, $\sigg \in \{-,+\}$, and $S \in \{L,R\}$,
$
\lim_{\xi \to \pm \infty} g_{(x,s)}^{\xi \sig,S}(t) = \pm \infty.
$
\item \label{itm:DL_SIG_mont_x} For all $\xi \in \mathbb{R}$, $\sigg \in \{-,+\}$, $s < t$ and $x < y$, $g_{(x,s)}^{\xi \sig,R}(t) \le g_{(y,s)}^{\xi \sig,L}(t)$. More generally, if $x < y$, $s \in \mathbb{R}$, and $g_1$ is a $\xi \sig$ geodesic from $(x,s)$ and $g_2$ is a $\xi \sig$ geodesic from $(y,s)$ such that $g_1(t) = g_2(t)$ for some $t > s$, then $g_1(u) = g_2(u)$ for all $u > t$. In other words, if $g_1$ and $g_2$ intersect, they coalesce at their first point of intersection.
\item \label{itm:DL_SIG_conv_x} For all $\xi \in \mathbb{R}$, $\sigg \in \{-,+\}$, $S \in \{L,R\}$, $x \in \mathbb{R}$, and $s < t$,
\begin{equation} \label{371}
\lim_{w \nearrow x} g_{(w,s)}^{\xi \sig,S}(t) = g_{(x,s)}^{\xi \sig,L}(t),\qquad\text{and}\qquad \lim_{y \searrow x} g_{(y,s)}^{\xi \sig,S}(t) = g_{(x,s)}^{\xi \sig,R}(t),
\end{equation}
and if \; $g_{(x,s)}^{\xi \sig,L}(t) = g_{(x,s)}^{\xi \sig,R}(t) =: g_{(x,s)}^{\xi \sig}(t)$, then for $S \in \{L,R\}$,
\begin{equation} \label{372}
\lim_{(w,u) \rightarrow (x,s)} g_{(w,u)}^{\xi \sig,S}(t) = g_{(x,s)}^{\xi \sig}(t).
\end{equation}
Furthermore,
\begin{equation} \label{373}
\lim_{x \to \pm \infty} g_{(x,s)}^{\xi \sig,S}(t) = \pm \infty.
\end{equation} \end{enumerate} \end{theorem} \begin{remark} \label{rmk:mixing_LR_pm}
In general, Theorem~\ref{thm:g_basic_prop}\ref{itm:DL_mont_dir} cannot be extended to mix $L$ with $R$. Pick a point $(x,s) \in \operatorname{NU}_0$, where $\operatorname{NU}_0$ is defined as in~\eqref{NU0_global}. Then, on the full-probability event of Theorem~\ref{thm:DLNU}, there exists a rational direction $\xi$ and $t > s$ such that
\[
g_{(x,s)}^{\xi -,L}(t) = g_{(x,s)}^{\xi +,L}(t) < g_{(x,s)}^{\xi -,R}(t) = g_{(x,s)}^{\xi +,R}(t).
\]
By Theorem~\ref{thm:g_basic_prop}\ref{itm:DL_SIG_unif}, we may choose $\xi_1 < \xi < \xi_2$ sufficiently close to $\xi$ such that \[ g_{(x,s)}^{\xi_2 -,L}(t) = g_{(x,s)}^{\xi_2+,L}(t) = g_{(x,s)}^{\xi -,L}(t) < g_{(x,s)}^{\xi +,R}(t) = g_{(x,s)}^{\xi_1 -,R}(t) = g_{(x,s)}^{\xi_1+,R}(t). \]
Item~\ref{itm:DL_SIG_mont_x} is an extension of Item 2 of Theorem 3.4 in~\cite{Rahman-Virag-21} to all directions and all pairs of initial points on the same horizontal level.
It is not true that for all $\xi \in \mathbb{R}$, $s < t$, and $x < y$, $g_{(x,s)}^{\xi +,R}(t) \le g_{(y,s)}^{\xi -,L}(t)$. This is discussed further in Remark~\ref{rmk:split_from_all_p} below. \end{remark}
The next theorem controls all semi-infinite geodesics with Busemann geodesics.
\begin{theorem} \label{thm:all_SIG_thm_intro}
The following hold on a single event of probability one. Let \\ $(x_r,t_r)_{r \in \mathbb{R}_{\ge 0}}$ be any net such that $t_r \to \infty$ and $x_r/t_r \to \xi$.
\begin{enumerate} [label=\rm(\roman{*}), ref=\rm(\roman{*})] \itemsep=3pt
\item \label{itm:DL_LRmost_SIG}
Let $(x,s) \in \mathbb{R}^2$ and $\xi \in \mathbb{R}$. For each $r$ large enough so that $t_r > s$, let $g_r:[s,t_r] \to \mathbb{R}$ be a geodesic from $(x,s)$ to $(x_r,t_r)$. Then, for each $t \ge s$,
\begin{equation} \label{987}
g_{(x,s)}^{\xi -,L}(t) \le \liminf_{r \to \infty} g_r(t) \le \limsup_{r \to \infty} g_r(t) \le g_{(x,s)}^{\xi +,R}(t).
\end{equation}
In particular, $g_{(x,s)}^{\xi-,L}$ is the leftmost and $g_{(x,s)}^{\xi+,R}$ the rightmost among \textbf{all} semi-infinite geodesics from $(x,s)$ in direction $\xi$.
\item \label{itm:finite_geod_stick} Let $K \subseteq \mathbb{R}^2$ be compact. Suppose that there is a level $t$ after which all semi-infinite geodesics from $(x,s) \in K$ in direction $\xi$ have coalesced. For $u \ge t$, let $g(u)$ be this geodesic. Then, given $T > t$, there exists $R \in \mathbb{R}_{>0}$ such that for $r \ge R$ and all $(x,s) \in K$, if $g_r:[s,t_r]\to\mathbb{R}$ is a geodesic from $(x,s)$ to $(x_r,t_r)$, then
\[
g_r(u) = g(u) \qquad\text{for all }u \in [t,T].
\]
In particular, suppose there is a unique semi-infinite geodesic from $(x,s)$ in direction $\xi$, denoted by $g_{(x,s)}^\xi$. Then given $T > s$, for sufficiently large $r$, we have
\[
g_r(u) = g_{(x,s)}^\xi(u) \qquad\text{for all }u \in [s,T].
\] \end{enumerate} \end{theorem} \begin{remark} Theorem~\ref{thm:DL_all_coal}\ref{itm:DL_allsigns_coal} below states that the assumed coalescence in Item~\ref{itm:finite_geod_stick} occurs whenever $\xi \notin \Xi$. The second statement of Item~\ref{itm:finite_geod_stick} is in Corollary 3.1 in~\cite{Rahman-Virag-21}. We provide a different proof that uses the regularity of the Busemann process. \end{remark}
\subsection{Proofs}
In this section, we prove Theorems~\ref{thm:DLNU}, \ref{thm:g_basic_prop}, and~\ref{thm:all_SIG_thm_intro}. In each of these, the full-probability event is $\Omega_2$~\eqref{omega2}. We start by proving parts of Theorem~\ref{thm:g_basic_prop}, then go to the proof of Theorem~\ref{thm:DLNU}.
\begin{proof}[Proof of Theorem~\ref{thm:g_basic_prop}, Items~\ref{itm:DL_mont_dir}--\ref{itm:limits_to_inf}]
\noindent \textbf{Item~\ref{itm:DL_mont_dir} (monotonicity of geodesics in the direction parameter)} was already proven as Equation \eqref{eqn:mont_maxes}. In fact, this item holds on $\Omega_1$.
\noindent \textbf{Item~\ref{itm:DL_SIG_unif} (geodesics agree locally for close directions): } This follows a similar proof as the proof of Theorem~\ref{thm:DL_Buse_summ}\ref{itm:DL_unif_Buse_stick}. Let $K$ be a compact subset of $\mathbb{R}$, and let $T$ be an integer greater than $\max K$. Set \[ A = \inf\{g_{(x,s)}^{(\xi - 1)-,L}(T):x,s \in K\},\qquad \text{and}\qquad B = \sup\{g_{(x,s)}^{(\xi + 1)+,R}(T):x,s \in K\}. \] By Lemma~\ref{lem:bounded_maxes} and Item~\ref{itm:DL_mont_dir}, $-\infty < A < B < \infty$. Then, for all $0 < \varepsilon < 1$ sufficiently small, all $\xi- \varepsilon < \alpha < \xi$, and all $x,s \in K$, the functions $z \mapsto \mathcal{L}(x,s;z,T) + W_{\alpha \sig}(z,T;0,T)$ and $z \mapsto \mathcal{L}(x,s;z,T) + W_{\xi-}(z,T;0,T)$ agree on the set $[A,B]$, which contains all maximizers. Hence, for such $\alpha$ and $\sigg \in \{-,+\}$, and $S \in \{L,R\}$, $g_{(x,s)}^{\alpha \sig ,S}(T) = g_{(x,s)}^{\xi -,S}(T)$. Since $g_{(x,s)}^{\alpha \sig,L}:[s,\infty) \to \mathbb{R}$ and $g_{(x,s)}^{\alpha \sig,R}:[s,\infty) \to \mathbb{R}$ define semi-infinite geodesics that are, respectively, the leftmost and rightmost geodesics between any of their points (Theorem~\ref{thm:DL_SIG_cons_intro}\ref{itm:DL_all_SIG}-\ref{itm:DL_LRmost_geod}), it must also hold that for $S \in \{L,R\}$ and $t \in [s,T]$, $g_{(x,s)}^{\alpha \sig,S}(t) = g_{(x,s)}^{\xi -,S}(t) $. Otherwise, taking $S = L$ without loss of generality, there would exist two distinct leftmost geodesics from $(x,s)$ to $(g_{(x,s)}^{\xi -,L}(T),T)$, a contradiction. The proof for the $\xi +$ geodesics where $\beta$ is sufficiently close to $\xi$ from the right is analogous.
\noindent \textbf{Item~\ref{itm:limits_to_inf} (limit of geodesics as direction goes to $\pm \infty)$:} This holds on $\Omega_2$ by definition~\eqref{omega2}.
\noindent We postpone the proof of Items~\ref{itm:DL_SIG_mont_x} and~\ref{itm:DL_SIG_conv_x} until after the following proof. \end{proof}
\begin{proof}[Proof of Theorem~\ref{thm:DLNU} (Description of the sets $\operatorname{NU}_i$)]
By Theorem~\ref{thm:DLBusedc_description}\ref{itm:DL_dc_set_count}, on the event $\Omega_2$, $\alpha \notin \Xi$ for all $\alpha \in \mathbb{Q}$, so we omit the $\pm$ distinction in this case. We first prove~\eqref{109}. If $(x,s) \in \operatorname{NU}_0^{\xi \sig}$ then
$g_{(x,s)}^{\xi \sig,L}(t) < g_{(x,s)}^{\xi \sig,R}(t)$ for some $t > s$. By Theorem~\ref{thm:g_basic_prop}\ref{itm:DL_SIG_unif}, there exists a rational direction $\alpha$ (greater than $\xi$ if $\sigg = +$ and less than $\xi$ if $\sigg = -$) such that \[ g_{(x,s)}^{\alpha,L}(t) = g_{(x,s)}^{\xi \sig,L}(t) < g_{(x,s)}^{\xi \sig,R}(t) =g_{(x,s)}^{\alpha,R}(t). \] Hence, $(x,s) \in \operatorname{NU}_0^\alpha$. An analogous proof shows that $\operatorname{NU}_1 = \bigcup_{\xi \in \mathbb{Q}} \operatorname{NU}_1^\xi$.
\noindent \textbf{Item~\ref{itm:DL_NU_p0}:} By Theorem~\ref{thm:RV-SIG-thm}\ref{itm:pd_fixed}, for fixed direction $\xi$ and fixed initial point $p$, there is almost surely a unique semi-infinite geodesic from $p$ in direction $\xi$, implying that almost surely $p \notin \operatorname{NU}_0^\xi$. The result now follows directly from~\eqref{109} and a union bound. In particular, by definition of the event $\Omega_1\supset \Omega_2$~\eqref{omega1}, for each $(q,r) \in \mathbb{Q}^2$ and $\xi \in \mathbb{Q}$, $(q,r) \notin \operatorname{NU}_0^\xi$. Then, by~\eqref{109}, on the event $\Omega_2$, \\$\operatorname{NU}_0 \subseteq \mathbb{R}^2 \setminus \mathbb{Q}^2$.
\noindent We postpone the proof of Item~\ref{itm:DL_NU_count} until the end of this subsection. \end{proof}
\begin{proof}[Remaining proofs of Theorem~\ref{thm:g_basic_prop}]
\noindent
\textbf{Item~\ref{itm:DL_SIG_mont_x} (Spatial monotonicity of geodesics):} We first prove a weaker result. Namely, for $s \in \mathbb{R}$, $x < y$, $\xi \in \mathbb{R}$, $\sigg \in \{-,+\}$, and $S \in \{L,R\}$, \begin{equation} \label{110} g_{(x,s)}^{\xi \sig,S}(t) \le g_{(y,s)}^{\xi \sig,S}(t)\qquad\text{for all }t \ge s. \end{equation} By continuity of geodesics, it suffices to assume that $z := g_{(x,s)}^{\xi \sig,L}(t) = g_{(y,s)}^{\xi \sig,L}(t)$ for some $t > s$, and then show that $g_{(x,s)}^{\xi \sig,L}(u) = g_{(y,s)}^{\xi \sig,L}(u)$ for all $u > t$. By Theorem~\ref{thm:DL_SIG_cons_intro}\ref{itm:DL_all_SIG}, if $z := g_{(x,s)}^{\xi \sig,S}(t) = g_{(y,s)}^{\xi \sig,S}(t)$, then for $u > t$, both $g_{(x,s)}^{\xi \sig,L}(u)$ and $g_{(y,s)}^{\xi \sig,L}(u)$ are the leftmost maximizer of $\mathcal{L}(z,t;w,u) + W_{\xi \sig}(w,u;0,u)$ over $w \in \mathbb{R}$, so they are equal.
Now, to prove the stated result, we follow a similar argument as Item 2 of Theorem 3.4 in~\cite{Rahman-Virag-21}, adapted to give a global result across all directions, signs, and pairs of points along the same horizontal line. Let $g_1$ be a $\xi \sig$ geodesic from $(x,s)$ and let $g_2$ be a $\xi \sig$ geodesic from $(y,s)$, and assume that $g_1(t) = g_2(t)$ for some $t > s$. By continuity of geodesics, we may take $t$ to be the minimal such time. Choose $r \in (s,t)\cap \mathbb{Q}$ and then choose $q \in (g_1(r),g_2(r)) \cap \mathbb{Q}$. See Figure~\ref{fig:choose_rational}. By Theorem~\ref{thm:DLNU}\ref{itm:DL_NU_p0}, on the event $\Omega_2$, there is a unique $\xi \sig$ Busemann geodesic from $(q,r)$, which we shall call $g = g_{(q,r)}^{\xi \sig,L} = g_{(q,r)}^{\xi \sig,R}$. For $u \ge r$, \begin{equation} \label{210}
g_1(u)\le g_{(x,s)}^{\xi \sig,R}(u) \le g(u) \le g_{(y,s)}^{\xi \sig,L}(u) \le g_2(u). \end{equation} The two middle inequalities come from~\eqref{110}. The two outer inequalities come from the definition of $g_{(x,s)}^{\xi \sig,L/R}(u)$ as the left and rightmost maximizers.
By assumption and~\eqref{210}, $z := g_1(t) = g(t) = g_2(t)$. By Theorem~\ref{thm:DL_SIG_cons_intro}\ref{itm:arb_geod_cons}\ref{itm:maxes}, for $u > t$, $g_1(u),g_2(u)$, and $g(u)$ are all maximizers of $\mathcal{L}(z,t;w,u) + W_{\xi \sig}(w,u;0,u)$ over $w \in \mathbb{R}$. However, since there is a unique $\xi \sig$ geodesic from $(q,r)$, there can be only one such maximizer, so the inequalities in~\eqref{210} are equalities for $u \ge t$. \begin{figure}
\caption{\small Choosing a point $(q,r) \in \mathbb{Q}^2$ whose $\xi \tiny{\boxempty}$ geodesic is unique}
\label{fig:choose_rational}
\end{figure}
\noindent \textbf{Item~\ref{itm:DL_SIG_conv_x} (limits of geodesics in the spatial parameter):} We start by proving~\eqref{371}. We prove the statement for the limits as $w \nearrow x$, and the limits as $w \searrow x$ follow analogously. By Item~\ref{itm:DL_SIG_mont_x}, $z := \lim_{w \nearrow x} g_{(w,s)}^{\xi \sig,S}(t)$ exists and is less than or equal to $g_{(x,s)}^{\xi \sig,L}(t)$. Further, by the same monotonicity, for all $w \in [x - 1,x]$, all maximizers of $\mathcal{L}(w,s;y,t) + W_{\xi \sig}(y,t;0,t)$ over $y \in \mathbb{R}$ lie in the common compact set $[g_{(x - 1,s)}^{\xi \sig,L}(t),g_{(x,s)}^{\xi \sig,R}(t)]$. By continuity of the directed landscape (Lemma~\ref{lem:Landscape_global_bound}), as $w \nearrow x$, the function $y \mapsto \mathcal{L}(w,s;y,t) + W_{\xi \sig}(y,t;0,t)$ converges uniformly on compact sets to the function $y \mapsto \mathcal{L}(x,s;y,t) + W_{\xi \sig}(y,t;0,t)$. Hence, Lemma~\ref{lemma:convergence of maximizers from converging functions} implies that $z$ is a maximizer of $\mathcal{L}(x,s;y,t) + W_{\xi \sig}(y,t;0,t)$ over $y \in \mathbb{R}$. Since $z \le g_{(x,s)}^{\xi \sig,L}(t)$, and $g_{(x,s)}^{\xi \sig,L}(t)$ is the leftmost such maximizer, equality holds.
The proof of~\eqref{372} is similar: in this case, Lemma~\ref{lem:bounded_maxes} implies that for all $(w,u)$ sufficiently close to $(x,s)$, the maximizers of $y \mapsto\mathcal{L}(w,u;y,t) + W_{\xi \sig}(y,t;0,t)$ lie in a common compact set. Then, by Lemma~\ref{lemma:convergence of maximizers from converging functions}, every subsequential limit of $g_{(w,u)}^{\xi \sig,S}(t)$ as $(w,u) \to (x,s)$ is a maximizer of $y \mapsto\mathcal{L}(x,s;y,t) + W_{\xi \sig}(y,t;0,t)$. By assumption, there is only one such maximizer, so the desired convergence holds.
Lastly, to show~\eqref{373}, we recall that the Busemann process evolves as the KPZ fixed point (Theorem~\ref{thm:DL_Buse_summ}\ref{itm:Buse_KPZ_description}). The Busemann functions are continuous and satisfy the asymptotics prescribed in Lemma~\ref{lem:DL_horiz_Buse}\ref{itm:DL_lim}. Therefore, for each $t,\xi$, and $\sigg$, there exists constants $a,b > 0$ so that $|W_{\xi \sig}(x,t;0,t)| \le a + b|x|$. Lemma~\ref{lem:max_restrict}\ref{itm:KPZrestrict} applied to the temporally reflected version of $\mathcal{L}$ states that for sufficiently large $|x|$, $g_{(x,s)}^{\xi \sig,S}(t) \in (x - |x|^{2/3},x + |x|^{2/3})$. \end{proof}
\begin{proof}[Proof of Theorem~\ref{thm:all_SIG_thm_intro}] We remind the reader that this theorem controls arbitrary geodesics via the Busemann geodesics.
\noindent \textbf{Item~\ref{itm:DL_LRmost_SIG}:} Let $\alpha < \xi < \beta$. By directedness of Busemann geodesics (Theorem~\ref{thm:DL_SIG_cons_intro}\ref{itm:DL_all_SIG}) and the assumption $x_r/t_r \to \xi$, for all sufficiently large $r$, \[ g_{(x,s)}^{\alpha -,L}(t_r) < x_r < g_{(x,s)}^{\beta +,R}(t_r). \] Since $g_{(x,s)}^{\alpha -,L}$ is the leftmost geodesic between any of its points and $g_{(x,s)}^{\beta +,R}$ is the rightmost (Theorem~\ref{thm:DL_SIG_cons_intro}\ref{itm:DL_LRmost_geod}), it follows that for $u \in [s,t_r]$, \begin{equation} \label{504} g_{(x,s)}^{\alpha -,L}(u) \le g_r(u) \le g_{(x,s)}^{\beta +,R}(u). \end{equation} Hence, for all $t \ge s$, \[ g_{(x,s)}^{\alpha -,L}(t) \le \liminf_{r \to \infty} g_r(t) \le \limsup_{r \to \infty} g_r(t) \le g_{(x,s)}^{\beta +,R}(t). \] By Theorem~\ref{thm:g_basic_prop}\ref{itm:DL_SIG_unif}, taking limits as $\alpha \nearrow \xi$ and $\beta \searrow \xi$ completes the proof.
\noindent \textbf{Item~\ref{itm:finite_geod_stick}:} Assume that all geodesics in direction $\xi$, starting from a point in the compact set $K$, have coalesced by time $t$, and for $u \ge t$, let $g(u)$ be the spatial location of this common geodesic. By Item~\ref{itm:DL_LRmost_SIG}, for all $p \in K$ and $u \ge t$, \[ g(u) = g_p^{\xi -,L}(u) = g_p^{\xi +,R}(u). \] Let $T > t$ be arbitrary. By Theorem~\ref{thm:g_basic_prop}\ref{itm:DL_SIG_unif}, we may choose $\alpha < \xi < \beta$ such that, for all $p \in K$ and $u \in [t,T]$, \begin{equation} \label{301} g_{(g(t),t)}^{\alpha-,L}(u) = g_p^{\alpha -,L}(u) = g(u) = g_p^{\beta +,R}(u) = g_{(g(t),t)}^{\beta +,R}(u). \end{equation} The outer equalities hold because the geodesics pass through $(g(t),t)$. With this choice of $\alpha,\beta$, by the directedness of Theorem~\ref{thm:DL_SIG_cons_intro}\ref{itm:DL_all_SIG} and since $x_r/t_r \to \xi$, we may choose $r$ large enough so that $t_r \ge T$ and $ g_{(g(t),t)}^{\alpha -,L}(t_r) < x_r < g_{(g(t),t)}^{\beta +,R}(t_r). $
Then, as in the proof of Item~\ref{itm:DL_LRmost_SIG}, for all $u \in [t,t_r]$, \[ g_{(g(t),t)}^{\alpha -,L}(u) \le g_r(u) \le g_{(g(t),t)}^{\beta +,R}(u). \] Combining this with~\eqref{301} completes the proof. \end{proof}
\noindent It remains to prove Theorem~\ref{thm:DLNU}\ref{itm:DL_NU_count}. We first prove a lemma.
\begin{lemma} \label{lem:NU_line} Let $\omega \in \Omega_2$, $\xi \in \mathbb{R}$, $\sigg \in \{-,+\}$, $\mathbb{Q} \ni s < t \in \mathbb{R}$, and assume that there is a nonempty interval $I = (a,b) \subseteq \mathbb{R}$ such that for all $x \in \mathbb{Q}$, $g_{(x,s)}^{\xi \sig}(t) \notin I$ {\rm(}By Theorem~\ref{thm:DLNU}\ref{itm:DL_NU_p0}, we may ignore the $L/R$ distinction when $(x,s) \in \mathbb{Q}^2${\rm)}. Then, there exists $\hat x \in \mathbb{R}$ such that \begin{equation} \label{794} g_{(\hat x,s)}^{\xi \sig,L}(t) \le a < b \le g_{(\hat x,s)}^{\xi \sig,R}(t). \end{equation} \end{lemma} \begin{proof} Choose some $y \in (a,b)$, and let \[ \hat x = \sup\{x \in \mathbb{Q} : g_{(x,s)}^{\xi \sig}(t) < y\}. \] By Equation~\eqref{373} of Theorem~\ref{thm:g_basic_prop}\ref{itm:DL_SIG_conv_x}, $\hat x \in \mathbb{R}$. By the monotonicity of Theorem~\ref{thm:g_basic_prop}\ref{itm:DL_SIG_mont_x}, for all $\mathbb{Q} \ni x < \hat x$, $g_{(x,s)}^{\xi \sig}(t) < y$, while for all $\mathbb{Q} \ni x > \hat x$, $g_{(x,s)}^{\xi \sig}(t) \ge y$. By assumption of the lemma, this further implies that for $\mathbb{Q} \ni x < \hat x$, $g_{(x,s)}^{\xi \sig}(t) \le a$ while for $\mathbb{Q} \ni x > \hat x$, $g_{(x,s)}^{\xi \sig}(t) \ge b$. By taking limits via Equation~\eqref{371} of Theorem~\ref{thm:g_basic_prop}\ref{itm:DL_SIG_conv_x}, we obtain~\eqref{794}. \end{proof}
\begin{proof}[Proof of Theorem~\ref{thm:DLNU}\ref{itm:DL_NU_count} ($\operatorname{NU}_0^{\xi \sig} \cap \mathcal{H}_s$ is countably infinite and unbounded)] We prove the statement in three steps. First, we show that on $\Omega_2$, for all $s \in \mathbb{Q}$, $\xi \in \mathbb{R}$, $\sigg \in \{-,+\}$, the set $\operatorname{NU}_0^{\xi \sig} \cap\, \mathcal{H}_s$ is infinite and unbounded in both directions. Next, we show that, on $\Omega_2$, $\operatorname{NU}_0^{\xi \sig} \cap\, \mathcal{H}_s$ is in fact infinite and unbounded in both directions for all $s \in \mathbb{R}$. Lastly, we show that the set $\operatorname{NU}_0 \cap\, \mathcal{H}_s$ (the union over all directions and signs) is countable.
For the first step, Theorem~\ref{thm:DLNU}\ref{itm:DL_NU_p0} states that, on the event $\Omega_2$, for each $(x,s) \in \mathbb{Q}^2$, $\xi \in \mathbb{R}$, and $\sigg \in \{-,+\}$, there is a unique $\xi \sig$ geodesic $g_{(x,s)}^{\xi \sig}$, and therefore this geodesic is both the leftmost and rightmost $\xi \sig$ geodesic from $(x,s)$. Since leftmost (resp. rightmost) Busemann geodesics are leftmost (rightmost) geodesics between any two of their points (Theorem~\ref{thm:DL_SIG_cons_intro}\ref{itm:DL_LRmost_geod}), it follows that $g_{(x,s)}^{\xi \sig}$, restricted to times $t \in [s,s+2]$, is the unique geodesic from $(x,s)$ to $(g_{(x,s)}^{\xi \sig}(s + 2),s +2)$. By Lemma~\ref{lem:bounded_maxes}, for each compact set $K$, the set
\[
\{g_{(x,s)}^{\xi \sig}(s + 1): x \in \mathbb{Q} \cap K\}
\] is contained in some compact set $K'$. Then, we have the following inclusion of sets: \begin{align} &\quad \; \{g_{(x,s)}^{\xi \sig}(s + 1): x \in \mathbb{Q} \cap K \} \label{873} \subseteq \bigcup_{g \in \mathcal A_{K,K'}}\{g(s + 1) \} \end{align} where \[ \mathcal A_{K,K'} = \{g: \text{$g$ is the unique geodesic from }(x,s) \text{ to }(y,s+2) \text{ for some } x\in K,y \in K'\}. \] By Lemma~\ref{lem:geod_pp}, the set in the RHS of~\eqref{873} is finite, so the set on the LHS is finite as well. Therefore, the set \begin{equation} \label{875} \{g_{(x,s)}^{\xi \sig}(s + 1): x \in \mathbb{Q} \} = \bigcup_{k \in \mathbb{Z}_{>0}}\{g_{(x,s)}^{\xi \sig}(s + 1): x \in \mathbb{Q} \cap [-k,k] \} \end{equation} is a union of finite nested sets. Further, by the ordering of geodesics from Theorem~\ref{thm:g_basic_prop}\ref{itm:DL_SIG_mont_x}, for each $k$, the difference \[ \{g_{(x,s)}^{\xi \sig}(s + 1): x \in \mathbb{Q} \cap [-(k + 1),k + 1] \} \setminus \{g_{(x,s)}^{\xi \sig}(s + 1): x \in \mathbb{Q} \cap [-k,k] \} \] lies entirely in the union of intervals \[ \Bigl(-\infty, \inf \bigl\{g_{(x,s)}^{\xi \sig}(s + 1): x \in \mathbb{Q} \cap [-k,k] \bigr\}\Bigr] \cup \Bigl[\sup \bigl\{g_{(x,s)}^{\xi \sig}(s + 1): x \in \mathbb{Q} \cap [-k,k] \bigr\},\infty\Bigr). \] Therefore, the set~\eqref{875} has no limit points. Further, by Equation~\eqref{373} of Theorem~\ref{thm:g_basic_prop}\ref{itm:DL_SIG_conv_x}, the set~\eqref{875} is unbounded in both directions. These two facts imply that there exist infinitely many nonempty intervals whose intersection with the set~\eqref{875} is empty, and the set of endpoints of such intervals is unbounded. By Lemma~\ref{lem:NU_line}, for each $k > 0$, there exists $(x,s) \in \operatorname{NU}_0^{\xi \sig}$ such that $g_{(x,s)}^{\xi \sig,R}(s + 1) \ge k$, and there exists $(x,s) \in \operatorname{NU}_0^{\xi \sig}$ such that $g_{(x,s)}^{\xi \sig,L}(s + 1) \le -k$. 
Next, assume, by way of contradiction, that the set $\{x \in \mathbb{R}: (x,s) \in \operatorname{NU}_0^{\xi \sig}\}$ has an upper bound $b$. Then, by the monotonicity of Theorem~\ref{thm:g_basic_prop}\ref{itm:DL_SIG_mont_x}, for all $x \in \mathbb{R}$ with $(x,s) \in \operatorname{NU}_0^{\xi \sig}$, $g_{(x,s)}^{\xi \sig,R}(s + 1) \le g_{(b,s)}^{\xi \sig,R}(s +1)$. But this contradicts the fact we showed that $\{g_{(x,s)}^{\xi \sig,R}(s + 1): x \in \mathbb{R}\}$ is not bounded above. Hence, there exists a sequence $y_n \to \infty$ such that $(y_n,s) \in \operatorname{NU}_0^{\xi \sig}$ for all $n$. By a similar argument, there exists a sequence $x_n \to -\infty$ such that $(x_n,s) \in \operatorname{NU}_0^{\xi \sig}$ for all $n$.
Now, for arbitrary $s \in \mathbb{R}$, pick a rational number $T > s$. Pick $(z,T) \in \operatorname{NU}_0^{\xi \sig}$, and let \begin{align*} x_1 = \sup\{ x \in \mathbb{R}: g_{(x,s)}^{\xi\sig, L}(T) \le z \}, \qquad \text{and}\qquad x_2 = \inf\{ x \in \mathbb{R}: g_{(x,s)}^{\xi\sig, R}(T) \ge z \}. \end{align*} By the limits in Equation~\eqref{373} of Theorem~\ref{thm:g_basic_prop}\ref{itm:DL_SIG_conv_x}, $x_1$ and $x_2$ lie in $\mathbb{R}$.
We first show that $x_2 \le x_1$. If not, then choose $x \in (x_1,x_2)$. Then, $g_{(x,s)}^{\xi\sig, R}(T) < z < g_{(x,s)}^{\xi\sig, L}(T)$, contradicting the meaning of L and R. Hence $x_2 \le x_1$. For any $x > x_2$, $g_{(x,s)}^{\xi \sig,R}(T) \ge z$, and by the limit in Equation~\eqref{371} of Theorem~\ref{thm:g_basic_prop}\ref{itm:DL_SIG_conv_x}, $g_{(x_2,s)}^{\xi \sig,R}(T) \ge z$ as well. By an analogous argument, for $x < x_1$, $g_{(x,s)}^{\xi \sig,L}(T) \le z$, and the inequality $g_{(x_1,s)}^{\xi \sig,L}(T) \le z$ holds by the same argument. Hence, for $x \in [x_2,x_1]$, \[ g_{(x,s)}^{\xi \sig,L}(T) \le z,\qquad\text{and}\qquad g_{(x,s)}^{\xi \sig,R}(T) \ge z. \] Then, by the monotonicity of Theorem~\ref{thm:g_basic_prop}\ref{itm:DL_SIG_mont_x}, for $t \ge T$, \begin{equation} \label{1000} g_{(x,s)}^{\xi \sig,L}(t) \le g_{(z,T)}^{\xi \sig,L}(t) \le g_{(z,T)}^{\xi \sig,R}(t) \le g_{(x,s)}^{\xi \sig,R}(t). \end{equation} By assumption that $(z,T) \in \operatorname{NU}_0^{\xi \sig}$, there exists $t > T$ such that the middle inequality in~\eqref{1000} is strict, so $(x,s) \in \operatorname{NU}_0^{\xi \sig}$. Furthermore, by assumption, the set $\{z \in \mathbb{R}: (z,T) \in \operatorname{NU}_0\}$ has neither an upper nor a lower bound. Then, by the $t = T$ case of~\eqref{1000} and a similar argument as for the $s = 0$ case, the set $\{x \in \mathbb{R}: (x,s) \in \operatorname{NU}_0\}$ also has neither an upper nor lower bound.
We lastly show countability of the sets. By~\eqref{109}, it suffices to show that for each $\xi \in \mathbb{Q}$ and $s \in \mathbb{R}$, $\operatorname{NU}_0^{\xi} \cap\, \mathcal{H}_s$ is countable. The proof is that of Theorem 3.4, Item 3 in~\cite{Rahman-Virag-21}, adapted to all horizontal lines simultaneously. For each $(x,s) \in \operatorname{NU}_0^\xi$, there exists $t > s$ such that $g_{(x,s)}^{\xi ,L}(t) < g_{(x,s)}^{\xi ,R}(t)$. By continuity of geodesics, the space between the two geodesics contains an open subset of $\mathbb{R}^2$. By the monotonicity of Theorem~\ref{thm:g_basic_prop}\ref{itm:DL_SIG_mont_x}, for $x < y$, $g_{(x,s)}^{\xi ,R}(t) \le g_{(y,s)}^{\xi ,L}(t)$ for all $t \ge s$. Hence, for $x < y$, with $(x,s),(y,s) \in \operatorname{NU}_0^\xi$, the associated open sets in $\mathbb{R}^2$ are disjoint, and $\operatorname{NU}_0^\xi \cap\, \mathcal{H}_s$ is at most countably infinite. \end{proof}
\section{Coalescence and the global geometry of geodesics} \label{sec:geometry_sec} We can now describe the global structure of the semi-infinite geodesics, beginning with coalescence. \begin{theorem} \label{thm:DL_all_coal} On a single event of full probability, the following hold across all directions $\xi \in \mathbb{R}$ and signs $\sigg \in \{-,+\}$. \begin{enumerate} [label=\rm(\roman{*}), ref=\rm(\roman{*})] \itemsep=3pt
\item \label{itm:DL_allsigns_coal} For all
$p,q \in \mathbb{R}^2$, if $g_1$ and $g_2$ are $\xi \sig$ Busemann geodesics from $p$ and $q$, respectively, then $g_1$ and $g_2$ coalesce. If the first point of intersection of the two geodesics is not $p$ or $q$, then the first point of intersection is the coalescence point of the two geodesics.
\item \label{itm:DL_split_return}
Let $g_1$ and $g_2$ be two distinct $\xi \sig$ Busemann geodesics from an initial point $(x,s)\in \operatorname{NU}_0^{\xi \sig}$. Then, the set $\{t > s: g_1(t) \neq g_2(t)\}$ is a bounded open interval. That is, after the geodesics split, they coalesce exactly when they meet again.
\item \label{itm:unif_coal} For each
compact set $K \subseteq \mathbb{R}^2$, there exists a random $T = T(K,\xi,\sigg)<\infty$ such that for any two $\xi \sig$ geodesics $g_1$ and $g_2$ whose starting points lie in $K$, $g_1(t) = g_2(t)$ for all $t \ge T$. That is, there is a time level $T$ after which all semi-infinite geodesics started from points in $K$ have coalesced into a single path. \end{enumerate} \end{theorem} \begin{remark} Theorem 1 of~\cite{Bhatia-23} implies the following refinements of the results in this section. In Theorem~\ref{thm:DL_all_coal}\ref{itm:DL_split_return}, $\{t > s: g_1(t) \neq g_2(t)\}=(s, r)$ for some $r\in(s,\infty)$. Under Condition~\ref{itm:DL_good_dir} of Theorem~\ref{thm:DL_good_dir_classification} below, the entire collection of semi-infinite geodesics in direction $\xi$ is a tree. \end{remark}
The following gives a full classification of the directions in which geodesics coalesce. We refer the reader to Theorems~\ref{thm:DL_eq_Buse_cpt_paths} and~\ref{thm:Buse_pm_equiv} below for the connection between coalescence and the regularity of the Busemann process.
\begin{theorem} \label{thm:DL_good_dir_classification} On a single event of probability one, the following are equivalent. \begin{enumerate} [label=\rm(\roman{*}), ref=\rm(\roman{*})] \itemsep=3pt
\item \label{itm:DL_good_dir} $\xi \notin \Xi$.
\item \label{itm:DL_LR_all_agree} $g_{p}^{\xi -,S} = g_{p}^{\xi +,S}$ for all $p \in \mathbb{R}^2$ and $S \in \{L,R\}$.
\item \label{itm:DL_good_dir_coal} All semi-infinite geodesics in direction $\xi$ coalesce {\rm(}whether Busemann geodesics or not{\rm)}.
\item \label{itm:DL_good_dir_unique_geod} For all $p \in \mathbb{R}^2 \setminus \operatorname{NU}_0$, there is a unique geodesic starting from $p$ with direction $\xi$.
\item \label{itm:DL_good_dir_pt_unique} There is a unique $\xi$-directed semi-infinite geodesic from some $p\in \mathbb{R}^2$.
\item \label{itm:DL_good_dir_L_unique} There exists $p \in \mathbb{R}^2$ such that $g_{p}^{\xi -,L} = g_{p}^{\xi +,L}$.
\item \label{itm:DL_good_dir_R_unique} There exists $p \in \mathbb{R}^2$ such that $g_{p}^{\xi -,R} = g_{p}^{\xi +,R}$. \end{enumerate} Under these equivalent conditions, the following also holds. \begin{enumerate}[resume, label=\rm(\roman{*}), ref=\rm(\roman{*})] \itemsep=3pt
\item \label{itm:DL_allBuse} From any $p \in \mathbb{R}^2$, all semi-infinite geodesics in direction $\xi$ are Busemann geodesics. \end{enumerate} \end{theorem} \begin{remark} \label{rmk:split_from_all_p} The equivalence~\ref{itm:DL_good_dir}$\Leftrightarrow$\ref{itm:DL_good_dir_L_unique} implies that for all $\xi \in \Xi$ and $p \in \mathbb{R}^2$, the geodesics $g_{p}^{\xi -,L}$ and $g_{p}^{\xi +,L}$ are distinct. The same is true when $L$ is replaced with $R$. Since $g_{p}^{\xi -,L}$ and $g_p^{\xi +,L}$ are both leftmost geodesics between any two of their points (Theorem~\ref{thm:DL_SIG_cons_intro}\ref{itm:DL_LRmost_geod}), if $\xi\in\Xi$, these two geodesics must separate at some time $t\ge s$, and they cannot ever come back together. For each $\xi \in \Xi$, there are two coalescing families of geodesics, namely the $\xi-$ and $\xi +$ geodesics. (See again Figure~\ref{fig:non_unique_comp}.) In particular, whenever $\xi \in \Xi$, $s \in \mathbb{R}$, and $x < y$, $g_{(x,s)}^{\xi +,L}(t) > g_{(y,s)}^{\xi -,R}(t)$ for sufficiently large $t$, as alluded to in Remark~\ref{rmk:mixing_LR_pm}. \end{remark}
\subsection{Proofs} In each of these theorems, the full-probability event is $\Omega_2$~\eqref{omega2}. We start by proving some lemmas that allow us to prove Theorem~\ref{thm:DL_all_coal}. The proof of Theorem~\ref{thm:DL_good_dir_classification} comes at the very end of this subsection. Section~\ref{sec:Buseextraproofs} proves Theorem~\ref{thm:DLSIG_main} as well as lingering results from Section~\ref{sec:Buse_geod_results}. \begin{lemma} \label{lem:Buse_equality_coal} Let $\omega \in \Omega_1$, $s \in \mathbb{R}$ and $x < y \in \mathbb{R}$. Assume, for some $\alpha < \xi$ and $\sigg_1,\sigg_2 \in \{-,+\}$, that $W_{\alpha \sig_1}(y,s;x,s) = W_{\xi \sig_2}(y,s;x,s)$. We also allow $\alpha = \xi$ if $\sigg_1 = -$ and $\sigg_2 = +$. If $t > s$ and $g_{(x,s)}^{\xi \sig_2,R}(t) \le g_{(y,s)}^{\alpha \sig_1,L}(t)$, then for all $u \in [s,t]$, \begin{equation} \label{111} g_{(x,s)}^{\alpha \sig_1,R}(u) = g_{(x,s)}^{\xi \sig_2,R}(u)\qquad\text{and}\qquad g_{(y,s)}^{\alpha \sig_1,L}(u) = g_{(y,s)}^{\xi \sig_2,L}(u). \end{equation} \end{lemma} \begin{proof} By assumption, whenever $w < z$ and $t \in \mathbb{R}$, Theorem~\ref{thm:DL_Buse_summ}\ref{itm:DL_Buse_gen_mont} gives \begin{equation} \label{100} W_{\alpha \sig_1}(z,t;w,t) \le W_{\xi \sig_2}(z,t;w,t). \end{equation} For the rest of the proof we suppress the $\sigg_1,\sigg_2$ notation. By Theorem~\ref{thm:DL_Buse_summ}\ref{itm:DL_Buse_add},\ref{itm:Buse_KPZ_description}, \begin{align} W_\xi(y,s;x,s) &= W_\xi(y,s;0,t) - W_\xi(x,s;0,t) \nonumber \\ &= \sup_{z \in \mathbb{R}}\{\mathcal{L}(y,s;z,t) + W_\xi(z,t;0,t)\} - \sup_{z \in \mathbb{R}}\{\mathcal{L}(x,s;z,t) + W_\xi(z,t;0,t)\}, \label{eqn:W_queue} \end{align} and the same with $\xi$ replaced by $\alpha$. Recall that $g_{(x,s)}^{\xi \sig,L}(t)$ and $g_{(x,s)}^{\xi \sig,R}(t)$ are, respectively, the leftmost and rightmost maximizers of $\mathcal{L}(x,s;z,t) + W_{\xi \sig}(z,t;0,t)$ over $z \in \mathbb{R}$. 
Understanding that these quantities depend on $s$ and $t$, we use the shorthand notation $g_x^{\xi,R} = g_{(x,s)}^{\xi \sig_2,R}(t)$, and similarly with the other quantities. Then, we have \begin{align}
\mathcal{L}(x,s;g_x^{\xi,R},t) + W_\xi(g_x^{\xi,R},t;0,t) &- (\mathcal{L}(x,s;g_x^{\xi,R},t) + W_\alpha(g_x^{\xi,R},t;0,t)) \nonumber \\
\ge \sup_{z \in \mathbb{R}}\{\mathcal{L}(x,s;z,t) + W_\xi(z,t;0,t)\} &- \sup_{z \in \mathbb{R}}\{\mathcal{L}(x,s;z,t) + W_\alpha(z,t;0,t)\} \label{101}\\
=\sup_{z \in \mathbb{R}}\{\mathcal{L}(y,s;z,t) + W_\xi(z,t;0,t)\} &- \sup_{z \in \mathbb{R}}\{\mathcal{L}(y,s;z,t) + W_\alpha(z,t;0,t)\} \nonumber \\
\ge \mathcal{L}(y,s;g_y^{\alpha,L},t) + W_\xi(g_y^{\alpha,L},t;0,t) &- (\mathcal{L}(y,s;g_y^{\alpha,L},t) + W_\alpha(g_y^{\alpha,L},t;0,t)), \label{102} \end{align} where the middle equality came from the assumption that $W_{\xi}(y,s;x,s) = W_{\alpha}(y,s;x,s)$ and Equation~\eqref{eqn:W_queue} applied to both $\xi$ and $\alpha$. Rearranging the first and last lines yields \[ W_\xi(g_y^{\alpha,L},t;g_{x}^{\xi,R},t) \le W_\alpha(g_y^{\alpha,L},t;g_{x}^{\xi,R},t). \] However, the assumption $g_x^{\xi,R} \le g_{y}^{\alpha,L}$ combined with~\eqref{100} implies that this inequality is an equality. Hence, inequalities~\eqref{101} and~\eqref{102} are also equalities. From the equality~\eqref{101}, \[ \mathcal{L}(x,s;g_x^{\xi,R},t) + W_\alpha(g_x^{\xi,R},t;0,t) = \sup_{z \in \mathbb{R}}\{\mathcal{L}(x,s;z,t) + W_\alpha(z,t;0,t)\}, \] so $z=g_x^{\xi,R}$ is a maximizer of $\mathcal{L}(x,s;z,t) + W_\alpha(z,t;0,t)$. By definition, $g_{x}^{\alpha,R}$ is the rightmost maximizer, and by geodesic ordering (Theorem~\ref{thm:g_basic_prop}\ref{itm:DL_mont_dir}), $g_x^{\xi,R} \ge g_x^{\alpha,R}$, so $g_x^{\xi,R} = g_x^{\alpha,R}$. An analogous argument applied to~\eqref{102} implies $g_y^{\alpha,L} = g_y^{\xi,L}$. We have shown that \[ g_{(x,s)}^{\alpha \sig_1,R}(t) = g_{(x,s)}^{\xi \sig_2,R}(t),\qquad\text{and}\qquad g_{(y,s)}^{\alpha \sig_1,L}(t) = g_{(y,s)}^{\xi \sig_2,L}(t). \] Since $g_{(x,s)}^{\alpha \sig_1,R}$ and $g_{(x,s)}^{\xi \sig_2,R}$ are both the rightmost geodesics between any two of their points and similarly with the leftmost geodesics from $(y,s)$ (Theorem~\ref{thm:DL_SIG_cons_intro}\ref{itm:DL_LRmost_geod}), Equation~\eqref{111} holds for all $u \in [s,t]$, as desired. \end{proof}
\begin{lemma} \label{lem:DL_LR_coal} Let $\omega \in \Omega_2$, $s \in \mathbb{R}$, and $x < y$. If, for some $\alpha < \xi$ and $\sigg_1,\sigg_2 \in \{-,+\}$ we have that $W_{\alpha \sig_1}(y,s;x,s) = W_{\xi \sig_2}(y,s;x,s)$, then $g_{(x,s)}^{\alpha \sig_1,R}$ coalesces with $g_{(y,s)}^{\alpha \sig_1,L}$, $g_{(x,s)}^{\xi \sig_2,R}$ coalesces with $g_{(y,s)}^{\xi \sig_2,L}$, and the coalescence points of the two pairs of geodesics are the same. \end{lemma} \begin{proof} By Theorem~\ref{thm:DL_SIG_cons_intro}\ref{itm:DL_all_SIG}, $g_{(x,s)}^{\xi \sig_2,R}(t)/t \to \xi$ while $g_{(y,s)}^{\alpha \sig_1,L}(t)/t \to \alpha$ as $t \to \infty$. By this and continuity of geodesics, there exists a minimal time $t > s$ such that $z := g_{(x,s)}^{\xi \sig_2,R}(t) = g_{(y,s)}^{\alpha \sig_1,L}(t)$. By Lemma~\ref{lem:Buse_equality_coal}, \[ g_{(x,s)}^{\alpha \sig_1,R}(u) = g_{(x,s)}^{\xi \sig_2,R}(u)\qquad\text{and}\qquad g_{(y,s)}^{\alpha \sig_1,L}(u) = g_{(y,s)}^{\xi \sig_2,L}(u) \qquad \text{for all }u \in [s,t]. \] Since $t$ was chosen to be minimal, Theorem~\ref{thm:g_basic_prop}\ref{itm:DL_SIG_mont_x} implies that the pair $g_{(x,s)}^{\alpha \sig_1,R}$, $g_{(y,s)}^{\alpha \sig_1,L}$ and the pair $g_{(x,s)}^{\xi \sig_2,R}$, $g_{(y,s)}^{\xi \sig_2,L}$ both coalesce at $(z,t)$. \end{proof}
\begin{proof}[Proof of Theorem~\ref{thm:DL_all_coal}]
\noindent \textbf{Item~\ref{itm:DL_allsigns_coal} (Coalescence):} Let $g_1$ and $g_2$ be $\xi \sigg$ Busemann geodesics from $(x,s)$ and $(y,t)$, respectively, and take $s \le t$ without loss of generality. Let $a = (g_1(t) \wedge y) - 1$ and $b = (g_1(t) \vee y) + 1$. By Theorem~\ref{thm:g_basic_prop}\ref{itm:DL_SIG_mont_x}, for all $u \ge t$, \begin{equation} \label{112} g_{(a,t)}^{\xi \sig,R}(u) \le g_1(u) \wedge g_2(u) \le g_1(u)\vee g_2(u) \le g_{(b,t)}^{\xi \sig,L}(u). \end{equation} By Theorem~\ref{thm:DL_Buse_summ}\ref{itm:DL_unif_Buse_stick}, there exists $\alpha$ sufficiently close to $\xi$ (from the left for $\sigg = -$ and from the right for $\sigg = +$) such that $W_{\xi \sig}(b,t;a,t) = W_{\alpha \sig}(b,t;a,t)$. By Lemma~\ref{lem:DL_LR_coal}, $g_{(a,t)}^{\xi \sig,R}$ coalesces with $g_{(b,t)}^{\xi \sig,L}$. Then, for $u$ large enough, all inequalities in~\eqref{112} are equalities, and $g_1$ and $g_2$ coalesce.
If the first point of intersection is not $(y,t)$, then $g_1(t) \neq y$, and the coalescence point of $g_1$ and $g_2$ is the first point of intersection by Theorem~\ref{thm:g_basic_prop}\ref{itm:DL_SIG_mont_x}.
\noindent \textbf{Item~\ref{itm:DL_split_return} (Geodesics coalesce when they meet):} Let $(x,s) \in \operatorname{NU}_0^{\xi \sig}$, and let $g_1$ and $g_2$ be two distinct $\xi \sig$ Busemann geodesics from $(x,s)$. The set $\text{GNEQ} := \{t>s:g_1(t) \neq g_2(t)\}$ is therefore nonempty and infinite by continuity of $g_1$ and $g_2$. Assume, by way of contradiction, that $\text{GNEQ}$ is not an open interval. By continuity of geodesics, $\text{GNEQ}$ cannot be a closed or half-closed interval, so $\text{GNEQ}$ is not path connected. Thus, there exist $t_1 < t_2 < t_3$ such that \[ g_1(t_1) \neq g_2(t_1),\quad g_1(t_2) = g_2(t_2),\quad \text{ and}\quad g_1(t_3) \neq g_2(t_3). \]
The geodesics $g_1|_{[t_1,\infty)}$ and $g_2|_{[t_1,\infty)}$ started from $(g_1(t_1),t_1)$ and $(g_2(t_1),t_1)$, respectively, are both Busemann geodesics by their construction in Theorem~\ref{thm:DL_SIG_cons_intro}. Since the geodesics $g_1|_{[t_1,\infty)}$ and $g_2|_{[t_1,\infty)}$ start at different spatial locations (namely $g_1(t_1)$ and $g_2(t_1)$) along the same time level $t_1$, they cannot intersect at either of their starting points. By Item~\ref{itm:DL_allsigns_coal}, the two geodesics $g_1|_{[t_1,\infty)}$ and $g_2|_{[t_1,\infty)}$ must coalesce, and the first point of intersection is the coalescence point. Since $g_1(t_2) = g_2(t_2)$, this implies that $g_1(t) = g_2(t)$ for all $t > t_2$, a contradiction to the existence of $t_3$.
\noindent \textbf{Item~\ref{itm:unif_coal} (Uniformity of coalescence):} Let $\xi \in \mathbb{R}$, $\sigg \in \{-,+\}$, and let the compact set $K$ be given. Let $S$ be the smallest integer greater than $\max\{s: (x,s) \in K\}$. Set \[ A := \inf\{g_{(x,s)}^{\xi \sig,L}(S): (x,s) \in K\},\qquad\text{and}\qquad B := \sup\{g_{(x,s)}^{\xi \sig,R}(S):(x,s) \in K\}. \] By Lemma~\ref{lem:bounded_maxes}, $-\infty < A \le B < \infty$. Then, by Theorem~\ref{thm:g_basic_prop}\ref{itm:DL_SIG_mont_x}, whenever $g$ is a $\xi \sig$ geodesic starting from $(x,s) \in K$, \[ g_{(A,S)}^{\xi \sig,L}(t) \le g(t) \le g_{(B,S)}^{\xi \sig,R}(t) \qquad\text{for all }t \ge S. \] To complete the proof, let $T$ be the time at which $g_{(A,S)}^{\xi \sig,L}$ and $g_{(B ,S)}^{\xi \sig,R}$ coalesce, which is guaranteed to be finite by Item~\ref{itm:DL_allsigns_coal}. \end{proof}
For two initial points on a horizontal level, as $\xi$ varies, a constant Busemann process corresponds to a constant coalescence point of the geodesics.
The non-uniqueness of geodesics requires us to be careful about the choice of left and right geodesic. \begin{definition} \label{def:coal_pt} For $s \in \mathbb{R}$ and $x < y$, let $\mathbf z^{\xi \sig}(y,s;x,s)$ be the coalescence point of $g_{(y,s)}^{\xi \sig,L}$ and $g_{(x,s)}^{\xi \sig,R}$. \end{definition}
\begin{theorem} \label{thm:DL_eq_Buse_cpt_paths} On a single event of probability one, for all reals $\alpha < \beta$, $s$, and $x < y$, the following are equivalent. \begin{enumerate}[label=\rm(\roman{*}), ref=\rm(\roman{*})] \itemsep=3pt \item \label{itm:DL_buse_eq}$W_{\alpha +}(y,s;x,s) = W_{\beta -}(y,s;x,s)$. \item \label{itm:DL_coal_pt_equal} $\mathbf z^{\alpha +}(y,s;x,s) = \mathbf z^{\beta -}(y,s;x,s)$. \item \label{itm:DL_paths} There exist $t > s$ and $z \in \mathbb{R}$ such that there are paths $g_1:[s,t] \to \mathbb{R}$ {\rm(}connecting $(x,s)$ and $(z,t)${\rm)} and $g_2:[s,t] \to \mathbb{R}$ {\rm(}connecting $(y,s)$ to $(z,t)${\rm)} such that for all $\xi \in (\alpha,\beta)$, $\sigg \in \{-,+\}$, and $u \in [s,t)$, \begin{equation} \label{124}\begin{aligned} g_1(u) &= g_{(x,s)}^{\xi \sig,R}(u) = g_{(x,s)}^{\alpha +,R}(u) = g_{(x,s)}^{\beta -,R}(u) \\ &< g_2(u) =g_{(y,s)}^{\xi \sig,L}(u) = g_{(y,s)}^{\alpha +,L}(u) = g_{(y,s)}^{\beta -,L}(u). \end{aligned} \end{equation} \end{enumerate} \end{theorem}
\begin{proof}
\ref{itm:DL_buse_eq}$\Rightarrow$\ref{itm:DL_coal_pt_equal} follows from Lemma~\ref{lem:DL_LR_coal}.
\noindent \ref{itm:DL_coal_pt_equal}$\Rightarrow$\ref{itm:DL_buse_eq}: Assume $(z,t) := \mathbf z^{\alpha +}(y,s;x,s) = \mathbf z^{\beta -}(y,s;x,s)$. By additivity (Theorem~\ref{thm:DL_Buse_summ}\ref{itm:DL_Buse_add}) and Theorem~\ref{thm:DL_SIG_cons_intro}\ref{itm:DL_all_SIG}, \begin{align*} W_{\alpha +}(y,s;x,s) &= W_{\alpha +}(y,s;z,t) - W_{\alpha +}(x,s;z,t) \\ &= \mathcal{L}(y,s;z,t) - \mathcal{L}(x,s;z,t) \\ &= W_{\beta -}(y,s;z,t) - W_{\beta -}(x,s;z,t) = W_{\beta -}(y,s;x,s). \end{align*}
\noindent \ref{itm:DL_coal_pt_equal}$\Rightarrow$\ref{itm:DL_paths}: Let $(z,t)$ be as in the proof of \ref{itm:DL_coal_pt_equal}$\Rightarrow$\ref{itm:DL_buse_eq}. By Theorem~\ref{thm:DL_SIG_cons_intro}\ref{itm:DL_LRmost_geod}, the restriction of $g_{(x,s)}^{\alpha +,R}$ and $g_{(x,s)}^{\beta -,R}$ to the domain $[s,t]$ are both rightmost geodesics between $(x,s)$ and $(z,t)$, and therefore they agree on this restricted domain. Similarly, $g_{(y,s)}^{\alpha +,L}$ and $g_{(y,s)}^{\beta -,L}$ agree on the domain $[s,t]$. By the monotonicity of Theorem~\ref{thm:g_basic_prop}\ref{itm:DL_mont_dir}, and since $(z,t)$ is the common coalescence point,~\eqref{124} holds for $u \in [s,t)$, as desired.
\noindent \ref{itm:DL_paths}$\Rightarrow$\ref{itm:DL_coal_pt_equal} is immediate. \end{proof}
\begin{theorem} \label{thm:Buse_pm_equiv} On a single event of probability one, for all reals $s,\xi \in \mathbb{R}$, and $x < y$, the following are equivalent. \begin{enumerate} [label=\rm(\roman{*}), ref=\rm(\roman{*})] \itemsep=3pt
\item \label{itm:DL_pm_Buse_eq} $W_{\xi -}(y,s;x,s) = W_{\xi +}(y,s;x,s).$
\item \label{itm:DL_pm_coal_pt} $\mathbf z^{\xi -}(y,s;x,s) = \mathbf z^{\xi +}(y,s;x,s)$.
\item \label{itm:DL_disjoint_paths} $g_{(x,s)}^{\xi-,R}(t) = g_{(y,s)}^{\xi +,L}(t)$ for some $t > s$, i.e., the paths $g_{(x,s)}^{\xi -,R}$ and $g_{(y,s)}^{\xi +,L}$ intersect. \end{enumerate} \end{theorem} \begin{remark} In Item~\ref{itm:DL_disjoint_paths}, if $\xi \in \Xi$, then despite intersecting, the geodesics $g_{(x,s)}^{\xi -,R}$ and $g_{(y,s)}^{\xi +,L}$ cannot coalesce. This follows from Theorem~\ref{thm:DL_good_dir_classification}, which gives a full classification of the directions in which all semi-infinite geodesics coalesce. \end{remark} \begin{proof}[Proof of Theorem~\ref{thm:Buse_pm_equiv}] \ref{itm:DL_pm_Buse_eq}$\Rightarrow$\ref{itm:DL_pm_coal_pt}: If $W_{\xi-}(y,s;x,s) = W_{\xi+}(y,s;x,s)$, then Theorem~\ref{thm:DL_Buse_summ}\ref{itm:DL_unif_Buse_stick} implies that for some $\alpha < \xi < \beta$, $W_{\alpha +}(y,s;x,s) = W_{\beta -}(y,s;x,s)$. Then, we apply \ref{itm:DL_buse_eq}$\Rightarrow$\ref{itm:DL_paths} of Theorem~\ref{thm:DL_eq_Buse_cpt_paths} to conclude that for some $t > s$ and $z \in \mathbb{R}$, \[ g_{(x,s)}^{\xi -,R}(u) = g_{(x,s)}^{\xi +,R}(u) < g_{(y,s)}^{\xi -,L}(u) = g_{(y,s)}^{\xi +,L}(u),\qquad\text{for }u \in [s,t), \] whereas for $u = t$, all terms above equal some common value $z$. Therefore, $(z,t) = \mathbf z^{\xi -}(y,s;x,s) = \mathbf z^{\xi +}(y,s;x,s)$.
\noindent \ref{itm:DL_pm_coal_pt}$\Rightarrow$\ref{itm:DL_pm_Buse_eq}: Similarly as in the proof of \ref{itm:DL_coal_pt_equal}$\Rightarrow$\ref{itm:DL_buse_eq} of Theorem~\ref{thm:DL_eq_Buse_cpt_paths}, if $(z,t) = \mathbf z^{\xi -}(y,s;x,s) = \mathbf z^{\xi +}(y,s;x,s)$, then $ W_{\xi -}(y,s;x,s) = \mathcal{L}(y,s;z,t) - \mathcal{L}(x,s;z,t) = W_{\xi +}(y,s;x,s). $
\noindent \ref{itm:DL_pm_coal_pt}$\Rightarrow$\ref{itm:DL_disjoint_paths}: Assume $(z,t) = \mathbf z^{\xi -}(y,s;x,s) = \mathbf z^{\xi +}(y,s;x,s)$. Then, $g_{(x,s)}^{\xi -,R}(t) = z = g_{(y,s)}^{\xi +,L}(t)$.
\noindent \ref{itm:DL_disjoint_paths}$\Rightarrow$\ref{itm:DL_pm_coal_pt}:
Assume that $g_{(x,s)}^{\xi-,R}(t) = g_{(y,s)}^{\xi +,L}(t)$ for some $t > s$. Let $t$ be the minimal such time, and let $(z,t)$ be the point where the geodesics first intersect. By Theorem~\ref{thm:g_basic_prop}, Items~\ref{itm:DL_mont_dir} and~\ref{itm:DL_SIG_mont_x}, for $u > s$,
\begin{equation} \label{406}
g_{(x,s)}^{\xi-,R}(u) \le g_{(x,s)}^{\xi +,R}(u) \wedge g_{(y,s)}^{\xi -,L}(u) \le g_{(x,s)}^{\xi +,R}(u) \vee g_{(y,s)}^{\xi -,L}(u) \le g_{(y,s)}^{\xi +,L}(u).
\end{equation}
In particular, when $u = t$, all inequalities in~\eqref{406} are equalities. Further, since $g_{(x,s)}^{\xi -,R}$,$g_{(x,s)}^{\xi +,R}$ are rightmost geodesics between $(x,s)$ and $(z,t)$ (Theorem~\ref{thm:DL_SIG_cons_intro}\ref{itm:DL_LRmost_geod}), $g_{(x,s)}^{\xi -,R}(u) = g_{(x,s)}^{\xi +,R}(u)$ for $u \in [s,t]$. Similarly, $g_{(y,s)}^{\xi -,L}(u) = g_{(y,s)}^{\xi +,L}(u)$ for $u \in [s,t]$. Since $t$ was chosen minimally for $g_{(x,s)}^{\xi-,R}(t) = g_{(y,s)}^{\xi +,L}(t)$, we have $(z,t) = \mathbf z^{\xi -}(y,s;x,s) = \mathbf z^{\xi +}(y,s;x,s)$. \end{proof}
\begin{proof}[Proof of Theorem~\ref{thm:DL_good_dir_classification} (Classification of coalescence directions)] \ref{itm:DL_good_dir}$\Rightarrow$\ref{itm:DL_LR_all_agree}: If $\xi \notin \Xi$, then $W_{\xi -} = W_{\xi +}$, so \ref{itm:DL_LR_all_agree} follows by the construction of the Busemann geodesics from the Busemann functions.
\noindent \ref{itm:DL_LR_all_agree}$\Rightarrow$\ref{itm:DL_good_dir_coal}: Since a geodesic in direction $\xi$ from $(x,s)$ must pass through each horizontal level $t > s$, it is sufficient to show that, for $s \in \mathbb{R}$ and $x < y$, whenever $g_1$ is a semi-infinite geodesic from $(x,s)$ in direction $\xi$ and $g_2$ is a semi-infinite geodesic from $(y,s)$ in direction $\xi$, $g_1$ and $g_2$ coalesce. Assuming~\ref{itm:DL_LR_all_agree} and using Theorem~\ref{thm:all_SIG_thm_intro}\ref{itm:DL_LRmost_SIG}, for all $t > s$, \[ g_{(x,s)}^{\xi +,L}(t) = g_{(x,s)}^{\xi -,L}(t) \le g_1(t) \wedge g_2(t) \le g_1(t) \vee g_2(t) \le g_{(y,s)}^{\xi +,R}(t). \] By Theorem~\ref{thm:DL_all_coal}\ref{itm:DL_allsigns_coal}, $g_{(x,s)}^{\xi +,L}$ and $g_{(y,s)}^{\xi +,R}$ coalesce, so all inequalities above are equalities for large $t$, and $g_1$ and $g_2$ coalesce.
\noindent \ref{itm:DL_good_dir_coal}$\Rightarrow$\ref{itm:DL_good_dir}: We prove the contrapositive. If $\xi \in \Xi$, then by Theorem~\ref{thm:DL_Buse_summ}\ref{itm:DL_Buse_gen_mont}--\ref{itm:Buse_KPZ_description}, $W_{\xi -}(y,s;x,s) < W_{\xi +}(y,s;x,s)$ for some $x < y$ and $s \in \mathbb{R}$. By \ref{itm:DL_pm_Buse_eq}$\Leftrightarrow$\ref{itm:DL_disjoint_paths} of Theorem~\ref{thm:Buse_pm_equiv}, $g_{(x,s)}^{\xi-,R}(t) < g_{(y,s)}^{\xi +,L}(t)$ for all $t > s$. In particular, $g_{(x,s)}^{\xi-,R}$ and $g_{(y,s)}^{\xi +,L}$ do not coalesce.
\noindent \ref{itm:DL_LR_all_agree}$\Rightarrow$\ref{itm:DL_good_dir_unique_geod}: By definition of $\operatorname{NU}_0$, whenever $p\notin \operatorname{NU}_0$, $g_p^{\xi \sig,L} = g_{p}^{\xi \sig,R}$ for $\xi \in \mathbb{R}$ and $\sigg \in \{-,+\}$. Hence, assuming $p \notin \operatorname{NU}_0$ and $g_{p}^{\xi -,R} = g_{p}^{\xi +,R}$, we also have $g_{p}^{\xi-,L} = g_{p}^{\xi +,R}$, so there is a unique geodesic from $p$ in direction $\xi$ by Theorem~\ref{thm:all_SIG_thm_intro}\ref{itm:DL_LRmost_SIG}.
\noindent \ref{itm:DL_good_dir_unique_geod}$\Rightarrow$\ref{itm:DL_good_dir_pt_unique}: By Theorem~\ref{thm:DLNU}\ref{itm:DL_NU_p0}, on the event $\Omega_2$, $\operatorname{NU}_0$ contains no points of $\mathbb{Q}^2$, and therefore, $\operatorname{NU}_0$ is not all of $\mathbb{R}^2$.
\noindent \ref{itm:DL_good_dir_pt_unique}$\Rightarrow$\ref{itm:DL_good_dir_L_unique} and \ref{itm:DL_good_dir_pt_unique}$\Rightarrow$\ref{itm:DL_good_dir_R_unique} are direct consequences of Theorem~\ref{thm:all_SIG_thm_intro}\ref{itm:DL_LRmost_SIG}: If there is a unique semi-infinite geodesic in direction $\xi$ from a point $p \in \mathbb{R}^2$, then $g_{p}^{\xi -,L} = g_{p}^{\xi +,L} = g_{p}^{\xi -,R} = g_{p}^{\xi +,R}$.
\noindent \ref{itm:DL_good_dir_L_unique}$\Rightarrow$\ref{itm:DL_LR_all_agree}: Let $p$ be a point from which $g_{p}^{\xi-,L} = g_{p}^{\xi +,L}$, and call this common geodesic $g$. Let $q$ be an arbitrary point in $\mathbb{R}^2$. By Theorem~\ref{thm:DL_all_coal}\ref{itm:DL_allsigns_coal}, $g_{q}^{\xi -,L}, g_{q}^{\xi +,L}, g_{q}^{\xi - ,R}$, and $g_{q}^{\xi +,R}$ each coalesce with $g$, so $g_{q}^{\xi -,L}$ and $g_{q}^{\xi +,L}$ coalesce. Since both geodesics are the leftmost geodesics between their points by Theorem~\ref{thm:DL_SIG_cons_intro}\ref{itm:DL_LRmost_geod}, they must be the same. Similarly, $g_{q}^{\xi-,R} = g_{q}^{\xi +,R}$.
\noindent \ref{itm:DL_good_dir_R_unique}$\Rightarrow$\ref{itm:DL_LR_all_agree}: follows by the same proof.
\noindent \textbf{Item~\ref{itm:DL_allBuse}:} Let $\xi \in \mathbb{R} \setminus \Xi$, and let $g$ be a semi-infinite geodesic in direction $\xi$, starting from a point $(x,s) \in \mathbb{R}^2$. By Lemma~\ref{lem:L_and_Buse_ineq} and Theorem~\ref{thm:DL_SIG_cons_intro}\ref{itm:arb_geod_cons}, it is sufficient to show that for sufficiently large $t$, \begin{equation} \label{647} \mathcal{L}(x,s;g(t),t) = W_{\xi}(x,s;g(t),t). \end{equation} (We dropped the $\pm$ distinction since $W_{\xi -} = W_{\xi +}$.) By Item~\ref{itm:DL_good_dir_coal}, $g$ coalesces with $g_{(x,s)}^{\xi,R}$. Then, for sufficiently large $t$, $g(t) = g_{(x,s)}^{\xi,R}(t)$ and by Theorem~\ref{thm:DL_SIG_cons_intro}\ref{itm:DL_all_SIG},~\eqref{647} holds. \end{proof}
\subsection{Remaining proofs from Section~\ref{sec:Buse_geod_results} and Proof of Theorem~\ref{thm:DLSIG_main}} \label{sec:Buseextraproofs} We complete some unfinished business. \begin{proof}[Proof of Items~\ref{itm:BuseLim1}--\ref{itm:global_attract} of Theorem~\ref{thm:DL_Buse_summ} and the mixing in Theorem~\ref{thm:Buse_dist_intro}\ref{itm:stationarity}] We continue to work on the event $\Omega_2$.
\noindent \textbf{Item~\ref{itm:BuseLim1} of Theorem~\ref{thm:DL_Buse_summ} (Busemann limits I):} By Theorem~\ref{thm:DL_good_dir_classification}\ref{itm:DL_allBuse}, if $\xi \notin \Xi$, all $\xi$-directed semi-infinite geodesics are Busemann geodesics, and they all coalesce. By Theorem~\ref{thm:DL_all_coal}\ref{itm:unif_coal}, there exists a level $T$ such that all geodesics from points starting in the compact set $K$ have coalesced by time $T$. Let $(Z,T)$ denote the location of the point of the common geodesics at time $T$. Let $r_t = (z_t,u_t)_{t \in \mathbb{R}_{\ge 0}}$ be any net with $u_t \to \infty$ and $z_t/u_t \to \xi$. By Theorem~\ref{thm:all_SIG_thm_intro}\ref{itm:finite_geod_stick}, for all sufficiently large $t$ and $p \in K$, all geodesics from $p$ to $r_t$ pass through $(Z,T)$. Then, for $p,q \in K$, \[ \mathcal{L}(p;r_t) - \mathcal{L}(q;r_t) = \mathcal{L}(p;Z,T) + \mathcal{L}(Z,T;r_t) - (\mathcal{L}(q;Z,T) + \mathcal{L}(Z,T;r_t)). \] By Theorems~\ref{thm:DL_SIG_cons_intro}\ref{itm:arb_geod_cons}\ref{itm:weight_of_geod} and~\ref{thm:DL_Buse_summ}\ref{itm:DL_Buse_add}, the right-hand side is equal to \[ W_\xi(p;Z,T) - W_\xi(q;Z,T) = W_\xi(p;q). \]
\noindent \textbf{Item~\ref{itm:BuseLim2} of Theorem~\ref{thm:DL_Buse_summ} (Busemann limits II):} By Theorem~\ref{thm:DLBusedc_description}\ref{itm:DL_dc_set_count}, on the event $\Omega_2$, $\Xi$ contains no rational directions. Then, for arbitrary $\xi \in \mathbb{R}$, $s \in \mathbb{R}$, $x < y \in \mathbb{R}$, $\alpha,\beta \in \mathbb{Q}$ with $\alpha < \xi < \beta$, and a net $(z_r,u_r)$ with $u_r \to \infty$ and $z_r/u_r \to \xi$, for sufficiently large $r$, $\alpha u_r < z_r < \beta u_r$. Theorem~\ref{thm:DL_Buse_summ}\ref{itm:BuseLim1} gives the existence of the limits in the first and last lines below, while the monotonicity of Lemma~\ref{lem:DL_crossing_facts}\ref{itm:DL_crossing_lemma} justifies the first and last inequalities: \begin{align*} W_\alpha(y,s;x,s) &= \lim_{r \to \infty} \mathcal{L}(y,s;\alpha u_r,u_r) - \mathcal{L}(x,s;\alpha u_r,u_r) \\ &\le \liminf_{r \to \infty} \mathcal{L}(y,s;z_r,u_r) - \mathcal{L}(x,s;z_r,u_r) \\ &\le \limsup_{r \to \infty} \mathcal{L}(y,s;z_r,u_r) - \mathcal{L}(x,s;z_r,u_r)\\ &\le \lim_{r \to \infty} \mathcal{L}(y,s;\beta u_r,u_r) -\mathcal{L}(x,s;\beta u_r,u_r)= W_{\beta}(y,s;x,s). \end{align*} Sending $\mathbb{Q} \ni \alpha \nearrow \xi$ and $\mathbb{Q} \ni \beta \searrow \xi$ and using Item~\ref{itm:DL_unif_Buse_stick} completes the proof.
\noindent \textbf{Item~\ref{itm:global_attract} of Theorem~\ref{thm:DL_Buse_summ} (Global attractiveness):} We follow a similar proof to the attractiveness in Theorem~\ref{thm:SH10}. Let $\xi \notin \Xi$ and assume $\mathfrak h \in \operatorname{UC}$ is a function satisfying the drift condition~\eqref{eqn:drift_assumptions}. Recall that we define \begin{equation} \label{hst} h_{s,t}(x) = \sup_{z \in \mathbb{R}}\{\mathcal{L}(x,s;z,t) + \mathfrak h(z)\}. \end{equation} For $a > 0$ and $s < t$, Theorems~\ref{thm:DL_Buse_summ}\ref{itm:DL_unif_Buse_stick} and~\ref{thm:DLBusedc_description}\ref{itm:DL_dc_set_count} allow us to choose $\varepsilon = \varepsilon(\xi) > 0$ small enough so that $\xi \pm 2\varepsilon \in \mathbb{Q} $ (and thus $\xi \pm 2\varepsilon \notin \Xi$), and so that for all $x \in [-a,a]$, \begin{equation} \label{pmeq} W_{\xi \pm 2\varepsilon}(x,s;0,s) = W_\xi(x,s;0,s). \end{equation} By Theorem~\ref{thm:DL_all_coal}\ref{itm:unif_coal}, there exists a random $T = T(a,\xi \pm 2\varepsilon)$ such that all $\xi - 2\varepsilon$ Busemann geodesics have coalesced by time $T$ and all $\xi + 2\varepsilon$ Busemann geodesics have coalesced by time $T$. For $t > T$, let $g^{\xi \pm 2\varepsilon}(t)$ be the locations of these two common geodesics at time $t$. By Theorem~\ref{thm:DL_SIG_cons_intro}\ref{itm:arb_geod_cons}\ref{itm:geo_dir}, $g^{\xi \pm 2\varepsilon}(t)/t \to \xi \pm 2\varepsilon$. By the reflected version of Equation~\eqref{downexit} in Lemma~\ref{lem:unq}, there exists $t_0(a,\varepsilon(\xi),s)$ so that for $t > t_0$, whenever $x \in [-a,a]$ and $z$ is a maximizer in~\eqref{hst}, $
g^{\xi - 2\varepsilon}(t) < z < g^{\xi + 2\varepsilon}(t). $ Then, by Lemma~\ref{lem:DL_crossing_facts}\ref{itm:KPZ_crossing_lemma}, for such large $t$, \[ W_{\xi - 2\varepsilon}(x,s;0,s) \le h_{s,t}(x) - h_{s,t}(0) \le W_{\xi + 2\varepsilon}(x,s;0,s), \] while for $-a \le x \le 0$, the equalities reverse. Combined with~\eqref{pmeq}, this completes the proof.
\noindent \textbf{Item~\ref{itm:stationarity} of Theorem~\ref{thm:Buse_dist_intro} (Mixing):} Set $r_z = (az,bz)$. By a standard $\pi$-$\lambda$ argument, it suffices to show that for $\xi_1,\ldots,\xi_k \in \mathbb{R}$ (ignoring the sign $\sigg$ since $\xi_i \notin \Xi$ a.s.), all compact sets $K := K_1 \times K_2^k \subseteq \Rup \times (\mathbb{R}^4)^k$, and all Borel sets $A,B \subseteq C(K,\mathbb{R})$, \begin{align*}
&\lim_{z \to \infty} \mathbb P\Bigl(\{\mathcal{L}, W_{\xi_{1:k}}\}\big|_K \in A, \{T_{z;a,b} \mathcal{L}, T_{z;a,b}W_{\xi_{1:k}}\}\big|_K \in B\Bigr) \\
&\qquad\qquad= \mathbb P\bigl( \{\mathcal{L}, W_{\xi_{1:k}}\}\big|_K \in A\bigr) \mathbb P\bigl(\{\mathcal{L}, W_{\xi_{1:k}}\}\big|_K \in B \bigr), \end{align*} where we use the shorthand notation \[
\{\mathcal{L}, W_{\xi_{1:k}}\}\big|_K := \{\mathcal{L}(v), W_{\xi_i}(p;q):1 \le i \le k,(v,p,q) \in K\}, \] and $T_{z;a,b}$ acts on $\mathcal{L}$ and $W$ as projections of $\{\mathcal{L},W\}$. By Theorem~\ref{thm:DL_Buse_summ}\ref{itm:BuseLim1}, we may choose $t > 0$ sufficiently large so that \begin{equation} \label{busc1} \mathbb P(W_{\xi_i}(p;q) = \mathcal{L}(p;(t\xi,t)) - \mathcal{L}(q;(t\xi,t)) \;\forall (p,q) \in K_2, 1 \le i \le k) \ge 1 - \varepsilon. \end{equation} By stationarity of the process under space-time shifts, we also have that for such large $t$ and all $z \in \mathbb{R}$, \begin{equation} \label{busc2} \mathbb P(T_{z;a,b} W_{\xi_i}(p;q) = T_{z;a,b} [\mathcal{L}(p ;(t\xi,t)) - \mathcal{L}(q;(t\xi,t))] \;\forall (p,q) \in K_2, 1 \le i \le k) \ge 1 - \varepsilon \end{equation} Let $C_{z,t}$ be the intersection of the events in~\eqref{busc1} over $1 \le i \le k$ with the event~\eqref{busc2}. Then for large enough $t$, $\mathbb P(C_{z,t}) \ge 1 - 2\varepsilon$, and \begin{align*}
&\Bigl|\mathbb P\Bigl(\{\mathcal{L}, W_{\xi_{1:k}}\}|_K \in A, \{T_{z;a,b} \mathcal{L}, T_{z;a,b}W_{\xi_{1:k}}\}|_K \in B\Bigr) \\
&\qquad- \mathbb P\Bigl( \{\mathcal{L}, W_{\xi_{1:k}}\}|_K \in A\Bigr) \mathbb P\Bigl(\{\mathcal{L}, W_{\xi_{1:k}}\}|_K \in B \Bigr) \Bigr| \\
&\le \Bigl|\mathbb P\Bigl(\{\mathcal{L}, W_{\xi_{1:k}}\}|_K \in A, \{T_{z;a,b} \mathcal{L}, T_{z;a,b}W_{\xi_{1:k}}\}|_K \in B, C_{z,t}\Bigr) \\
&\qquad -\mathbb P\Bigl( \{\mathcal{L}, W_{\xi_{1:k}}\}|_K \in A,C_{z,t}\Bigr) \mathbb P\Bigl(\{\mathcal{L}, W_{\xi_{1:k}}\}|_K \in B,C_{z,t} \Bigr) \Bigr| + C\varepsilon \\
&= \Bigl|\mathbb P\Bigl(\{\mathcal{L}(v), \mathcal{L}(p;(t\xi_{1:k},t)) - \mathcal{L}(q;(t\xi_{1:k},t))\}|_K \in A, \\
&\qquad\qquad \{T_{z;a,b} \mathcal{L}(v), T_{z;a,b}[\mathcal{L}(p;(t\xi_{1:k},t)) - \mathcal{L}(q;(t\xi_{1:k},t))]\}|_K \in B, C_{z,t}\Bigr) \\
&\qquad -\mathbb P\Bigl( \{\mathcal{L}(v), \mathcal{L}(p;(t\xi_{1:k},t)) - \mathcal{L}(q;(t\xi_{1:k},t))\}|_K \in A,C_{z,t}\Bigr) \\ &\qquad\qquad\times \mathbb P\Bigl(\{\mathcal{L}(v), \mathcal{L}(p;(t\xi_{1:k},t)) - \mathcal{L}(q;(t\xi_{1:k},t))\}|_K \in B,C_{z,t} \Bigr) \Bigr| + C\varepsilon \\
&\le \Bigl|\mathbb P\Bigl(\{\mathcal{L}(v), \mathcal{L}(p;(t\xi_{1:k},t)) - \mathcal{L}(q;(t\xi_{1:k},t))\}|_K \in A, \\
&\qquad\qquad \{T_{z;a,b} \mathcal{L}(v), T_{z;a,b}[\mathcal{L}(p;(t\xi_{1:k},t)) - \mathcal{L}(q;(t\xi_{1:k},t))]\}|_K \in B\Bigr) \\
&\qquad -\mathbb P\Bigl( \{\mathcal{L}(v), \mathcal{L}(p;(t\xi_{1:k},t)) - \mathcal{L}(q;(t\xi_{1:k},t))\}|_K \in A\Bigr) \\ &\qquad\qquad\times \mathbb P\Bigl(\{\mathcal{L}(v), \mathcal{L}(p;(t\xi_{1:k},t)) - \mathcal{L}(q;(t\xi_{1:k},t))\}|_K \in B \Bigr) \Bigr| + C'\varepsilon, \end{align*} where the constants $C,C'$ came as the cost of adding and removing the high probability event $C_{z,t}$. The proof is complete by sending $z \to \infty$ and using the mixing of $\mathcal{L}$ under the shift $T_{z;a,b}$. \end{proof}
\begin{proof}[Proof of Theorem~\ref{thm:DLSIG_main}] \textbf{Item~\ref{itm:all_dir} (All geodesics have a direction):} First, we show that, on $\Omega_2$, if $g$ is a semi-infinite geodesic starting from $(x,s)$, then \begin{equation} \label{594} -\infty < \liminf_{t \to \infty} t^{-1}{g(t)} \le \limsup_{t \to \infty} t^{-1}{g(t)} < \infty. \end{equation} We show the rightmost inequality, the leftmost being analogous. Assume, by way of contradiction, that $\limsup_{t \to \infty} g(t)/t = \infty$. By the directedness of Theorem~\ref{thm:DL_SIG_cons_intro}\ref{itm:DL_all_SIG}, $\forall \xi \in \mathbb{R}$ there exists an infinite sequence $t_i \to \infty$ such that $g(t_i) > g_{(x,s)}^{\xi +,L}(t_i)$ for all $i$. Since $g_{(x,s)}^{\xi +,L}$ is the leftmost geodesic between any two of its points (Theorem~\ref{thm:DL_SIG_cons_intro}\ref{itm:DL_LRmost_geod}), we must have $g(t) \ge g_{(x,s)}^{\xi+,L}(t)$ $\forall \xi \in \mathbb{R}$ and $t \in \mathbb{R}$. By Theorem~\ref{thm:g_basic_prop}\ref{itm:limits_to_inf}, $g(t) = \infty$ $\forall t > s$, a contradiction.
Having established~\eqref{594}, assume by way of contradiction that \[ \liminf_{t \to \infty} t^{-1} {g(t)} < \limsup_{t \to \infty} t^{-1}{g(t)}. \] Choose some $\xi$ strictly between the two values above. By the directedness of Theorem~\ref{thm:DL_SIG_cons_intro}\ref{itm:DL_all_SIG}, there exists a sequence $t_i \to \infty$ such that $g_{(x,s)}^{\xi +,R}(t_i) < g(t_i)$ for $i$ even and $g_{(x,s)}^{\xi +,R}(t_i) > g(t_i)$ for $i$ odd. This cannot occur since $g_{(x,s)}^{\xi +,R}$ is the rightmost geodesic between any two of its points.
By Theorem~\ref{thm:DL_SIG_cons_intro}\ref{itm:DL_all_SIG}, for each $\xi \in \mathbb{R}$ and $(x,s) \in \mathbb{R}^2$, $g_{(x,s)}^{\xi +,R}$, for example, is a semi-infinite geodesic from $(x,s)$ in direction $\xi$, justifying the claim that there is at least one semi-infinite geodesic from each point and in every direction.
\noindent \textbf{Item~\ref{itm:good_dir_coal} (Coalescence):} The first statement follows from the equivalences \ref{itm:DL_good_dir}$\Leftrightarrow$\ref{itm:DL_good_dir_coal}$\Leftrightarrow$\ref{itm:DL_good_dir_unique_geod} of Theorem~\ref{thm:DL_good_dir_classification}. By Theorem~\ref{thm:DLNU}\ref{itm:DL_NU_p0}, $\mathbb P(p \in \operatorname{NU}_0)=0$ $\forall p \in \mathbb{R}^2$. This and Fubini's theorem imply that the set $\operatorname{NU}_0$ almost surely has planar Lebesgue measure zero.
\noindent \textbf{Item~\ref{itm:bad_dir_split} (Non-uniqueness in exceptional directions):} This follows from Remark~\ref{rmk:split_from_all_p}. \end{proof}
\section{Random measures and their supports}\label{sec:meas_supp} This section studies further the points with disjoint geodesics in the same direction, discussed in Theorem~\ref{thm:Split_pts} and Remark~\ref{rmk:supports}. Recall the functions $f_{s,\xi}(x) = W_{\xi +}(x,s;0,s) - W_{\xi -}(x,s;0,s)$ defined in~\eqref{fsdir} and the sets $\mathfrak S_{s,\xi}$ from \eqref{Split_sdir}: \begin{equation}\label{eqn:gen_split_set4}\begin{aligned}
\mathfrak S_{s,\xi} &:= \{x \in \mathbb{R}: \exists \text{
\textbf{disjoint}} \text{ }\text{semi-infinite geodesics from }(x,s) \text{ in direction }\xi\}\\
\mathfrak S &:= \bigcup_{s \hspace{0.9pt}\in\hspace{0.9pt} \mathbb{R}, \, \xi\hspace{0.9pt} \in\hspace{0.9pt} \Xi} \mathfrak S_{s,\xi} \times \{s\}.
\end{aligned}\end{equation}
Each $\xi \in \mathbb{R}$ is a direction of discontinuity with probability zero. Conditioning on $\xi \in \Xi$ is done through the Palm kernel from the theory of random measures (see \cite{Kallenberg-book} for background). The next theorem is proved in Section~\ref{sec:Palm}, together with a study of the random point process $\{(\tau_\xi, \xi)\}_{\xi\hspace{0.9pt}\in \hspace{0.9pt}\Xi}$. The Palm conditioning is made precise in Theorems \ref{thm:Lac} and \ref{thm:indep_loc}.
\begin{theorem} \label{thm:BusePalm} For $\xi \in \mathbb{R}$ consider the random function $f_\xi := f_{0,\xi}$ from~\eqref{fsdir}. Let \[ \tau_\xi = \inf\{x > 0: f_{\xi}(x) > 0 \}\qquad\text{and}\qquad \bck{{\tau}_\xi} = \inf\{x > 0: -f_{\xi}(-x) > 0\} \] denote the points to the right and left of the origin beyond which $W_{\xi +}(\aabullet,0;0,0)$ and $W_{\xi -}(\aabullet,0;0,0)$ separate, if ever. Then, conditionally on $\xi \in \Xi$ in the appropriate Palm sense, the restarted functions \[ x\mapsto f_{\xi}(x + \tau_\xi) -f_{\xi}(\tau_\xi) \quad\text{ and } \quad
x \mapsto -f_{\xi}(-x - \bck{\tau_{\xi}}) + f_{\xi}(-\bck{\tau_\xi}),\quad x \in\mathbb{R}_{\ge0}, \]
are equal in distribution to two independent running maximums of Brownian motion with diffusivity $2$ and zero drift. In particular, they are equal in distribution to two independent appropriately normalized versions of Brownian local time. See Figure~\ref{fig:loc_time}. \end{theorem}
\begin{figure}
\caption{\small The Busemann difference profile $f_\xi(x)$. The function vanishes in a nondegenerate random neighborhood of $x = 0$ and evolves as two independent Brownian local times to the left and right {\rm(}Theorem \ref{thm:BusePalm}{\rm)}.}
\label{fig:loc_time}
\end{figure}
As described in the next theorem, $\mathfrak S_{s,\xi}$ is the support of a random measure, up to the removal of an at most countable set.
\begin{theorem} \label{thm:random_supp}
On a single event of full probability, the function $f_{s,\xi}$ is nondecreasing simultaneously for all $s \in \mathbb{R}$ and $\xi \in \Xi$. Denote the set of local variation of $f_{s,\xi}$ by \begin{equation} \label{Dsdir} \mathcal{D}_{s,\xi} = \{ x\in\mathbb{R}: f_{s,\xi}(x - \varepsilon) < f_{s,\xi}(x + \varepsilon)\; \forall \varepsilon > 0 \}. \end{equation} Then, on a single event of full probability, simultaneously for each $s \in \mathbb{R}$ and $\xi \in \Xi$, \begin{equation} \label{eqn:supp_set} \mathcal{D}_{s,\xi} = \mathfrak S_{s,\xi}^L \cup \mathfrak S_{s,\xi}^R \;\subseteq \;\mathfrak S_{s,\xi}, \end{equation} where for $S \in \{L,R\}$, \begin{equation} \label{eqn:split_LR_sdir} \mathfrak S_{s,\xi}^S := \{x \in \mathbb{R}: g_{(x,s)}^{\xi -,S} \text{ and } g_{(x,s)}^{\xi +,S} \text{ are disjoint}\}. \end{equation}
$(\mathfrak S_{s,\xi} \setminus \mathcal{D}_{s,\xi}) \times \{s\}$ is contained in the at most countable set $\operatorname{NU}_1^{\xi -} \cap \hspace{0.9pt}\operatorname{NU}_1^{\xi +} \cap \,\mathcal{H}_s$. \end{theorem} \begin{remark} \label{rmk:NUsupp} Presently, we do not know if $\mathcal{D}_{s,\xi}$ equals $\mathfrak S_{s,\xi}$. Since $\operatorname{NU}_1^{\xi -} \cap \operatorname{NU}_1^{\xi +} \subseteq \operatorname{NU}_1$ and $\operatorname{NU}_1 \cap \,\mathcal{H}_s$ is at most countable (Theorem~\ref{thm:DLNU}\ref{itm:DL_NU_count}), $\mathfrak S_{s,\xi}$ and $\mathcal{D}_{s,\xi}$ have the same Hausdorff dimension for all $s \in \mathbb{R}$ and $\xi \in \Xi$. In Section~\ref{sec:last_proofs} we prove that this Hausdorff dimension is $\frac{1}{2}$ on an $s$-dependent probability one event (as Theorem~\ref{thm:Split_pts}\ref{itm:Hasudorff1/2}). \end{remark}
The remainder of this section develops the theory needed to prove Theorems~\ref{thm:BusePalm} and~\ref{thm:random_supp} and ultimately Theorem~\ref{thm:Split_pts}. Sections~\ref{sec:Palm} and~\ref{sec:decoup} develop the Palm kernel theory necessary for Theorem~\ref{thm:BusePalm}. The proofs of Theorems~\ref{thm:BusePalm},~\ref{thm:random_supp},~\ref{thm:Split_pts} are in Section~\ref{sec:last_proofs}, along with the unfinished business of Theorem~\ref{thm:DLBusedc_description}\ref{itm:Busedc_t}.
\subsection{Random measures and Palm kernels} \label{sec:Palm}
To study Palm conditioning, we represent the Busemann process $\{W_{\xi+}(\aabullet,0,0,0)\}_{\xi\in\mathbb{R}}$ by the stationary horizon $\{G_\xi(\aabullet)\}_{\xi\in\mathbb{R}}$, as permitted by Theorem \ref{thm:Buse_dist_intro}\ref{itm:SH_Buse_process}.
Define the process of jumps \begin{equation}
H := \{H_\xi\}_{\xi\hspace{0.5pt}\in\hspace{0.5pt}\mathbb{R}} = \{G_\xi-G_{\xi-}\}_{\xi\hspace{0.5pt}\in\hspace{0.5pt}\mathbb{R}} \end{equation} where $G_{\xi -} = \lim_{\alpha \nearrow \xi} G_\alpha$. Either $H_\xi$ vanishes identically or $H_\xi$ is a nondecreasing continuous function that vanishes in a nondegenerate (random) neighborhood of the origin. By a combination of Theorem \ref{thm:SH10}\ref{itm:SH_sc}--\ref{itm:SH_sc2}, \begin{equation}\label{H65} \{H_{\xi+\eta}(y+x)-H_{\xi+\eta}(y): x \in \mathbb{R}\}_{\xi \in \mathbb{R}}\;\deq\;\{H_{\xi}(x):x \in \mathbb{R}\}_{\xi \in \mathbb{R}} \quad \forall\hspace{0.5pt} y, \eta\in\mathbb{R}. \end{equation} We study the functions $H_\xi(x)$ first for $x\ge0$. Approximate $H$ by a process $H^{N}$ defined on dyadic rational $\xi$. For $N\in \mathbb{Z}_{>0}$ let \begin{equation}\label{Hd}
H^{N}_{\xi_i}=G_{\xi_i}-G_{\xi_{i-1}} \qquad\text{for } \ \xi_i=\xi^N_i=i2^{-N} \ \text{ and } \ i\in \mathbb{Z}. \end{equation}For $i \in \mathbb{Z}$, let \begin{equation}\label{taui}
\tau_{\xi_i}^{N}= \inf\{x > 0: H^{N}_{\xi_i}(x) > 0\}. \end{equation}
Since the $G_{\xi_i}$ have different drifts for different values of $i$, $\tau_{\xi_i}^{N}<\infty$ almost surely. For $f\in C(\mathbb{R})$ and $\tau \in \mathbb{R}$, let $[f]^{\tau}\in C(\mathbb{R}_{\ge0})$ denote the restarted function \begin{equation} [f]^{\tau}(x) =f(\tau+x) -f(\tau) \ \text{ for } x\in [0,\infty) . \end{equation}
Denote by $\mathcal{D}^{\alpha}$ the distribution on $C(\mathbb{R}_{\ge0})$ of the running maximum of a Brownian motion with drift $\alpha\in\mathbb{R}$ and diffusivity $2$. That is, if $X$ denotes standard Brownian motion, then \[ \mathcal{D}^{\alpha}(A) = \mathbb P\bigl\{ \bigl[ \sup_{0\leq u\leq s} 2X(u)+\alpha u\bigr]_{s\in[0,\infty)} \in A \bigr\} \] for Borel sets $A\subset C(\mathbb{R}_{\ge0})$. When the drift vanishes ($\alpha=0$) we abbreviate $\mathcal{D}=\mathcal{D}^0$.
\begin{lemma} \label{lem:WBSM}
Let $B^\alpha = \{B^\alpha(x): x \ge 0\}$ be a Brownian motion with drift $\alpha$ and diffusivity $2$. Let $W$ be an almost surely negative random variable independent of $B^\alpha$. Let \[\theta = \inf\{x > 0: W+B^{\alpha}(x) \ge 0 \}. \] Then, for all $x > 0$, \begin{equation} \label{condD}
\mathbb P\Big(\Big[\sup_{0\leq s\leq \theta+u} W+B^{\alpha}(s)\Big]^+_{u\in[0,\infty)}\in \aabullet \,\Big|\,\theta=x\Big) = \mathcal{D}^\alpha(\aabullet). \end{equation} In particular, \begin{equation} \label{uncondD} \mathbb P\Big(\Big[\sup_{0\leq s\leq \theta+u} W+B^{\alpha}(s)\Big]^+_{u\in[0,\infty)}\in \aabullet\Big) = \mathcal{D}^\alpha(\aabullet) \end{equation} \end{lemma} \begin{proof}
Let $A\in\mathcal B(C(\mathbb{R}_{\ge0}))$ and $t>0$. Below, notice that $B^{\alpha}(\theta)=-W$. Then, noting that $\theta$ is a stopping time with respect to the filtration $\mathcal{F}_y=\sigma\big(W,\{B^{\alpha}(x)\}_{x\in[0,y]}\big)$, we use the strong Markov property to restart at time $\theta$.
\begin{equation}\label{Oh2}
\begin{aligned}
&\quad\; \mathbb P\Big(\Big[\sup_{0\leq s\leq \theta+u} W+B^{\alpha}(s)\Big]^+_{u\in[0,\infty)}\in A \,\Big|\,\theta=x\Big)\\
&=\mathbb P\Big( \Bigl[\,\sup_{\theta\leq s\leq \theta+u} W+B^{\alpha}(s)\Bigr]_{u\in[0,\infty)} \in A \,\Big|\,\theta=x\Big)\\
&=\mathbb P\Big( \Bigl[\,\sup_{0\leq s\leq u} B^{\alpha}(\theta+s)- B^{\alpha}(\theta)\Bigr]_{u\in[0,\infty)} \in A \,\Big|\,\theta=x\Big)\\
&=\mathbb P\Big( \Bigl[\,\sup_{0 \leq s\leq u} B^{\alpha}(s)\Bigr]_{u\in[0,\infty)} \in A\Big)=\mathcal{D}^{\alpha}(A).
\end{aligned}
\end{equation}
The claim of~\eqref{condD} has now been verified. Equation~\eqref{uncondD} follows.
\end{proof}
\begin{corollary} \label{cor:discrete_restart}
Let $\alpha_N=2^{-N+1}$. Then for all $i\in \mathbb{Z}$ and $x>0$,
\begin{equation}\label{eq4}
\mathbb P\big(\big[H^{N}_{\xi_i}\big]^{\tau^{N}_{\xi_i}}\in \aabullet\,\,\big|\,\tau^{N}_{\xi_i}=x\big)= \mathcal{D}^{\alpha_N}(\aabullet) .
\end{equation}
\end{corollary} \begin{proof}
From the definition of the stationary horizon (Definition \ref{def:SH}) one can deduce that, for each $i\in\mathbb{Z}$, the process $H^{N}_{\xi_i}$
has the same distribution as the process
\begin{equation} \label{W+B}
\widetilde{J}^{N}(y)=\Big[\sup_{0\leq x\leq y} W+B^{\alpha_N}(x)\Big]^+
\end{equation}
where $B^{\alpha_N}$ is a Brownian motion with drift $\alpha_N=2^{-N+1}$ and diffusivity $2$, and $W$ is an almost surely negative random variable independent of $B^{\alpha_N}$. Define
\begin{equation}\label{thetaN}
\theta^N = \inf\{x > 0: \widetilde J^{N}(x) > 0 \}= \inf\{x > 0: W+B^{\alpha_N}(x) \ge 0 \}.
\end{equation}
Hence, now $(H^{N}_{\xi_i},\tau^{N}_{\xi_i})\deq (\widetilde{J}^{N},\theta^N)$, and the result follows from Lemma~\ref{lem:WBSM}.
\end{proof}
For $\xi\in \mathbb{R}$ let \begin{equation}\label{eq9} \tau_\xi=\inf\{x\ge 0:H_\xi(x) > 0\}. \end{equation} The connection with the discrete counterpart in \eqref{taui} is \begin{equation}\label{tau45} \tau^N_{\xi_i}= \min\{ \tau_\xi: \xi\in(\xi_{i-1}, \xi_i]\}. \end{equation}
On the space $\mathbb{R}_{\ge0}\times \mathbb{R}$ define the random point measure and its mean measure \begin{equation}\label{SHpp}
\Gamma=\sum_{(\tau_\xi,\xi):\tau_{\xi}<\infty} \delta_{(\tau_\xi,\xi)}
\quad\text{ and } \quad
\lambda_{\Gamma}(\aabullet):=
\mathbb{E}[ \hspace{0.9pt}{\Gamma}(\aabullet)\hspace{0.9pt}]. \end{equation} The point process $\Gamma$ records the jump directions $\xi$ and the points $\tau_\xi$ where $G_\xi$ and $G_{\xi-}$ separate on $\mathbb{R}_{\ge0}$. Theorem~\ref{thm:SH10}\ref{itm:SH_j} ensures that $\Gamma$ and $\lambda_{\Gamma}$ are locally finite. It will cause no confusion to use the same symbol $\Gamma$ to denote the random set: \[
\Gamma=\{(\tau_\xi,\xi): \xi\in\mathbb{R}, \tau_{\xi}<\infty\} . \] Then also $\lambda_{\Gamma}(\aabullet)=
\mathbb{E}(\hspace{0.9pt}|\Gamma\cap \aabullet|\hspace{0.9pt})$ where $| \aabullet |$ denotes cardinality. The counterparts for the approximating process are
\begin{equation} \label{SHppdis} \Gamma^{(N)}=\{(\tau^{N}_{\xi_i},\xi_i):i\in \mathbb{Z}, \tau^{N}_{\xi_i}<\infty\} \quad\text{and}\quad
\lambda_{\Gamma}^{(N)}(\aabullet):=\mathbb{E}(|\Gamma^{(N)}\cap \aabullet|). \end{equation}
The dyadic partition in \eqref{Hd} imposes a certain monotonicity as $N$ increases: $\tau_\xi$ values can be added but not removed. The $\xi$-coordinates that are not dyadic rationals move as the partition refines. So we have \begin{equation}\label{tau67} \{ \tau^{N}_{\xi_i} : (\tau^{N}_{\xi_i},\xi_i) \in \Gamma^{(N)}\} \subset \{ \tau^{N+1}_{\xi_i} : (\tau^{N+1}_{\xi_i},\xi_i) \in \Gamma^{(N+1)}\} \subset \{ \tau_{\xi} : (\tau_{\xi},\xi) \in \Gamma\}. \end{equation}
\begin{lemma}\label{lm:ac}
The measure $\lambda_{\Gamma}$ and Lebesgue measure $m$ are mutually absolutely continuous on $\mathbb{R}_{>0}\times\mathbb{R}$.
The Radon-Nikodym derivative is given by
\begin{equation}\label{RN128}
\frac{d\lambda_{\Gamma}}{dm}(\tau,\xi) = \sqrt{\frac{2}{\pi \tau}} \qquad \text{for } \ (\tau,\xi)\in\mathbb{R}_{>0}\times\mathbb{R} .
\end{equation} \end{lemma} \begin{proof} From Theorem~\ref{thm:SH10}\ref{itm:exp}, for $\xi \in \mathbb{R}, \tau > 0$, and $\delta > 0$, \[ \lambda_{\Gamma}\big((\tau,\tau+\delta]\times [\xi-\delta,\xi+\delta]\big) = 4\sqrt{\frac{2}{\pi}}\hspace{0.9pt}\delta\hspace{0.9pt}\bigl(\sqrt{\tau + \delta} - \sqrt{\tau}\hspace{1.1pt}\bigr) = \int_{\xi - \delta}^{\xi + \delta} \int_{\tau}^{\tau + \delta} \sqrt{\frac{2}{\pi x}}\,dx \,d\alpha. \qedhere
\] \end{proof}
By \eqref{RN128}, $\lambda_{\Gamma}$ does not have a finite marginal on the $\xi$-component, as expected since the jump directions are dense. Hence, below we do Palm conditioning on the pair $(\tau_{\xi},\xi)\in\mathbb{R}_{>0}\times\Xi$ and not on the jump directions $\xi\in\Xi$ alone.
\begin{lemma}\label{lm:SH5} Let $A \subseteq C(\mathbb{R}_{\ge0})$ be a Borel set. Then for any open rectangle $R=(a,b)\times(c,d)\subseteq \mathbb{R}_{\ge0}\times\mathbb{R}$,
\begin{equation}\label{Omr2}
\mathbb{E}\Bigl[ \; \sum_{(\tau,\,\xi)\,\in\, \Gamma}\mathbf{1}_{A}([H_\xi]^\tau)\hspace{0.9pt}\mathbf{1}_{R}(\tau,\xi)\Bigr] =\lambda_{\Gamma}(R)\hspace{0.9pt}\mathcal{D}(A).
\end{equation} \end{lemma}
\begin{proof} It suffices to prove \eqref{Omr2} for continuity sets $A$ of the distribution $\mathcal{D}$ of the type $ A = \{f\in C(\mathbb{R}_{\ge0}): f|_{[0,k]} \in A_k\}$ for $k > 0$ and Borel $A_k \subseteq C[0,k]$. Such sets form a $\pi$-system that generates the Borel $\sigma$-algebra of $C(\mathbb{R}_{\ge0})$.
We prove \eqref{Omr2} for $H^{N}$. Below the values $\xi_i=i2^{-N}$ are not random and hence can come outside the expectation. Condition on $\tau^N_{\xi_i}$ and use \eqref{eq4}:
\begin{equation}\label{Omr3}
\begin{aligned}
&\mathbb{E}\Big(\sum_{(\tau^N_{\xi_i}\!,\,\xi_i)\in R\cap\Gamma^{(N)}} \mathbf{1}_A\big([H^{N}_{\xi_i}]^{\tau^{N}_{\xi_i}}\big)\Big)
=\mathbb{E}\Big(\sum_{\xi_i\in(c,d)} \mathbf{1}_A\big([H^{N}_{\xi_i}]^{\tau^{N}_{\xi_i}}\big)\mathbf{1}_{(a,b)}\big(\tau^{N}_{\xi_i}\big)\Big)\\
&=\sum_{\xi_i\in(c,d)} \mathbb{E}\bigg(\mathbf{1}_{(a,b)}\big(\tau^{N}_{\xi_i}\big)\, \mathbb{E}\Big[\mathbf{1}_A\big([H^{N}_{\xi_i}]^{\tau^{N}_{\xi_i}}\big)\hspace{0.9pt}\Big|\hspace{0.9pt}\tau^N_{\xi_i}\Big]\bigg) \\&
\stackrel{\eqref{eq4}}{=}\sum_{\xi_i\in(c,d)}\mathbb P\big(\tau^{N}_{\xi_i}\in (a,b)\big)\mathcal{D}^{\alpha_N}(A)
=\mathcal{D}^{\alpha_N}(A)\hspace{0.9pt} \lambda_{\Gamma}^{(N)}(R).
\end{aligned}
\end{equation}
To conclude the proof, we check that \eqref{Omr2} arises as we let $N\to\infty$ in the first and last member of the string of equalities above. $\mathcal{D}^{\alpha_N}(A)\to\mathcal{D}(A)$ by the continuity of $\alpha\mapsto \mathcal{D}^\alpha$ in the weak topology and the assumption that $A$ is a continuity set.
As an intermediate step, we verify that $\forall k > 0$,
$\mathbf{1}_{\mathcal{U}_N^k}\to 1$ almost surely
for the events
\begin{align}\label{Uset}
\mathcal{U}_N^k&=\bigl\{\,|\Gamma^{(N)}\cap R| = |\Gamma\cap R| \text{ \,and for every $(\tau,\xi)\in \Gamma\cap R$ there is a unique} \\
&\qquad
\text{ $(\tau^N_{\xi_i},\xi_i)\in \Gamma^{(N)}\cap R$
such that $[H^{N}_{\xi_i}]^{\tau^{N}_{\xi_i}}\big|_{[0,k]}=[H_{\xi}]^{\tau_{\xi}}\big|_{[0,k]}$}\bigr\}. \nonumber
\end{align}
Almost surely, $\Gamma\cap R$ is finite and none of its points lie on the boundary of $R$. For any such realization, the condition in braces holds when (i) all points $(\tau_\xi, \xi)\in\Gamma\cap R$ lie in distinct rectangles $(a,b)\times(\xi_{i-1}, \xi_i]\subset(a,b)\times(c,d)$, (ii) no point $(\tau^N_{\xi_i},\xi_i)\in \Gamma^{(N)}\cap R$ is generated by a point $(\tau_\xi, \xi)\in\Gamma$ outside $R$, and (iii) $N$ is large enough so that for the unique $i$ with $\xi_i < \xi \le \xi_{i + 1}$, $G_{\xi-}(x) = G_{\xi_i}(x)$ and $G_{\xi+}(x) = G_{\xi_{i + 1}}(x)$ for all $x \in [0,\tau_\xi + k]$. By Theorem~\ref{thm:SH10}\ref{itm:SH_j}, this happens for all the finitely many $(\tau,\xi) \in \Gamma \cap R$ when the mesh $2^{-N}$ is fine enough. Thus, for each $k > 0$, almost every realization lies eventually in $\mathcal{U}_N^k$.
We prove that
$\lambda_{\Gamma}^{(N)}(R)\to \lambda_{\Gamma}(R)$. The paragraph above gave $|\Gamma^{(N)}\cap R| \to |\Gamma\cap R|$ almost surely. We also have $|\Gamma^{(N)}\cap R| \le |\Gamma\cap ((a,b)\times(c-1,d))|$ because \eqref{tau45} shows that each point $(\tau^N_{\xi_i},\xi_i)$ that is not matched to a unique point $(\tau_\xi, \xi)\in\Gamma\cap R$ must be generated by some point $(\tau_\xi, \xi)\in \Gamma\cap ((a,b)\times(c-1,d))$. The limit $\lambda_{\Gamma}^{(N)}(R)\to \lambda_{\Gamma}(R)$ comes now from dominated convergence.
It remains to show that \[
\mathbb{E}\Big(\sum_{(\tau^N_{\xi_i}\!,\,\xi_i)\in R\cap\Gamma^{(N)}} \mathbf{1}_{ A}([H^{N}_{\xi_i}]^{\tau^{N}_{\xi_i}})\Big)
\underset{N\to\infty}\longrightarrow
\mathbb{E}\Big(\sum_{(\tau_{\xi},\xi)\in R\cap\Gamma} \mathbf{1}_{ A}([H_\xi]^{\tau_\xi})\Big). \]
This follows by choosing $k > 0$ so that $A$ depends only on the domain $[0,k]$. Then, the difference in absolute values in the display below vanishes on $\mathcal{U}_N^k$. \[ \begin{aligned}
&\lim_{N\to\infty} \mathbb{E}\bigg[ \,\Big|\sum_{(\tau^N_{\xi_i}\!,\,\xi_i)\in R\cap\Gamma^{(N)}} \mathbf{1}_{ A}\bigl([H^{N}_{\xi_i}]^{\tau^{N}_{\xi_i}}\bigr)-\sum_{(\tau_{\xi},\xi)\in R\cap\Gamma} \mathbf{1}_{ A}\bigl([H_{\xi}]^{\tau_{\xi}}\bigr)\Big|
\cdot (\mathbf{1}_{\mathcal{U}_N^k}+\mathbf{1}_{(\mathcal{U}_N^k)^c})\bigg] \\ &\qquad \qquad \le \lim_{N\to\infty}
2\hspace{0.9pt} \mathbb{E}\bigl[ \hspace{0.9pt} |\Gamma\cap ((a,b)\times(c-1,d))| \cdot \mathbf{1}_{(\mathcal{U}^k_N)^c} \bigr] =0,
\end{aligned}
\]
and the last equality follows by dominated convergence. \end{proof}
To capture the distribution of $[H_\xi]^{\tau_\xi}$, we augment the point measure $\Gamma$ of \eqref{SHpp} to a point measure on the space $\mathbb{R}_{\ge0}\times \mathbb{R}\times C(\mathbb{R}_{\ge0})$: \begin{equation}\label{SHHpp}
\Lambda=\sum_{(\tau_\xi,\,\xi)\, \in \,\Gamma} \delta_{(\tau_\xi,\,\xi,\, [H_\xi]^{{\scaleobj{1.5}{\tau}}_{\!\!\xi}})}. \end{equation}
The \textit{Palm kernel} of $[H_\xi]^{\tau_\xi}$ with respect to $\Gamma$ is the stochastic kernel $Q$ from $\mathbb{R}_{\ge0}\times \mathbb{R}$ into $C(\mathbb{R}_{\ge0})$ that satisfies the following identity: for every bounded Borel function $\Psi$ on $\mathbb{R}_{\ge0}\times \mathbb{R}\times C(\mathbb{R}_{\ge0})$ that is supported on $B\times C(\mathbb{R}_{\ge0})$ for some bounded Borel set $B\subset \mathbb{R}_{\ge0}\times \mathbb{R}$, \begin{equation}\label{Opalm}\begin{aligned} \mathbb{E} \sum_{(\tau_\xi,\,\xi)\, \in \, B\hspace{0.7pt} \cap\hspace{0.7pt}\Gamma} \Psi\bigl(\tau_\xi,\,\xi,\, [H_\xi]^{\tau_\xi}\bigr) &\; = \; \mathbb{E}\!\!\int\limits_{\mathbb{R}_{\ge0}\times \mathbb{R}\times C(\mathbb{R}_{\ge0})} \!\! \Psi(\tau, \xi, h) \,\Lambda(d\tau, d\xi, dh) \\ &= \; \int\limits_{\mathbb{R}_{\ge0}\times\mathbb{R}} \lambda_{\Gamma}(d\tau, d\xi) \int\limits_{C(\mathbb{R}_{\ge0})} Q(\tau, \xi, dh)\, \Psi(\tau, \xi, h) . \end{aligned} \end{equation} The first equality above is a restatement of the definition of $\Lambda$ and included to make the next proof transparent.
The key result of this section is this characterization of $Q$.
\begin{theorem}\label{thm:Lac} For Lebesgue-almost every $(\tau, \xi)$, $Q(\tau, \xi, \aabullet)=\mathcal{D}(\aabullet)$, the distribution of the running maximum of a Brownian motion with diffusivity 2.
\end{theorem}
\begin{proof} This comes from Lemma \ref{lm:SH5}:
take $\Psi(\tau, \xi, h)=\mathbf{1}_R(\tau, \xi)\mathbf{1}_A(h)$ in \eqref{Opalm} and note that the left-hand side of \eqref{Omr2} is exactly the left-hand side of \eqref{Opalm}. Lemma \ref{lm:ac} turns $\lambda_{\Gamma}$-almost everywhere into Lebesgue-almost everywhere.
\end{proof}
Denote the set of directions $\xi$ for which $G_\xi$ and $G_{\xi-}$ separate on $\mathbb{R}_{\ge0}$ by \begin{equation}\label{XiSHdef}
\begin{aligned}
\Xi_G&=\{\xi\in\mathbb{R}:\tau_\xi < \infty\}.
\end{aligned} \end{equation} \begin{theorem}\label{thm:Lac5} Let $A\subseteq C(\mathbb{R}_{\ge0})$ be a Borel set such that $\mathcal{D}(A)=0$. Then \begin{equation}\label{Oeq}
\mathbb P\big(\exists \xi\in \Xi_G : [H_\xi]^{\tau_\xi}\in A\big)=0. \end{equation} \end{theorem} \begin{proof} Let $R_N=(0,N)\times(-N,N)$. Since $\xi\in\Xi_G$ means that $\tau_\xi<\infty$, we have
\begin{align*}
&\mathbb P\big(\exists \xi\in \Xi_G : [H_\xi]^{\tau_\xi}\in A\big)
= \lim_{N\to\infty} \mathbb P\big(\exists \xi\in \Xi_G : (\tau_\xi,\xi)\in R_N, [H_\xi]^{\tau_\xi}\in A\big) \\ & \qquad\quad \leq \lim_{N\to\infty} \mathbb{E}\sum_{(\tau,\,\xi)\in \Gamma}\mathbf{1}_{A}([H_\xi]^\tau) \hspace{0.9pt}\mathbf{1}_{R_N}(\tau, \xi) \overset{\eqref{Omr2}}= \lim_{N\to\infty} \lambda_{\Gamma}(R_N) \hspace{0.9pt} \mathcal{D}(A) =0.
\qquad \ \qedhere \end{align*}
\end{proof}
We show that \eqref{XiSHdef} captures all $\xi$ at which a jump happens on the real line.
\begin{corollary} \label{cor:dcLR} With probability one, $\Xi_G = \{\xi \in \mathbb{R}: H_\xi(x) \neq 0 \text{ for some }x \in \mathbb{R}\}$. Furthermore, for each $\xi \in \Xi_G$, $\lim_{x \to \pm \infty} H_\xi(x) = \pm \infty$. \end{corollary} \begin{proof} By Theorem~\ref{thm:Lac5} and the associated fact for the running max of a Brownian motion, \begin{equation} \label{XIshto+inf} \mathbb P(\forall \xi \in \Xi_G, \lim_{x \to +\infty} H_\xi(x) = +\infty) = 1. \end{equation} By definition, $\Xi_G = \{\xi \in \mathbb{R}: H_\xi(x) \neq 0 \text{ for some }x >0\}$.
Now, we show that if $H_\xi(x) \neq 0$ for some $x < 0$, then $H_\xi(x) \neq 0$ for some $x > 0$. If not, then there exist $\xi \in \mathbb{R}$ and $m \in \mathbb{Z}_{<0}$ such that $[H_\xi]^{m}|_{[0,\infty)} \neq 0$, but $[H_\xi]^m|_{[-m,\infty)}$ is constant. In particular, $[H_\xi]^{m}|_{[0,\infty)}$ is bounded. Let $\tau^m_\xi = \inf \{x > 0: [H_\xi]^m(x) > 0\}$. Then, $[H_\xi]^{m}|_{[0,\infty)} \neq 0$ iff $\tau^m_\xi<\infty$, and we have \begin{equation} \label{XiLR} \begin{aligned}
& \mathbb P\bigl(\Xi_G \neq \{\xi \in \mathbb{R}: H_\xi(x) \neq 0 \text{ for some }x \in \mathbb{R}\}\bigr) \\
&\quad
\le \sum_{m \in \mathbb{Z}_{<0}}\mathbb P\bigl(\exists \xi \in \mathbb{R}: \; \tau^m_\xi<\infty
\text{ but } [H_\xi]^m|_{[0,\infty)} \text{ is bounded} \bigr) = 0. \end{aligned} \end{equation} The probability equals zero by~\eqref{XIshto+inf} because by \eqref{H65}, $[H]^m\deq H$. To finish,~\eqref{XIshto+inf} proves the limits for $x \to +\infty$. The limits as $x \to -\infty$ then follow from~\eqref{XiLR} and the reflection invariance of Corollary~\ref{cor:SH_reflect}. \end{proof}
Let $\nu_f$ denote the Lebesgue-Stieltjes measure of a non-decreasing function $f$ on $\mathbb{R}$. Denote the support of $\nu_f$ by $\text{supp}(\nu_f)$. The Hausdorff dimension of a set $A$ is denoted by $\dim_H(A)$.
\begin{corollary} \label{cor:SHHaus1/2} Consider the Lebesgue-Stieltjes measure $\nu_{H_\xi}$ for $\xi\in \Xi_G$ on the entire real line. Then we have \begin{equation}\label{eq8}
\mathbb P\big\{\forall \xi\in \Xi_G : \dim_H \big(\text{\rm supp}(\nu_{H_\xi})\big)=1/2\big\}=1. \end{equation} \end{corollary} \begin{proof}
First, note that
\[
\big\{\exists \xi\in \Xi_G : \dim_H\big(\text{supp}(\nu_{H_\xi})\big)\neq \tfrac12\big\}\subseteq\!\bigcup_{m\in\mathbb{Z}_{\le0}}\!\!\big\{\exists \xi\in \Xi_G : \dim_H\big(\text{supp}(\nu_{H_\xi})\cap[m,\infty)\big)\neq \tfrac12\big\}.
\]
By \eqref{H65}, it is enough to take $m=0$ and show that
\[
\mathbb P\big(\exists \xi\in \Xi_G :\dim_H\big(\text{supp}(\nu_{H_\xi})\cap[0,\infty)\big)\neq 1/2\big)=0.
\]
This last claim follows from Theorem \ref{thm:Lac5} because the event in question has zero probability for the running maximum of Brownian motion
(\cite{morters_peres_2010}, Theorem 4.24 and Exercise 4.12).
\end{proof}
\begin{remark} Representation of the difference of Busemann functions as the running maximum of random walk goes back to \cite{bala-busa-sepp-20}. It was used in \cite{busa-ferr-20} to capture the local universality of geodesics.
The representation of the difference profile as the running maximum of Brownian motion in the point-to-point setup emerges from the Pitman transform \cite{Ganguly-Hegde-2021,Dauvergne-22}. Theorem 1 and Corollary 2 in~\cite{Ganguly-Hegde-2021} are point-to-point analogues of our Theorem \ref{thm:Lac} and Corollary \ref{cor:SHHaus1/2}. Their proof is different from ours. Although an analogue of the Pitman transform exists in the stationary case \cite[Section 3]{Busani-2021}, comparing the running maximum of a Brownian motion to the profile requires different tools in the two settings. \end{remark}
\subsection{Decoupling} \label{sec:decoup} By Corollary~\ref{cor:dcLR}, whenever $\xi$ is a jump direction, the difference profiles for both positive and negative $x$ are nontrivial. We extend Theorem~\ref{thm:Lac} to show that these two difference profiles are independent and equal in distribution. We spell out only the modifications needed in the arguments of the previous section. For the difference profile on the left, define for $x\ge0$
\[
\bck{H}_{\xi}(x):=-H_{\xi}(-x)
\quad\text{and}\quad
\bck{\tau_\xi}:=\inf \{x > 0:\bck{H}_{\xi}(x) > 0\}. \] For $N\in \mathbb{Z}_{>0}$ and $\xi_i$ as in \eqref{Hd}, the discrete approximations are \[
\bck{H}^N_{\xi_i}(x):=-H^N_{\xi_i}(-x)
\quad\text{and}\quad
\bck{\tau_{\xi_i}}^N:=\inf\{x > 0:\bck{H}^N_{\xi_i}(x) > 0\}. \]
The measures $\bck{\Gamma}$, $\lambda_{\bck{\Gamma}}$, $\bck{\Gamma}^{(N)}$, and $\lambda_{\bck{\Gamma}}^{(N)}$ are defined as in~\eqref{SHpp} and~\eqref{SHppdis}, but now with $(\bck{\tau_\xi},\xi)$ and $(\bck{\tau_{\xi_i}},\xi_i)$.
Extend the measure $\Lambda$ of \eqref{SHHpp} with a component for the left profile: \[ \Lambda'=\sum_{(\bck{\tau_\xi},\,\xi)\, \in \,\bck{\Gamma}} \delta_{(\bck{\tau_\xi},\,\xi,\; [H_\xi]^{{\scaleobj{1.5}{\tau}}_{\!\!\xi}}, \; [\bck{H}_\xi]^{\bck{\scaleobj{1.5}{\tau}}_{\!\!\xi}}) }. \] Since $\tau_\xi < \infty$ if and only if $\bck{\tau_\xi} < \infty$ (Corollary~\ref{cor:dcLR}), it is immaterial whether we sum over $({\tau}_\xi,\xi)$ or $(\bck{\tau_\xi},\xi)$. The latter is more convenient for the next calculations.
The \textit{Palm kernel} of $\big([H_\xi]^{\tau_\xi},[\bck{H}_\xi]^{\bck{\tau_\xi}}\big)$ with respect to $\bck{\Gamma}$ is the stochastic kernel $Q^2$ from $\mathbb{R}_{\ge0}\times \mathbb{R}$ into $C(\mathbb{R}_{\ge0})\times C(\mathbb{R}_{\ge0})$ that satisfies the following identity: for every bounded Borel function $\Psi$ on $\mathbb{R}_{\ge0}\times \mathbb{R}\times C(\mathbb{R}_{\ge0})\times C(\mathbb{R}_{\ge0})$ that is supported on $B\times C(\mathbb{R}_{\ge0})\times C(\mathbb{R}_{\ge0})$ for some bounded Borel set $B\subset \mathbb{R}_{\ge0}\times \mathbb{R}$,
\begin{equation}\label{Opalm2}
\begin{aligned}
&\quad \,\mathbb{E} \Bigl[ \; \sum_{(\bck{\tau_\xi},\,\xi)\, \in \, B\hspace{0.7pt} \cap\hspace{0.7pt}\bck{\Gamma}} \Psi\bigl(\bck{\tau_\xi},\,\xi,\, [H_\xi]^{\tau_\xi},[\bck{H}_\xi]^{\bck{\tau_\xi}}\bigr) \Bigr] \\
&= \int\limits_{\mathbb{R}_{\ge 0} \times \mathbb{R}} \lambda_{\bck{\Gamma}}(d\bck{\tau}, d\xi)
\int\limits_{C(\mathbb{R}_{\ge0})\times C(\mathbb{R}_{\ge 0})} Q^2(\bck{\tau}, \xi, dh^1,dh^2)\, \Psi(\bck{\tau}, \xi, h^1,h^2).
\end{aligned}
\end{equation}
\begin{theorem} \label{thm:indep_loc}
For
Lebesgue-almost every $(\tau, \xi)$, $Q^2(\tau, \xi, \aabullet)=(\mathcal{D} \otimes \mathcal{D})(\aabullet)$, the product of two copies of the distribution $\mathcal{D}$ of the running maximum of a Brownian motion with diffusivity $2$. In particular, for any Borel set $A \subseteq C(\mathbb{R}_{\ge 0}) \times C(\mathbb{R}_{\ge 0})$ such that $(\mathcal{D} \otimes \mathcal{D})(A) = 0$,
\[
\mathbb P\bigl\{ \exists \xi \in \Xi_G: \bigl([H_\xi]^{\tau_\xi},[\bck{H}_\xi]^{\bck{\tau_\xi}}\bigr) \in A\bigr\} = 0.
\]
\end{theorem} \begin{proof} By definition of the stationary horizon (Definition~\ref{def:SH}), as functions in $C(\mathbb{R})$, \begin{equation} \label{supdif} H^N_{\xi_i}(y) \deq \sup_{-\infty < x \le y}\{B^{\alpha_N}(x)\} - \sup_{-\infty < x \le 0}\{B^{\alpha_N}(x)\}, \end{equation} where $B^{\alpha_N}$ is a two-sided Brownian motion with drift $\alpha_N$ and diffusivity $2$, with $B^{\alpha_N}(0) = 0$. By adjusting our probability space if needed, we will assume that such a process $B^{\alpha_N}$ exists on our space and $H^N_{\xi_i}$ is given as~\eqref{supdif}. Define two independent $\sigma$-algebras \[ \mathcal{F}_- = \sigma(B^{\alpha_N}(x): x \le 0),\qquad\text{and}\qquad \mathcal{F}_+ = \sigma(B^{\alpha_N}(x): x \ge 0). \]
When $y > 0$, we may write \begin{equation} \label{shdif2} H_{\xi_i}^N(y) = \Bigl[W + \sup_{0 \le x \le y} B^{\alpha_N}(x)\Bigr]^+, \end{equation} where $W = -\sup_{-\infty < x \le 0}\{B^{\alpha_N}(x)\} \in \mathcal{F}_-$, and $\sup_{0 \le x \le y} B^{\alpha_N}(x) \in \mathcal{F}_+$. Then, conditional on $\mathcal{F}_-$, $W$ is constant while the law of $B^{\alpha_N}(x)$ for $x \ge 0$ is unchanged. Then, by~\eqref{shdif2} and Equation~\eqref{uncondD} of Lemma~\ref{lem:WBSM} in the special case where $W$ is constant (using the exact same reasoning as in the proof of Corollary~\ref{cor:discrete_restart}), \begin{equation} \label{F-cond}
\mathbb P(\big[H^{N}_{\xi_i}\big]^{\tau^{N}_{\xi_i}} \in \aabullet\,|\,\mathcal{F}_-) = \mathcal{D}^{\alpha_N}(\aabullet). \end{equation}
For a fixed $i$, $\bck{H}^N_{\xi_i}$ and $H^N_{\xi_i}$ have the same distribution as functions on $\mathbb{R}$. This comes by first applying Corollary~\ref{cor:SH_reflect} and then \eqref{H65}, shifting the directions by $\xi_{i-1}+\xi_i$: \begin{align*}
\bck{H}^N_{\xi_i}(x) &= -{H}^N_{\xi_i}(-x) = - G_{\xi_i}(-x) + G_{\xi_{i-1}}(-x)
\deq - G_{-\xi_i}(x) + G_{-\xi_{i-1}}(x)\\
&\deq - G_{\xi_{i-1}}(x) + G_{\xi_i}(x)
={H}^N_{\xi_i}(x) . \end{align*}
By~\eqref{supdif}, $(\bck{ H}_{\xi_i}^N,\bck{\tau}_{\xi_i}^N) \in \mathcal{F}_-$. We mimic the calculation in \eqref{Omr3}, for two Borel sets $A_1,A_2\subseteq C(\mathbb{R}_{\ge 0})$ and an open rectangle $R=(a,b)\times(c,d)\subseteq \mathbb{R}_{\ge0}\times\mathbb{R}$:
\begin{align}\label{Omr4}
&\quad\;\mathbb{E}\Big(\sum_{(\bck{\tau}^N_{\!\!\xi_i},\;\xi_i)\in R\cap\bck{\Gamma}^{(N)}} \mathbf{1}_{A_1}\big([H^{N}_{\xi_i}]^{\tau^{N}_{\xi_i}}\big)\mathbf{1}_{A_2}\big([\bck{H}^{N}_{\xi_i}]^{\bck{\tau}^{N}_{\xi_i}}\big)\Big) \\
&= \sum_{\xi_i\in(c,d)} \mathbb{E}\bigg(\mathbf{1}_{A_2}\big([\bck{H}^{N}_{\xi_i}]^{\bck{\tau}^{N}_{\xi_i}}\big)\;\mathbf{1}_{(a,b)}\big(\bck{\tau}^{N}_{\xi_i}\big) \mathbb{E}\Big[\mathbf{1}_{A_1}\big([H^{N}_{\xi_i}]^{\tau^{N}_{\xi_i}}\big)\hspace{0.9pt}\Big|\mathcal{F}_-\Big]\bigg) \nonumber\\
&\stackrel{\eqref{F-cond}}{=} \sum_{\xi_i\in(c,d)} \mathbb{E}\bigg( \mathbb{E}\Big[\mathbf{1}_{A_2}\big([\bck{H}^{N}_{\xi_i}]^{\bck{\tau}^{N}_{\xi_i}}\big)\;\mathbf{1}_{(a,b)}\big(\bck{\tau}^{N}_{\xi_i}\big)\hspace{0.9pt}\Big|\bck{\tau}_{\xi_i}^N\Big] \bigg)\mathcal{D}^{\alpha_N}(A_1) \nonumber\\
&= \sum_{\xi_i\in(c,d)} \mathbb{E}\bigg(\mathbf{1}_{(a,b)}\big(\bck{\tau}^{N}_{\xi_i}\big) \mathbb{E}\Big[\mathbf{1}_{A_2}\big([\bck{H}^{N}_{\xi_i}]^{\bck{\tau}^{N}_{\xi_i}}\big)\;\hspace{0.9pt}\Big|\bck{\tau}_{\xi_i}^N\Big] \bigg)\mathcal{D}^{\alpha_N}(A_1) \nonumber \\
&\stackrel{\eqref{eq4}}{=} \sum_{\xi_i\in(c,d)} \mathbb P\big(\bck{\tau}^{N}_{\xi_i}\in (a,b)\big)\mathcal{D}^{\alpha_N}(A_1)\mathcal{D}^{\alpha_N}(A_2) \nonumber
\\&
=\mathcal{D}^{\alpha_N}(A_1)\mathcal{D}^{\alpha_N}(A_2)\hspace{0.9pt} \lambda_{\bck{\Gamma}}^{(N)}(R).\nonumber
\end{align} As in the proof of Lemma \ref{lm:SH5}, we derive from the above that \begin{equation}\label{Q2}
\mathbb{E}\Big(\sum_{(\bck{\tau}_{\xi},\xi)\in R\cap\bck{\Gamma}} \mathbf{1}_{A_1}\big([H_{\xi}]^{\tau_{\xi}}\big)\mathbf{1}_{A_2}\big([\bck{H}_{\xi}]^{\bck{\tau}_{\xi}}\big)\Big)=\mathcal{D}(A_1)\mathcal{D}(A_2)\hspace{0.9pt} \lambda_{\bck{\Gamma}}(R), \end{equation} through the convergence of line~\eqref{Omr4} to the left-hand side of \eqref{Q2}. Instead of the events $\mathcal{U}_N^k$ in \eqref{Uset}, consider \begin{align*}
\mathcal{\widetilde U}_N^k&=\bigl\{\,|\bck{\Gamma}^{(N)}\cap R| = |\bck{\Gamma}\cap R|, \text{ \,and $\forall \hspace{0.9pt}(\bck{\tau},\xi)\in \bck{\Gamma}\cap R$, \ $\exists$ unique $(\bck{\tau}^N_{\xi_i},\xi_i)\in \bck{\Gamma}^{(N)}\cap R$
} \\
&\qquad\qquad\text{ such that $[H^{N}_{\xi_i}]^{\tau^{N}_{\xi_i}}\big|_{[0,k]}=[H_{\xi}]^{\tau_{\xi}}\big|_{[0,k]}$ and $[\bck{H}^{N}_{\xi_i}]^{\bck{\tau}^{N}_{\xi_i}}\big|_{[0,k]}=[\bck{H}_{\xi}]^{\bck{\tau}_{\xi}}\big|_{[0,k]}$}\bigr\}.
\end{align*} For each $k>0$, $\mathbf{1}_{\mathcal{\widetilde U}_N^k}\to 1$ almost surely, as it did for \eqref{Uset}. Indeed, there are finitely many pairs $(\bck{\tau},\xi) \in \bck{\Gamma} \cap R$, and each has a finite forward splitting time $\tau$. All these can be confined in a common compact rectangle.
From here, the proof
continues as for Lemma \ref{lm:SH5} and Theorem~\ref{thm:Lac5}. \end{proof}
\subsection{Remaining proofs} \label{sec:last_proofs} It remains to prove Theorems~\ref{thm:DLBusedc_description}\ref{itm:Busedc_t},~\ref{thm:BusePalm}, and~\ref{thm:Split_pts}. Recall the definition of the function from~\eqref{fsdir}: $ f_{s,\xi}(x) = W_{\xi+}(x,s;0,s) - W_{\xi -}(x,s;0,s). $ \begin{equation} \label{omega3} \begin{aligned} &\text{Let $\Omega_3$ be the subset of $\Omega_2$ on which the following holds: for each $T \in \mathbb{Z}$,}\\ &\text{whenever $\xi \in \mathbb{R}$ is such that $f_{T,\,\xi} \neq 0$, then} \lim_{x \to \pm \infty} f_{T,\,\xi}(x) = \pm \infty. \end{aligned} \end{equation} By Theorem~\ref{thm:Buse_dist_intro}\ref{itm:SH_Buse_process} and Corollary~\ref{cor:dcLR}, $\mathbb P(\Omega_3) = 1$. \begin{proof}[Proof of Theorem~\ref{thm:DLBusedc_description}\ref{itm:Busedc_t}] We work on the full-probability event $\Omega_3$. The statement \eqref{bad_ub} to be proved is $\xi \in \Xi \iff \forall s\in\mathbb{R}:\lim_{x \to \pm \infty} f_{s,\xi}(x) = \pm \infty$.
If, for \textit{some} $s$, $f_{s,\xi}(x) \to \pm \infty$ as $x \to \pm \infty$, then $W_{\xi-}(x,s;0,s) \neq W_{\xi +}(x,s;0,s)$ for $|x|$ sufficiently large, and $\xi \in \Xi$. It remains to prove the converse statement. From~\eqref{881}, \[ \Xi = \bigcup_{T \in \mathbb{Z}} \{\xi \in \mathbb{R}: W_{\xi - }(x,T;0,T) \neq W_{\xi +}(x,T;0,T) \text{ for some }x \in \mathbb{R}\}. \] To finish the proof of~\eqref{bad_ub}, by definition of $\Omega_3$, it suffices to show these two statements: \begin{enumerate}[label=\rm(\roman{*}), ref=\rm(\roman{*})] \itemsep=3pt
\item If $f_{s,\xi} \neq 0$ for some $s,\xi \in \mathbb{R}$ then $f_{T,\,\xi} \neq 0$ for all $T > s$.
\item For $T \in \mathbb{Z}, \xi \in \mathbb{R}$, if $f_{T,\,\xi} \neq 0$, then for all $s < T$, $\lim_{x \to \pm \infty} f_{s,\xi}(x) = \pm \infty$. \end{enumerate}
Part (i) follows from the equality below. By~\eqref{880}, for $s < T$, \begin{equation} \label{883}\begin{aligned} W_{\xi \sig}(x,s;0,s) &= \sup_{z \in \mathbb{R}}\{\mathcal{L}(x,s;z,T) + W_{\xi \sig}(z,T;0,T)\}\\[-3pt] &\qquad\qquad - \sup_{z \in \mathbb{R}} \{\mathcal{L}(0,s;z,T) + W_{\xi \sig}(z,T;0,T)\}. \end{aligned} \end{equation}
To prove (ii), we show the limits as $x \to +\infty$, and the limits as $x \to -\infty$ follow analogously. Let $T \in \mathbb{Z},\xi \in \mathbb{R}$ be such that $f_{T,\,\xi} \neq 0$, and let $R > 0$. By definition of the event $\Omega_3$, we may choose $Z > 0$ sufficiently large so that $\inf_{z \ge Z}\{f_{T,\,\xi}(z)\} \ge R$. Then, by Equation~\eqref{373} of Theorem~\ref{thm:g_basic_prop}\ref{itm:DL_SIG_conv_x}, for all sufficiently large $x$ and $\sigg \in \{-,+\}$, \[ \sup_{z \in \mathbb{R}}\{\mathcal{L}(x,s;z,T) + W_{\xi \sig}(z,T;0,T)\} = \sup_{z \ge Z}\{\mathcal{L}(x,s;z,T) + W_{\xi \sig}(z,T;0,T)\}. \] Let \[ A := \sup_{z \in \mathbb{R}}\{\mathcal{L}(0,s;z,T) + W_{\xi +}(z,T;0,T)\}- \sup_{z \in \mathbb{R}}\{\mathcal{L}(0,s;z,T) + W_{\xi -}(z,T;0,T)\}, \] and note that this is a constant in $x$. Then, by~\eqref{883}, \begin{align*}
-f_{s,\xi}(x) &= \sup_{z \ge Z}\{\mathcal{L}(x,s;z,T) + W_{\xi -}(z,T;0,T)\} \\
&\qquad\qquad
- \sup_{z \ge Z}\{\mathcal{L}(x,s;z,T) + W_{\xi +}(z,T;0,T)\} + A \\
&\le \sup_{z \ge Z}\{W_{\xi-}(z,T;0,T) - W_{\xi +}(z,T;0,T) \} +A \\&
= -\inf_{z \ge Z}\{f_{T,\,\xi}(z)\} + A \le -R + A, \end{align*} so that $f_{s,\xi}(x) \ge R - A$. Since $A$ is constant in $x$ and $R$ is arbitrary, the desired result follows.
Note that~\eqref{bad_ub} immediately proves~\eqref{eqn:dcset_union1} in the case $x = 0$. The general case follows from additivity of the Busemann functions (Theorem~\ref{thm:DL_Buse_summ}\ref{itm:DL_Buse_add}) and~\eqref{bad_ub}. \end{proof}
\begin{proof}[Proof of Theorem~\ref{thm:BusePalm} (Local time description of the difference profile)] This comes by Theorem~\ref{thm:indep_loc} since $\{W_\xi(\abullet,0;0,0)\}_{\xi \in \mathbb{R}}$ $\deq G$ (Theorem~\ref{thm:Buse_dist_intro}\ref{itm:SH_Buse_process}), with probability one $\xi \in \Xi$ iff $\tau_{\xi} < \infty$ iff $\bck{\tau_\xi} < \infty$ (Theorem~\ref{thm:DLBusedc_description}\ref{itm:Busedc_t}, Corollary~\ref{cor:dcLR}), and the running maximum process and the local time process of a Brownian motion are equal in distribution (L\'evy~\cite{Levy_book}). \end{proof}
For the convenience of the reader, we repeat definitions \eqref{Split_sdir}--\eqref{eqn:gen_split_set} and~\eqref{Dsdir},\eqref{eqn:split_LR_sdir}. As before, $S \in \{L,R\}$. \begin{align*}
\mathfrak S_{s,\xi} &= \{x \in \mathbb{R}: \text{there exist disjoint semi-infinite geodesics from }(x,s) \text{ in direction }\xi\}, \\
\mathfrak S &= \bigcup_{s \in \mathbb{R}, \xi \in \Xi} \mathfrak S_{s,\xi} \times \{s\},
\qquad
\mathfrak S_{s,\xi}^{S} = \{x \in \mathbb{R}: g_{(x,s)}^{\xi -,S} \text{ and } g_{(x,s)}^{\xi +,S} \text{ are disjoint}\}, \\
\mathfrak S^S &= \bigcup_{\xi \in \Xi, s \in \mathbb{R}} \mathfrak S_{s,\xi}^S \times \{s\}, \qquad\text{and}\qquad
\mathcal{D}_{s,\xi} = \{ x\in\mathbb{R}: f_{s,\xi}(x - \varepsilon) < f_{s,\xi}(x + \varepsilon)\; \forall \varepsilon > 0 \}.
\end{align*} \begin{remark} \label{rmk:splitsetseq} In contrast with $\mathfrak S$ in~\eqref{eqn:gen_split_set4}, the sets $\mathfrak S^S$ are concerned only with leftmost ($S=L$) and rightmost ($S=R$) Busemann geodesics. In BLPP, the analogues of $\mathfrak S^L$ and $\mathfrak S^R$ are both equal to the set of initial points from which some geodesic travels initially vertically (Theorems 2.10 and 4.30 in~\cite{Seppalainen-Sorensen-21b}). Furthermore, in BLPP, the analogue of this set contains $\operatorname{NU}_0$. We do not presently know whether either is true in DL. \end{remark}
\begin{proof}[Proof of Theorem~\ref{thm:random_supp}] The full-probability event is $\Omega_2$ in~\eqref{omega2}. The monotonicity of the function $f_{s,\xi}$ follows from~\eqref{801}. We now prove that $\mathcal{D}_{s,\xi} = \mathfrak S_{s,\xi}^L \cup \mathfrak S_{s,\xi}^R$. Assume that $y \notin \mathcal{D}_{s,\xi}$. Then, there exist $a < y < b$ such that $f_{s,\xi}$ is constant on $[a,b]$. Hence, for $a \le x < y$, \[ W_{\xi +}(x,s;0,s) - W_{\xi -}(x,s;0,s) = W_{\xi +}(y,s;0,s) - W_{\xi -}(y,s;0,s), \] and by additivity $W_{\xi-}(y,s;x,s) = W_{\xi +}(y,s;x,s)$ (Theorem~\ref{thm:DL_Buse_summ}\ref{itm:DL_Buse_add}). Choose $t > s$ sufficiently small so that $g_{(x,s)}^{\xi +,R}(t) < g_{(y,s)}^{\xi -,L}(t)$. By Lemma~\ref{lem:Buse_equality_coal}, $g_{(y,s)}^{\xi -,L}(u) = g_{(y,s)}^{\xi +,L}(u)$ for $u \in [s,t]$. By a symmetric argument, instead choosing a point $x > y$, $g_{(y,s)}^{\xi -,R}$ and $g_{(y,s)}^{\xi +,R}$ agree near the starting point $(y,s)$. Hence, $y \notin \mathfrak S_{s,\xi}^L \cup \mathfrak S_{s,\xi}^R$.
Next, assume that $y \in \mathcal{D}_{s,\xi}$. Then, for all $x < y < z$, \[ W_{\xi +}(x,s;0,s) - W_{\xi -}(x,s;0,s) < W_{\xi +}(z,s;0,s) - W_{\xi -}(z,s;0,s) \] and hence either (i) $W_{\xi -}(y,s;x,s) < W_{\xi +}(y,s;x,s)$ for all $x < y$ or (ii) $W_{\xi -}(z,s;y,s) < W_{\xi +}(z,s;y,s)$ for all $z > y.$
We show that $g_{(y,s)}^{\xi -,L}$ and $g_{(y,s)}^{\xi +,L}$ are disjoint in the first case. A symmetric proof shows that $g_{(y,s)}^{\xi -,R}$ and $g_{(y,s)}^{\xi +,R}$ are disjoint in the second case. So assume $W_{\xi -}(y,s;x,s) < W_{\xi +}(y,s;x,s)$ for all $x < y$. Sending $x \nearrow y$, $g_{(x,s)}^{\xi -,R}$ converges to $g_{(y,s)}^{\xi -,L}$ by Theorem~\ref{thm:g_basic_prop}\ref{itm:DL_SIG_conv_x}. Assume, by way of contradiction, that $g_{(y,s)}^{\xi -,L}(u) = g_{(y,s)}^{\xi +,L}(u)$ for some $u > s$. This implies then $g_{(y,s)}^{\xi -,L}(t) = g_{(y,s)}^{\xi +,L}(t)$ for all $t \in [s,u]$ since both paths are the leftmost geodesic between any two of their points (Theorem~\ref{thm:DL_SIG_cons_intro}\ref{itm:DL_LRmost_geod}). For $t \ge s$, the convergence $g_{(x,s)}^{\xi -,R}(t) \to g_{(y,s)}^{\xi -,L}(t)$ is monotone by Theorem~\ref{thm:g_basic_prop}\ref{itm:DL_SIG_mont_x}. Since geodesics are continuous paths, Dini's theorem implies that, as $x \nearrow y$, $g_{(x,s)}^{\xi -,R}(t)$ converges to $g_{(y,s)}^{\xi -,L}(t) = g_{(y,s)}^{\xi + ,L}(t)$ uniformly in $t \in [s,u]$. Lemma~\ref{lm:BGH_disj} implies that, for sufficiently close $x < y$, $g_{(x,s)}^{\xi -,R}$ and $g_{(y,s)}^{\xi +,L}$ are not disjoint. This contradicts \ref{itm:DL_pm_Buse_eq}$\Leftrightarrow$\ref{itm:DL_paths} of Theorem~\ref{thm:Buse_pm_equiv} since we assumed $W_{\xi -}(y,s;x,s) < W_{\xi +}(y,s;x,s)$ for all $x < y$.
Lastly, we show that $(\mathfrak S_{s,\xi} \setminus \mathcal{D}_{s,\xi}) \times \{s\} \subseteq \operatorname{NU}_1^{\xi - } \cap \operatorname{NU}_1^{\xi +} \cap\, \mathcal{H}_s$. Let $x \in \mathfrak S_{s,\xi} \setminus \mathcal{D}_{s,\xi}$. By Theorem~\ref{thm:all_SIG_thm_intro}\ref{itm:DL_LRmost_SIG}, $g_{(x,s)}^{\xi -,L}$ is the leftmost $\xi$-directed geodesic from $(x,s)$, and $g_{(x,s)}^{\xi +,R}$ is the rightmost. Since $x \in \mathfrak S_{s,\xi}$, these two geodesics must be disjoint. Since $x \notin \mathcal{D}_{s,\xi}$, $g_{(x,s)}^{\xi -,L}$ and $g_{(x,s)}^{\xi +,L}$ are not disjoint, and $g_{(x,s)}^{\xi-,R}$ and $g_{(x,s)}^{\xi +,R}$ are not disjoint. Since the leftmost/rightmost semi-infinite geodesics are leftmost/rightmost geodesics between their points (Theorem~\ref{thm:DL_SIG_cons_intro}\ref{itm:DL_LRmost_geod}), there exists $\varepsilon > 0$ such that for $t \in (s,s + \varepsilon)$, \[ g_{(x,s)}^{\xi -,L}(t) = g_{(x,s)}^{\xi +,L}(t) < g_{(x,s)}^{\xi -,R}(t) = g_{(x,s)}^{\xi +,R}(t), \] so, recalling the definition~\eqref{NU1}, $(x,s) \in \operatorname{NU}_1^{\xi -} \cap \operatorname{NU}_1^{\xi +} \cap\, \mathcal{H}_s$. \end{proof}
\begin{lemma} \label{lem:rm_geod} Given $\omega \in \Omega_2$ and $(x,s;y,u) \in \Rup$, let $g:[s,u] \to \mathbb{R}$ be the leftmost {\rm(}resp.\ rightmost{\rm)} geodesic between $(x,s)$ and $(y,u)$. Then, $(g(t),t) \in \mathfrak S^L$ ${\rm(} \text{resp.\ } \mathfrak S^R {\rm)}$ for some $t \in [s,u)$. Furthermore, among the directions $\xi$ for which $g_{(x,s)}^{\xi-,L}$ and $g_{(x,s)}^{\xi+,L}$ separate at some $t \in [s,u)$, there is a unique direction $\widehat \xi$ such that \[ g_{(x,s)}^{\widehat \xi-,L}(u) \le y < g_{(x,s)}^{\widehat \xi+,L}(u). \] The same holds with $L$ replaced by $R$ and the strict and weak inequalities swapped. \end{lemma} \begin{proof} We prove the statement for leftmost geodesics. The proof for rightmost geodesics is analogous. Set \begin{equation} \label{468} \widehat \xi := \sup \{\xi \in \mathbb{R}: g_{(x,s)}^{\xi \sig,L}(u) \le y\} = \inf \{\xi \in \mathbb{R}: g_{(x,s)}^{\xi \sig,L}(u) > y\}. \end{equation} The monotonicity of Theorem~\ref{thm:g_basic_prop}\ref{itm:DL_mont_dir} guarantees that the second equality holds, and that the definition is independent of the choice of $\sigg \in \{-,+\}$. Theorem~\ref{thm:g_basic_prop}\ref{itm:limits_to_inf} guarantees that $\widehat \xi \in \mathbb{R}$. By definition of $\widehat \xi$ and the monotonicity of Theorem~\ref{thm:g_basic_prop}\ref{itm:DL_mont_dir}, $g_{(x,s)}^{\alpha \sig,L}(u) \le y = g(u) < g_{(x,s)}^{\beta \sig,L}(u)$ whenever $\alpha < \widehat \xi < \beta$ and $\sigg \in \{-,+\}$.
But by Theorem~\ref{thm:g_basic_prop}\ref{itm:DL_SIG_unif}, the $\beta\sigg$ and $\widehat\xi+$ geodesics agree locally when $\beta$ is close enough to $\widehat\xi$. We can conclude that \begin{equation} \label{423} g_{(x,s)}^{\widehat \xi -,L}(u) \le y = g(u) < g_{(x,s)}^{\widehat \xi +,L}(u). \end{equation} Since all three are leftmost geodesics (recall Theorem~\ref{thm:DL_SIG_cons_intro}\ref{itm:DL_LRmost_geod} for the Busemann geodesics), \begin{equation} \label{523} g_{(x,s)}^{\widehat \xi -,L}(t)\le g(t) \le g_{(x,s)}^{\widehat \xi +,L}(t)\qquad\text{for } t \in [s,u]. \end{equation} By~\eqref{423} the paths $g_{(x,s)}^{\widehat \xi -,L}$ and $g_{(x,s)}^{\widehat \xi +,L}$ must separate at some time $t \in [s,u)$. Furthermore, once $g_{(x,s)}^{\widehat \xi -,L}$ splits from $g_{(x,s)}^{\widehat \xi +,L}$ at a point $(z_1,t_1)$, the geodesics must stay apart. Otherwise, they would meet again at a point $(z_2,t_2)$, and Theorem~\ref{thm:DL_SIG_cons_intro}\ref{itm:DL_LRmost_geod} implies that both paths are the leftmost geodesic between $(z_1,t_1)$ and $(z_2,t_2)$. See Figure~\ref{fig:splitting}. Set $\hat t = \inf\{t > s: g_{(x,s)}^{\widehat \xi-,L}(t) < g_{(x,s)}^{\widehat \xi +,L}(t)\}$. Then, $g_{(x,s)}^{\widehat \xi-,L}(t) < g_{(x,s)}^{\widehat \xi +,L}(t)$ for all $t > \hat t$. By~\eqref{523} and continuity of geodesics, $g_{(x,s)}^{\widehat \xi-,L}(t) = g(t) = g_{(x,s)}^{\widehat \xi +,L}(t)$ for $t \in [s,\hat t\hspace{0.7pt}]$, and so $(g(\hat t),\hat t\hspace{0.7pt}) \in \mathfrak S^L$.
\end{proof}
\begin{figure}
\caption{\small The black/thin path is the path $g$. The red/thick paths are the semi-infinite geodesics $g_{(x,s)}^{\widehat \xi-,L}$ and $g_{(x,s)}^{\widehat \xi+,L}$ after they split from $g$. Once the red paths split, they cannot return, or else there would be two leftmost geodesics from $(g(\hat t),\hat t)$ to the point where they come back together.}
\label{fig:splitting}
\end{figure}
\begin{proof}[Proof of Theorem~\ref{thm:Split_pts}]
\noindent \textbf{Item~\ref{itm:split_dense} ($\mathfrak S$ is dense):} Work on the full-probability event $\Omega_2$. Since $\mathfrak S \supseteq \mathfrak S^L \cup \mathfrak S^R$, it suffices to show that for $(x,s) \in \mathbb{R}^2$ there is a sequence $(y_n,t_n) \in \mathfrak S^L$ converging to $(x,s)$. Let $g$ be the leftmost geodesic from $(x,s)$ to $(x,s + 1)$. Then $\forall n \ge 1$, $g|_{[s,s + n^{-1}]}$ is the leftmost geodesic from $(x,s)$ to $(x,s + n^{-1})$. By Lemma~\ref{lem:rm_geod}, $\forall n \in \mathbb{Z}_{> 0}$ $\exists (x_n,t_n) \in \mathfrak S^L$ such that $x_n = g(t_n)$ and $s \le t_n \le s + n^{-1}$. The proof is complete by continuity of geodesics.
\noindent \textbf{Item~\ref{itm:splitp0} ($\mathbb P(p \in \mathfrak S) = 0$ for all $p \in \mathbb{R}^2$):} If there exist disjoint semi-infinite geodesics from $(x,s)$, then for each level $t > s$, there exist disjoint geodesics from $(x,s)$ to some points $(y_1,t),(y_2,t)$. For each fixed $(x,s)$, with probability one, this occurs for no such points by~\cite[Remark 1.12]{Bates-Ganguly-Hammond-22}.
\noindent \textbf{Item~\ref{itm:Hasudorff1/2} (Hausdorff dimension of $\mathfrak S_{s,\xi})$:} Since $s$ is fixed, it suffices to take $s = 0$. By Theorem~\ref{thm:Buse_dist_intro}\ref{itm:SH_Buse_process}, $\{W_{\xi +}(\abullet,0;0,0)\}\deq G$, and by Theorem~\ref{thm:DLBusedc_description}\ref{itm:Busedc_t}, $\xi \in \Xi$ if and only if $f_{0,\xi} \neq 0$. Therefore, Corollary~\ref{cor:SHHaus1/2} implies that $\dim_H(\mathcal{D}_{0,\xi}) = \frac{1}{2}$ with probability one. By Remark~\ref{rmk:NUsupp}, $\mathbb P(\dim_H(\mathfrak S_{0,\xi}) = \frac{1}{2}) = 1$.
\noindent \textbf{Item~\ref{itm:nonempty} ($\mathfrak S_{s,\xi}$ is nonempty and unbounded for all $s$):} By Theorem~\ref{thm:DLBusedc_description}\ref{itm:Busedc_t}, on the event $\Omega_3$, whenever $\xi \in \Xi$, for all $s \in \mathbb{R}$, $f_{s,\xi}(x) \to \pm \infty$ as $x \to \pm \infty$. Since $f_{s,\xi}$ is continuous (Theorem~\ref{thm:DL_Buse_summ}\ref{itm:general_cts}), the set $\mathcal{D}_{s,\xi}$ is unbounded in both directions. The proof is complete since $\mathcal{D}_{s,\xi} \subseteq \mathfrak S_{s,\xi}$ by definition. \end{proof}
\section{Open problems} \label{sec:op} We enumerate open problems that arise from this paper and mention solutions that have appeared since this paper was first posted. \begin{enumerate} [label=\rm(\roman{*}), ref=\rm(\roman{*})] \itemsep=3pt
\item Prove convergence to SH for the Busemann process of some model other than exponential LPP \cite{Busani-2021} and BLPP \cite{Seppalainen-Sorensen-21b} (For BLPP, convergence has been shown only for finite-dimensional distributions). In our work~\cite{Busa-Sepp-Sore-22b} that came after the first version of this paper, we show convergence of the TASEP speed process from~\cite{Amir_Angel_Valko11} to the SH. In this particle systems context, there are no Busemann functions, but there is a notion of coupled invariant measures. In the long term, a true statement for KPZ universality should include convergence of its coupled invariant measures to the stationary horizon.
\item Recall definitions \eqref{Split_sdir}--\eqref{eqn:gen_split_set} and Remark~\ref{rmk:supports}. Can one describe the size of the sets $\mathfrak S_{s,\xi}$ globally instead of just on a fixed horizontal line, as in Theorem~\ref{thm:Split_pts}? Does $\mathfrak S_{s,\xi}$ have Hausdorff dimension $\frac{1}{2}$ simultaneously for all $s \in \mathbb{R}$ and $\xi \in \Xi$? The support of the Airy difference profile along a vertical line was recently studied in~\cite{Ganguly-Zhang-2022a}. What properties does the set $\mathfrak S$ have along a vertical line?
\item Are all semi-infinite geodesics Busemann geodesics? (Theorem~\ref{thm:DL_good_dir_classification}\ref{itm:DL_allBuse} covers the case $\xi \notin \Xi$.) Equivalently, does every semi-infinite geodesic in direction $\xi \in \Xi$ coalesce with a $\xi-$ or $\xi+$ geodesic?
\item For $\xi \in \mathbb{R}$ and $\sigg \in \{-,+\}$, is $\operatorname{NU}_1^{\xi \sig}$ a strict subset of $\operatorname{NU}_0^{\xi \sig}$? (Recall definitions \eqref{NU0}-~\eqref{NU1}.) That is, are there $\xi \sig$ geodesics that stick together for some time, separate, then come back together, or must they separate immediately? See Figure~\ref{fig:NU}. After the posting of the first version of this paper, it was shown by Bhatia~\cite{Bhatia-23} that the two sets are equal.
\item The set $\operatorname{NU}_0$ is countably infinite on each horizontal line and hence globally uncountable (Theorem~\ref{thm:DLNU}). What is the Hausdorff dimension of $\operatorname{NU}_0$? It has since been shown in~\cite{Bhatia-23} that for fixed direction $\xi \in \mathbb{R}$, $\operatorname{NU}_0^\xi$ almost surely has Hausdorff dimension $\frac{4}{3}$. By Theorem~\ref{thm:DLNU}, the full set $\operatorname{NU}_0$ also has Hausdorff dimension $\frac{4}{3}$.
\item In BLPP, the analogue of the inclusion $\operatorname{NU}_0 \subseteq \mathfrak S$ holds \cite{Seppalainen-Sorensen-21b}. The reason is that in BLPP, the analogue of the set $\mathfrak S$ is the set of initial points from which some finite geodesic begins with a vertical step. We do not have such a description in DL. Does the inclusion still hold?
\item Are the sets $\mathfrak S^L$ and $\mathfrak S^R$ defined in~\eqref{eqn:split_LR_sdir} equal, as is the case for the analogous sets in BLPP? See Remark~\ref{rmk:splitsetseq}. \end{enumerate}
\begin{appendix} \section{Maximizers of continuous functions} For a function $f:\mathbb{R} \to \mathbb{R}$, we denote its increments by $f(x,y) := f(y) - f(x).$ For two functions $f,g:\mathbb{R} \to \mathbb{R}$, we say that $f \;{\le}_{\rm inc}\; g$ if $f(x,y) \le g(x,y)$ for all $x < y$ in $\mathbb{R}$. \begin{lemma} \label{lemma:max_monotonicity} Let $f,g:\mathbb{R} \to \mathbb{R}$ be continuous functions satisfying $f(x)\vee g(x) \to -\infty$ as $x \to \pm \infty$ and $f \;{\le}_{\rm inc}\; g$. Let $x_f^L$ and $x_f^R$ be the leftmost and rightmost maximizers of $f$ over $\mathbb{R}$, and similarly defined for $g$. Then, $x_f^L \le x_g^L$ and $x_f^R \le x_g^R$. \end{lemma} \begin{proof} By the definition of $x_g^R$ and by the assumption $f \;{\le}_{\rm inc}\; g$, for all $x > x_g^R$ \[ f(x_g^R,x) \le g(x_g^R,x) < 0. \] Hence, the rightmost maximizer of $f$ must be less than or equal to $x_g^R$. We get the statement for leftmost maximizers by considering the functions $x \mapsto f(-x)$ and $x \mapsto g(-x)$. \end{proof}
\begin{lemma} \label{lem:ext_mont} Assume that $f,g:\mathbb{R} \to \mathbb{R}$ satisfy $f \;{\le}_{\rm inc}\; g$. Then, for $a \le x \le y \le b$, \[ 0 \le g(x,y) - f(x,y) \le g(a,b) - f(a,b). \] \end{lemma} \begin{proof} The first inequality follows immediately from the assumption $f \;{\le}_{\rm inc}\; g$. The second follows from the inequality \[ f(a,b) - f(x,y) = f(a,x) + f(y,b) \le g(a,x) + g(y,b) = g(a,b) - g(x,y). \qedhere \] \end{proof}
\begin{lemma} \label{lemma:convergence of maximizers from converging functions} Let $S \subseteq \mathbb{R}^n$, and let $f_n:S \rightarrow \mathbb{R}$ be a sequence of continuous functions, converging uniformly to the function $f:S \rightarrow \mathbb{R}$. Assume that there exists a sequence $\{c_n\}$, of maximizers of $f_n$, converging to some $c \in S$. Then, $c$ is a maximizer of $f$. \end{lemma} \begin{proof} $f_n(c_n) \ge f_n(x)$ for all $x \in S$, so it suffices to show that $f_n(c_n) \rightarrow f(c)$. This follows from the uniform convergence of $f_n$ to $f$, the continuity of $f$, and \[
|f_n(c_n) - f(c)| \le |f_n(c_n) - f(c_n)| +|f(c_n) - f(c)|. \qedhere \] \end{proof}
\section{Directed landscape and the KPZ fixed point} \label{sec:DL_KPZ_appendix} The directed landscape satisfies the following symmetries. \begin{lemma}[\cite{Directed_Landscape}, Lemma 10.2 and \cite{Dauvergne-Virag-21}, Proposition 1.23] \label{lm:landscape_symm} As a random continuous function of $(x,s;y,t) \in \Rup$, the directed landscape $\mathcal{L}$ satisfies the following distributional symmetries, for all $r,c \in \mathbb{R}$ and $q > 0$. \begin{enumerate} [label=\rm(\roman{*}), ref=\rm(\roman{*})] \itemsep=4pt
\item {\rm(Space-time stationarity)} \label{itm:time_stat} \ \ $\mathcal{L}(x,s;y,t) \deq \mathcal{L}(x+c,s + r;y+c,t + r).
$
\item {\rm(Skew stationarity)} \label{itm:skew_stat}
\ \ $
\mathcal{L}(x,s;y,t) \deq \mathcal{L}(x + cs,s;y + ct,t) -2c(x - y) + (t- s)c^2.
$
\item \label{itm:DL_reflect} {\rm(Spatial and temporal reflections)}
\ \ $
\mathcal{L}(x,s;y,t) \deq \mathcal{L}(-x,s;-y,t) \deq \mathcal{L}(y,-t;x,-s).
$
\item \label{itm:DL_rescaling} {\rm(Rescaling)}
\ \ $
\mathcal{L}(x,s;y,t) \deq q\mathcal{L}(q^{-2}x,q^{-3}s;q^{-2}y,q^{-3}t).
$ \end{enumerate} \end{lemma}
\begin{lemma}[\cite{Directed_Landscape}, Corollary 10.7] \label{lem:Landscape_global_bound} There exists a random constant $C$ such that for all $v = (x,s;y,t) \in \Rup$, we have \[
\Bigl|\mathcal{L}(x,s;y,t) + \frac{(x - y)^2}{t - s}\Bigr| \le C (t - s)^{1/3} \log^{4/3} \Bigl(\frac{2(\|v\| + 2)}{t - s}\Bigr)\log^{2/3}(\|v\| + 2), \]
where $\|v\|$ is the Euclidean norm. \end{lemma} \begin{lemma}[\cite{Dauvergne-22}, Proposition 2.6] \label{lem:DL_erg_coup} For every $i = 1,\ldots,k$ and $\varepsilon > 0$, let \begin{equation} \label{Kie} K_{i,\varepsilon} = \{(x,s;y,t) \in \Rup: s,t \in [0,\varepsilon],x,y \in [i -1/4,i + 1/4]\}.
\end{equation} Then, there exists a coupling of $k + 1$ copies of the directed landscape $\mathcal{L}_0,\mathcal{L}_1,\ldots,\mathcal{L}_k$ so that $\mathcal{L}_1,\ldots,\mathcal{L}_k$ are independent, and almost surely, there exists a random $\varepsilon > 0$ such that for $1 \le i \le k$, $\mathcal{L}_0|_{K_{i,\varepsilon}} = \mathcal{L}_i|_{K_{i,\varepsilon}}$. \end{lemma}
On a measure space $(\Omega,\mathcal{F},\mathbb P)$, a measure-preserving transformation $T$ satisfies $T^{-1}E \in \mathcal{F}$ and $\mathbb P(T^{-1}E) = \mathbb P(E)$ for all $E \in \mathcal{F}$. Such a transformation is said to be \textit{ergodic} if $\mathbb P(E) \in\{0,1\}$ whenever $T^{-1}E = E$. The transformation $T$ is said to be \textit{mixing} if, for all $A,B \in \mathcal{F}$, $\mathbb P(A \cap T^{-k}B) \to \mathbb P(A) \mathbb P(B)$ as $k \to \infty$. By setting $A = B$, one sees that mixing implies ergodicity. \begin{lemma} \label{lm:horiz_shift_mix} For $a,b \in \mathbb{R}$, not both $0$, and $z > 0$, consider the shift operator $T_{z;a,b}$ acting on the directed landscape $\mathcal{L}$ as \[ T_{z;a,b} \mathcal{L}(x,s;y,t) = \mathcal{L}(x + az,s + bz;y + az,t + bz), \] where both sides are understood as a process on $\Rup$. Then, $\mathcal{L}$ is mixing under this transformation. That is, for all Borel subsets $A,B$ of the space $C(\Rup,\mathbb{R})$, \[ \mathbb P(\mathcal{L} \in A, T_{z;a,b}\mathcal{L} \in B) \overset{z \to \infty}{\longrightarrow} \mathbb P(\mathcal{L} \in A)\mathbb P(\mathcal{L} \in B). \] In words, the directed landscape is mixing (and therefore ergodic) under space-time shifts in any planar direction.
\end{lemma} \begin{proof} The key to the proof is Lemma~\ref{lem:DL_erg_coup}, and we thank Duncan Dauvergne for pointing this out to us. We prove the case $a \neq 0$, and the case $a = 0$ and $b \neq 0$ will be proven separately. Further, since we send $z \to \infty$, it suffices to show the result for $a = 1$: for arbitrary $a \neq 0$ and $b$, one has $T_{z;a,b} = T_{az;1,b/a}$, so the general case follows by replacing $z$ with $az$ and the drift $b$ with $b/a$. With this simplification, we use the shorthand notation $T_{z;b} = T_{z;1,b}$. By Lemma~\ref{lm:landscape_symm}\ref{itm:time_stat}, $\mathcal{L}$ is stationary under the shift $T_{z;b}$. By Dynkin's $\pi$-$\lambda$ theorem, it suffices to show that for a compact set $K\subseteq \Rup$ and Borel sets $A\subseteq C(K,\mathbb{R})$ and $B \subseteq C(K,\mathbb{R})$, \[
\mathbb P(\mathcal{L}|_{K} \in A,(T_{z;b}\mathcal{L})|_{K} \in B) \overset{z \to \infty}{\longrightarrow} \mathbb P(\mathcal{L}|_{K} \in A)\mathbb P(\mathcal{L}|_{K} \in B). \] Further, by temporal stationarity, it suffices to assume that $\inf\{s: (x,s;y,t) \in K\} \ge 0$. Consider the coupling $\mathcal{L}_0,\mathcal{L}_1,\mathcal{L}_2$ of Lemma~\ref{lem:DL_erg_coup} with $k = 2$. Then, using the rescaling and spatial stationarity of Lemma~\ref{lm:landscape_symm}, \begin{align}
&\quad\;\mathbb P(\mathcal{L}|_{K} \in A,(T_{z;b}\mathcal{L})|_{K} \in B) \nonumber \\
&= \mathbb P(\mathcal{L}_0(x,s;y,t)|_{K} \in A, \mathcal{L}_0(x + z,s + bz;y + z,t + bz)|_{K} \in B)\nonumber \\
&= \mathbb P(z^{1/2}\mathcal{L}_0(z^{-1} x,z^{-3/2}s ;z^{-1} y,z^{-3/2} t )|_{K} \in A, \nonumber \\
&\qquad\qquad z^{1/2}\mathcal{L}_0(z^{-1} x + 1,z^{-3/2}s + z^{-1/2} b;z^{-1} y + 1,z^{-3/2} t + z^{-1/2}b)|_{K} \in B) \nonumber \\
&= \mathbb P(z^{1/2}\mathcal{L}_0(z^{-1} x + 1,z^{-3/2}s ;z^{-1} y + 1 ,z^{-3/2} t )|_{K} \in A, \nonumber \\
&\qquad\qquad z^{1/2}\mathcal{L}_0(z^{-1} x + 2,z^{-3/2}s + z^{-1/2}b ;z^{-1} y + 2,z^{-3/2} t + z^{-1/2}b )|_{K} \in B). \label{433} \end{align}
Specifically, we used the rescaling property with $q = z^{1/2}$ in the second equality, and in the third equality, we shifted the entire process by $1$ in the spatial direction. Above, the restrictions $|_{K_j}$ mean that $(x,s;y,t) \in K_j$.
Since $K$ is compact and we assumed $s \ge 0$ for all $(x,s;y,t) \in K$, for any $\varepsilon > 0$, there exists $Z > 0$ such that for $z > Z$, \begin{equation} \label{small_cpct} \begin{aligned}
&\{(z^{-1} x + 1,z^{-3/2}s;z^{-1} y + 1,z^{-3/2} t): (x,s;y,t) \in K\} \subseteq K_{1,\varepsilon}, \quad \text{and}\\
&\{(z^{-1} x + 2,z^{-3/2}s + z^{-1/2}b;z^{-1} y + 2,z^{-3/2} t + z^{-1/2}b): (x,s;y,t) \in K\} \subseteq K_{2,\varepsilon},
\end{aligned} \end{equation} where $K_{i,\varepsilon}$ are defined in~\eqref{Kie}. Let $C_z$ be the event where both these containments hold for the random $\varepsilon > 0$ in Lemma~\ref{lem:DL_erg_coup}, and let $D_z$ be the event in~\eqref{433}. Then, $\mathbb P(C_z) \to 1$ as $z \to +\infty$. Then, for any $\delta > 0$, whenever $z$ is sufficiently large so that $1 - \mathbb P(C_z) \le \delta$, \begin{align*}
&\quad \;\Big|\mathbb P(D_z) - \mathbb P(\mathcal{L}|_{K} \in A)\mathbb P(\mathcal{L}|_{K} \in B)\Big| \\
&\le \Big|\mathbb P(D_z \cap C_z) -\mathbb P(\mathcal{L}|_{K} \in A)\mathbb P(\mathcal{L}|_{K} \in B)\Big| + \delta \\
&=\Big|\mathbb P(z^{1/2}\mathcal{L}_1(z^{-1} x + 1,z^{-3/2}s;z^{-1} y + 1,z^{-3/2} t)|_{K} \in A, \\
&\qquad\qquad z^{1/2}\mathcal{L}_2(z^{-1} x + 2,z^{-3/2}s + z^{-1/2}b;z^{-1} y + 2,z^{-3/2} t + z^{-1/2}b)|_{K} \in B, C_z) \\
&\qquad\qquad\qquad- \mathbb P(\mathcal{L}_1|_{K} \in A)\mathbb P(\mathcal{L}_2|_{K} \in B)\Big| + \delta \\
&\le \Big|\mathbb P(z^{1/2}\mathcal{L}_0(z^{-1} x + 1,z^{-3/2}s;z^{-1} y + 1,z^{-3/2} t)|_{K} \in A, \\
&\qquad\qquad z^{1/2}\mathcal{L}_0(z^{-1} x + 2,z^{-3/2}s + z^{-1/2}b;z^{-1} y + 2,z^{-3/2} t + z^{-1/2}b)|_{K} \in B) \\
&\qquad\qquad\qquad- \mathbb P(\mathcal{L}_1|_{K} \in A)\mathbb P(\mathcal{L}_2|_{K} \in B)\Big| + 2\delta \\
&= \Big|\mathbb P(\mathcal{L}_1|_{K} \in A, \mathcal{L}_2|_{K} \in B) - \mathbb P(\mathcal{L}_1|_{K} \in A)\mathbb P(\mathcal{L}_2|_{K} \in B)\Big| + 2\delta = 2\delta, \end{align*}
completing the proof since $\delta$ is arbitrary. Specifically, in the two inequalities, we added and removed the event $C_z$ at the cost of $\delta$. In the first equality above, we used the fact that the containments~\eqref{small_cpct} hold on $C_z$ and $\mathcal{L}_0|_{K_{i,\varepsilon}} = \mathcal{L}_i|_{K_{i,\varepsilon}}$ for $i = 1,2$. In the last line, we reversed the application of the rescaling and spatial stationarity, then used the independence of $\mathcal{L}_1$ and $\mathcal{L}_2$ from Lemma~\ref{lem:DL_erg_coup}.
The statement for the vertical shift operator when $a = 0$ is simpler. For a compact set $K$, the processes $\mathcal{L}|_{K}$ and $T_{z;0,b} \mathcal{L}|_{K}$ are independent for sufficiently large $b$ by the independent increments property of the directed landscape, and the desired result follows. \end{proof}
Recall the definition of the state space $\operatorname{UC}$~\eqref{UCdef} for the KPZ fixed point. Recall that the KPZ fixed point $h_t(\abullet;\mathfrak h)$ with initial data $\mathfrak h$ at time $0$ can be represented as \[ h_t(y;\mathfrak h) = \sup_{x \in \mathbb{R}}\{\mathfrak h(x) + \mathcal{L}(x,0;y,t)\} \qquad\text{for }t > 0. \] The KPZ fixed point satisfies the semi-group property. That is, for $0 < s < t$, \[ h_t(y;\mathfrak h) = \sup_{x \in \mathbb{R}} \{h_s(x;\mathfrak h) + \mathcal{L}(x,s;y,t) \}. \] In this sense, we may say that $h_t$ has initial data $\mathfrak h$ sampled at time $s < t$, in which case, we write \[ h_t(y;\mathfrak h) = \sup_{x \in \mathbb{R}} \{\mathfrak h(x) + \mathcal{L}(x,s;y,t) \}. \]
\begin{lemma}[\cite{Directed_Landscape,Basu-Ganguly-Hammond-21,Ganguly-Hegde-2021,Pimentel-21b}] \label{lem:DL_crossing_facts} Let $\mathcal{L}:\Rup \to \mathbb{R}$ be a continuous function satisfying the metric composition law~\eqref{eqn:metric_comp} and such that maximizers in~\eqref{eqn:metric_comp} exist. Then, \begin{enumerate} [label=\rm(\roman{*}), ref=\rm(\roman{*})] \itemsep=3pt
\item \label{itm:DL_crossing_lemma} Whenever $s < t$, $x_1 < x_2$, $y_1 < y_2$, \[ \mathcal{L}(x_2,s;y_1,t) - \mathcal{L}(x_1,s;y_1,t) \le \mathcal{L}(x_2,s;y_2,t) - \mathcal{L}(x_1,s;y_2,t). \] \end{enumerate} Let $\mathfrak h^1,\mathfrak h^2 \in \operatorname{UC}$, and for $i = 1,2$ and $t > 0$, set \begin{equation} \label{992} h_t(y;\mathfrak h^i) = \sup_{x \in \mathbb{R}}\{\mathfrak h^i(x) + \mathcal{L}(x,0;y,t)\}. \end{equation} Then, assuming that maximizers in~\eqref{992} exist, the following hold. \begin{enumerate}[resume, label=\rm(\roman{*}), ref=\rm(\roman{*})] \itemsep=3pt
\item \label{itm:KPZ_attractiveness} If $\mathfrak h^1 \;{\le}_{\rm inc}\; \mathfrak h^2$, then $h_t(\aabullet;\mathfrak h^1)\;{\le}_{\rm inc}\; h_t(\aabullet;\mathfrak h^2)$ for all $t > 0$.
\item \label{itm:KPZ_crossing_lemma} For $t > 0$ and $i = 1,2$, set \[ Z_t(y;\mathfrak h^i) = \max \argmax_{x \in \mathbb{R}}\{\mathfrak h^i(x) + \mathcal{L}(x,0;y,t)\}. \] Then, if $x < y$ and $Z_t(y;\mathfrak h^1) \le Z_t(x;\mathfrak h^2)$, \[ h_t(y;\mathfrak h^1) - h_t(x;\mathfrak h^1) \le h_t(y;\mathfrak h^2) - h_t(x;\mathfrak h^2). \] \end{enumerate} \end{lemma}
We prove the following technical lemmas that are used in the proofs of the main theorems. \begin{lemma}\label{lem:unq}
Fix $\xi\in \mathbb{R}$ and $a>0$. Consider the KPZ fixed point starting at time $s$ from a function $\mathfrak h \in \operatorname{UC}$. For $t > s$, let $Z_\mathfrak h^{a,s,t}\subseteq\mathbb{R}$ denote the set of exit points from the time horizon $\mathcal{H}_s$ of the geodesics associated with $\mathfrak h$ and that terminate in $\{t\}\times [-a,a]$. That is,
\begin{equation} \label{exitpt}
Z_\mathfrak h^{a,s,t}=\bigcup_{y\in [-a,a]}\argmax_{x\in\mathbb{R}} \{\mathfrak h(x)+\mathcal{L}(x,s;y,t)\}.
\end{equation}
Then, on the full probability event of Lemma~\ref{lem:Landscape_global_bound}, whenever $\mathfrak h \in \operatorname{UC}$ satisfies condition~\eqref{eqn:drift_assumptions}, and when $\varepsilon>0$, $a > 0$, and $s \in \mathbb{R}$, there exists a random $t_0 = t_0(\varepsilon,a,s) > s \vee 0$ such that for any $t> t_0$,
\begin{equation} \label{upexit}
Z_\mathfrak h^{a,s,t}\subset \big[(\xi-\varepsilon)t,(\xi +\varepsilon)t\big].
\end{equation}
In particular, if $\mathfrak h$ is a random function almost surely satisfying condition~\eqref{eqn:drift_assumptions}, then this random $t_0$ exists almost surely, and \[
\lim_{t \to \infty} \mathbb P\Big(Z_\mathfrak h^{a,s,t}\subset \big[(\xi-\varepsilon)t,(\xi +\varepsilon)t\big]\Big) = 1. \] Furthermore, an analogous statement holds on the same full-probability event if $t$ is held fixed and $s \to -\infty$. That is, there exists a random $s_0 = s_0(\varepsilon,a,t)< t \wedge 0 $ such that for any $s < s_0$, \begin{equation} \label{downexit} Z_\mathfrak h^{a,s,t}\subset \big[-(\xi-\varepsilon)s,-(\xi +\varepsilon)s\big] \end{equation} \end{lemma} \begin{proof} We show~\eqref{upexit}, and~\eqref{downexit} follows by an analogous proof. The idea of the proof is that $\mathfrak h(x)+\mathcal{L}(x,0;y,t)$ is a noisy version of $2\xi x-\frac{x^2}{t}$ and $\argmax_{x\in\mathbb{R}} (2\xi x-\frac{x^2}{t})=\xi t$, but the noise cannot change the exit point by much when $t$ is large. The drift conditions~\eqref{eqn:drift_assumptions} are used in the proof to ensure that for $t$ large enough, all maximizers are positive for $\xi > 0$ and negative for $\xi < 0$. Below, we prove the result for $\xi>0$, and the proof for $\xi<0$ follows by symmetry. The case $\xi=0$ will be proven separately. Fix $\varepsilon>0$. Suppressing the dependence on $a,s$, set \begin{equation} \label{F}
F(x;t)=C_{\text{DL}}(t-s)^{1/3}\log^2\big(2\sqrt{a^2+x^2+s^2 + t^2}+4\big), \end{equation} where $C_{\text{DL}}$ is the random positive constant from Lemma \ref{lem:Landscape_global_bound}. \begin{lemma} \label{lem:dFbd}
For $\varepsilon > 0,a > 0,$ and $s < t \in \mathbb{R}$, there exists $t_1 = t_1(\varepsilon,a,s) > s$ such that for all $t > t_1$, $|F'(x;t)| < \varepsilon$ uniformly for all $x \in \mathbb{R}$. In addition, for $t > t_1$, $F'(x;t) > 0$ for $x > 0$ while $F'(x;t) < 0$ for $x < 0$. \end{lemma} \begin{proof} A quick computation shows that \[
F'(x;t)=\frac{2 C_\text{DL}(t-s)^{1/3} x\log\big(2\sqrt{a^2+x^2+t^2 + s^2}+4 \big)}{(\sqrt{a^2+x^2+t^2 + s^2}+2)\sqrt{a^2+x^2+t^2 + s^2}}. \] We note that $
|x|(a^2 + x^2 +t^2 + s^2)^{-1/2} \le 1, $ and that $\log(x)/x$ is decreasing for $x > e$. Hence, for all $a > 0,s < t,\varepsilon > 0$, and $x \in \mathbb{R}$, \[
|F'(x;t)| \le 2C(t - s)^{1/3} \frac{\log(2|t| +4)}{|t| + 2} \overset{t \to \infty}{\longrightarrow} 0. \qedhere \] \end{proof}
Back to the main proof, from the drift assumption~\eqref{eqn:drift_assumptions}, for each $\varepsilon > 0$, there exists $R_\varepsilon > 0$ so that for all $x \ge R_\varepsilon$, $|\frac{\mathfrak h(x)}{x} - \xi| \le \frac{\varepsilon}{2}$. Let $C_\varepsilon = \sup_{0 \le x \le R_\varepsilon} \mathfrak h(x)$. By the global bound $\mathfrak h(x) \le a + b|x|$ (assumed in the definition of $\operatorname{UC}$), $C_\varepsilon < \infty$. Further, observe that $-\frac{(x - y)^2}{t - s} = -\frac{x^2}{t - s} + \frac{2xy}{t - s} - \frac{y^2}{t - s}$, and $|\frac{2xy}{t - s} - \frac{y^2}{t - s}| \le \varepsilon + \frac{\varepsilon}{2}|x|$ for large $t$, uniformly for $y \in [-a,a]$. Using this and the bounds of Lemma~\ref{lem:Landscape_global_bound}, for $t > s + 1$ sufficiently large (depending on $\varepsilon,a$), for all $y \in [-a,a]$ and $x \ge 0$,
\begin{equation}\label{ub1'}
\begin{aligned}
&\mathfrak h(x)+\mathcal{L}(x,s;y,t)\leq M_{U}(x;t):= C_\varepsilon + 2\xi x + \varepsilon x-\frac{x^2}{t - s}+ \varepsilon + F(x;t),
\end{aligned}
\end{equation}
Further, for all $x\ge R_\varepsilon$, and sufficiently large $t > s + 1$, \begin{equation} \label{ub2'}
\mathfrak h(x)+\mathcal{L}(x,s;y,t)\geq M_{L}(x;t):= 2\xi x - \varepsilon x -\frac{x^2}{t - s} - \varepsilon -F(x;t). \end{equation}
By the assumption~\eqref{eqn:drift_assumptions}, we may choose $\gamma$ so that $-2\xi < 2\gamma < \liminf_{x \to -\infty} \frac{\mathfrak h(x)}{x} \le + \infty$. Then, applying a similar procedure as before and adjusting the constant $C_\varepsilon$ if needed, for all $y \in [-a,a]$ and $x \le 0$,
\begin{equation} \label{ub3'}
\mathfrak h(x) + \mathcal{L}(x,s;y,t) \le M_U^-(x;t) := C_\varepsilon + 2\gamma x - \frac{x^2}{t - s} + F(x;t).
\end{equation}
We start by using these bounds to show that when $t > s$ is sufficiently large, \begin{equation} \label{max_right} \sup_{x \ge 0}\{\mathfrak h(x) + \mathcal{L}(x,s;y,t)\} > \sup_{x \le 0}\{\mathfrak h(x) + \mathcal{L}(x,s;y,t)\}, \qquad\forall y \in [-a,a]. \end{equation} so that all maximizers of $\mathfrak h(x) + \mathcal{L}(x,s;y,t)$ over $x \in \mathbb{R}$ are nonnegative. First, we observe that for $t$ large enough so that $\xi(t - s) \ge R_\varepsilon$, for all $y \in [-a,a]$, \begin{equation}\label{Mlm} \begin{aligned}
\quad \;\sup_{x \ge 0}\{\mathfrak h(x) + \mathcal{L}(x,s;y,t)\}\ge M_L(\xi (t-s),t)
= (\xi^2 - \xi \varepsilon)(t - s) + o(t).
\end{aligned} \end{equation}
Next, using~\eqref{ub3'}, we obtain \begin{align*} &\quad \;\sup_{x \le 0}\{\mathfrak h(x) + \mathcal{L}(x,s;y,t) \} \\ &\le \sup_{x \le 0}\{2\gamma x - \frac{x^2}{t-s} + C_\varepsilon + F(x;t)\} \\ &\le\sup_{x \le 0}\{2\gamma x - 2\varepsilon x - \frac{x^2}{t-s}\}+ \sup_{x \le 0}\{2\varepsilon x + F(x;t) \} + C_\varepsilon = (\gamma - \varepsilon)^2(t - s) + o(t). \end{align*} To justify the last equality, we note that the first supremum on the RHS above is equal to $(\gamma - \varepsilon)^2(t - s)$, while for large enough $t$, Lemma~\ref{lem:dFbd} implies that the function inside the second supremum is increasing, so the maximum is achieved at $x = 0$. Note that $F(0;t) = o(t)$. Since $\gamma > -\xi$, by choosing $\varepsilon > 0$ small enough, a comparison with~\eqref{Mlm} verifies~\eqref{max_right} for sufficiently large $t$.
Next, we find (approximately) where the maximizers of $M_{U}$ on $x\geq 0$ are. The function $M_U(x;t)$ has leading order $-(x - y)^2/(t-s)$, so maximizers exist on $x \ge 0$. A quick computation shows that $M_U(0;t) = o(t)$, so by~\eqref{Mlm}, all maximizers are strictly positive for sufficiently large $t$. Hence, for any maximizer $x$, $M_U'(x;t) = 0$.
First, note that \begin{equation}
M'_{U}(x;t)=2(\xi+\varepsilon)-\frac{2x}{t - s}+F'(x;t). \end{equation} By Lemma~\ref{lem:dFbd}, for sufficiently large $t > s$, $0 < F'(x;t) < \varepsilon$ for all $x > 0$. Then, for such $t$, and $y \in [-a,a]$ \begin{equation}
\{x \ge 0 :M'_U(x;t)=0\}\subseteq \big((\xi + \varepsilon) (t - s), (\xi+2\varepsilon)(t - s)\big). \end{equation} and that \begin{equation}\label{d}
\begin{aligned}
&M'_U(x;t)<0 \quad \forall x\geq (\xi + 2\varepsilon)(t - s)\\
& M'_U(x;t)>0 \quad \forall x\leq (\xi + \varepsilon) (t - s).
\end{aligned} \end{equation} Next we consider the supremum of $x\mapsto M_U$ outside the interval \[ I_{\varepsilon}:=[(\xi-2\sqrt{\xi \varepsilon})(t - s),(\xi+2\sqrt{\xi\varepsilon})(t - s)]. \] By choosing $\varepsilon > 0$ small enough, then for $t$ large enough (depending on $\varepsilon,a$), \begin{equation}\label{sub}
\big((\xi - \varepsilon) (t - s),(\xi+2\varepsilon)(t - s)\big) \subseteq I_{\varepsilon}. \end{equation}
From \eqref{d} and \eqref{sub}, we see that to determine the supremum of $M_U$ outside $I_{\varepsilon}$ it is enough to take the maximum of $M_U$ at the endpoints of $I_{\varepsilon}$. Plugging the end points of the interval on the right-hand side of \eqref{sub} in $M_U$, for small enough $\varepsilon$, \begin{equation}\label{sup1}
\begin{aligned}
&M_U((\xi-2\sqrt{\xi\varepsilon})(t - s);t)=\big[\xi^2 -3\xi\varepsilon - 2\xi^{1/2}\varepsilon^{3/2}\big](t - s) + o(t), \qquad\text{and}\\
&M_U( (\xi+2\sqrt{\xi\varepsilon})(t -s);t)=\big[\xi^2-3\xi\varepsilon+2\xi^{1/2}\varepsilon^{3/2}\big](t - s) + o(t).
\end{aligned} \end{equation}
It follows that for $\varepsilon$ small enough and $t>t_0(\varepsilon,a,s,C_{\text{DL}},C_\varepsilon)$, \begin{equation}\label{Mum}
\sup_{x\notin I_{\varepsilon}} M_U(x;t)\leq \max\{M_U((\xi-2\sqrt{\xi\varepsilon})(t-s)),M_U((\xi+2\sqrt{\xi\varepsilon})(t -s))\}\leq (\xi^2-2\xi\varepsilon)(t-s). \end{equation} From \eqref{ub1'},~\eqref{max_right}, and~\eqref{sub}, for sufficiently large $t$, \begin{equation}\label{sub2}
\begin{aligned}
&\big\{\sup_{x\notin I_{\varepsilon}} M_U(x;t)<\sup_{x\in I_{\varepsilon}} M_L(x;t)\big\}\\
&\subseteq\big\{\sup_{x\notin I_{\varepsilon}} \mathfrak h(x)+\mathcal{L}(x,s;y,t)<\sup_{x\in I_{\varepsilon}} \mathfrak h(x)+\mathcal{L}(x,s;y,t)\quad \forall y\in[-a,a]\big\}\subseteq\{Z_\mathfrak h^{a,s,t}\subset I_{\varepsilon}\}.
\end{aligned} \end{equation} Equations~\eqref{Mlm} and~\eqref{Mum} imply that, almost surely, there is a random $t_0 = t_0(\varepsilon,a,s) > 0$ so that for $t > t_0$, \begin{equation}
\sup_{x\notin I_{\varepsilon}} M_U(x;t)<\sup_{x\in I_{\varepsilon}} M_L(x;t), \end{equation} so by replacing $\varepsilon$ with $\varepsilon^2/(4\xi)$, the inclusion~\eqref{sub2} completes the proof in the case $\xi > 0$.
Now we prove the separate $\xi = 0$ case. This time, we set $I_{\varepsilon}=[-2\sqrt{\varepsilon}\hspace{0.9pt} (t-s), 2\sqrt{\varepsilon}\hspace{0.9pt} (t-s)]$. Fix a point $x^\star \in \mathbb{R}$ so that $\mathfrak h(x^\star) > -\infty$. Then, for $t$ large enough, $x^\star \in I_{\varepsilon}$, and \begin{equation} \label{ub4'} \sup_{x \in I_{\varepsilon}} \mathfrak h(x) + \mathcal{L}(x,s;y,t) \ge \mathfrak h(x^\star) + \mathcal{L}(x^\star,s;y,t)\ge \mathfrak h(x^\star) - \frac{(x^\star - y)^2}{t - s} -F(x^\star;t) = o(t). \end{equation}
By the assumption~\eqref{eqn:drift_assumptions} and upper semi-continuity, following a similar argument as in the previous case, for all $y \in [-a,a]$ and $x \in \mathbb{R}$, \[ \begin{aligned} \mathfrak h(x)+\mathcal{L}(x,s;y,t)\leq M_{U}(x;t) &:= -\frac{x^2}{t-s}+\varepsilon x+C_\varepsilon+F(x;t) \end{aligned} \] A similar proof as before shows that \[ \sup_{x \notin I_{\varepsilon}} M_U(x;t) \le \max\{M_U(-2\sqrt \varepsilon(t - s)),M_U(2\sqrt \varepsilon (t - s))\} = -3\varepsilon (t-s) + o(t), \] and comparison with~\eqref{ub4'} completes the proof. \end{proof}
We believe the following lemma is well-known, but we do not have a precise reference. In particular, \cite{KPZfixed} states that the KPZ fixed point preserves the space of linearly bounded continuous functions and gives regularity estimates for the KPZ fixed point.
\begin{lemma} \label{lem:max_restrict} Let $\mathfrak h \in \operatorname{UC}$ be initial data for the KPZ fixed point sampled at time $s \in \mathbb{R}$. For all $t > s$, and $y \in \mathbb{R}$, set \begin{equation} \label{KPZs} h_t(y;\mathfrak h) = \sup_{x \in \mathbb{R}}\{\mathfrak h(x) + \mathcal{L}(x,s;y,t)\}. \end{equation} Then, on the full-probability event of Lemma~\ref{lem:Landscape_global_bound}, the following hold. \begin{enumerate}[label=\rm(\roman{*}), ref=\rm(\roman{*})] \itemsep=3pt
\item \label{itm:KPZcont} If $\mathfrak h$ is continuous, then $(t,y) \mapsto h_t(y;\mathfrak h)$ is continuous.
\item \label{itm:KPZ_unif_line} For each compact set $K \subseteq \mathbb{R}_{> s}$, there exist constants $A = A(a,b,K)$ and $B = B(a,b,K)$ such that for all $t \in K$ and all $y \in \mathbb{R}$, $h_t(y;\mathfrak h) \le A + B|y|$. If we assume that $\mathfrak h(x) \ge -a - b|x|$ for some constants $a,b > 0$, then we also obtain the bound $h_t(y;\mathfrak h) \ge -A - B|y|$ for all $t \in K$ and $y \in \mathbb{R}$ \rm{(}the upper bound $\mathfrak h(x) \le a + b|x|$ is assumed in the definition of $\operatorname{UC}$\rm{)}.
\item \label{itm:KPZrestrict}
If there exists $a,b > 0$ so that $|\mathfrak h(x)| \le a + b|x|$ for all $x$, then for any $t > s$, $\delta > 0$, there exists $Y = Y(t,\delta) > 0$ so that when $|y| \ge Y$, all maximizers of $\mathfrak h(x) + \mathcal{L}(x,s;y,t)$ over $x \in \mathbb{R}$ lie in the interval $(y - |y|^{1/2 + \delta},y + |y|^{1/2+ \delta})$ \end{enumerate} \end{lemma} \begin{proof}
\noindent \textbf{Item~\ref{itm:KPZcont}:}
By definition of $\operatorname{UC}$, there exist constants $a,b > 0$ so that $\mathfrak h(x) \le a + b|x|$ for all $x \in \mathbb{R}$. Combining this with the bounds on the directed landscape in Lemma~\ref{lem:Landscape_global_bound} implies that when $(y,t)$ varies over a compact set, the supremum in~\eqref{KPZs} can be taken uniformly over a common compact set. Then, continuity of $\mathfrak h$ and $\mathcal{L}$ gives the continuity of $h$.
\noindent \textbf{Item~\ref{itm:KPZ_unif_line}:} By Lemma~\ref{lem:Landscape_global_bound}, for each $x \in \mathbb{R}$, \begin{equation} \label{414}
\mathfrak h(x) + \mathcal{L}(x,s;y,t) \le a + b|x| -\hspace{1.1pt} \frac{(x - y)^2}{t - s} + C(t - s)^{1/3} \log^{2}\Bigl(\frac{2\sqrt{x^2 + y^2 + s^2 + t^2} + 4}{(t - s)\wedge 1}\Bigr). \end{equation}
The $\log$ term in~\eqref{414} can be bounded by an affine function uniformly for $t \in K$ and $x,y \in \mathbb{R}$. Then, for constants $a_1 = a_1(a,b,K), b_1 = b_1(a,b,K)$, and $b_2 = b_2(a,b,K)$, \begin{align*}
h_t(y;\mathfrak h)
&\le \sup_{x \in \mathbb{R}}\Bigl\{-\frac{(x - y)^2}{t - s} + a_1 + b_1|x| + b_2|y| \Bigr\} \\
&\le \sup_{x \in \mathbb{R}}\Bigl\{-\frac{(x - y)^2}{t - s} + a_1 + b_1x + b_2|y| \Bigr\} \vee \Bigl\{-\frac{(x - y)^2}{t - s} + a_1 - b_1 x + b_2|y| \Bigr\}\\
&\le a_1 + b_2|y| + \Bigl(b_1 y +\frac{b_1^2 t}{4}\Bigr) \vee \Bigl(-b_1y + \frac{b_1^2t}{4}\Bigr), \end{align*} giving a linear bound, uniformly for $t \in K$.
The lower bound is simpler: By Lemma~\ref{lem:Landscape_global_bound} and the assumption $\mathfrak h(x) \ge -a - b|x|$ for all $x \in \mathbb{R}$, \begin{equation} \label{413} \begin{aligned}
h_t(y;\mathfrak h) &= \sup_{x \in \mathbb{R}}\{\mathfrak h(x) + \mathcal{L}(x,s;y,t)\} \\ &\ge \mathfrak h(y) + \mathcal{L}(y,s;y,t) \ge -a - b|y| - C(t - s)^{1/3}\log^2\Bigl(\frac{2\sqrt{2y^2 + t^2 + s^2} + 4}{(t - s)\wedge 1}\Bigr), \end{aligned} \end{equation} and again, the $\log$ term can be bounded by an affine function, uniformly for $t \in K$ and $y \in \mathbb{R}$.
\noindent \textbf{Item~\ref{itm:KPZrestrict}:} By comparing~\eqref{414} to~\eqref{413}, when $|y|$ is sufficiently large, for $x \notin (y - |y|^{1/2 + \delta},y + |y|^{1/2 + \delta})$,~\eqref{414} is strictly less than $h_t(y;\mathfrak h)$, so maximizers cannot lie outside the interval $(y - |y|^{1/2 + \delta},y + |y|^{1/2 + \delta})$.
\end{proof}
\begin{lemma} \label{lem:KPZ_preserve_lim} The following holds simultaneously for all initial data and all $t > s$ on the event of probability one from Lemma~\ref{lem:Landscape_global_bound}. Let $\mathfrak h \in \operatorname{UC}$ be initial data for the KPZ fixed point, sampled at time $s$.
For $t > s$, let $h_t$ be defined as in~\eqref{KPZs}. Then, simultaneously for all $t > s$, \begin{equation} \label{hliminfbd1} \liminf_{x \to +\infty} \frac{h_t(x;\mathfrak h)}{x} \ge \liminf_{x \to + \infty} \frac{\mathfrak h(x)}{x},\qquad\text{and}\qquad \limsup_{x \to - \infty} \frac{h_t(x;\mathfrak h)}{x} \le \limsup_{x \to -\infty} \frac{\mathfrak h(x)}{x}. \end{equation}
Furthermore, assuming that $\mathfrak h:\mathbb{R} \to \mathbb{R}$ is \textbf{continuous} and satisfies \begin{equation} \label{liminfsupfinite} \liminf_{x \to \pm \infty} \frac{\mathfrak h(x)}{x} > -\infty \qquad\text{and}\qquad \limsup_{x \to \pm \infty} \frac{\mathfrak h(x)}{x} < +\infty, \end{equation} then also \begin{equation} \label{hliminfbd2} \limsup_{x \to + \infty} \frac{h_t(x;\mathfrak h)}{x} \le \limsup_{x \to +\infty} \frac{\mathfrak h(x)}{x},\qquad\text{and}\qquad \liminf_{x \to -\infty} \frac{h_t(x;\mathfrak h)}{x} \ge \liminf_{x \to - \infty} \frac{\mathfrak h(x)}{x}. \end{equation}
In particular, for \textbf{continuous} initial data $\mathfrak h$ satisfying~\eqref{liminfsupfinite}, if either \rm{(}or both\rm{)} of the limits $ \lim_{x \to \pm \infty} \frac{\mathfrak h(x)}{x} $ exist \rm{(}potentially with different limits on each side\rm{)}, then for $t > s$, \[ \lim_{x \to \pm \infty} \frac{h_t(x;\mathfrak h)}{x} = \lim_{x \to \pm \infty} \frac{\mathfrak h(x)}{x}. \] \end{lemma} \begin{proof} We start with~\eqref{hliminfbd1} by proving the first inequality, and the other is analogous. If $\liminf_{x \to +\infty} \frac{\mathfrak h(x)}{x} = -\infty$, there is nothing to show. Otherwise, let $\xi_1 \in \mathbb{R}$ be an arbitrary number less than $\liminf_{x \to +\infty} \frac{\mathfrak h(x)}{x}$. Let $y$ be sufficiently large and positive so that $\mathfrak h(y)\ge \xi_1 y$. Then, using Lemma~\ref{lem:Landscape_global_bound}, for such sufficiently large positive $y$, \begin{equation} \label{105} \begin{aligned} \sup_{x \in \mathbb{R}}\{\mathfrak h(x) + \mathcal{L}(x,s;y,t)\} \ge \mathfrak h(y) + \mathcal{L}(y,s;y,t) \ge \xi_1 y - C(t-s)^{1/3} \log^{2}\Bigl(\frac{2\sqrt{2y^2 + t^2 + s^2} + 4}{(t - s)\wedge 1}\Bigr), \end{aligned} \end{equation} where $C$ is a constant. Therefore, $ \liminf_{y \to \infty} \frac{h_t(y;\mathfrak h)}{y} \ge \xi_1, $ but this is true for all $\xi_1 < \liminf_{x \to +\infty} \frac{\mathfrak h(x)}{x}$, so \[ \liminf_{y \to \infty} \frac{h_t(y;\mathfrak h)}{y} \ge \liminf_{x \to +\infty} \frac{\mathfrak h(x)}{x}. \]
Next, we turn to proving~\eqref{hliminfbd2}. Again, we prove the first inequality and the second follows analogously. Set $\xi_2 = \limsup_{x \to +\infty} \frac{\mathfrak h(x)}{x}$ and let $\varepsilon > 0$. By continuity, the assumption~\eqref{liminfsupfinite} on the asymptotics of $\mathfrak h$ implies there exist constants $a,b > 0$ so that $|\mathfrak h(x)| \le a + b|x|$ for all $x \in \mathbb{R}$. Lemma~\ref{lem:max_restrict}\ref{itm:KPZrestrict} implies that for $\varepsilon > 0$ and sufficiently large $y > 0$, \begin{align*} &\quad\; \sup_{x \in \mathbb{R}}\{\mathfrak h(x) + \mathcal{L}(x,s;y,t)\} \\ &\le \sup_{x \in (y - y^{2/3},y + y^{2/3})}\Big\{(\xi_2 + \varepsilon)x -\frac{(x - y)^2}{t - s} + C(t - s)^{1/3} \log^{2}\Bigl(\frac{2\sqrt{x^2 + y^2 + t^2 + s^2} + 4}{(t - s)\wedge 1}\Bigr) \Big\} \\ &\le \sup_{x \in (y - y^{2/3},y + y^{2/3})}\Big\{(\xi_2 + \varepsilon)x-\frac{(x - y)^2}{t - s } + \varepsilon(x + y) \Big\} \\ &= (\xi_2 + 3\varepsilon)y + C(\varepsilon,s,t,\xi_2), \end{align*}
and so \[ \limsup_{y \to \infty} \frac{h_t(y;\mathfrak h)}{y} \le \xi_2 + 3\varepsilon. \qedhere \] \end{proof}
\subsection{Geodesics in the directed landscape}
We start by citing some results from~\cite{Bates-Ganguly-Hammond-22} and~\cite{Dauvergne-Sarkar-Virag-2020}. \begin{lemma}[\cite{Bates-Ganguly-Hammond-22}, Theorem 1.18] \label{lm:BGH_disj}
There exists a single event of full probability on which, for any compact set $K \subseteq \Rup$, there is a random $\varepsilon > 0$ such that the following holds. If $v_1 = (x,s;y,u) \in K$ and $v_2 = (z,s;w,u) \in K$ admit geodesics $\gamma_1$ and $\gamma_2$ satisfying $|\gamma_1(t) - \gamma_2(t)| \le \varepsilon$ for all $t \in [s,u]$, then $\gamma_1$ and $\gamma_2$ are not disjoint, i.e., $\gamma_1(t) = \gamma_2(t)$ for some $t \in [s,u]$. \end{lemma}
Let $g$ be a geodesic from $(x,s)$ to $(y,u)$. Define the graph of this geodesic as \[ \mathcal G g := \{(g(t),t): t \in [s,u]\}. \]
\begin{lemma}[\cite{Dauvergne-Sarkar-Virag-2020}, Lemma 3.1] \label{lem:precompact} The following holds on a single event of full probability. Let $(p_n;q_n) \to (p,q) = (x,s;y,t) \in \Rup$, and let $g_n$ be any sequence of geodesics from $p_n$ to $q_n$. Then, the sequence of graphs $\mathcal G g_n$ is precompact in the Hausdorff metric, and any subsequential limit of $\mathcal G g_n$ is the graph of a geodesic from $p$ to $q$. \end{lemma}
\begin{lemma}[\cite{Dauvergne-Sarkar-Virag-2020}, Lemma 3.3] \label{lem:overlap} The following holds on a single event of full probability. Let $(p_n;q_n) = (x_n,s_n;y_n,u_n) \in \Rup \to (p;q) = (x,s;y,u) \in \Rup$, and let $g_n$ be any sequence of geodesics from $p_n$ to $q_n$. Suppose that either \begin{enumerate} [label=\rm(\roman{*}), ref=\rm(\roman{*})] \itemsep=3pt
\item \label{uniqn} For all $n$, $g_n$ is the unique geodesic from $(x_n,s_n)$ to $(y_n,u_n)$ and $\mathcal G g_n \to \mathcal G g$ for some geodesic $g$ from $p$ to $q$, or
\item \label{uniqueg} There is a unique geodesic $g$ from $p$ to $q$.
\end{enumerate}
Then, the \textbf{overlap}
\[
O(g_n,g) := \{t \in [s_n,u_n]\cap [s,u]: g_n(t) = g(t)\}
\]
is an interval for all $n$ whose endpoints converge to $s$ and $u$. \end{lemma} \begin{remark} We note that condition~\ref{uniqn} is slightly different from that stated in~\cite{Dauvergne-Sarkar-Virag-2020}. There, it is assumed instead that $(x_n,s_n;y_n,u_n) \in \mathbb{Q}^4 \cap \Rup$ for all $n$. The only use of this requirement in the proof is to ensure that there is a unique geodesic from $(x_n,s_n)$ to $(y_n,u_n)$ for all $n$, so there is no additional justification needed for the statement we use here. \end{remark}
\begin{lemma} \label{lem:geod_pp} On the intersection of the full probability events from Lemmas~\ref{lem:precompact} and~\ref{lem:overlap}, the following holds. For all ordered triples $s < t < u$ and compact sets $K \subseteq \mathbb{R}$, the set \begin{equation} \label{distinct} \{g(t): g \text{ is the unique geodesic between }(x,s) \text{ and }(y,u) \text{ for some } x \in K, y \in K\} \end{equation} is finite. \end{lemma} Lemma~\ref{lem:geod_pp} is known. Its derivation from Lemma~\ref{lm:BGH_disj} and some results of \cite{Dauvergne-Sarkar-Virag-2020} are shown in~\cite{Busa-Sepp-Sore-22arXiv}. Lemma 3.12 in \cite{Ganguly-Zhang-2022a} (posted after our first version) provides a stronger quantitative statement, but we do not need it for our purposes. This stronger estimate can be traced back to the work of Basu, Hoffman, and Sly~\cite{SlyNonexistenceOB} using integrable methods in exponential LPP. \begin{proof}[Proof of Lemma~\ref{lem:geod_pp}] Assume, without loss of generality, that $K$ is a closed interval $[a,b]$. We observe that by planarity, all geodesics from $(x,s)$ to $(y,u)$ for $x,y \in K$ lie between the leftmost geodesic from $(a,s)$ to $(a,u)$ and the rightmost geodesic from $(b,s)$ to $(b,u)$. Hence, the set~\eqref{distinct} is contained in a compact set, and it suffices to show that the set has no limit points.
Assume, to the contrary, that there exists a point $(\hat x,s;\hat y,u) \in (K \times \{s\}\times K \times \{u\}) \cap \Rup$ with unique geodesic $\hat g$ such that there exists a sequence of points $x_n,y_n \in K$ such that for all $n$, the geodesic $g_n$ from $(x_n,s)$ to $(y_n,u)$ is unique and so that $g_n(t) \to \hat g(t)$ but $g_n(t) \neq \hat g(t)$ for all $n$.
By compactness, there exists a convergent subsequence $(x_{n_k},y_{n_k}) \to (x,y)$. By Lemma~\ref{lem:precompact}, there exists a further subsequence $(x_{n_{k_\ell}},y_{n_{k_\ell}})$ such that the geodesic graphs $\mathcal{G} g_{n_{k_\ell}}$ converge to the graph of some geodesic $\mathcal{G} g$ from $(x,s)$ to $(y,u)$ in the Hausdorff metric. Since $g_n(t) \to \hat g(t)$, we have $g(t) = \hat g(t)$. By Lemma~\ref{lem:overlap}, the overlap $O(g_{n_{k_\ell}},g)$ is an interval whose endpoints converge to $s$ and $u$, so $g_{n_{k_\ell}}(t) = g(t) = \hat g(t)$ for sufficiently large $\ell$, contradicting the definition of the sequence $g_n$.
\end{proof}
\section{Exponential last-passage percolation} \label{sec:LPP} \subsection{Discrete last-passage percolation} \label{sec:LPP_bd_queue} Let $\{Y_{\mathbf x}\}_{\mathbf x \in \mathbb{Z}^2}$ be a collection of nonnegative i.i.d random variables, each associated to a vertex on the integer lattice. For $\mathbf x \le \mathbf y \in \mathbb{Z} \times \mathbb{Z}$, define the last-passage time as \begin{equation}\label{d100}
d(\mathbf x,\mathbf y) = \sup_{\mathbf x_\centerdot \in \Pi_{\mathbf x,\mathbf y}} \sum_{k = 0}^{|\mathbf y - \mathbf x|_1} Y_{\mathbf x_k}, \end{equation} where $\Pi_{\mathbf x, \mathbf y}$ is the set of up-right paths $\{\mathbf x_k\}_{k = 0}^{n}$ that satisfy $\mathbf x_0 = \mathbf x,\mathbf x_{n} = \mathbf y$, and $\mathbf x_k - \mathbf x_{k - 1} \in \{\mathbf e_1,\mathbf e_2\}$. A maximizing path is called a geodesic. We call this model discrete last-passage percolation (LPP). The most tractable case of discrete LPP is given when $Y_{\mathbf x} \sim \operatorname{Exp}(1)$, and we refer to this model as the exponential corner growth model or CGM. We will consider this model specifically for the remainder of this appendix.
\subsection{Stationary LPP in the quadrant} \label{sec:LPP_quad} Choose $\mathbf x \in \mathbb{Z}^2$ and consider the quadrant $\mathbf x + \mathbb{Z}_{\ge 0}^2$. Fix a parameter $\rho \in (0,1)$. Let $\{Y_{\mathbf z}: \mathbf z \in \mathbf x + \mathbb{Z}_{> 0}^2\}$ be i.i.d.\ $\operatorname{Exp}(1)$ weights in the bulk of the quadrant, and let $\{I_{\mathbf x + k\mathbf e_1},J_{\mathbf x + \ell \mathbf e_2}: k,\ell \in \mathbb{Z}_{>0}\}$ be mutually independent boundary weights such that $I_{\mathbf x + k\mathbf e_1} \sim \operatorname{Exp}(\rho)$ and $J_{\mathbf x + \ell \mathbf e_2} \sim \operatorname{Exp}(1 - \rho)$. These weights are defined under a probability measure $\mathbb P^\rho$. We define the increment-stationary process $d_{\mathbf x}^\rho$ as follows. First, on the boundary, $d_{\mathbf x}^\rho(\mathbf x) = 0$, and for $k,\ell \ge 1$, $d_{\mathbf x}^\rho(\mathbf x + k \mathbf e_1) = \sum_{i = 1}^k I_{\mathbf x + i\mathbf e_1}$ and $d_{\mathbf x}^\rho(\mathbf x + \ell \mathbf e_2) = \sum_{j = 1}^\ell J_{\mathbf x + j \mathbf e_2}$. In the bulk, for $\mathbf y = \mathbf x + (m,n) \in \mathbf x + \mathbb{Z}_{>0}^2$, \begin{equation} \label{eqn:stat_LPP} d_{\mathbf x}^\rho(\mathbf y) = \max_{1 \le k \le m}\Biggl\{ \Biggl(\sum_{i = 1}^k I_{\mathbf x + i\mathbf e_1}\Biggr) + d(\mathbf x + k\mathbf e_1 + \mathbf e_2,\mathbf y)\Biggr\} \bigvee \max_{1 \le \ell \le n}\Biggl\{\Biggl(\sum_{j = 1}^\ell J_{\mathbf x + j \mathbf e_2}\Biggr) + d(\mathbf x + \ell \mathbf e_2 + \mathbf e_1,\mathbf y)\Biggr\} \end{equation} In this model, we can also define geodesics from $\mathbf x$ to $\mathbf y \in \mathbf x + \mathbb{Z}_{\ge 0}^2$ that travel for some time along the boundary and then enter the bulk. Because exponential random variables have continuous distribution, the maximizing paths for both bulk LPP and stationary LPP are almost surely unique. 
For $\mathbf y \in \mathbf x + \mathbb{Z}_{>0}^2$, if the unique geodesic for the stationary model enters the bulk from the horizontal boundary, define $\tau_1^{\mathbf x}(\mathbf y)$ as the unique value $k$ that maximizes in~\eqref{eqn:stat_LPP}. Otherwise, define $\tau_1^{\mathbf x}(\mathbf y) = 0$. Similarly, define $\tau_{2}^{\mathbf x}(\mathbf y)$ as the exit location from the vertical boundary, or $0$ if the geodesic exits from the horizontal boundary.
This model is increment-stationary in the sense that, for any down-right path $\{\mathbf y_i\}$ in $\mathbf x + \mathbb{Z}_{\ge 0}^2$, the increments $d^\rho_{\mathbf x}(\mathbf y_{i + 1}) - d^\rho_{\mathbf x}(\mathbf y_i)$ are mutually independent, and for $\mathbf y \in \mathbf x + \mathbb{Z}_{>0} \times \mathbb{Z}_{\ge 0}$ and $\mathbf z \in \mathbf x + \mathbb{Z}_{\ge 0} \times \mathbb{Z}_{> 0}$, \[ I_{\mathbf y}^{\mathbf x} := d_{\mathbf x}^\rho(\mathbf y)- d_{\mathbf x}^\rho(\mathbf y - \mathbf e_1) \sim \operatorname{Exp}(\rho)\qquad\text{and}\qquad J_{\mathbf z}^{\mathbf x} := d_{\mathbf x}^\rho(\mathbf z) - d_{\mathbf x}^{\rho}(\mathbf z - \mathbf e_2) \sim \operatorname{Exp}(1 - \rho). \] See Theorem 3.1 in~\cite{Sepp_lecture_notes} for a proof.
\subsection{LPP in the half-plane with boundary conditions and queues} We also define the last-passage model in the upper half-plane with a horizontal boundary. Let $h = (h(k))_{k \in \mathbb{Z}}$ be a real sequence. For $m \in \mathbb{Z}$ let $d^h(m,0) = h(m)$, and for $n > 0$ \begin{equation} \label{eqn:LPP_bd} d^h(m,n) = \sup_{-\infty < k \le m}\{h(k) + d((k,1),(m,n)) \}. \end{equation} We assume that $h$ is such that the supremum is almost surely finite and achieved at a finite $k$. We define the \textit{exit point} $Z^h(m,n)$ as \begin{equation} \label{eqn:exit_pt} Z^h(m,n) = \max\{k \in \mathbb{Z}:h(k) + d((k,1),(m,n)) = d^h(m,n) \}. \end{equation} Geodesics in this model are defined as follows: the path consists of the backwards-infinite horizontal ray $\{(k,0): k \le Z := Z^h(m,n)\}$, an upward step from $(Z,0)$ to $(Z,1)$, and then the LPP path in the bulk from $(Z,1)$ to $(m,n)$.
The half-plane model with boundary satisfies superadditivity. That is, for $r \in \{1,2\}$, $\mathbf x \in \mathbb{Z} \times \mathbb{Z}_{\ge 0}$ and $\mathbf x + \mathbf e_r \le \mathbf z$ coordinate-wise, \begin{equation} \label{205} d^h(\mathbf z) \ge d^h(\mathbf x) + d^h(\mathbf x + \mathbf e_r,\mathbf z). \end{equation}
The model with boundary condition can be constructed from queuing mappings, which we now define. Let $I = (I_k)_{k \in \mathbb{Z}}$ and $\omega = (\omega_k)_{k \in \mathbb{Z}}$ be sequences of nonnegative numbers such that \[ \lim_{m \to -\infty} \sum_{i = m}^0 (\omega_i - I_{i + 1}) = -\infty. \] Let $F = (F_k)_{k \in \mathbb{Z}}$ be a function on $\mathbb{Z}$ satisfying $I_k = F_k - F_{k - 1}$. Define the output sequence $\widetilde F = (\widetilde F_\ell)_{\ell \in \mathbb{Z}}$ by \begin{equation} \label{202} \widetilde F_\ell = \sup_{-\infty < k \le \ell} \Biggl\{F_k + \sum_{i = k}^\ell \omega_i \Biggr\},\qquad \ell \in \mathbb{Z}. \end{equation} Now, define the sequences $\widetilde I = (\widetilde I_\ell)_{\ell \in \mathbb{Z}}$ and $J = (J_k)_{k \in \mathbb{Z}}$ by \[ \widetilde I_\ell = \widetilde F_\ell - \widetilde F_{\ell - 1},\qquad \text{and}\qquad J_k = \widetilde F_k - F_k. \] In queuing terms, $I_k$ is the time between the arrivals of customers $k - 1$ and $k$, $\omega_k$ is the service time of customer $k$, $\widetilde I_\ell$ is the interdeparture time between customers $\ell - 1$ and $\ell$, and $J_k$ is the sojourn time of customer $k$. We use the mappings $D$ and $S$ to describe this queuing process. That is, \[ \widetilde I = D(\omega,I),\qquad\text{and}\qquad J = S(\omega,I). \]
Exponentially distributed arrival times are invariant for the queue with exponential service times. This is made precise in the following lemma. \begin{lemma}[\cite{Fan-Seppalainen-20}, Lemma B.2] \label{lem:output_of_queue} Let $0 < \rho < \tau$. Let $(I_k)_{k \in \mathbb{Z}}$ and $\{\omega_j\}_{j \in \mathbb{Z}}$ be mutually independent random variables with $I_k \sim \operatorname{Exp}(\rho)$ and $\omega_j \sim \operatorname{Exp}(\tau)$. Let $\widetilde I = D(\omega,I)$ and $J = S(\omega,I)$. Then, $\{\widetilde I_j\}_{j \in \mathbb{Z}}$ is an i.i.d. sequence of $\operatorname{Exp}(\rho)$ random variables, and for each $k \in \mathbb{Z}$, $\{\widetilde I_j\}_{j \le k}$ and $J_k$ are mutually independent with $J_k \sim \operatorname{Exp}(\tau - \rho)$. \end{lemma}
The following lemma shows how to construct the LPP model in the half-plane with boundary from the queuing mappings. \begin{lemma} \label{lem:D_and_LPP_bd} Consider last-passage percolation for the environment $\{ Y_{\mathbf x}\}_{\mathbf x \in \mathbb{Z}^2}$. For $n \ge 1$, let $Y^n = \{ Y_{m,n}\}_{m \in \mathbb{Z}}$ be the weights along the horizontal level $n$. Let $h$ be a function on $\mathbb{Z}$ that denotes initial data for the LPP model with boundary. Define the sequence $I^0 = (I^0_i)_{i \in \mathbb{Z}}$ by $I^0_i = h(i) - h(i - 1)$. Let $I^1 = D( Y^1,I^0)$ and $J^1 = S(Y^1,I^0)$, and for $n > 1$, let $I^n = D(Y^n,I^{n - 1})$ and $J^n = S(Y^n,I^{n - 1})$. Then, for each $n \ge 1$ and $m \in \mathbb{Z}$, \begin{equation} \label{203} I_m^n = d^h(m,n) - d^h(m - 1,n),\qquad\text{and}\qquad J_m^n = d^h(m,n) - d^h(m,n - 1). \end{equation} \end{lemma} \begin{proof} For $m \in \mathbb{Z}$, \[ d^h(m,1) = \sup_{-\infty < k \le m} \{h(k) + d((k,1),(m,1))\} = \sup_{-\infty < k \le m}\Bigl\{h(k) + \sum_{i = k}^m Y_{i,1}\Bigr\}, \] and so, by~\eqref{202} and the definitions of the mappings $D$ and $S$, the identities~\eqref{203} hold for $n = 1$. Now, assume that the statements hold for some $n \ge 1$. Then, $(d^h(m,n))_{m \in \mathbb{Z}}$ is a function whose increments are given by $I^n$. Hence, by definition of $D$, for $m \in \mathbb{Z}$, \begin{align*} I_m^{n + 1} &= [D(Y^{n + 1},I^n)]_m = \sup_{-\infty < k \le m}\Bigl\{d^h(k,n) + \sum_{i = k}^m Y_{i,n + 1}\Bigr\} - \sup_{-\infty < k \le m - 1}\Bigl\{d^h(k,n) + \sum_{i = k}^{m - 1} Y_{i,n + 1}\Bigr\} \\ &= \sup_{-\infty <\ell \le k \le m} \Bigl\{h(\ell) + d((\ell,1),(k,n)) +\sum_{i = k}^m Y_{i,n + 1} \Bigr\} \\ &\qquad - \sup_{-\infty <\ell \le k \le m - 1} \Bigl\{h(\ell) + d((\ell,1),(k,n)) +\sum_{i = k}^{m - 1} Y_{i,n + 1} \Bigr\} \\ &= d^h(m,n + 1) - d^h(m - 1,n + 1). \end{align*} The last equality is the dynamic programming principle. Similarly, \begin{align*}
J_m^{n + 1} &= [S(Y^{n + 1},I^n)]_m = \sup_{-\infty < k \le m}\Bigl\{d^h(k,n) + \sum_{i = k}^m Y_{i,n + 1}\Bigr\} - d^h(m,n) \\
&=d^h(m,n + 1) - d^h(m,n). \qedhere \end{align*} \end{proof}
To construct the stationary boundary condition, fix a parameter $\rho \in (0,1)$, and let $h$ be defined so that $h(0) = 0$ and $ \{h(k) - h(k - 1)\}_{k \in \mathbb{Z}}$ is a sequence of i.i.d. $\operatorname{Exp}(\rho)$ random variables, independent of the i.i.d. $\operatorname{Exp}(1)$ bulk variables $\{Y_{\mathbf x}\}_{\mathbf x \in \mathbb{Z} \times \mathbb{Z}_{>0}}$. Let $\widehat{\mathbb P}^\rho$ be the probability measure of these random variables. Abusing notation, we denote LPP in the half-plane with this initial data simply as $d^\rho$.
For $\mathbf y \in \mathbb{Z} \times \mathbb{Z}_{\ge 0}$ and $\mathbf z \in \mathbb{Z} \times \mathbb{Z}_{> 0}$, we define \begin{equation} \label{IJ} I_{\mathbf y} = d^\rho(\mathbf y) - d^\rho(\mathbf y - \mathbf e_1),\qquad\text{and}\qquad J_{\mathbf z} = d^\rho(\mathbf z) - d^\rho(\mathbf z - \mathbf e_2). \end{equation}
The stationary model in the quadrant is simply a projection of the stationary model in the half-plane. This is made precise in the following lemma. \begin{lemma} \label{lemma:LPP_coupling} Let $I_{\mathbf y}$ and $J_{\mathbf z}$ be defined as in~\eqref{IJ}. Fix $\mathbf x = (k,0)$, where $k \in \mathbb{Z}$. Then, $\{J_{\mathbf x + j \mathbf e_2}\}_{j \ge 1}$ is a sequence of i.i.d. $\operatorname{Exp}(1 - \rho)$ random variables, independent of the i.i.d. $\operatorname{Exp}(\rho)$ random variables $\{I_{\mathbf x + i \mathbf e_1}\}_{i \ge 1}$. With $I_{\mathbf x + i \mathbf e_1}$ and $J_{\mathbf x + j \mathbf e_2}$ defined, let the process $\{d_{\mathbf x}^{\rho}(\mathbf y): \mathbf y \in \mathbf x + \mathbb{Z}_{\ge 0}^2\}$ be defined as in~\eqref{eqn:stat_LPP}. Then, under $\widehat{\mathbb P}^\rho$, for any $\mathbf y \in \mathbf x + \mathbb{Z}_{> 0}^2$, the portion of the almost surely unique geodesic to $\mathbf y$ for the process $d^\rho$ that lies in $\mathbf x + \mathbb{Z}_{> 0}^2$ coincides with the portion of the geodesic from $\mathbf x$ to $\mathbf y$ for the process $d_{\mathbf x}^\rho$ that lies in $\mathbf x + \mathbb{Z}_{> 0}^2$. \end{lemma} \begin{proof}
Let $I^0 = \{I_{i \mathbf e_1}\}$, and for $n \ge 1$, let $Y^n =\{Y_{m,n}\}_{m \in \mathbb{Z}}$, and $I^n = D(Y^n,I^{n - 1})$. By Lemma~\ref{lem:D_and_LPP_bd}, $J_{\mathbf x + \mathbf e_2} = [S(Y^1,I^0)]_k$. For $k$ fixed, we note that the sequence
\[
F_\ell = \begin{cases}
-\sum_{i = \ell + 1}^k I_{i \mathbf e_1}, &\ell \le k \\
\sum_{i = k + 1}^\ell I_{i\mathbf e_1}, &\ell > k.
\end{cases}
\]
satisfies $F_k = 0$ and $F_\ell - F_{\ell - 1} = I_{\ell\mathbf e_1} = I^0_\ell$ for $\ell \in \mathbb{Z}$.
Then, by definition of the mappings $S$ and $D$, \[ J_{\mathbf x + \mathbf e_2} = \sup_{-\infty < j \le k}\Biggl\{-\sum_{i = j + 1}^k I_{i\mathbf e_1} + \sum_{i = j}^k Y^1_i\Biggr\}, \] while, for $\ell \le k$, \[ I^1_\ell = \sup_{-\infty < j \le \ell}\Biggl\{-\sum_{i = j + 1}^k I_{i\mathbf e_1} + \sum_{i = j}^\ell Y^1_i\Biggr\} - \sup_{-\infty < j \le \ell - 1} \Biggl\{-\sum_{i = j + 1}^k I_{i\mathbf e_1} + \sum_{i = j}^{\ell - 1} Y^1_i\Biggr\}. \] Therefore, $\{(I^1_\ell)_{\ell \le k},J_{\mathbf x + \mathbf e_2}\}$ is a measurable function of $(I_{i \mathbf e_1})_{i \le k}$ and $Y^1$, and is therefore independent of $\{(I_{\mathbf x + i\mathbf e_1})_{i \ge 1},Y^2,Y^3,\ldots\}$. By Lemma~\ref{lem:output_of_queue}, $I^1$ is an i.i.d. sequence of $\operatorname{Exp}(\rho)$ random variables, $J_{\mathbf x + \mathbf e_2} \sim \operatorname{Exp}(1 - \rho)$, and $(\{I^1_\ell\}_{\ell \le k},J_{\mathbf x + \mathbf e_2})$ are mutually independent.
Now, assume by way of induction that, for some $n \ge 1$, the random variables \begin{equation} \label{204} \{(I^n_\ell)_{\ell \le k}, J_{\mathbf x + \mathbf e_2},\ldots, J_{\mathbf x + n\mathbf e_2}, (I_{\mathbf x + i\mathbf e_1})_{i \ge 1},Y^{n + 1},Y^{n + 2},\ldots\} \end{equation} are mutually independent, and $I^n$ is an i.i.d. sequence of $\operatorname{Exp}(\rho)$ random variables. Using the same reasoning as in the base case via Lemmas~\ref{lem:D_and_LPP_bd} and~\ref{lem:output_of_queue}, $\{(I^{n + 1}_\ell)_{\ell \le k},J_{\mathbf x + (n + 1)\mathbf e_2}\}$ is a measurable function of $(I_{i}^n)_{i \le k}$ and $Y^{n + 1}$. Thus, from~\eqref{204}, we have \[ \{(I^{n + 1}_\ell)_{\ell \le k}, J_{\mathbf x + \mathbf e_2},\ldots, J_{\mathbf x + (n + 1)\mathbf e_2}, (I_{\mathbf x + i\mathbf e_1})_{i \ge 1},Y^{n + 2},Y^{n + 3},\ldots\} \] are mutually independent, $I^{n +1}$ is a sequence of i.i.d. $\operatorname{Exp}(\rho)$ random variables, and $J_{\mathbf x + (n + 1)\mathbf e_2} \sim \operatorname{Exp}(1 - \rho)$.
For the second part of the lemma, we follow the same reasoning as Lemma B.3 in~\cite{Balzs2019NonexistenceOB} and Lemma A.1 in~\cite{Sepp_lecture_notes}. Suppose that the geodesic to $\mathbf y$ for $d^\rho$ enters the quadrant $\mathbf x + \mathbb{Z}_{>0}^2$ through the edge $(\mathbf w,\mathbf z)$ with $\mathbf w = \mathbf x + \ell \mathbf e_t$ for $t \in \{1,2\}$, and suppose that the geodesic from $\mathbf x$ to $\mathbf y$ for $d^\rho_{\mathbf x}$ enters the bulk through $(\widetilde{\mathbf w},\widetilde{\mathbf z})$ with $\widetilde{\mathbf w} = \mathbf x + p \mathbf e_s$ for $s \in \{1,2\}$. For $1 \le i \le \ell$, set $\eta_i = I_{\mathbf x + i \mathbf e_1} = d^\rho(\mathbf x + i\mathbf e_1) - d^\rho(\mathbf x + (i - 1)\mathbf e_1)$ if $t = 1$, and $\eta_i = J_{\mathbf x + i\mathbf e_2} = d^\rho(\mathbf x + i\mathbf e_2) - d^\rho(\mathbf x + (i - 1)\mathbf e_2)$ if $t = 2$. For $1 \le j \le p$, set $\widetilde \eta_j = I_{\mathbf x + j \mathbf e_1}$ if $s = 1$ and $\widetilde \eta_j = J_{\mathbf x + j \mathbf e_2}$ if $s = 2$. Then, using~\eqref{205} in the last inequality below, \begin{align*} d^\rho(\mathbf y) &= d^\rho(\mathbf w) + d(\mathbf z,\mathbf y) = d^\rho(\mathbf x) + \sum_{i = 1}^\ell \eta_i + d(\mathbf z,\mathbf y) \\ &\le d^\rho(\mathbf x) + d^\rho_{\mathbf x}(\mathbf y) = d^\rho(\mathbf x) + \sum_{j = 1}^p \widetilde \eta_j + d(\widetilde{\mathbf z},\mathbf y) \\ &= d^\rho(\widetilde{\mathbf w}) + d(\widetilde{\mathbf z},\mathbf y) \le d^\rho(\mathbf y). \end{align*} Thus, all inequalities are equalities. Since geodesics are almost surely unique in both models, the desired conclusion follows. \end{proof}
\subsection{KPZ scaling of the exponential CGM} The following lemma states that the exit point of stationary LPP obeys the KPZ wandering exponent $2/3$. \begin{lemma}[\cite{Emrah-Janjigian-Seppalainen-20}, Theorem 2.5 (See also~\cite{Bhatia-2020}, Theorem 2.5,~\cite{BasuSarkarSly_Coalescence}, Theorem 3,~\cite{Martin-Sly-Zhang-21}, Lemma 2.8 and~\cite{Seppalainen-Shen-2020}, Corollary 3.6 and Remark 2.5b) \label{lemma:quad_exit_pt}] Recall the stationary LPP model in the quadrant from Section~\ref{sec:LPP_quad}, and let $\tau_1^{\mathbf x}$ and $\tau_2^{\mathbf x}$ be the exit times from the boundary. Let $K = [a_1,a_2] \subseteq (0,1)$. Then, there exist positive constants $N_0,C$ depending only on $K$ such that for all $N \ge N_0$, $b > 0$, and $\rho \in K$, \begin{align}
\mathbb P^\rho\Biggl(\tau_1^{\mathbf x}\Bigl(\mathbf x + (\lfloor N \rho^2 - b N^{2/3} \rfloor, \lfloor N(1 - \rho)^2 \rfloor) \Bigr) \ge 1\Biggr) &\le e^{-C b^3},\qquad\text{and} \label{eqn:h_exit_bd} \\
\mathbb P^\rho\Biggl(\tau_2^{\mathbf x}\Bigl(\mathbf x + (\lfloor N \rho^2 + b N^{2/3} \rfloor, \lfloor N(1 - \rho)^2 \rfloor) \Bigr) \ge 1\Biggr) &\le e^{-C b^3}. \label{eqn:v_exit_bd} \end{align} \end{lemma}
\begin{lemma} \label{lemma:line_exit_pt} For $N \ge 1$, consider LPP with $\operatorname{Exp}(1)$ bulk weights and boundary conditions for the stationary model in the half-plane as defined above, where $\rho_N = \frac{1}{2} + cN^{-1/3}$ and $c$ is some real-valued constant. Assume these are all coupled together under some probability measure $\mathbb P$. Let $Z^{\rho_N}$ denote shorthand notation for the exit point defined in~\eqref{eqn:exit_pt} with initial profile given by sums of i.i.d. $\operatorname{Exp}(\rho_N)$ random variables. Then, for any $y \in \mathbb{R}$ and $t > 0$, there exists a constant $C = C(c,y,t) > 0$ such that \[
\limsup_{N \to \infty} \mathbb P(|Z^{\rho_N}(\lfloor tN + N^{2/3}y \rfloor ,\lfloor tN\rfloor )| \ge MN^{2/3}) \le e^{-CM^3},\qquad\text{ for all } M > 0. \] \end{lemma} \begin{proof} \begin{figure}
\caption{\small We couple the stationary model on the half-plane with the stationary model in the quadrant $\mathbf x + \mathbb{Z}_{\ge 0}^2$. Inside the quadrant $\mathbf x + \mathbb{Z}_{>0}^2$, the geodesics for the two models agree. The exit time from the initial horizontal line for the stationary geodesic in the half-plane (blue/thin) is less than or equal to $k$ if and only if the stationary geodesic in the quadrant (red/thick) exits from the vertical boundary.}
\label{fig:exit_time_hp}
\end{figure}
We first show that \[ \limsup_{N \to \infty} \mathbb P(Z^{\rho_N}(\lfloor tN + N^{2/3}y \rfloor ,\lfloor tN\rfloor) \le -MN^{2/3}) \le e^{-CM^3}. \] Let $y \in \mathbb{R}$, and let $N$ be large enough so that $\lfloor tN + N^{2/3}y \rfloor > \lfloor - MN^{2/3} \rfloor$. Set $\mathbf x = (\lfloor -MN^{2/3} \rfloor ,0)$. With this choice of $\mathbf x$, consider the coupling of $d^{\rho_N}$ and $d_{\mathbf x}^{\rho_N}$ described in Lemma~\ref{lemma:LPP_coupling}, where geodesics for the two models coincide in the quadrant $\mathbf x + \mathbb{Z}_{>0}^2$. In particular, under this coupling, $Z^{\rho_N}(\lfloor tN + N^{2/3}y \rfloor ,\lfloor tN\rfloor ) \le \lfloor -MN^{2/3} \rfloor$ if and only if $\tau_2^{\mathbf x}(\lfloor tN + N^{2/3} y \rfloor, \lfloor tN \rfloor) \ge 1$. See Figure~\ref{fig:exit_time_hp}. Then, since $\rho_N = \frac{1}{2} + O(N^{-1/3})$, we have \[ tN = \frac{tN}{(1 - \rho_N)^2}(1 - \rho_N)^2 = O(N),\;\;\text{and}\;\; tN + N^{2/3} y = -MN^{2/3} + \frac{tN}{(1 - \rho_N)^2}\rho_N^2 + MN^{2/3} + O(N^{2/3}), \] where the $O(N^{2/3})$ term does not depend on $M$. Hence,~\eqref{eqn:v_exit_bd} of Lemma~\ref{lemma:quad_exit_pt} completes the proof. The proof that \[ \limsup_{N \to \infty} \mathbb P(Z^{\rho_N}(\lfloor tN + N^{2/3}y \rfloor ,\lfloor tN\rfloor ) > MN^{2/3}) \le e^{-CM^3} \] follows analogously, this time setting $\mathbf x = (\lfloor MN^{2/3} \rfloor ,0)$, and replacing the use of~\eqref{eqn:v_exit_bd} from Lemma~\ref{lemma:quad_exit_pt} with~\eqref{eqn:h_exit_bd}. \end{proof}
It was shown in~\cite{Dauvergne-Virag-21} that exponential last-passage percolation converges to the directed landscape. We cite their theorem here \begin{theorem}[\cite{Dauvergne-Virag-21}, Theorem 1.7] \label{thm:conv_to_DL} Let $d$ denote last-passage percolation \eqref{d100} with i.i.d.\ $\operatorname{Exp}(1)$ weights. Choose a parameter $\rho \in (0,\infty)$, and define the parameters $\chi,\alpha,\beta,\tau$ by the relations \[ \chi^3 = \frac{(\sqrt \rho + 1)^4}{\sqrt \rho},\quad \alpha = (\sqrt \rho + 1)^2,\; \quad \beta = 1 + \frac{1}{\sqrt \rho},\quad \frac{\chi}{\tau^2} = \frac{1}{4\rho^{3/2}}. \] Let $v = (\rho,1)$ and $u = (\tau,0)$. Then, for any sequence $\sigma \to \infty$, there is a coupling of identically distributed copies $d_\sigma$ of $d$ and the directed landscape $\mathcal{L}$ so that \[
d_\sigma(x \sigma^2 u + s\sigma^3 v,y \sigma^2u + t\sigma^3v)
=\alpha \sigma^3(t-s) +\beta \tau \sigma^2(y - x) + \chi \sigma(\mathcal{L} + o_\sigma)(x,s;y,t). \] Above $d_\sigma$ is interpreted as an appropriately interpolated version of the LPP process, and $o_\sigma:\Rup \to \mathbb{R}$ is a random continuous function such that for every compact $K\subset \Rup$, there exists a constant $c > 0$ such that \begin{align*}
\sup_K |o_\sigma|\rightarrow 0 \ \text{ almost surely},\qquad\text{and}\qquad \mathbb{E}\big[c\sup_K (o_\sigma^-)^3+(o_\sigma^+)\big]\rightarrow 1. \end{align*} In particular, choosing $\rho = 1$ and setting $\sigma = N^{1/3}$, there exists a coupling of the directed landscape $\mathcal{L}$ and identically distributed copies $d_N$, of $d$, such that \begin{multline*} d_N\big((sN+2^{5/3}xN^{2/3},sN),(tN+2^{5/3}yN^{2/3},tN)\big) \\ =4N(t-s)+2^{8/3}N^{2/3}(y-x)+2^{4/3}N^{1/3}(\mathcal{L}+\widetilde o_N)(x,s;y,t), \end{multline*} where $\widetilde o_N := o_{N^{1/3}}$ as defined above. \end{theorem}
\subsection{Existence and distribution of the Busemann process} \label{sec:cgm-bus}
In the case of the exponential CGM, Busemann functions are known to exist and are indexed by direction vectors $\mathbf u$. We index the direction in terms of a real parameter $\rho \in(0,1)$: \[ \mathbf u(\rho) = \Bigg(\frac{\rho^2}{\rho^2 + (1 - \rho)^2} , \frac{(1 - \rho)^2}{\rho^2 + (1 - \rho)^2}\Bigg). \] Then for a fixed $\rho \in (0,1)$ and $\mathbf x,\mathbf y \in \mathbb{Z}^2$, the following limit exists almost surely: \[ B^\rho_{\mathbf x,\mathbf y} = \lim_{n \rightarrow \infty} d(-n\mathbf u(\rho),\mathbf y) - d(-n\mathbf u(\rho),\mathbf x). \] The Busemann functions can be extended to right- and left-continuous processes defined for all directions, as in~\cite{Janjigian-Rassoul-Seppalainen-19}. Here, we notice that the geodesics are travelling asymptotically to the southwest, whereas the geodesics we construct in the present paper are travelling to the northeast. In distribution, we can obtain one formulation from the other by a simple reflection. The geodesics to the southwest give rise to queuing relations that are more natural in the discrete model, so we use this formulation here.
Define the following state space of $n$-tuples of bi-infinite nonnegative sequences: \begin{align*} \mathcal{Y}^n &= \Bigl\{(I^1,\ldots,I^n) \in (\mathbb{R}_{\ge0}^\mathbb{Z})^n: \lim_{m \to -\infty} \frac{1}{|m|} \sum_{i = m}^0 I_i^k < \lim_{m \to -\infty} \frac{1}{|m|}\sum_{i = m}^0 I_i^{k + 1},\,\,\text{ for } 1 \le k \le n - 1 \Bigr\}.
\end{align*} In the definitions above, the limits are assumed to exist. We extend the mapping $D$ to maps that take more than two sequences as inputs. For $k \ge 1$, define the maps $D^{(k)}: \mathcal{Y}^k \to \mathbb{R}_{\ge0}^\mathbb{Z}$ inductively as follows. Define $D^{(1)}(I^1) = I^1$, and for $k > 1$, \[ D^{(k)}(I^1,\ldots,I^k) = D(I^1,D^{(k - 1)}(I^2,I^3,\ldots,I^k)). \] Furthermore, define the map $\mathcal{D}^{(n)}:\mathcal{Y}^n \to \mathcal{Y}^n$ as \[ [\mathcal{D}^{(n)}(I^1,\ldots,I^n)]_i = D^{(i)}(I^1,\ldots,I^i). \] On the space $\mathcal{Y}^n$, we define the measure $\nu^{\boldsymbol \rho^n}$ as follows: $(I^1,\ldots,I^n) \sim \nu^{\boldsymbol \rho^n}$ if $(I^1,\ldots,I^n)$ are mutually independent, and for $1 \le i \le n$, $I^i$ is a sequence of i.i.d.\ exponential random variables with rate $\rho_i$. We define the measure $\mu^{\boldsymbol \rho^n}$ as \begin{equation} \label{mu_def} \mu^{\boldsymbol \rho^n} = \nu^{\boldsymbol \rho^n} \circ (\mathcal{D}^{(n)})^{-1}. \end{equation} We now cite two theorems. \begin{theorem}[\cite{Fan-Seppalainen-20}, Theorem 5.4] \label{thm:mu_invariant} Let $\boldsymbol \rho^n = (\rho_1,\ldots,\rho_n)$ with $1 > \rho_1 > \cdots > \rho_n > 0$ and assume $(I^1,\ldots,I^n) \sim \mu^{\boldsymbol \rho^n}$. Let $I^0$ be a sequence of i.i.d. exponential random variables with rate $1$, independent of $(I^1,\ldots,I^n)$. Then, \[ (D(I^0,I^1),\ldots,D(I^0,I^n)) \sim \mu^{\boldsymbol \rho^n}. \] \end{theorem}
\begin{theorem}[\cite{Fan-Seppalainen-20}, Theorem 3.2] \label{thm:exp_Buse_dist} For $\rho \in (0,1)$, define the sequence $I^\rho$ as $I^{\rho}_i = B_{(i -1)\mathbf e_1,i\mathbf e_1}^\rho$. Let $\boldsymbol \rho^n = (\rho_1,\ldots,\rho_n)$ with $1 > \rho_1 > \cdots > \rho_n > 0$. Then, \[ (I^{\rho_1},\ldots,I^{\rho_n}) \sim \mu^{\boldsymbol \rho^n}. \] \end{theorem}
\section{Stationary horizon} \label{sec:stat_horiz} To describe the stationary horizon, we introduce some notation from~\cite{Busani-2021}. The map $\Phi:C(\mathbb{R}) \times C(\mathbb{R}) \to C(\mathbb{R})$ is defined as \[ \Phi(f,g)(y) = \begin{cases} f(y) + \Big[W_0(f - g) + \inf_{0 \le x \le y} (f(x) - g(x))\Big]^{-} &y \ge 0 \\ f(y) - \Big[W_y(f - g) + \inf_{y < x \le 0} \Big(f(x) - f(y) - [g(x) - g(y)]\Big)\Big]^{-} &y < 0, \end{cases} \] where \[ W_y(f) = \sup_{-\infty < x \le y}[f(y) - f(x)]. \] We note that the map $\Phi$ is well-defined only on the appropriate space of functions where the supremums are all finite. By Lemma 9.2 in~\cite{Seppalainen-Sorensen-21b}, when $f(0) = g(0) = 0$, \begin{equation} \label{Phialt} \Phi(f,g)(y) = f(y) + \sup_{-\infty <x \le y }\{g(x) - f(x)\} - \sup_{-\infty < x \le 0}\{g(x) - f(x)\} \end{equation} This map extends to maps $\Phi^k:C(\mathbb{R})^k \to C(\mathbb{R})^k$ as follows. \begin{enumerate}
\item $\Phi^1(f_1)(x) = f_1(x)$.
\item $\Phi^2(f_1,f_2)(x) = [f_1(x),\Phi(f_1,f_2)(x)]$,\qquad\text{and for }$k \ge 3,$
\item $\Phi^k(f_1,\ldots,f_k)(x) = [f_1(x),\Phi(f_1,[\Phi^{k - 1}(f_2,\ldots,f_k)]_1)(x),\ldots,\Phi(f_1,[\Phi^{k -1}(f_2,\ldots,f_k)]_{k - 1})(x)]$. \end{enumerate}
\begin{comment} When we assume that $f_i(0) = 0$ for all $i$, the mappings $\Phi^k$ have the alternative representation. \begin{lemma}[\cite{Seppalainen-Sorensen-21b}, Lemmas 9.2-9.4, and 7.1] For $f_1,\ldots,f_k \in C(\mathbb{R})$ with $f_i(0) = 0$ for $1 \le j \le k$, \begin{align*}
&[\Phi^k(f_1,\ldots,f_k)]_j(x) = f_1(x) \\
&\qquad + \sup_{-\infty < x_1 \le \cdots \le x_{j - 1} \le x}\sum_{i = 1}^{j - 1}(f_{i + 1}(x_i) - f_i(x_i)) - \sup_{-\infty < x_1 \le \cdots \le x_{j - 1} \le 0} \sum_{i = 1}^{j - 1}(f_{i + 1}(x_i) - f_i(x_i)). \end{align*} \end{lemma}
From this representation, the following lemma follows. \begin{lemma} \label{lem:Phi_shift} For $y \in \mathbb{R}$, let $\tau_y$ denote the shift operator $\tau_y f = f(y,y + \abullet)$. Then, for $k \in \mathbb{Z}_{>0}$ and $f_1,\ldots,f_k \in C(\mathbb{R})$ with $f_i(0) = 0$ for $1 \le i \le k$, \[ \Phi^k(\tau_y f_1,\ldots, \tau_y f_k) = \Bigl(\tau_y [\Phi^k(f_1,\ldots,f_k)]_1,\ldots,\tau_y[\Phi^k(f_1,\ldots,f_k)]_k\Bigr) . \] \end{lemma} \end{comment}
\begin{definition} \label{def:SH} The stationary horizon $\{G_\xi\}_{\xi \in \mathbb{R}}$ is a process with state space $C(\mathbb{R})$ and with paths in the Skorokhod space $D(\mathbb{R},C(\mathbb{R}))$ of right-continuous functions $\mathbb{R} \to C(\mathbb{R})$ with left limits. $C(\mathbb{R})$ has the Polish topology of uniform convergence on compact sets. The law of the stationary horizon is characterized as follows: For real numbers $\xi_1 < \cdots < \xi_k$, the $k$-tuple $(G_{\xi_1},\ldots,G_{\xi_k})$ of continuous function has the same law as $\Phi^k(f_1,\ldots,f_k)$, where $f_1,\ldots,f_k$ are independent two-sided Brownian motions with drifts $2\xi_1,\ldots,2\xi_k$, and each with diffusion coefficient $\sqrt 2$ (as defined in point \eqref{def:2BMcmu} in Section \ref{sec:notat}). \end{definition} \begin{remark} The transformation $\Phi^k$ is such that for each $\xi \in \mathbb{R}$, $G_{\xi}$ is also a two-sided Brownian motion with diffusion coefficient $\sqrt 2$ and drift $2 \xi$. \end{remark}
For $N \in \mathbb{N}$, we define $F_{\abullet}^N \in D(\mathbb{R},C(\mathbb{R}))$ to be such that, for each $\xi \in \mathbb{R}$, $F_\xi^N$ is the linear interpolation of the function $\mathbb{Z}\ni i \mapsto B_{0,i\mathbf e_1}^{1/2 - 2^{-4/3}\xi N^{-1/3}}$, where $B$ is defined in Section~\ref{sec:cgm-bus}. Then, for $\xi \in \mathbb{R}$, we define \begin{equation} \label{GN} G_{\xi}^N(x) = 2^{-4/3}N^{-1/3}\Bigl[F_{\xi}^N(2^{5/3} N^{2/3}x ) - 2^{8/3}N^{2/3} x\Bigr]. \end{equation} \begin{remark} The parameterization here is different from the one used in~\cite{Busani-2021}, because in the present paper, $G_\xi$ is a Brownian motion with diffusivity $\sqrt 2$ and drift $2\xi$, while in~\cite{Busani-2021}, $G_{\mu}$ has diffusivity $2$ and drift $\mu$. We can check that we have the desired diffusivity and drift parameters directly. Let $\widetilde G$ be the version of the stationary horizon as defined in~\cite{Busani-2021}. It is constructed as follows: for each $\mu \in \mathbb{R}$, $\widetilde F_\mu$ is the linear interpolation of the function $\mathbb{Z} \ni i \mapsto B_{0,i \mathbf e_1}^{1/2 - 4^{-1}\mu N^{-1/3}}$, and $\widetilde G_\mu(x)$ is the limit as $N \to \infty$ of \[ N^{-1/3}\Bigl[\widetilde F_\mu^N(N^{2/3}x) - 2N^{2/3}x\Bigr], \] which is distributed as $2B(x) + \mu x$, where $B$ is a standard Brownian motion. Then, after taking a limit from~\eqref{GN}, \begin{align*}
G_{\xi}(x) = 2^{-4/3}\widetilde G_{4 \cdot 2^{-4/3} \xi}(2^{5/3} x) \deq 2^{-4/3}[2B(2^{5/3}x) + 2^{2/3} \xi (2^{5/3} x) ] \deq 2^{1/2} B(x) + 2\xi x, \end{align*} where the last equality comes from Brownian scaling. Hence, the resulting object has diffusivity $\sqrt 2$ and drift $2\xi$, as desired. Furthermore, using the scaling relations of Theorem~\ref{thm:SH10}\ref{itm:SH_sc} below, $G_\xi(x) \deq \widetilde G_{4\xi}(x/2)$. \end{remark}
The main theorem of~\cite{Busani-2021} is the following: \begin{theorem} \label{thm:conv_to_SH} As $N \to \infty$, the process $G^N$ converges in distribution to $G$ on the path space $D(\mathbb{R},C(\mathbb{R}))$. In particular, for any finite collection $\xi_1,\ldots,\xi_n$, \[ (G_{\xi_1}^N,\ldots,G_{\xi_n}^N) \Longrightarrow (G_{\xi_1},\ldots,G_{\xi_n}), \] where the convergence holds in distribution in the sense of uniform convergence on compact sets of functions in $C(\mathbb{R})^n$. \end{theorem}
The first author~\cite{Busani-2021} first proved this finite-dimensional convergence and then showed tightness of the process to conclude the existence of a limit taking values in $D(\mathbb{R},C(\mathbb{R}))$. The second and third authors~\cite{Seppalainen-Sorensen-21b} discovered that the stationary horizon is also the Busemann process of Brownian last-passage percolation, up to an appropriate scaling and reflection (see Theorem 5.3 in~\cite{Seppalainen-Sorensen-21b}).
The following collects several facts about the stationary horizon from these two papers. For notation, let $G_{\xi+} = G_\xi$, and let $G_{\xi -}$ be the limit of $G_{\alpha}$ as $\alpha \nearrow \xi$. \begin{theorem}[\cite{Busani-2021}, Theorem 1.2; \cite{Seppalainen-Sorensen-21b}, Theorems 3.9, 3.11, 3.15, 7.20 and Lemma 3.6] \label{thm:SH10} $ $ The following hold for the stationary horizon. \begin{enumerate} [label=\rm(\roman{*}), ref=\rm(\roman{*})] \itemsep=3pt
\item\label{itm:SHpm} For each $\xi \in \mathbb{R}$, with probability one, $G_{\xi -} = G_{\xi +}$, and $G_\xi$ is a two-sided Brownian motion with diffusion coefficient $\sqrt 2$ and drift $2\xi$.
\item \label{itm:SH_sc} For $c > 0$ and $\nu \in \mathbb{R}$, \
$
\{cG_{c (\xi + \nu)}(c^{-2}x) - 2\nu x : x\in \mathbb{R}\}_{\xi \in \mathbb{R}} \,\deq\, \{G_\xi(x): x \in \mathbb{R}\}_{\xi \in \mathbb{R}}.
$
\item\label{itm:SH_sc2} Spatial stationarity holds in the sense that, for $y \in \mathbb{R}$,
\[\{G_{\xi}(x):x \in \mathbb{R}\}_{\xi \in \mathbb{R}} \deq \{G_{\xi}(y,x + y): x \in \mathbb{R}\}_{\xi \in \mathbb{R}}.\]
\item Fix $x > 0$, $\xi_0 \in \mathbb{R}$, $\xi > 0$, and $z \ge 0$. Then,
\begin{align*}
&\mathbb P\bigl(\sup_{a,b \in [-x,x]}|G_{\xi_0 + \xi}(a,b) - G_{\xi_0 }(a,b)| \le z\bigr) = \mathbb P\bigl(G_{\xi_0 + \xi}(-x,x) - G_{\xi_0}(-x,x) \le z\bigr) \\
&\quad = \Phi\Bigl(\frac{z - 2\xi x}{2\sqrt {2x}}\Bigr) + e^{\frac{\xi z}{2}}\biggl(\Bigl(1 + \tfrac12{\xi}z + \xi^2 x \Bigr)\Phi\Bigl(-\frac{z + 2\xi x}{2 \sqrt{2 x}}\Bigr) - \xi\sqrt{{x}/{\pi}\hspace{0.7pt}} \hspace{0.9pt} e^{-\frac{(z + 2\xi x)^2}{8x}}\biggr)
\end{align*}
where $\Phi$ is the standard normal distribution function.
\item \label{itm:exp} For $x < y$ and $\alpha < \beta$, with $\#$ denoting the cardinality,
\[
\mathbb{E}[\#\{\xi \in (\alpha,\beta): G_{\xi-}(x,y) < G_{\xi +}(x,y) \}] = 2\sqrt{{2}/{\pi}\hspace{0.5pt}}
(\beta - \alpha)\sqrt{y - x}.
\] \end{enumerate} Furthermore, the following hold on a single event of full probability. \begin{enumerate} [resume, label=\rm(\roman{*}), ref=\rm(\roman{*})] \itemsep=3pt
\item \label{itm:SH_j} For $x_0 > 0$ define the process $G^{x_0} \in D(\mathbb{R},C[-x_0,x_0])$ by restricting each function $G_\xi$ to $[-x_0,x_0]$: $G^{x_0}_\xi=G_\xi\vert_{[-x_0,x_0]}$. Then, $\xi\mapsto G^{x_0}_\xi$ is a $C[-x_0,x_0]$-valued jump process with finitely many jumps in any compact interval, but countably infinitely many jumps in $\mathbb{R}$. The number of jumps in a compact interval has finite expectation given in item \ref{itm:exp} above, and each direction $\xi$ is a jump direction with probability $0$. In particular, for each $\xi \in \mathbb{R}$ and compact set $K$, there exists a random $\varepsilon = \varepsilon(\xi,K)>0$ such that for all $\xi - \varepsilon < \alpha < \xi < \beta < \xi + \varepsilon$, $\sigg \in \{-,+\}$, and all $x \in K$, $G_{\xi -}(x) = G_{\alpha}(x)$ and $G_{\xi +}(x) = G_{\beta}(x)$.
\item \label{itm:SH_mont} For $x_1 \le x_2$, $\xi \mapsto G_{\xi}(x_1,x_2)$ is a non-decreasing jump process.
\item Let $\alpha < \beta$. The function $x\mapsto G_\beta(x)-G_\alpha(x)$ is nondecreasing. There exist finite $S_1 = S_1(\alpha,\beta)$ and $S_2 = S_2(\alpha,\beta)$ with $S_1 < 0 < S_2$ such that $G_{\alpha }(x) = G_{\beta }(x)$ for $x \in [S_1,S_2]$ and $G_{\alpha }(x) \ne G_{\beta }(x)$ for $x \notin [S_1,S_2]$.
\item \label{itm:bad_dir_contained}
Let $\alpha < \beta$, $S_1 = S_1(\alpha,\beta)$ and $S_2 = S_2(\alpha,\beta)$. Then $\exists\hspace{0.7pt} \zeta, \eta\in[\alpha, \beta]$ such that, \begin{align*}
&\text{$G_{\zeta -}(x) = G_{\zeta +}(x)$ for $x \in [-S_1,0]$, and $G_{\zeta -}(x) > G_{\zeta +}(x)$ for $x < S_1$, and } \\
&\text{$G_{\eta -}(x) = G_{\eta +}(x)$ for $x \in [0,S_2]$, and $G_{\eta -}(x) < G_{\eta +}(x)$ for $x > S_2$.}
\end{align*}
In particular, the set $\{\xi \in \mathbb{R}: G_{\xi} \neq G_{\xi-}\}$ is dense in $\mathbb{R}$. \end{enumerate} \end{theorem}
Theorem~\ref{thm:invariance_of_SH} gives the following previously unknown property of SH. \begin{corollary} \label{cor:SH_reflect} The distribution of the stationary horizon on $D(\mathbb{R},C(\mathbb{R}))$ satisfies the following reflection property: \[ \{G_{(-\xi)-}(-\hspace{0.9pt}\aabullet)\}_{\xi \in \mathbb{R}} \deq \{G_\xi(\aabullet)\}_{\xi \in \mathbb{R}}. \] \end{corollary} \begin{proof} By the spatial reflection invariance of the directed landscape (Lemma~\ref{lm:landscape_symm}\ref{itm:DL_reflect}), $\{G_{(-\xi)-}(-\aabullet)\}_{\xi \in \mathbb{R}}$ is an invariant distribution for the KPZ fixed point such that each marginal satisfies the limit assumptions~\eqref{eqn:drift_assumptions}. The result follows from the uniqueness part of Theorem~\ref{thm:invariance_of_SH}. \end{proof}
\begin{comment}
Recall the shift operator $\tau_y f = f(y,y + \abullet)$. \begin{lemma} \label{lem:SH_ergodicity} For $y \in \mathbb{R}$, the stationary horizon $G$ is stationary and ergodic under the shifts \[ \{G_{\xi}\}_{\xi \in \mathbb{R}} \mapsto \{\tau_y G_\xi \}_{\xi \in \mathbb{R}}. \] \end{lemma} \begin{proof} If $(f_1,\ldots,f_k)$ is a collection of i.i.d.\ Brownian motions of (possibly different diffusion coefficients and drifts), then $(f_1,\ldots,f_k)$ is stationary and ergodic under the shift $(\tau_y f_1,\ldots,\tau_y f_k)$. The result follows from Lemma~\ref{lem:Phi_shift} since the mapping $\Phi^k$ that defines the finite-dimensional distributions of the stationary horizon respects translations. \end{proof} \end{comment}
\end{appendix}
\end{document} |
\begin{document}
\title{Derivation of the wave kinetic equation: Full range of scaling laws} \author{Yu Deng and Zaher Hani} \maketitle
\begin{abstract} This paper completes the program started in \cite{DH21, DH21-2} aiming at providing a full rigorous justification of the wave kinetic theory for the nonlinear Schr\"odinger (NLS) equation. Here, we cover the full range of scaling laws for the NLS on an arbitrary periodic rectangular box, and derive the wave kinetic equation up to small multiples of the kinetic time.
The proof is based on a diagrammatic expansion and a deep analysis of the resulting Feynman diagrams. The main novelties of this work are three-fold: (1) we present a robust way to identify arbitrarily large ``bad'' diagrams which obstruct the convergence of the Feynman diagram expansion, (2) we systematically uncover intricate cancellations among these large ``bad'' diagrams, and (3) we present a new robust algorithm to bound all remaining diagrams and prove convergence of the expansion. These ingredients are highly robust, and constitute a powerful new approach in the general mathematical study of Feynman diagrams.
\end{abstract} \tableofcontents \section{Introduction} \subsection{Setup and the main result}\label{intro-nls} In this paper we derive the wave kinetic equation, from the continuum cubic nonlinear Schr\"{o}dinger equation and at the kinetic time scale, for the \emph{full range of scaling laws} between the large box and weak nonlinearity limits. This completes the program initiated in \cite{DH19,DH21}, aiming at providing rigorous mathematical foundation for the wave turbulence theory.
In dimension $d\geq 3$, consider the cubic nonlinear Schr\"{o}dinger equation \begin{equation}\label{nls}\tag{NLS} \left\{
\begin{split}&(i\partial_t-\Delta)u+\alpha|u|^2u=0,\quad x\in \mathbb T_L^d=[0,L]^d,\\ &u(0,x)=u_{\mathrm{in}}(x) \end{split} \right. \end{equation} on the square torus $\mathbb T_L^d=[0,L]^d$ of size $L$ (all results and proofs extend without change to arbitrary rectangular tori). Here $\alpha$ is a parameter indicating the strength of the nonlinearity, and $\Delta:=\frac{1}{2\pi}(\partial_{x_1}^2+\cdots +\partial_{x_d}^2)$ is the normalized Laplacian. We also set the space Fourier transform as \begin{equation}\label{fourier} \widehat u(t, k) =\frac{1}{L^{d/2}}\int_{\mathbb T^d_L} u(t, x) e^{-2\pi i k\cdot x} \, dx, \qquad u(t,x) =\frac{1}{L^{d/2}}\sum_{k\in\mathbb Z_L^d}\widehat{u}(k)e^{2\pi ik\cdot x}, \end{equation} where $\mathbb Z_L^d:=(L^{-1}\mathbb Z)^d$. Note that this convention is different from (but equivalent to) the one in \cite{DH21,DH21-2}; the parameters $\lambda$ in \cite{DH21,DH21-2} and $\alpha$ in the current paper are related by $\alpha=\lambda^2L^{-d}$.
Assume the initial data of (\ref{nls}) is given by \begin{equation} \label{data}\tag{DAT}u_{\mathrm{in}}(x)=\frac{1}{L^{d/2}}\sum_{k\in\mathbb Z_L^d}\widehat{u_{\mathrm{in}}}(k)e^{2\pi ik\cdot x},\quad \widehat{u_{\mathrm{in}}}(k)=\sqrt{n_{\mathrm{in}}(k)}g_k(\omega), \end{equation} where $n_{\mathrm{in}}:\mathbb R^d\to[0,\infty)$ is a given Schwartz function, and $\{g_k(\omega)\}$ is a collection of i.i.d. random variables. For concreteness, we will assume each $g_k$ is a standard normalized Gaussian.
Define the \emph{kinetic (or Van Hove) time} \[T_{\mathrm{kin}}:=\frac{1}{2\alpha^2}.\] For a fixed value $\gamma\in(0,1)$, we will assume that the \emph{scaling law} between $L$ and $\alpha$ is $\alpha=L^{-\gamma}$, so we have $T_{\mathrm{kin}}=\frac{1}{2} L^{2\gamma}$. \subsubsection{The wave kinetic equation} The wave kinetic equation is given by:
\begin{equation}\label{wke}\tag{WKE} \left\{ \begin{split}&\partial_t n(t,k)=\mathcal K(n(t),n(t),n(t))(k),\\ &n(0,k)=n_{\mathrm{in}}(k), \end{split} \right. \end{equation} where $n_{\mathrm{in}}$ is as in Section \ref{intro-nls}, and the nonlinearity $\mathcal K$ is given by \begin{multline}\label{wke2}\tag{COL}
\mathcal K(\phi_1,\phi_2,\phi_3)(k)=\int_{(\mathbb R^d)^3}\big\{\phi_1(k_1)\phi_2(k_2)\phi_3(k_3)-\phi_1(k)\phi_2(k_2)\phi_3(k_3)+\phi_1(k_1)\phi_2(k)\phi_3(k_3)\\-\phi_1(k_1)\phi_2(k_2)\phi_3(k)\big\}\times\boldsymbol{\delta}(k_1-k_2+k_3-k)\cdot\boldsymbol{\delta}(|k_1|^2-|k_2|^2+|k_3|^2-|k|^2)\,\mathrm{d}k_1\mathrm{d}k_2\mathrm{d}k_3. \end{multline} Here and below $\boldsymbol{\delta}$ denotes the Dirac delta, and we define
\[|k|^2:=\langle k,k\rangle,\quad \langle k,\ell\rangle:=k^1 \ell^1+\cdots +k^d\ell^d,\] where $k=(k^1,\cdots,k^d)$ and $\ell=(\ell^1,\cdots,\ell^d)$ are $\mathbb Z_L^d$ or $\mathbb R^d$ vectors.
Given any Schwartz initial data $n_{\mathrm{in}}(k)$, the equation \eqref{wke} has a unique local solution $n=n(t,k)$ on some short time interval depending on $n_{\mathrm{in}}$. \subsubsection{The main result} The main result is stated as follows. \begin{thm}\label{main} Fix $d\geq 3$ and $\gamma\in(0,1)$. Fix a Schwartz function $n_{\mathrm{in}}\geq 0$, and fix $\delta\ll 1$ depending only on $(d,\gamma,n_{\mathrm{in}})$. Consider the equation (\ref{nls}) with random initial data (\ref{data}), and assume $\alpha=L^{-\gamma}$ so that $T_{\mathrm{kin}}=\frac{1}{2} L^{2\gamma}$.
Then, for sufficiently large $L$ (depending on $\delta$), the equation has a smooth solution up to time \[T=\delta\cdot\frac{L^{2\gamma}}{2}=\delta\cdot T_{\mathrm{kin}},\] with probability $\geq 1-e^{-(\log L)^2}$. Moreover we have
\begin{equation}\label{limit}\lim_{L\to\infty}\sup_{t\in[0,T]}\sup_{k\in\mathbb Z_L^d}\left|\mathbb E\,|\widehat{u}(t,k)|^2-n\bigg(\frac{t}{T_{\mathrm{kin}}},k\bigg)\right|=0, \end{equation} where $\widehat{u}$ is as in \eqref{fourier}, and $n(\tau,k)$ is the solution to (\ref{wke}). In (\ref{limit}) and below we understand that the expectation $\mathbb E$ is taken under the assumption that (\ref{nls}) has a smooth solution on $[0,T]$, which is an event with overwhelming probability. \end{thm} A few comments about this result are in order. \begin{itemize} \item The nonlinear Schr\"{o}dinger equation (\ref{nls}) is studied here and in \cite{DH19,DH21,DH21-2}, as a representative model in nonlinear wave theory. In fact, it is the \emph{universal} Hamiltonian nonlinear dispersive equation, in the sense that any such equation gives (NLS) in a suitable limiting regime \cite{Sul99}. The methods we develop here also apply to other dispersive models modulo technical differences. \item Theorem \ref{main} holds for the rectangular torus $\mathbb T_{L,\boldsymbol{\lambda}}^d=[0,\lambda_1L]\times\cdots [0,\lambda_dL]$ for \emph{any} $\lambda_j>0$ (rational or irrational) without genericity assumption. Here we only present the proof for the square torus, but the general case can be treated by the same arguments. \item In the same way as \cite{DH21}, {the assumption that $n_{\mathrm{in}}(k)$ is Schwartz is unnecessary. In fact it suffices to assume that its first $40d$ derivatives decay like $\langle k\rangle^{-40d}$. Moreover} the error term defined in (\ref{limit}) enjoys the explicit decay rate $L^{-c}$, which is uniform in $t$ and $k$, for some absolute constant $c>0$. The value of $c$ we get, though, is likely non-optimal. \item The exceptional probability $e^{-(\log L)^2}$ in Theorem \ref{main} is better than \cite{DH21}, but this is just due to the choice of the order $N$ of the expansion (see Section \ref{norms}). 
In fact the same bound also holds in the setting of \cite{DH21}, as is already demonstrated in \cite{DH21-2}. \item The main results in \cite{DH21-2} (evolution of higher order moments, propagation of chaos, law evolution for non-Gaussian data, derivation of wave kinetic hierarchy) also extend to the current setting. In particular, we can replace the i.i.d. Gaussians $g_k(\omega)$ by any centered-normalized i.i.d. random variables $\eta_k(\omega)$ whose law is rotationally symmetric and has exponential tails. This is easily shown by combining the arguments in this paper and in \cite{DH21-2} with obvious modifications. \end{itemize} \subsection{Background and literature}\label{intro-back} The theory of wave turbulence describes the non-equilibrium statistical behavior of systems of interacting waves, in the thermodynamic limit where the number of degrees of freedom goes to infinity. It is the wave analog of the classical kinetic theory of Boltzmann for particles, and its rigorous justification corresponds to Hilbert's sixth problem for nonlinear waves.
The basic setup of the theory is as follows. Start with a nonlinear dispersive equation as the microscopic system of nonlinear waves. This is (\ref{nls}) in our case, but can also be replaced by other equations. Such system is studied in a \emph{large box} $\mathbb T_L^d$ with a \emph{weak nonlinearity} $\alpha|u|^2u$, where $L\to\infty$ (so the number of degrees of freedom diverges as $\sim L^d$) and $\alpha\to 0$ in the limit. Assume the initial data is random and \emph{well-prepared} as in (\ref{data}), i.e. the different Fourier modes $\widehat{u}(k)$ are independent and satisfy a random phase (RP) condition. Then, among other things, the following kinetic description is expected \emph{at the kinetic time} $T_{\mathrm{kin}}$: \begin{itemize} \item Propagation of chaos: different Fourier modes should remain independent in the limit;
\item The wave kinetic equation: the evolution of energy density $|\widehat{u}(k)|^2$ should be governed by (\ref{wke}) in the limit. \end{itemize}
In the physics literature, the very first kinetic description for waves appeared in Peierls \cite{Pei29} in the study of anharmonic crystals, leading to the so-called phonon Boltzmann equation. Since then, the kinetic theory has been developed for various models, and has become a systematic paradigm starting in the 1960s, with immense applications in various fields of physics and science \cite{BS66,BN69,Dav72,Has62,Has63,Jan08,Naz11,Spo06,Spo08,Ved67,WMO98,ZS67}. The name \emph{wave turbulence theory} comes from the spectral energy dynamics and cascades that the wave kinetic equation predicts for nonlinear wave systems, which yields similar conclusions to Kolmogorov spectra in hydrodynamic turbulence; this connection was a major contribution of Zakharov \cite{Zak65,ZLF92}.
On the other hand, the rigorous mathematical treatment of wave turbulence had to wait until much later for the appropriate conceptual and technical ingredients to be invented. While it was clear in the theoretical physics community that a Feynman diagram expansion is the right approach to the problem, the main mathematical issue here was to prove the convergence of such an expansion. Naturally, the progress started in a linear setting (e.g. electron moving through random impurities), namely with the work of Spohn \cite{Spohn1977} for short kinetic times. This was later extended to much longer times in the celebrated works of Erd\"{o}s-Yau \cite{EY00} and Erd\"{o}s-Salmhofer-Yau \cite{ESY08}. Obviously, the next level of progress is to advance this understanding to the nonlinear setting, where the randomness is only coming from the initial distribution of the data as explained in \cite{Spohn1994}. The first breakthrough proving the convergence of the diagrammatic expansion in a nonlinear setting was that of Lukkarinen-Spohn \cite{LS11}, which considered the lattice (NLS) and studied the time correlations of the invariant Gibbs measure in the thermodynamic limit. Even though the above works only dealt with linear or equilibrium settings, they managed to draw substantial interest to this field from the mathematical community, and inspired subsequent research. In the last decade, partial results have been proved regarding the derivation of (\ref{wke}) in the nonlinear out-of-equilibrium setting, starting with works that addressed certain aspects of the problem (second-order expansions, near-equilibrium dynamics, shorter time scales etc.), see \cite{BGHS19,CG19,CG20,DH19,DK19,DK19-2,Fa18} and references therein. In particular, the authors' earlier work \cite{DH19}, as well as Collot-Germain \cite{CG19,CG20}, provides the justification of (\ref{wke}) up to the almost sharp time scale $T_{\mathrm{kin}}^{1-\varepsilon}$ for any $\varepsilon>0$.
In April 2021, the authors \cite{DH21} completed the first rigorous derivation of (\ref{wke}) up to time $T_{\mathrm{kin}}$ for scaling laws $\gamma=1$ or close to $1$. Subsequently, propagation of chaos and other predictions of wave turbulence theory were proved in \cite{DH21-2}. This includes the asymptotics of higher order correlations, derivation of the wave kinetic hierarchy, limit equations for the law of $\widehat u(t,k)$ when the initial distribution is not necessarily Gaussian, and propagation of Gaussianity in the case the initial distribution is Gaussian.
We should mention that, after the work \cite{DH21}, some other results in a similar vein were also obtained, but for equations with special time-dependent random forcing. In \cite{ST21}, Staffilani and Tran derived the wave kinetic equation for the Zakharov-Kuznetsov equation in the presence of a time-dependent noise that {provides an additional randomization effect for angles in Fourier space.} Recently they extended their result to the spatial inhomogeneous setting with a different noise, in joint work with Hannani and Rosenzweig \cite{HRST22}. At this time, \cite{DH21,DH21-2,HRST22,ST21} are the only results that reach the kinetic time $T_{\mathrm{kin}}$ in the non-equilibrium setting. Some more recent results that cover shorter time scales, but do not include forcing, can be found in \cite{ACG21,Ma22}.
In addition to the derivation of (\ref{wke}), there are also many works devoted to the study of the behavior of solutions to wave kinetic equations like (\ref{wke}), see for example \cite{CDG22,EV15,EV15-2,GIT20, RST21,SoT18}. This is another very important question, but is less related to the focus of this paper, so we will not elaborate on its state of the art here. \subsection{The scaling laws}\label{intro-scale} Note that the kinetic description of wave turbulence theory involves the two limits $L\to\infty$ and $\alpha\to 0$. In fact, it is very important to specify the exact manner in which these two limits are taken. The most general form of such limits would be \[\alpha= L^{-\gamma}\] for some $\gamma\in[0,\infty]$, which is called a \emph{scaling law}. Note that the endpoint case $\gamma=0$ is understood as the iterated limit where first $L\to\infty$ with $\alpha$ fixed and then $\alpha\to 0$; the case $\gamma=\infty$ is the opposite. The purpose of this section is to explain the necessary conditions on the scaling laws for a kinetic theory to hold. This will justify why $\gamma\in(0, 1)$ is the full range of scaling laws for (\ref{nls}) on the {square torus}.
To the best of our knowledge, the role of the scaling law in wave turbulence theory has not been adequately clarified in the physics literature, prior to the recent rigorous mathematical studies. In fact, this was one of the contributions of the authors' recent works, and is explained clearly in the expository paper \cite{DH22}. For completeness of the discussion, we elaborate on this here as well.
First of all, not all scaling laws\footnote{It is common knowledge in the physics literature that the limit $\alpha\to 0$ should not be taken before the limit $L\to \infty$ {(see Remark \ref{scaling0})}, which excludes the scaling law $\gamma=\infty$. This may lead to some mistaken belief that the only other option is $\gamma=0$, i.e. to take $L\to \infty $ first followed by $\alpha\to 0$. In fact, as we shall see in Section \ref{intro-endpoint}, the latter is also not compatible with equations on continuum domains, {so in the continuum setting one has to restrict to scaling laws $0<\gamma<\infty$}. In the discrete setting, the scaling law $\gamma=0$ is compatible.} $\alpha=L^{-\gamma}$ allow for the kinetic description in Section \ref{intro-back}. To see this, consider the equation (\ref{nls}) with initial data (\ref{data}), but with a general dispersion relation $\omega(\nabla/i)$ instead of $-\Delta$. Then $\mathbb E|\widehat{u}(t,k)|^2$ admits an expansion with the first term being $n_{\mathrm{in}}(k)$, and (part of) the second term being
\begin{equation}\label{intro-duhamel}\alpha^2t\cdot L^{-2d}\sum_{k_1-k_2+k_3=k}n_{\mathrm{in}}(k_1)n_{\mathrm{in}}(k_2)n_{\mathrm{in}}(k_3)\cdot t\bigg|\frac{\sin(\pi\Omega t)}{\pi\Omega t}\bigg|^2;\quad \Omega:=\omega(k_1)-\omega(k_2)+\omega(k_3)-\omega(k),\end{equation} where $k_j\in (L^{-1}\mathbb{Z})^d$, due to a calculation of Duhamel iterations. At time $|t|\sim T_{\mathrm{kin}}\sim \alpha^{-2}$, and when $L\to\infty$ and $\alpha\to 0$, this expression formally matches one of the terms in the second iteration of (\ref{wke}) (cf. the first term in (\ref{wke2})), using the fact that $t|\sin(\pi \Omega t)/(\pi\Omega t)|^2\to\boldsymbol{\delta}(\Omega)$ as $t\to\infty$.
In order for this formal approximation to be legitimate, the one and only restriction is that {\bf the values of $\Omega$, as $k_j$ range over the lattice $\mathbb Z^d_L=(L^{-1}\mathbb Z)^d$, must be equidistributed at scale $T_{\mathrm{kin}}^{-1}\sim\alpha^2$.} In fact, suppose $|\alpha^2t|\sim 1$, then the convergence of (\ref{intro-duhamel}) is intimately tied to the bound \begin{equation}\label{intro-res}
\begin{aligned}&\qquad\qquad\qquad\qquad\frac{\#(A\cap (L^{-1}\mathbb{Z})^{2d})}{L^{2d}}\sim |t|^{-1}\sim \mathrm{Vol}(A),\mathrm{\ where}\\&A=\{(k_1,k_2)\in\mathbb R^{2d}:|k_1|,|k_2|\lesssim 1,\,\,|\omega(k_1)-\omega(k_2)+\omega(k+k_2-k_1)-\omega(k)|\lesssim |t|^{-1}\}. \end{aligned}\end{equation}
The bound \eqref{intro-res} follows from the convergence of \eqref{intro-duhamel}, if we replace the $L^1$ function $|\sin x/x|^2$ by a cutoff function, and similarly for $n_{\mathrm{in}}$. This means that the probability of a lattice point in $\mathbb Z^{2d}_L$ falling into the set $A$---the level set of the function $\Omega$---is proportional to the volume of $A$, which is exactly equidistribution of $\Omega$.
Another implication of the equidistribution property (\ref{intro-res}) is that
\begin{equation}\label{intro-res2}\#(A_0\cap(L^{-1}\mathbb Z)^d)\lesssim L^{2d}|t|^{-1}\end{equation}where $A_0$ is defined as $A$ above but with $\Omega=0$. In fact, the sets $A$ and $A_0$ are referred to as sets of \emph{quasi resonance} and \emph{exact resonance} by physicists, and the latter inequality just states that {\bf the contribution of exact resonances should be dominated by volume-counting estimates of quasi resonances in \eqref{intro-res}}. This is certainly necessary for the kinetic formalism to hold, and is consistent with the discussions in the physical literature. \subsubsection{Admissible scaling laws}\label{intro-admis} We say a scaling law $\gamma\in[0,\infty]$ is \emph{admissible}, if the above equidistribution property holds for $\alpha=L^{-\gamma}$ (equivalently $t=\alpha^{-2}\sim L^{2\gamma}$ in \eqref{intro-res}). Clearly, the range of admissibility depends on the precise properties of the dispersion relation $\omega$. Note that a \emph{sufficient} condition is given by
\[\frac{1}{L}\bigg|\frac{\partial \omega}{\partial k}\bigg|\lesssim \alpha^2\] which corresponds to $\gamma\leq 1/2$ \cite{Naz11, GBE22}: in this range, the equidistribution property holds for \emph{any} reasonably behaved dispersion relation $\omega$ without the need for any number theoretic arguments.
However, for a given (or a class of) dispersion relation $\omega$, the above sufficient condition is usually \emph{not necessary}. Specifying to the Schr\"{o}dinger case (\ref{nls}), one can see in multiple ways that the admissible range is in fact \fbox{$\gamma<1$} for arbitrary (including square) tori, and \fbox{$\gamma<d/2$} for tori satisfying a genericity assumption. For example, for the square torus one has $L^2\Omega\in\mathbb Z$, so $\Omega$ cannot be equidistributed at scales $\ll L^{-2}$. Alternatively, the cardinality of the exact resonance set $A_0$ can be shown to be $\sim L^{2d-2}$ for square tori and $\sim L^{d}$ under genericity assumption, which leads (using \eqref{intro-res2}) to the same range of $\gamma$. In fact, we shall see that different values of $\gamma\in(0,1)$ or $(0,d/2)$ represent a range of different physical and mathematical phenomena; see Section \ref{twoscaling} for two special cases.
For $d\geq 3$, the results of the authors' earlier work \cite{DH21} cover the range of scaling laws $\gamma\in(1-c,1)$ for arbitrary tori, where $c$ is a small dimensional constant, as well as $\gamma=1$ under a genericity assumption. The goal of the current work, as stated in Theorem \ref{main}, is to extend the results to the full range $\gamma\in(0,1)$ (we discuss the endpoint $\gamma=0$ in Section \ref{intro-endpoint}).
\subsubsection{Two important scaling laws}\label{twoscaling} For the Schr\"{o}dinger equation (\ref{nls}), there are two scaling laws of particular mathematical and physical interest. The first one is $\gamma=1$, so that $\alpha =L^{-1}$ and $T_{\mathrm{kin}}\sim L^2$. This is consistent with the natural parabolic scaling for (\ref{nls}), by which solutions to (\ref{nls}) on torus of size $L$ and at time scale $\sim L^2$ can be rescaled to solutions on the unit torus and at time $O(1)$; namely, if $u$ solves (\ref{nls}) and $v(t,x)=L^{1/2}u(L^2t,Lx)$ with $x\in\mathbb T^d$, then $v$ solves the equation $(i\partial_t-\Delta)v+|v|^2v=0$. This means that, the predictions of wave turbulence theory under this scaling law, can be translated into conclusions on the unit torus. In three dimensions, this is closely related to the famous Gibbs measure invariance problem for cubic NLS (i.e. invariance of the $\Phi_3^4$ measure under the Schr\"{o}dinger dynamics), which is the only Gibbs measure invariance problem that still remains open after the works \cite{Bou94,Bou96,BDNY22,DNY19,OT20,Zhi94}. In addition, energy cascade behavior for NLS can also be observed at the level of (\ref{wke}) \cite{EV15-2,Naz11}, and proving such cascade dynamics for the NLS equation on the unit torus is a problem of great interest \cite{Bou00}.
Another important scaling law is $\gamma=\frac12$ for which $T_{\mathrm{kin}}=L$. We may call this the \emph{ballistic scaling law} because it equates the kinetic timescale with the ballistic timescale needed for a wave packet at frequency $O(1)$ to traverse the domain $\mathbb T^d_L$. In some sense, this is analogous to the Boltzmann-Grad scaling law adopted in Lanford's theorem justifying the Boltzmann equation, in which the so-called mean-free path is also equated to the transport length scale.
It should be pointed out that such wave packet considerations are more relevant in the \emph{inhomogeneous} setting of the problem, where the initial field is not homogeneous in space as in \eqref{data}. An example of such data is when one sets \eqref{nls} on $\mathbb R^d$ with random data $u_{\mathrm{in}}(x)$ whose Wigner transform \begin{equation}\label{wigner}\mathbb{E}\bigg(\int_{\mathbb{R}^d}e^{iLy\cdot\eta}\,\overline{\widehat{u_{\mathrm{in}}}\big(\xi-\frac{\eta}{2}\big)}\widehat{u_{\mathrm{in}}}\big(\xi+\frac{\eta}{2}\big)\,\mathrm{d}\eta\bigg)\to W_0(y,\xi)\quad (\mathrm{as\ }L\to\infty), \end{equation} possibly in a weak sense, where $W_0=W_0(x,\xi):\mathbb{R}^d\times\mathbb{R}^d\to\mathbb{R}_{\geq 0}$ decays rapidly in $\xi$ and $x$. This is achieved, for example, by setting the random data as \begin{equation}\label{data0} u_{\mathrm{in}}(x)=L^{-\frac{d}{2}}\sum_{k\in(L^{-1}\mathbb{Z})^d}\psi\bigg(\frac{x}{L},k\bigg)\cdot g_k\cdot e^{ik\cdot x};\quad \psi(y,k)=\sqrt{W_0(y,k)}, \end{equation} which can be viewed as an inhomogeneous generalization of that in \eqref{data}. Then, the solution to (\ref{nls}) has the form \begin{equation}\label{solution} u(t,x)=L^{-\frac{d}{2}}\sum_{k\in(L^{-1}\mathbb{Z})^d}A\bigg(t,\frac{x}{L},k\bigg)\cdot e^{ik\cdot x}.
\end{equation} Denoting $N(t,y,k):=\mathbb{E}|A(t,y,k)|^2$, which corresponds to the Wigner transform of $u(t)$, and performing a formal expansion, we find that $N$ satisfies \begin{equation}\label{wke1}\partial_tN+\frac{1}{L}(k\cdot\nabla_y)N\approx\alpha^2 \mathcal{C}(N,N,N), \qquad N(0,y,k)=W_0(y,k). \end{equation} This gives the inhomogeneous wave kinetic equation provided one equates the transport timescale $L$ with the kinetic timescale $\alpha^{-2}$, which is the scaling law $\gamma=\frac12$ with $T_{\mathrm{kin}}=L$.
Note that, if one wants to view the homogeneous WKE as a limit of the inhomogeneous one, then one has to introduce an additional parameter to the data in \eqref{data0}, namely one measuring the scale of the inhomogeneity. This can be done by rescaling $W_0$, or equivalently by replacing $\psi(\frac{x}{L},k)$ with $\psi(\frac{x}{M},k)$ in \eqref{data0}, where $M$ is the new inhomogeneity scale. This leads to the flexibility of scaling laws in the homogeneous setting; in fact all the admissible scaling laws $\gamma \in (0, 1)$ described above arise as suitable limits with $L\to\infty$ and $M/L\to \infty$. \subsubsection{The scaling law $\gamma=0$}\label{intro-endpoint} Note that Theorem \ref{main} covers the full range of scaling laws $\gamma\in(0,1)$, except the endpoint $\gamma=0$. This endpoint does not seem to be compatible with the continuum setting; indeed, formally taking the $L\to\infty$ first will lead to (\ref{nls}) on $\mathbb R^d$ with initial data \[u(0,x)=u_{\mathrm{in}}^\infty(x);\quad \mathbb E(u_{\mathrm{in}}^\infty(x)\overline{u_{\mathrm{in}}^\infty(y)})=(\mathcal F^{-1}n_{\mathrm{in}})(x-y),\] which is a Gaussian random field with covariance operator $n_{\mathrm{in}}(\nabla/i)$ \emph{that has uniform strength at every point of $\mathbb{R}^d$}. In particular, this initial data, and any possible remainder term that may occur, belongs only to $L^\infty(\mathbb R^d)$ (with logarithmic growth at infinity). However, for $L^\infty$ data, there is no known solution theory to (\ref{nls}) (or even the linear Schr\"odinger equation) in any function space, due to infinite speed of propagation and the unboundedness of the linear propagator $e^{it\Delta}$.
Nevertheless, in the \emph{discrete} setting where $\Delta$ is replaced by a discrete difference operator, it is completely plausible to solve (\ref{nls}) in (weighted) $L^\infty$, so in this case $\gamma=0$ is a compatible scaling law, and the corresponding justification of (\ref{wke}) for $\gamma=0$ may be possible \cite{LV22}. \begin{rem}\label{scaling0} In some early physical literature, the limiting procedure was described as ``the $L\to\infty$ limit should be taken before the $\alpha\to 0$ limit, and not after". This should not be understood as these two limits being taken independently; rather, it simply means that the rate $L\to\infty$ should not be slower than that of $\alpha\to 0$. In other words, we must have $\alpha L^{\gamma_0}\to\infty\Leftrightarrow \alpha\gg L^{-\gamma_0}$, or $\gamma<\gamma_0$ in the context of scaling laws, where $\gamma_0$ is a constant depending on the setting of the problem. This is clearly consistent with all the above discussions. \end{rem} \subsection{Ingredients of the proof} We briefly describe here the main difficulties and new ingredients in the proof of Theorem \ref{main}; see Section \ref{overview} for a more substantial description, as that requires the notations set up in Section \ref{setup}.
While the general methodology here follows that in \cite{DH21} which dealt with the scaling law $\gamma=1$, fundamentally new structures and ideas appear for scaling laws $\gamma<2/3$ as we shall explain below. The analysis of these new structures requires introducing new ideas to isolate, analyze, and uncover novel cancellations between some of them. Moreover, it requires upgrading our previous combinatorial algorithm to a much more robust and streamlined apparatus.
The first steps of the proof of Theorem \ref{main} are essentially the same as in \cite{DH21}: one expands the solution $u$ to (\ref{nls}) into terms indexed by ternary trees, which allows to express the correlations of these terms using \emph{couples}. These couples (which are the Feynman diagrams in this game) are pairs of trees whose leafs are completely paired to each other. The analysis of such couples goes through parallel analytical and combinatorial approaches. The leading couples, which we call \emph{regular couples}, are studied and computed analytically to isolate from them the iterates of (\ref{wke}). It then suffices to show that the contribution of non-regular couples is of lower order. Here, the novel idea of \emph{molecules} was introduced in \cite{DH21} to study the combinatorial problems associated with non-regular couples. This molecular picture will prove to be even more indispensable in this paper.
The same algorithm used in \cite{DH21} to analyze these molecules breaks down, as soon as $\gamma<2/3$. On a superficial technical level, this is due to the failure of a particular two-vector counting estimate (namely the $q=2$ case of (\ref{atomiccount})). However, this breakdown is much more fundamental and cannot be saved by simply modifying the algorithm. Indeed, when $\gamma<2/3$, the molecule may contain new bad structures (in fact \emph{multiple families} of them) other than those already observed in \cite{DH21}. {\bf Such bad structures are harmless at scaling laws $\gamma>2/3$, but can overtake the leading terms for $\gamma<2/3$.} Note that this difficulty is of a very different nature from that of \cite{DH21}, which mainly revolves around overcoming the factorial divergence caused by \emph{generic} molecules. While this is still a problem here, the extra difficulty imposed by these \emph{special} bad structures requires substantially new ideas beyond the proof in \cite{DH21}.
The strategy here is to first (i) identify all the possible bad structures---there are \emph{eight} families of them that we call \emph{vines} (Figure \ref{fig:vines}), then (ii) recover a good estimate for any molecule absent of these bad structures (in the form of a \emph{rigidity theorem} similar to Proposition 9.10 of \cite{DH21}), and finally (iii) control the contribution of these bad structures.
Parts (i)--(ii) can in fact be done together at the level of the molecule picture, by introducing a powerful new operation that is absent in the algorithm of \cite{DH21}, called the \emph{cutting} operation (Figure \ref{fig:cutintro}). This seemingly simple operation allows us to \emph{isolate} all the possible bad scenarios into ``local'' post-surgery connected components, and locate only finitely many families of connected components that are problematic. It is precisely this small addition that leads to the complete classification of bad structures in this paper, namely the vines. We believe that this is the one missing piece in the algorithm of \cite{DH21} that makes it much more robust. We also notice that the combinatorial difficulty caused by vines is not specific to the (NLS) case, but is actually universal (at least in the $4$-wave setting) independent of dispersion relation and multilinear multipliers. As such, we believe that the algorithm in \cite{DH21}, equipped with the cutting operation, should be directly applicable in many other settings.
Part (iii) of this plan, which is another main novelty of this work, relies on extremely delicate, and somewhat miraculous, cancellations observed between the bad structures identified in (i). Indeed, starting from these bad structures identified at the level of the molecule, one can reconstruct the various possibilities of couples that have this same molecular structure. These couples, which may have arbitrarily large size, can be grouped into pairs defined as \emph{twists} of each other (Figures \ref{fig:vinescancel} and \ref{fig:twist_dec}). The cancellation structure is then found by studying expressions associated with couples that are twists of each other. It is worth mentioning that this cancellation is so involved and intricate that there is little-to-no chance of uncovering it if one only looks at the couple picture, and does not turn to the molecule picture (cf. Figure \ref{fig:flowchart}). This strongly suggests that the molecules introduced in \cite{DH21} are fundamental objects, and not mere technical tools.
To the best of our knowledge, the cancellation identified in this paper has not appeared in earlier mathematical or physical literature (such structures only become significant in higher order terms, so it is not surprising that they do not play a role in the formal derivations of physicists that only involve second order expansions). Therefore, we believe that the ingredients of this paper and \cite{DH21}---including cancellations of vine structures and the algorithm in \cite{DH21} with cutting---constitute {\bf the next major step beyond \cite{EY00,ESY08,LS11} in the study of Feynman diagrams. This development allows us to effectively estimate diagrams of much higher order than those in \cite{EY00,ESY08,LS11},} which results in the proof of Theorem \ref{main} in the non-equilibrium setting and without noise.
Finally, we remark that this new Feynman diagram analysis is robust enough to be applicable in a wide range of semilinear dispersive equations. The only major difference for other dispersion relations $\omega$ would be the equidistribution property (\ref{intro-res}), which may restrict the range of scaling laws $\gamma$ depending on the fine number theoretic properties of $\omega$. However, these number theoretic ingredients are only needed when $\gamma>\frac12$; for $\gamma\leq 1/2$, we expect that results like Theorem \ref{main} should hold for arbitrary $\omega$, as demonstrated in Section \ref{intro-admis}. \subsection{Future Horizons} We conclude this introduction by listing what we believe to be some of the next major frontiers in this line of research, after the resolution (here and in \cite{DH21, DH21-2}) of the first fundamental question, namely the rigorous justification of the wave kinetic theory.
(1) \emph{Longer times}: the obvious question after Theorem \ref{main} would be whether the same result can be extended to time $|t|\leq C\cdot T_{\mathrm{kin}}$ for constants $C\gg1$. This is a tremendous open problem, and its resolution is unknown not only in the wave turbulence setting, but also in the classical particle setting of Lanford's theorem justifying Boltzmann's equation. Note that (\ref{wke}) may have finite time blowup (which is even expected to be generic, see \cite{EV15,EV15-2}), so the best one can hope for, in terms of the approximation (\ref{limit}), would be the following conjecture: \begin{itemize}
\item Suppose the solution to (\ref{wke}) stays smooth up to time $\tau$, then the approximation (\ref{limit}) holds for all time $|t|\leq\tau\cdot T_{\mathrm{kin}}$. \end{itemize}
Answering this conjecture is highly challenging, and would require ideas and techniques completely different from the current and earlier works. Moreover, a positive answer would have profound implications on the study of long-time dynamics of (\ref{nls}), especially on energy cascades.
(2) \emph{Post-singularity dynamics}: Suppose that the conjecture in (1) has been proved or is assumed to be true. Moreover, suppose a specific solution to (\ref{wke}) exhibits a $\boldsymbol{\delta}$ singularity at a particular time $\tau_0$. The analysis in \cite{EV15, EV15-2} suggests that such singularity formation is somewhat generic (formation of condensate). Then we may ask the following question: what is the asymptotic behavior of
\[\mathbb E|\widehat{u}(\tau_0\cdot T_{\mathrm{kin}},0)|^2?\]
In other words, can one prove rigorously the dynamical formation of condensate for (\ref{nls})? More interestingly, for $\tau>\tau_0$, can one still track the macroscopic behavior of $\mathbb E|\widehat{u}(\tau\cdot T_{\mathrm{kin}},k)|^2$? Does it converge to a finite limit? If so, can it be defined as a weak solution to (\ref{wke}) in some sense? If not, then should we somehow modify (\ref{nls}) (and/or the wave kinetic equation) beyond the time $\tau_0\cdot T_{\mathrm{kin}}$, in view of the condensate formed for (\ref{wke}) at $\tau_0$? These questions may be even more challenging than the conjecture in (1), but their resolution would bring new insights, both physical and mathematical, to the study of (\ref{nls}) and its condensates.
(3) \emph{Properties of solutions to (\ref{wke})}: turning now to the solution theory to (\ref{wke}), an important question is to describe more precisely the formation of condensate \cite{EV15,EV15-2}, and perhaps justify its genericity, for sufficiently strong classes of solutions. Another avenue of immense physical interest is to rigorously study solutions that may asymptote to (or resemble in some meaningful sense) the Zakharov spectra, see for example \cite{CDG22} for a step in this direction. These specific solutions, when combined with possible results in (1) and (2), may lead to the discovery of very interesting behavior of solutions to (\ref{nls}).
One may also consider the inhomogeneous version of (\ref{wke}), whose derivation is expected to be similar to (\ref{wke}) with only technical differences. However, solutions to the inhomogeneous (\ref{wke}) may behave quite differently, for the transport term may prevent blowup. If these solutions exhibit diffusive behavior for long times, this may lead to a nonlinear version of the quantum diffusion behavior described in Erd\H{o}s-Salmhofer-Yau \cite{ESY08} in the linear setting.
\section{Preparations}\label{setup}
\subsection{Preliminary reductions}Start from the equation (\ref{nls}), let $u$ be a solution, and recall $\alpha=L^{-\gamma}$. Let $M=\fint|u|^2$ be the conserved mass of $u$ (where $\fint$ takes the average on $\mathbb T_L^d$), and define $v:=e^{-2iL^{-\gamma} Mt}\cdot u$, then $v$ satisfies the Wick ordered equation
\begin{equation}(i\partial_t-\Delta)v+L^{-\gamma}\bigg(|v|^2v-2\fint|v|^2\cdot v\bigg)=0. \end{equation} By switching to Fourier space, rescaling in time and reverting the linear Schr\"{o}dinger flow, we define
\begin{equation}a_k(t)=e^{-\pi i\cdot\delta L^{2\gamma}|k|^2t}\cdot\widehat{v}( \delta T_{\mathrm{kin}}\cdot t,k) \end{equation} with $\widehat{v}$ as in (\ref{fourier}), then $\textit{\textbf{a}}:=a_k(t)$ will satisfy the equation
\begin{equation}\label{akeqn}
\left\{ \begin{aligned} \partial_ta_k &= \mathcal C_+(\textit{\textbf{a}},\overline{\textit{\textbf{a}}},\textit{\textbf{a}})_k(t),\\ a_k(0) &=(a_k)_{\mathrm{in}}=\sqrt{n_{\mathrm{in}}(k)}g_k(\omega), \end{aligned} \right. \end{equation} with the nonlinearity \begin{equation}\label{akeqn2} \mathcal C_\zeta(\textit{\textbf{f}},\textit{\textbf{g}},\textit{\textbf{h}})_k(t):=\frac{\delta}{2L^{d-\gamma}}\cdot(i\zeta)\sum_{k_1-k_2+k_3=k}\epsilon_{k_1k_2k_3} e^{\zeta\pi i\cdot\delta L^{2\gamma}\Omega(k_1,k_2,k_3,k)t}f_{k_1}(t)g_{k_2}(t)h_{k_3}(t). \end{equation}for $\zeta\in\{\pm\}$. Here in (\ref{akeqn2}) and below, the summation is taken over $(k_1,k_2,k_3)\in(\mathbb Z_L^d)^3$, and \begin{equation}\label{defcoef0}\epsilon_{k_1k_2k_3}= \left\{ \begin{aligned}+&1,&&\mathrm{if\ }k_2\not\in\{k_1,k_3\};\\ -&1,&&\mathrm{if\ }k_1=k_2=k_3;\\ &0,&&\mathrm{otherwise}, \end{aligned} \right.\end{equation} and the resonance factor \begin{equation}\label{res}
\Omega=\Omega(k_1,k_2,k_3,k):=|k_1|^2-|k_2|^2+|k_3|^2-|k|^2=2\langle k_1-k,k-k_3\rangle.\end{equation} Note that $\epsilon_{k_1k_2k_3}$ is always supported in the set \begin{equation}\label{defset}\mathfrak S:=\big\{(k_1,k_2,k_3):\mathrm{\ either\ }k_2\not\in\{k_1,k_3\},\mathrm{\ or\ }k_1=k_2=k_3\big\}.\end{equation}
The rest of this paper is focused on the system (\ref{akeqn})--(\ref{akeqn2}) for $\textit{\textbf{a}}$, with the relevant terms defined in (\ref{defcoef0})--(\ref{res}), in the time interval $t\in[0,1]$. \subsection{Parameters, notations and norms}\label{norms} In this subsection we list some notations and fix some parameters that will be useful below. Recall that $d\geq 3$ and $0<\gamma<1$, and Schwartz data $n_{\mathrm{in}}$ are fixed. Define \begin{equation}\label{othergamma}\gamma_0:=\min(\gamma,1-\gamma),\quad \gamma_1:=\min(2\gamma,1,2(d-1)(1-\gamma)).\end{equation} Fix $\eta$ as a small absolute constant such that $\eta\ll_{d,\gamma}1$, and let $C$ be any large constant depending on $(d,\gamma)$ and $\eta$. Let also $C^+$ be any large constant depending on $C$ and $n_{\mathrm{in}}$, and fix $\delta$ as a small constant such that $\delta\ll_{C^+}1$. Unless otherwise stated, the implicit constants in $\lesssim$ symbols may depend on $C^+$, but those in $O(\cdot)$ symbols depend only on $C$. Let $L$ be large enough depending on $\delta$, and define $N=\lfloor(\log L)^4\rfloor$.
Let $\chi_0=\chi_0(z)\in C^\infty(\mathbb R\to\mathbb R_{\geq 0})$ be such that $\chi_0=1$ for $|z|\leq 1/2$ and $\chi_0=0$ for $|z|\geq 1$; define $\chi_0(z^1,\cdots,z^d)=\chi_0(z^1)\cdots \chi_0(z^d)$ and $\chi_\infty=1-\chi_0$, where $z^j$ are coordinates of vectors $z\in\mathbb R^d$ (we use this notation throughout). By abusing notation, sometimes we may also use $\chi_0$ to denote other cutoff functions with slightly different supports. These functions, as well as the other cutoff functions, will be in Gevrey class $2$ (i.e. the $k$-th order derivatives are bounded by $(2k)!$). For a multi-index $\rho=(\rho_1,\cdots,\rho_m)$, we adopt the usual notations $|\rho|=\rho_1+\cdots+\rho_m$ and $\rho!=(\rho_1)!\cdots(\rho_m)!$, etc. For an index set $A$, we use the vector notation $\alpha[A]=(\alpha_j)_{j\in A}$ and $\mathrm{d}\alpha[A]=\prod_{j\in A}\mathrm{d}\alpha_j$, etc.
Denote $z^+=z$ for a complex number $z$, and $z^-=\overline{z}$. In the rest of this paper, we will not use the space Fourier transform notation as in (\ref{fourier}). We will use $\widehat{\cdot}$ only for the time Fourier transform, which is defined as \[\widehat{u}(\lambda)=\int_\mathbb R u(t) e^{-2\pi i\lambda t}\,\mathrm{d}t,\quad u(t)=\int_\mathbb R \widehat{u}(\lambda)e^{2\pi i\lambda t}\,\mathrm{d}\lambda,\] and similarly for higher dimensional versions. If a function $F=F(t_j,k_j)$ depends on several time variables $t_j$ and several vector variables $k_j$, we shall define its $X^{\theta,\beta}$ norm by \[\|F\|_{X^{\theta,\beta}}=\int\big(\max_j\langle\lambda_j\rangle\big)^{\theta}\cdot\bigg[\sup_{k_j}\big(\max_j\langle k_j\rangle\big)^{\beta}|\widehat{F}(\lambda_j,k_j)|\bigg]\,\prod_j\mathrm{d}\lambda_j,\] If $F=F(t_j)$ does not depend on any $k_j$, the norms are modified accordingly; they do not depend on $\beta$ so we call it $X^\theta$. Define the localized version $X_{\mathrm{loc}}^{\theta,\beta}$, and associated auxiliary $Y_{\mathrm{loc}}^\theta$ norm, by
\[\|F\|_{X_{\mathrm{loc}}^{\theta,\beta}}=\inf\big\{\|\widetilde{F}\|_{X^{\theta,\beta}}:\widetilde{F}=F\mathrm{\ for\ }0\leq t_j\leq 1\big\},\quad\|F\|_{Y_{\mathrm{loc}}^\theta}:=\sup_{(k_j^0)}\|F\cdot\mathbf{1}_{|k_j-k_j^0|\leq 1\,(\forall j)}\|_{X_{\mathrm{loc}}^{\theta,0}}.\] If we will only use the value of $F$ in some subset (for example $\{t_1>t_2\}$, see the second part of Proposition \ref{regcpltreeasymp}), then in the above definition we may only require $\widetilde{F}=F$ in this set. Finally, define the $Z$ norm for function $a=a_k(t)$,
\begin{equation}\label{defznorm}\|a\|_Z^2=\sup_{0\leq t\leq 1}L^{-d}\sum_{k\in\mathbb Z_L^d}\langle k\rangle^{10d}|a_k(t)|^2 \end{equation}All these norms are readily extended to Banach space valued functions. \subsection{Trees, couples and decorations} Recall the notions of trees, couples and decorations, which are defined in \cite{DH21}. \begin{df}[Trees]\label{deftree} A \emph{ternary tree} $\mathcal T$ (we will simply say a \emph{tree} below) is a rooted tree where each non-leaf (or \emph{branching}) node has exactly three children nodes, which we shall distinguish as the \emph{left}, \emph{mid} and \emph{right} ones. A node $\mathfrak m$ is a \emph{descendant} of a node $\mathfrak n$, or $\mathfrak n$ is an \emph{ancestor} of $\mathfrak m$, if $\mathfrak m$ belongs to the subtree rooted at $\mathfrak n$ (we allow $\mathfrak m=\mathfrak n$). We say $\mathcal T$ is \emph{trivial} (and write $\mathcal T=\bullet$) if it has only the root, in which case this root is also viewed as a leaf.
We denote generic nodes by $\mathfrak n$, generic leaves by $\mathfrak l$, the root by $\mathfrak r$, the set of leaves by $\mathcal L$ and the set of branching nodes by $\mathcal N$. The \emph{order} of a tree $\mathcal T$ is defined by $n(\mathcal T)=|\mathcal N|$ (this is called scale in \cite{DH21}), so if $n(\mathcal T)=n$ then $|\mathcal L|=2n+1$ and $|\mathcal T|=3n+1$.
A tree $\mathcal T$ may have sign $+$ or $-$. If its sign is fixed then we decide the signs of its nodes as follows: the root $\mathfrak r$ has the same sign as $\mathcal T$, and for any branching node $\mathfrak n\in\mathcal N$, the signs of the three children nodes of $\mathfrak n$ from left to right are $(\zeta,-\zeta,\zeta)$ if $\mathfrak n$ has sign $\zeta\in\{\pm\}$. Once the sign of $\mathcal T$ is fixed, we will denote the sign of $\mathfrak n\in\mathcal T$ by $\zeta_\mathfrak n$. Define $\zeta(\mathcal T)=\prod_{\mathfrak n\in\mathcal N}(i\zeta_\mathfrak n)$. We also define the conjugate $\overline{\mathcal T}$ of a tree $\mathcal T$ to be the same tree but with opposite sign. \end{df} \begin{df}[Couples]\label{defcouple} A \emph{couple} $\mathcal Q$ is an unordered pair $\{\mathcal T^+,\mathcal T^-\}$ of two trees $\mathcal T^\pm$ with signs $+$ and $-$ respectively, together with a partition $\mathscr P$ of the set $\mathcal L^+\cup\mathcal L^-$ into $(n+1)$ pairwise disjoint two-element subsets, where $\mathcal L^\pm$ is the set of leaves for $\mathcal T^\pm$, and $n=n^++n^-$ where $n^\pm$ is the order of $\mathcal T^\pm$. This $n$ is also called the \emph{order} of $\mathcal Q$, denoted by $n(\mathcal Q)$. The subsets $\{\mathfrak l,\mathfrak l'\}\in\mathscr P$ are referred to as \emph{pairs}, and we require that $\zeta_{\mathfrak l'}=-\zeta_\mathfrak l$, i.e. the signs of paired leaves must be opposite. If both $\mathcal T^\pm$ are trivial, we call $\mathcal Q$ the \emph{trivial couple} (and write $\mathcal Q=\times$).
For a couple $\mathcal Q=\{\mathcal T^+,\mathcal T^-,\mathscr P\}$ we denote the set of branching nodes by $\mathcal N=\mathcal N^+\cup\mathcal N^-$, and the set of leaves by $\mathcal L=\mathcal L^+\cup\mathcal L^-$; for simplicity we will abuse notation and write $\mathcal Q=\mathcal T^+\cup\mathcal T^-$. Define $\zeta(\mathcal Q)=\prod_{\mathfrak n\in\mathcal N}(i\zeta_\mathfrak n)$. We also define a \emph{paired tree} to be a tree where \emph{some} leaves are paired to each other, according to the same pairing rule for couples. We say a paired tree is \emph{saturated} if there is only one unpaired leaf (called the \emph{lone leaf}). In this case the tree forms a couple with the trivial tree $\bullet$. Finally, we define the conjugate of a couple $\mathcal Q=\{\mathcal T^+,\mathcal T^-\}$ as $\overline{\mathcal Q}=\{\overline{\mathcal T^-},\overline{\mathcal T^+}\}$ with the same pairings; for a paired tree $\mathcal T$ we also define its conjugate as $\overline{\mathcal T}$ with the same pairings, where $\overline{\mathcal T}$ is as in Definition \ref{deftree}. \end{df} \begin{df}[Decorations]\label{defdec} A \emph{decoration} $\mathscr D$ of a tree $\mathcal T$ is a set of vectors $(k_\mathfrak n)_{\mathfrak n\in\mathcal T}$, such that $k_\mathfrak n\in\mathbb Z_L^d$ for each node $\mathfrak n$, and that \[k_\mathfrak n=k_{\mathfrak n_1}-k_{\mathfrak n_2}+k_{\mathfrak n_3},\quad \mathrm{or\ equivalently}\quad \zeta_\mathfrak n k_\mathfrak n=\zeta_{\mathfrak n_1}k_{\mathfrak n_1}+\zeta_{\mathfrak n_2}k_{\mathfrak n_2}+\zeta_{\mathfrak n_3}k_{\mathfrak n_3},\] for each branching node $\mathfrak n\in\mathcal N$, where $\zeta_\mathfrak n$ is the sign of $\mathfrak n$ as in Definition \ref{deftree}, and $\mathfrak n_1,\mathfrak n_2,\mathfrak n_3$ are the three children nodes of $\mathfrak n$ from left to right. Clearly a decoration $\mathscr D$ is uniquely determined by the values of $(k_\mathfrak l)_{\mathfrak l\in\mathcal L}$. 
For $k\in\mathbb Z_L^d$, we say $\mathscr D$ is a $k$-decoration if $k_\mathfrak r=k$ for the root $\mathfrak r$.
Given a decoration $\mathscr D$, we define the coefficient \begin{equation}\label{defcoef}\epsilon_\mathscr D:=\prod_{\mathfrak n\in\mathcal N}\epsilon_{k_{\mathfrak n_1}k_{\mathfrak n_2}k_{\mathfrak n_3}}\end{equation} where $\epsilon_{k_1k_2k_3}$ is as in (\ref{defcoef0}). Note that in the support of $\epsilon_\mathscr D$ we have that $(k_{\mathfrak n_1},k_{\mathfrak n_2},k_{\mathfrak n_3})\in\mathfrak S$ for each $\mathfrak n\in\mathcal N$. We also define the resonance factor $\Omega_\mathfrak n$ for each $\mathfrak n\in\mathcal N$ by
\begin{equation}\label{defres}\Omega_\mathfrak n=\Omega(k_{\mathfrak n_1},k_{\mathfrak n_2},k_{\mathfrak n_3},k_\mathfrak n)=|k_{\mathfrak n_1}|^2-|k_{\mathfrak n_2}|^2+|k_{\mathfrak n_3}|^2-|k_\mathfrak n|^2.\end{equation}
A decoration $\mathscr E$ of a couple $\mathcal Q=\{\mathcal T^+,\mathcal T^-,\mathscr P\}$, is a set of vectors $(k_\mathfrak n)_{\mathfrak n\in\mathcal Q}$, such that $\mathscr D^\pm:=(k_\mathfrak n)_{\mathfrak n\in\mathcal T^\pm}$ is a decoration of $\mathcal T^\pm$, and moreover $k_\mathfrak l=k_{\mathfrak l'}$ for each pair $\{\mathfrak l,\mathfrak l'\}\in\mathscr P$. We define $\epsilon_\mathscr E:=\epsilon_{\mathscr D^+}\epsilon_{\mathscr D^-}$, and define the resonance factors $\Omega_\mathfrak n$ for $\mathfrak n\in\mathcal N$ as in (\ref{defres}). Note that we must have $k_{\mathfrak r^+}=k_{\mathfrak r^-}$ where $\mathfrak r^\pm$ is the root of $\mathcal T^\pm$; again we say $\mathscr E$ is a $k$-decoration if $k_{\mathfrak r^+}=k_{\mathfrak r^-}=k$. We also define decorations $\mathscr D$ of paired trees, as well as $\epsilon_\mathscr D$ and $\Omega_\mathfrak n$ etc., similar to the above (except that we don't pair all leaves). \end{df} \subsection{The ansatz and main estimates} We now state the ansatz for the solution $\textit{\textbf{a}}$ to the system (\ref{akeqn})--(\ref{akeqn2}), as well as the main estimates. 
\subsubsection{The expressions $\Jc_\mathcal T$ and $\mathcal K_\mathcal Q$} For any tree $\mathcal T$ of order $n$, define the expression \begin{equation}\label{defjt}(\Jc_\mathcal T)_k(t)=\bigg(\frac{\delta}{2L^{d-\gamma}}\bigg)^n\zeta(\mathcal T)\sum_\mathscr D\epsilon_\mathscr D\cdot\int_\mathcal D\prod_{\mathfrak n\in\mathcal N}e^{\zeta_\mathfrak n\pi i\cdot\delta L^{2\gamma}\Omega_\mathfrak n t_\mathfrak n}\,\mathrm{d}t_\mathfrak n\cdot\prod_{\mathfrak l\in\mathcal L}\sqrt{n_{\mathrm{in}}(k_\mathfrak l)}\eta_{k_\mathfrak l}^{\zeta_\mathfrak l}(\omega) \end{equation} where the sum is taken over all $k$-decorations $\mathscr D$ of $\mathcal T$, and the domain \begin{equation}\label{defdomaind}\mathcal D=\big\{t[\mathcal N]:0<t_{\mathfrak n'}<t_\mathfrak n<t\mathrm{\ whenever\ }\mathfrak n'\mathrm{\ is\ a\ child\ node\ of\ }\mathfrak n\big\}. \end{equation} For any couple $\mathcal Q$ of order $n$, define the expression \begin{equation}\label{defkq}\mathcal K_\mathcal Q(t,s,k)=\bigg(\frac{\delta}{2L^{d-\gamma}}\bigg)^n\zeta(\mathcal Q)\sum_\mathscr E\epsilon_\mathscr E\cdot\int_\mathcal E\prod_{\mathfrak n\in\mathcal N}e^{\zeta_\mathfrak n\pi i\cdot\delta L^{2\gamma}\Omega_\mathfrak n t_\mathfrak n}\,\mathrm{d}t_\mathfrak n\cdot\prod_{\mathfrak l\in\mathcal L}^{(+)}n_{\mathrm{in}}(k_\mathfrak l), \end{equation} where the sum is taken over all $k$-decorations $\mathscr E$ of $\mathcal Q$, the product $\prod_{\mathfrak l\in\mathcal L}^{(+)}$ is taken over all leaves $\mathfrak l\in\mathcal L$ with $+$ sign, and the domain \begin{multline}\label{defdomaine}\mathcal E=\big\{t[\mathcal N]:0<t_{\mathfrak n'}<t_\mathfrak n\mathrm{\ whenever\ }\mathfrak n'\mathrm{\ is\ a\ child\ node\ of\ }\mathfrak n;\\t_\mathfrak n<t\mathrm{\ whenever\ }\mathfrak n\in\mathcal N^+\mathrm{\ and\ }t_\mathfrak n<s\mathrm{\ whenever\ }\mathfrak n\in\mathcal N^-\big\}. 
\end{multline} \subsubsection{The ansatz for $a_k(t)$} Let \begin{equation}\label{defjn}(\Jc_n)_k(t)=\sum_{n(\mathcal T^+)=n}(\Jc_{\mathcal T^+})_k(t) \end{equation} where the sum is taken over all trees $\mathcal T^+$ of order $n$ and sign $+$, and define $\textit{\textbf{b}}=b_k(t)$ by \begin{equation}\label{defb}a_k(t)=\sum_{0\leq n\leq N}(\Jc_n)_k(t)+b_k(t), \end{equation} where $N=\lfloor(\log L)^4\rfloor$ as defined in Section \ref{norms}. Then $\textit{\textbf{b}}$ satisfies an equation of form \begin{equation}\label{eqnbk}\textit{\textbf{b}}=\mathcal R+\mathscr L \textit{\textbf{b}}+\mathscr L_2(\textit{\textbf{b}},\textit{\textbf{b}})+\mathscr L_3(\textit{\textbf{b}},\textit{\textbf{b}},\textit{\textbf{b}}), \end{equation} or equivalently \begin{equation}\label{eqnbk2}\textit{\textbf{b}}=(1-\mathscr L)^{-1}(\mathcal R+\mathscr L_2(\textit{\textbf{b}},\textit{\textbf{b}})+\mathscr L_3(\textit{\textbf{b}},\textit{\textbf{b}},\textit{\textbf{b}})), \end{equation}where the relevant terms are defined as \begin{equation}\label{eqnbk1.5}\mathcal R=\sum_{(0)}\mathcal I\mathcal C_+(\textit{\textbf{u}},\overline{\textit{\textbf{v}}},\textit{\textbf{w}}),\quad\mathscr L \textit{\textbf{b}}=\sum_{(1)}\mathcal I\mathcal C_+(\textit{\textbf{u}},\overline{\textit{\textbf{v}}},\textit{\textbf{w}}),\quad \mathscr L_2(\textit{\textbf{b}},\textit{\textbf{b}})=\sum_{(2)}\mathcal I\mathcal C_+(\textit{\textbf{u}},\overline{\textit{\textbf{v}}},\textit{\textbf{w}}),\end{equation} and $\mathscr L_3(\textit{\textbf{b}},\textit{\textbf{b}},\textit{\textbf{b}})=\mathcal I\mathcal C_+(\textit{\textbf{b}},\overline{\textit{\textbf{b}}},\textit{\textbf{b}})$. The sums in (\ref{eqnbk1.5}) are taken over $(\textit{\textbf{u}},\textit{\textbf{v}},\textit{\textbf{w}})$, each of which being either $\textit{\textbf{b}}$ or $\Jc_n$ for some $0\leq n\leq N$. 
In the sum $\sum_{(j)}$ for $0\leq j\leq 2$, exactly $j$ inputs in $(\textit{\textbf{u}},\textit{\textbf{v}},\textit{\textbf{w}})$ equal $\textit{\textbf{b}}$, and in the sum $\sum_{(0)}$ we require that $(\textit{\textbf{u}},\textit{\textbf{v}},\textit{\textbf{w}})=(\Jc_{n_1},\Jc_{n_2},\Jc_{n_3})$ with $n_1+n_2+n_3\geq N$. Note that $\mathscr L$, $\mathscr L_2$ and $\mathscr L_3$ are $\mathbb R$-linear, $\mathbb R$-bilinear and $\mathbb R$-trilinear operators respectively. \subsubsection{Correlations, and expansion of (\ref{wke})} For any $n_1,n_2\geq 0$, by using Isserlis' theorem as in Section 2.2.3 of \cite{DH21}, we have that \begin{equation}\label{correlation}\mathbb E\big((\Jc_{n_1})_{k}(t)\overline{(\Jc_{n_2})_k(t)}\big)=\sum_{\mathcal Q}\mathcal K_\mathcal Q(t,t,k),\end{equation} where the summation is taken over all couples $\mathcal Q=\{\mathcal T^+,\mathcal T^-\}$ such that $n(\mathcal T^+)=n_1$ and $n(\mathcal T^-)=n_2$ (the partition $\mathscr P$ can be arbitrary).
Now consider the equation (\ref{wke}). Due to the smallness of $\delta$, the solution $n=n(t,k)$ to (\ref{wke}) has the Taylor expansion \begin{equation}\label{wketaylor}n(\delta t,k)=\sum_{n=0}^\infty\mathcal M_n(t,k), \end{equation} where $\mathcal M_n(t,k)$ is defined such that \begin{equation}\label{wketaylor2}\mathcal M_0(t,k)=n_{\mathrm{in}}(k),\quad \mathcal M_{n}(t,k)=\delta\sum_{n_1+n_2+n_3=n-1}\int_0^t\mathcal K(\mathcal M_{n_1}(t'),\mathcal M_{n_2}(t'),\mathcal M_{n_3}(t'))(k)\,\mathrm{d}t'.
\end{equation} It is easy to see that $|\mathcal M_n(t,k)|\lesssim\langle k\rangle^{-20d}(C^+\delta)^n$ uniformly in $(t,k)$. \subsubsection{The main estimates} The following are the main estimates of this paper. Their proofs will occupy up to Section \ref{linoper1}; once they are proved, Theorem \ref{main} will then be proved in Section \ref{linoper2}, similar to Section 12 of \cite{DH21}. \begin{prop}\label{mainprop1}Let $\mathcal K_\mathcal Q$ be defined in (\ref{defkq}). Then for each $0\leq n\leq N^2$, $k\in\mathbb Z_L^d$ and $t\in[0,1]$ we have
\begin{equation}\label{mainest1}\bigg|\sum_{\mathcal Q}\mathcal K_\mathcal Q(t,t,k)\bigg|\lesssim\langle k\rangle^{-20d}(C^+\sqrt{\delta})^n, \end{equation} where the summation is taken over all couples $\mathcal Q=\{\mathcal T^+,\mathcal T^-\}$ such that $n(\mathcal T^+)=n(\mathcal T^-)=n$. \end{prop} \begin{prop}\label{mainprop4} Let $\mathcal M_n(t,k)$ be defined as in (\ref{wketaylor2}). Then for each $0\leq n\leq N^2$, $k\in\mathbb Z_L^d$ and $t\in[0,1]$, we have that
\begin{equation}\label{mainest1.5}\bigg|\sum_{n(\mathcal Q)=2n}\mathcal K_\mathcal Q(t,t,k)-\mathcal M_n(t,k)\bigg|\lesssim\langle k\rangle^{-20d} (C^+\sqrt{\delta})^n L^{-\eta^8},\end{equation} where the summation is taken over all couples $\mathcal Q$ of order $2n$. If $2n$ is replaced by $2n+1$, then the same result holds with $\mathcal M_n(t,k)$ replaced by $0$. \end{prop} \begin{prop}\label{mainprop2} For any $\mathbb R$-linear operator $\mathscr K$ define its kernel $\mathscr K_{kk'}^\zeta(t,s)$ (which we assume is supported in $t,s\in[0,1]$ and $t>s$), where $\zeta\in\{\pm\}$, such that \begin{equation}\label{parametrix}(\mathscr K\textit{\textbf{b}})_k(t)=\sum_{\zeta\in\{\pm\}}\sum_{k'}\int_0^t \mathscr K_{kk'}^\zeta(t,s) b_{k'}(s)^\zeta\,\mathrm{d}s;\quad \textit{\textbf{b}}=(b_{k'}(s)).\end{equation} Now let $\mathscr L$ be defined as in (\ref{eqnbk1.5}). Then there exists an $\mathbb R$-linear operator $\mathscr X$, and $\mathscr Y=(1-\mathscr L)\mathscr X$ and $\mathscr W=\mathscr X(1-\mathscr L)$, such that \begin{equation}\label{kernelexp}\mathscr X=1+\sum_{m=1}^N\mathscr X_m,\quad \mathscr Y=1+\sum_{m=N+1}^{3N+1}\mathscr Y_m,\quad \mathscr W=1+\sum_{m=N+1}^{3N+1}\mathscr W_m.\end{equation} The kernels of $\mathscr X_m$, $\mathscr Y_m$ and $\mathscr W_m$ are Wiener chaos of order $2m$, and they satisfy that
\begin{equation}\label{mainest2}\mathbb E|(\mathscr X_m)_{kk'}^{\zeta}(t,s)|^2\lesssim \langle k-k'\rangle^{-20d}(C^+\sqrt{\delta})^mL^{40d}\end{equation} for any $1\leq m\leq N$ and $k,k'\in\mathbb Z^d_L$ and $t,s\in[0,1]$ with $t>s$. The same holds for $\mathscr Y_m$ and $\mathscr W_m$ and any $N+1\leq m\leq 3N+1$. \end{prop} \begin{rem}\label{nonresrem} Recall that the support of the coefficient $\epsilon_{k_1k_2k_3}$ in (\ref{defcoef0}), which is the set $\mathfrak S$ in (\ref{defset}), allows for the \emph{degenerate} case $k_2\in\{k_1,k_3\}$. However in this case we must have $k_1=k_2=k_3$, and such case is always easily treated (see for example Section 9.3.1 of \cite{DH21} which deals with degenerate atoms).
For simplicity of presentation, we will neglect the degenerate case for most of this paper and assume that $\epsilon_{k_1k_2k_3}$ is always supported in the set where $k_2\not\in\{k_1,k_3\}$. In Section \ref{extradegen} after the main proof, we briefly discuss how to treat degenerate cases, which only requires minor modifications. \end{rem} \section{Overview of the proof}\label{overview} \subsection{Previous strategy, and the main difficulty} Let us start from the proof of Propositions \ref{mainprop1} and \ref{mainprop4}, which requires us to analyze the quantity $\mathcal K_\mathcal Q$. The early steps in the analysis are essentially the same as in \cite{DH21} (which deals with the case $\gamma\approx 1$). First one identifies and analyzes the leading couples, called \emph{regular couples}, which are ones built by concatenating specific building blocks called mini couples (cf. Definition \ref{defmini} and Figure \ref{fig:oper}). The analysis of those couples was done in \cite{DH21} and is basically independent of the chosen scaling law. As such, the heart of the matter is showing that the contribution of non-regular couples is lower order. The analysis of non-regular couples proceeds by applying a structure theorem (Proposition \ref{skeleton}) to reduce to prime couples $\mathcal Q$ (Section \ref{primered}), which are couples that contain no regular sub-couples inside them. Then, one uses the almost $L^1$ bound for the time integral in (\ref{bigformula}) (see for example (\ref{1stsumbound})) to reduce the estimates on such prime couples to a counting problem for decorations $(k_\mathfrak n)$ of $\mathcal Q$, which has the form \begin{equation}\label{cplcount0} \left\{
\begin{aligned}&k_{\mathfrak n_1}-k_{\mathfrak n_2}+k_{\mathfrak n_3}=k_\mathfrak n;\quad |k_{\mathfrak n_1}|,\,|k_{\mathfrak n_2}|,\,|k_{\mathfrak n_3}|,\,|k_{\mathfrak n}|\leq 1,\\
&|k_{\mathfrak n_1}|^2-|k_{\mathfrak n_2}|^2+|k_{\mathfrak n_3}|^2-|k_\mathfrak n|^2=(\mathrm{Const})+O(L^{-2\gamma}), \end{aligned} \right.\quad\forall \mathrm{\ branching\ nodes\ }\mathfrak n \end{equation} in the notions of Definition \ref{defdec}.
To study the counting problem, we then introduce the notion of \emph{molecules} $\mathbb M$ (Definition \ref{defmole}), which is of fundamental importance in \cite{DH21} and even more so in the current paper. Basically, a molecule is a directed graph formed by \emph{atoms} which are $4$-element subsets $\{\mathfrak n,\mathfrak n_1,\mathfrak n_2,\mathfrak n_3\}$ as in (\ref{cplcount0}), and \emph{bonds} which are common elements of these subsets under the given pairing structure; such a molecule coming from a couple will have all degree $4$ atoms except for exactly two degree $3$ atoms, and moreover contains no triple bond if the couple is prime. The system (\ref{cplcount0}) is then reduced to a system for decorations of this molecule (Definition \ref{decmole}), where each bond is decorated by a vector $k_\ell$ and each atom gives an equation of form (\ref{cplcount0}) with variables corresponding to the bonds at this atom. As a central component of the proof, we need to establish a suitable \emph{rigidity theorem}, which (schematically) states that \begin{itemize} \item Apart from some explicitly defined special structures, the counting problem provides sufficient control for $\mathcal K_\mathcal Q$, with an additional power gain\footnote{This extra gain is needed to cancel the factorial divergence coming from the number of generic couples and molecules, which is the major difficulty in \cite{DH21}. Exactly the same gain is needed also in the current work.} $L^{-cn}$ proportional to the size $n$ of the molecule $\mathbb M$. \end{itemize} It is at this point that the arguments for $\gamma\approx 1$ and $\gamma\in(0,1)$ start to differ, and the new structures and ideas start to emerge. 
\subsubsection{The $\gamma=1$ case} When $\gamma=1$ (and similarly when $\gamma\approx 1$), the counting problem for the molecule is solved by designing a \emph{reduction algorithm} (Section 9.4 of \cite{DH21}), where in each step we remove one or two atoms and their bonds, and reduce to the counting problem for a smaller molecule. Each such operation is either \emph{good} and favorable for counting (i.e. the desired bound for the smaller molecule implies strictly better than desired bound for the original molecule) or \emph{normal} and neutral for counting (i.e. the desired bound for the smaller molecule implies precisely the desired bound for the original molecule). Note that there are no bad operations, and the proof goes by a careful analysis of the algorithm, using suitable invariants and monotonic quantities, that shows that at least a fixed small portion of all operations are good, apart from two special structures called type I and II molecular chains.
As for the special structures, type I chains are formed by double bonds only; type II chains are formed by single and double bonds (the lower right corner of Figure \ref{fig:vines}), and in the current paper we will refer to them as \emph{ladders}. These ladders are neutral for counting, and their presence does not help or harm anything, so we will mostly ignore them for the rest of this section. In comparison, type I chains cause a logarithmic loss when $\gamma=1$, but this is compensated by a delicate \emph{cancellation} between different type I chains (which come from \emph{irregular chains} in the corresponding couples), see Section 8.3 of \cite{DH21}. Such cancellation was previously unknown in either mathematical or physical literature, and is another key component of the proof in \cite{DH21}. \subsubsection{The $\gamma=1/2$ case: Main difficulties} We now turn to the general range $\gamma\in(0,1)$. In fact, the most typical case, and in some sense the hardest case, is the ballistic scaling $\gamma=1/2$. For simplicity, we will assume this scaling for the rest of this section.
Recall that in \cite{DH21}, when $\gamma=1$, the key quality of the operations in the algorithm---namely \emph{good} and \emph{normal}---relies on what we may call the \emph{atomic} counting estimates, which involve one or two atoms (i.e. systems similar to (\ref{cplcount0})), and between $2$ and $5$ bonds (i.e. unknown vectors). For example, favorable $2$- and $3$-vector counting estimates take the form
\begin{equation}\label{atomiccount}\#\big\{(k_1,\cdots,k_q)\in \mathbb Z_L^d\cap[0,1]^d:k_1-\cdots +k_q=k_*,|k_1|^2-\cdots+|k_q|^2=O(L^{-2\gamma})\big\}\lesssim L^{(q-1)(d-\gamma)} \end{equation} for $q\in\{2,3\}$ and $k_*$ fixed; the $4$- and $5$-vector counting estimates involve two systems, see Lemma \ref{basiccount} (3) and (4).
Now, when $\gamma=1/2$, we can prove the same (good and normal) bounds for the $3$-, $4$- and $5$-vector counting problems just as in the $\gamma=1$ case; however, the \emph{two} vector counting bound, namely (\ref{atomiccount}) with $q=2$, breaks down. Indeed, in the worst case scenario $|k_*|\sim L^{-1}$, the left hand side of (\ref{atomiccount}) is of order $L^{d}$, which loses $L^{1/2}$ compared to the right hand side of (\ref{atomiccount}).
This, being the only difference between the $\gamma=1/2$ and $\gamma=1$ cases, seems to be a minor issue at first sight; however it turns out to have a huge effect on the analysis of molecules. First of all, this leads to the presence of \emph{bad} operations in the algorithm (say when one removes one atom of degree $2$), where the desired bound for the smaller molecule does \emph{not} imply the desired bound for the original molecule, so the approach in \cite{DH21} breaks down; in fact it breaks down in a much more essential way, due to the occurrence of new bad special structures.
Of course, the double bond chains---called type I chains in \cite{DH21}---are now quite bad in terms of counting, but as in \cite{DH21} they are compensated by cancellation, albeit in a slightly more subtle manner. Nevertheless, there are families of much more complicated structures, which are favorable for counting when $\gamma=1$, but become bad when $\gamma=1/2$, for example the one shown in Figure \ref{fig:vinesintro}. Moreover, there are also \emph{six more families} of structures which are favorable for counting when $\gamma=1$, but become neutral when $\gamma=1/2$. These are depicted in Figure \ref{fig:vines}, including the one in Figure \ref{fig:vinesintro}, and are collectively referred to as \emph{vines}. \begin{figure}
\caption{An example of a new bad structure for $\gamma=1/2$ (see also Figure \ref{fig:vines}), called \emph{vine (II)}; here the red bonds may be replaced by a ladder.}
\label{fig:vinesintro}
\end{figure}
This means that, even in the statement of the rigidity theorem, one has to exclude, in addition to double bond chains and ladders, all the different vines as well as chains formed by them. Dealing with these new structures is the major challenge (which we discuss further in Section \ref{introcancel}), but even if we assume absence of these vines, it is not at all clear why the rigidity theorem would hold. In particular, the vines occurring in Figure \ref{fig:vines} seem sporadic and unrelated to each other, so how can we identify them from all the other structures and show they are the only bad ones?
All these considerations have led to an important modification to the algorithm, namely the addition of a new operation called \emph{cutting}. This not only makes the algorithm in \cite{DH21} much more robust, but also allows one to naturally see the occurrence of all the vines in Figure \ref{fig:vines}, which would otherwise seem to be coming from nowhere. We shall elaborate on this further below.
\subsection{Vines, derived from cutting}\label{introcut} Now we discuss the main new ingredient in our modification to the algorithm in \cite{DH21}, namely the new operation called cutting.
Recall that the only \emph{bad} operation that can ever occur in the algorithm is $2$-vector counting, and this typically comes from removing a degree $2$ atom $v$, which has two bonds $(\ell_1,\ell_2)$ of opposite directions, such that $k_{\ell_1}-k_{\ell_2}=k_*$ as in (\ref{atomiccount}) with $|k_*|\lesssim L^{-1}$. Suppose $v$ has degree $4$ \emph{before any operation} (i.e. in the original molecule), then due to the first equation in (\ref{cplcount0}), the other two bonds $(\ell_3,\ell_4)$ at $v$ must also satisfy $|k_{\ell_3}-k_{\ell_4}|\lesssim L^{-1}$. We then call such atoms $v$ \emph{small gap}, or SG, atoms. In comparison, assume (under a small simplification) that all other atoms $v'$ with bonds $\ell_j'\,(1\leq j\leq 4)$ must have that $|k_{\ell_i'}-k_{\ell_j'}|\sim 1$ for any pair $(\ell_i',\ell_j')$ of opposite directions, and call them \emph{large gap} or LG atoms.
Therefore, it is the SG atoms that will cause bad operations to occur, and such bad operations are hard to control once they enter the main algorithm. To isolate the difficulties, the natural idea is then to get rid of these potentially bad atoms \emph{in the first place}, before entering the algorithmic phase. The precise operation we perform in this pre-processing stage is cutting. As its name suggests, at each step we choose a degree $4$ SG atom $v$, and split it into two atoms of degree $2$, as in Figure \ref{fig:cutintro} below (see also Figure \ref{fig:cut}). \begin{figure}
\caption{Cutting a degree $4$ atom $v$ (Definition \ref{defcut}).}
\label{fig:cutintro}
\end{figure}
There are two cases of cutting: when this operation does not create a new connected component (which we call $\alpha$-cutting, see Definition \ref{defcut}), or when it creates a new connected component (which we call $\beta$-cutting). It turns out that $\alpha$-cuttings are favorable for counting in a certain sense, and lead to power gains instead of losses, so below we will focus on $\beta$-cuttings. \subsubsection{Local rigidity theorems} Suppose we have done all possible cuttings (say they are all $\beta$-cuttings), then the resulting graph is composed of finitely many connected components. A typical component will contain several degree $2$ atoms that result from cutting, as well as degree $4$ atoms\footnote{There is only one component with two degree $3$ atoms, which enjoys much better estimates compared to other components, and will be neglected in the discussions below.}; the point here is that all the degree $4$ atoms must have \emph{large gap}, and so will not be involved in any bad operations. Moreover, since the number $q$ of degree $2$ atoms and the number $p$ of components satisfy $q=2p-2$, we expect that a typical component will contain exactly two degree $2$ atoms. We will fix such a component $\mathbb M_0$ below.
Before getting to the counting problem, we shall make one more reduction to this component. For each degree $2$ atom $v$, suppose it has two bonds $(\ell_1,\ell_2)$, then we may remove $v$ and these two bonds, and replace them by a single bond $\ell$ connecting their two other endpoints. If this operation introduces a new triple bond, then we may further remove the two endpoint atoms of this triple bond and add one new bond between the two other atoms connected to these two endpoints, and keep doing so until no more new triple bonds appear. The combined effect of this sequence of operations, which we may call the \emph{(Y) sequence}, is shown in Figure \ref{fig:yoperintro}. Let the graph resulting from this sequence be $\mathbb M_1$. Note that this sequence essentially corresponds to removing a degree $2$ atom together with a ladder attached to it, and recall that ladders are neutral objects for counting. In fact, this sequence is also neutral for counting, in the sense that each decoration of $\mathbb M_0$ provides also a decoration of $\mathbb M_1$, and the desired bound for the counting problem for $\mathbb M_1$ implies precisely the desired bound for the counting problem for $\mathbb M_0$. \begin{figure}
\caption{The (Y) sequence of operations starting from a degree $2$ atom $v$ (compare also with a similar related sequence (Y2) in Figure \ref{fig:yoper}). The combined effect is removing all atoms and bonds above $v_1$ and $v_2$, and adding a new bond (colored in blue) between $v_1$ and $v_2$.}
\label{fig:yoperintro}
\end{figure}
Now the goal is to study the counting problem associated with $\mathbb M_1$; let the number of solutions to this counting problem be $\mathfrak C$, and let $\chi:=E-V+F$ be the characteristics of $\mathbb M_1$ (where $E$ and $V$ are the numbers of bonds and atoms in $\mathbb M_1$, and $F$ is the number of components which is $1$ for now). We would like to compare $\mathfrak C$ with the quantity $L^{(d-1/2)\chi}$, and thus define $\mathfrak A:=\mathfrak C \cdot L^{-(d-1/2)\chi}$. Then, the main result for $\mathbb M$ can be stated in the form of a ``local" rigidity theorem (Proposition \ref{lgmolect}, see also Proposition \ref{moleprop4}), as follows: \begin{itemize} \item If $\mathbb M$ is not equal to one of the finitely many explicitly defined ``bad" graphs, then, apart from ladders, we have $\mathfrak A\lesssim L^{-c\cdot V}$ for some constant $c>0$. \end{itemize}
The proof of the local rigidity theorem, as well as the arguments leading to the bad graphs, relies on the exact algorithm described in Section 9.4 of \cite{DH21}; of course this is owing to the fact that we no longer have any SG atoms in $\mathbb M_1$. However there is also a price to pay, namely that now a typical component $\mathbb M_1$ contains only degree $4$ atoms, as opposed to having two degree $3$ atoms before doing all the cuttings. Therefore, the first step in the algorithm necessarily has to be removing a degree $4$ atom, which corresponds to a counting problem of form (\ref{atomiccount}) but with $q=4$ (note this is different from the $4$-vector counting in Lemma \ref{basiccount} (2)). This is one---but the \emph{only} one---bad operation in the algorithm, which loses power $L^{1/2}$, in the sense that the $\mathfrak A$ value before the operation is only bounded by $L^{1/2}$ times the $\mathfrak A$ value after the operation.
After this first bad operation, we no longer need to consider (\ref{atomiccount}) with $q=4$, so the remaining operations are all good or normal, due to absence of SG atoms; in summary, we have one bad operation per component (as opposed to potentially many bad operations due to SG atoms, had we not done the cuttings in the first place). Moreover, it can be shown that each good operation gains power $L^{-1/2}$ (opposite to the above, so that the $\mathfrak A$ value before the operation is bounded by $L^{-1/2}$ times the $\mathfrak A$ value after the operation). A subsequent discussion, in the same spirit as Section 9.5 of \cite{DH21}, then allows us to bound the number of good operations from below. More precisely, apart from ladders, there is only one case for $\mathbb M_1$ in which there is no good operation (so $\mathfrak A=L^{1/2}$), namely when $\mathbb M_1$ is a quadruple bond; there is also only one case for $\mathbb M_1$ in which there is exactly one good operation (so $\mathfrak A=1$), namely when $\mathbb M_1$ is a triangle of three double bonds. These are shown in Figure \ref{fig:badmol}. In \emph{all the other} cases, the number of good operations is at least two, so we have $\mathfrak A\lesssim L^{-1/2}$. If we choose $c$ small enough, this already proves the local rigidity theorem when $V\leq 100$; if $V>100$, then we can repeat the proof in Section 9.5 of \cite{DH21}---almost word by word---to get that the power gain is at least proportional to the size $V$ of the graph, which makes the $1/2$ loss in the only bad operation negligible. In the end, this allows us to prove the local rigidity theorem. \begin{figure}
\caption{The two bad graphs: the quadruple bond (left) and the triangle of three double bonds.}
\label{fig:badmol}
\end{figure}
\subsubsection{Vines} With the local rigidity theorem proved, we only need to check all the possibilities for components with exactly two degree $2$ atoms, which lead to the two bad cases---a quadruple bond and a triangle of three double bonds---after at most two (Y) sequences described above. These possibilities can be found by enumeration, and exactly correspond to \emph{the families of vines (II)--(VIII)} (except vines (I) which are double bonds) in Figure \ref{fig:vines}; in fact, this is exactly how these vines are discovered. The following Figure \ref{fig:vinesintro2} shows an example of how a specific case of vine (VI) is reduced to a triangle of three double bonds after two (Y) sequences; the other cases can be shown similarly (see the proof of Lemma \ref{ylem1}).
It is now easy to get the main rigidity theorem, namely Proposition \ref{kqmainest2}, by combining the local rigidity estimates for all components after cutting; however, we still need to analyze the vines. In fact, vines (III)--(VIII) in Figure \ref{fig:vines} are neutral for counting, and do not cause any gain or loss in powers; nevertheless, each individual vine (I) or (II) would cause a serious $L^{1/2}$ power loss. These will be controlled by some surprisingly delicate cancellations, which we discuss next. \begin{figure}
\caption{The process of reducing a vine (VI) to a triangle of three double bonds, using two (Y) sequences. Each time, the blue objects are removed in each sequence, and the red objects are added in the previous sequence.}
\label{fig:vinesintro2}
\end{figure} \subsection{The miraculous cancellation}\label{introcancel} As described above, we are now left with the analysis of vines (I) and (II) (called \emph{bad vines}) in Figure \ref{fig:vines}. Since double bonds are essentially the same as in \cite{DH21}, we will focus on vines (II) in this subsection.
To exhibit the cancellation, the idea is to go back to the couple picture and enumerate all the possible (parts of) couples that correspond to a given vine (II), in the same way that chains of double bonds are shown to come from irregular chains in \cite{DH21}. In fact, it will suffice to consider only a triangle (of one double and two single bonds, see the triangle at the top of Figure \ref{fig:vinesintro}) at one end of the vine instead of the full length vine. By definition of molecule, each bond either corresponds to a branching node (\emph{parent-child} or PC bonds) that belongs to two different subsets of form $\{\mathfrak n,\mathfrak n_1,\mathfrak n_2,\mathfrak n_3\}$ as in (\ref{cplcount0}), or corresponds to a pair of leaves (\emph{leaf-pair} or LP bonds, see Definition \ref{defmole}). By considering all possibilities of each pair being either PC or LP, we get five different couple structures corresponding to a given vine (II), namely vines (II-a)--(II-e) as in Proposition \ref{molecpl} (Figure \ref{fig:block_mole}). Among the five structures (II-a)--(II-e), it turns out that vines (II-a) can be uniquely paired with vines (II-b), and vines (II-c) with vines (II-d), to give desired cancellations (vines (II-e) entails a cancellation structure in itself). Such pairs of couple structures are called \emph{twists} of each other, see Definition \ref{twist} (Figure \ref{fig:twist_dec}). \begin{figure}
\caption{Two couple structures, namely vines (II-c) and (II-d), that are twists of each other. Here (A)--(D) represent the remaining parts of the couples, which are the same in both cases. The corresponding molecule (which coincide in both cases) and decorations (which are in one-to-one correspondence) are also illustrated.}
\label{fig:vinescancel}
\end{figure}
In Figure \ref{fig:vinescancel}, we show one example of vine (II-c) and its twist, which is vine (II-d), which exhibit cancellation on the couple level. Note that these two structures are \emph{not} isomorphic as couples (which may be naturally defined by isomorphism of ternary trees), however, their corresponding molecules are the same. Therefore, it is reasonable to say that, compared to the notion of ternary trees and couples, which are immediately associated with the Duhamel evolution (\ref{akeqn}), it is really the notion of molecules that captures the essence of the hidden cancellation structure associated with the problem. This can also be compared to the simple cancellation of irregular chains in \cite{DH21}, shown in Figure \ref{fig:vinescancel2}, in which case the couple structure and its twist are actually isomorphic as couples. \begin{figure}
\caption{Two couple structures corresponding to irregular chains in \cite{DH21} (or vines (I-a) and (I-b) as in Proposition \ref{molecpl}), that are twists of each other; notations are the same as in Figure \ref{fig:vinescancel}.}
\label{fig:vinescancel2}
\end{figure}
We now explain how cancellation takes place between vines (II-c) and (II-d), as shown in Figure \ref{fig:vinescancel}. Recall the expression $\mathcal K_\mathcal Q$ defined in (\ref{defkq}). By examining the decorations as shown in Figure \ref{fig:vinescancel}, it is easy to see for the two corresponding couples that (i) the signs $\zeta(\mathcal Q)$ are opposite, (ii) the integrands $\exp(\zeta_\mathfrak n \pi i\cdot \delta L^{2\gamma}\Omega_\mathfrak n t_\mathfrak n)$ are exactly the same. The only differences are that (iii) the initial data $n_{\mathrm{in}}$ factors are \begin{equation}\label{diffnin}n_{\mathrm{in}}(k_4)n_{\mathrm{in}}(k_6)n_{\mathrm{in}}(k_7)\mathrm{\ \ for\ the\ left\ couple},\qquad n_{\mathrm{in}}(k_4)n_{\mathrm{in}}(k_6)n_{\mathrm{in}}(k_8)\mathrm{\ \ for\ the\ right\ couple},\end{equation} and (iv) the (part of) domain $\mathcal E$ of time integration is \begin{equation}\label{diffint}t_1>t_2>t_3>t_4\mathrm{\ \ for\ the\ left\ couple},\qquad t_1>t_2>\max(t_3,t_4)\mathrm{\ \ for\ the\ right\ couple}.\end{equation} Here in (iv) the time variables $t_j$ correspond to the atom $V_j$ in the molecule, which also correspond to the branching nodes decorated by $k_1,k_3,k_5,k_8$ (for the left couple) or $k_1,k_3,k_5,k_7$ (for the right couple).
We may assume the atoms $V_1$ and $V_4$ have SG (otherwise this vine would not correspond to a bad operation), which implies that $|k_7-k_8|\lesssim L^{-1}$, and thus $n_{\mathrm{in}}(k_7)\approx n_{\mathrm{in}}(k_8)$ up to negligible error. Therefore, we only need to treat the difference in the time integration in (\ref{defkq}) caused by (\ref{diffint}), which leads to the domain $t_2>t_4>t_3$. Now consider the $\zeta_\mathfrak n\Omega_\mathfrak n$ factors for the branching nodes $\mathfrak n$ decorated by $k_3$ and $k_5$, and denote them by $\Gamma_2$ and $\Gamma_3$, then from Figure \ref{fig:vinescancel} we see that
\[\Gamma_2=|k_3|^2-|k_5|^2+|k_6|^2-|k_7|^2=2(k_3-k_5)\cdot (k_3-k_7),\quad \Gamma_2+\Gamma_3=|k_3|^2-|k_4|^2+|k_8|^2-|k_7|^2=O(L^{-1}),\] noticing that $|k_3-k_4|=|k_7-k_8|\lesssim L^{-1}$. We may then assume $\Gamma_2+\Gamma_3=0$, so the expression (\ref{defkq}) will involve a part that essentially has the form \[\int_{t_2>t_4>t_3}\sum_{x,y} e^{2\pi iL^{2\gamma}(t_2-t_3)(x\cdot y)}W(x,y)\,\mathrm{d}t_2\mathrm{d}t_3\mathrm{d}t_4,\] where $(x,y)=(k_3-k_5,k_3-k_7)$ and $W$ is a well-behaved function. The sum in $(x,y)$ can be calculated similar to regular couples in \cite{DH21}, however the leading term \emph{vanishes precisely because $t_2>t_4>t_3$}, due to the time integral having zero average as a function of $x\cdot y$, see Lemma \ref{sumintest1}. This cancellation then provides enough decay and allows us to control the contribution of vines. \begin{rem}Such cancellation for vines, as described above, seems to be new in both the mathematical and physical literature. It seems quite miraculous, and it might have some physical interpretation, or be part of a more general cancellation mechanism for Feynman diagrams. However, such interpretation is still unclear at this point. \end{rem} We summarize the proof of Propositions \ref{mainprop1} and \ref{mainprop4} in the following flowchart: \begin{figure}
\caption{A flowchart explaining the process of the proof of Propositions \ref{mainprop1} and \ref{mainprop4}. After the reduction to prime couples, one runs parallel arguments to (a) bound the vines contained in the couple, using cancellation arguments if needed, and (b) perform the combinatorial analysis of the couples absent the problematic vines. The latter proceeds via the cutting operation followed by a local rigidity theorem in the spirit of that in \cite{DH21}, resulting in a global rigidity theorem that gives the needed gain from such couples.}
\label{fig:flowchart}
\end{figure} \subsection{Construction of a parametrix} Finally we turn to Proposition \ref{mainprop2}. Recall the equation (\ref{eqnbk2}) satisfied by $\textit{\textbf{b}}$. As pointed out in \cite{DH21}, we do not need to bound the norm of $\mathscr L$ in any function space in order to solve this equation, but only need to invert the operator $1-\mathscr L$. This then requires us to construct a parametrix $\mathscr X$ to $1-\mathscr L$, as stated in Proposition \ref{mainprop2}. In \cite{DH21}, this parametrix is simply defined, using Neumann series, as $\mathscr X=1+\mathscr L+\cdots +\mathscr L^N$ for large $N$; but such a construction would run into a problem here, because it is not compatible with the vine cancellation structure.
The solution is to use the notion of \emph{flower trees} and \emph{flower couples} introduced in \cite{DH21}; this is natural, as these structures are already used to obtain bounds for powers of $\mathscr L$ in \cite{DH21}. Here, instead of sticking to powers of $\mathscr L$, we construct $\mathscr X$ by using these structures directly, which allows us to group the flower couples that occur, in the precise way that allows for all the needed cancellations. Apart from these, the proof of Proposition \ref{mainprop2} relies on the same arguments as in the proof of Propositions \ref{mainprop1} and \ref{mainprop4}, with only minor modifications. See Section \ref{linoper1} for details. \subsection{Plan of this paper} In Section \ref{molecules} we define and study the structure of molecules. In Section \ref{secvine} we introduce the key new objects called \emph{vines}. In Section \ref{regular} we study expressions associated with regular couples and regular trees, and prove estimates which are the same as in \cite{DH21} but with more precise error bounds and a new cancellation structure. In Section \ref{funcgroup} we study similar expressions associated with vines, and prove two key estimates exploiting the cancellation between vines.
With these preparations, we present the proof of Propositions \ref{mainprop1} and \ref{mainprop4} in Sections \ref{reduct1}--\ref{lgmole}: in Section \ref{reduct1} (stage 1) we reduce them to Proposition \ref{kqmainest1} and then \ref{kqmainest2}, in Section \ref{reduct2} (stage 2) we further reduce them to Proposition \ref{lgmolect}, and in Section \ref{lgmole} we prove Proposition \ref{lgmolect}. Finally, in Section \ref{linoper1} we prove Proposition \ref{mainprop2}, and in Section \ref{linoper2} we prove Theorem \ref{main}. The various auxiliary results used in this paper are listed and proved in Appendix \ref{aux}. \section{Couples and molecules}\label{molecules} \subsection{Definition of molecules} We start by defining molecules and related notions as in \cite{DH21}. \begin{df}[Molecules]\label{defmole0} A \emph{molecule} $\mathbb M$ is a directed graph, formed by vertices (called \emph{atoms}) and edges (called \emph{bonds}), where multiple bonds are allowed\footnote{We do not allow self-connecting bonds here; see Remark \ref{moleremark} and Section \ref{extradegen}.}, and each atom has out-degree at most $2$ and in-degree at most $2$. We write $v\in \mathbb M$ and $\ell\in\mathbb M$ for atoms $v$ and bonds $\ell$ of $\mathbb M$, and write $\ell\sim v$ if $v$ is an endpoint of $\ell$. We further require that $\mathbb M$ does not have any connected component with only degree $4$ atoms (we call such components \emph{saturated}), where connectivity is always understood in terms of undirected graphs. For distinction, if a directed graph is otherwise like molecules but may contain saturated components, we will call it a \emph{pseudomolecule}.
An \emph{atomic group} in a molecule is a subset of atoms, together with all bonds between these atoms. Some particular atomic groups, or families of atomic groups, will play important roles in our proof (such as the \emph{vines} (I)--(VIII) defined in Section \ref{subsecvine}). Given any molecule $\mathbb M$, we define $V$ to be the number of atoms, $E$ the number of bonds, and $F$ the number of connected components. Define the characteristics $\chi:=E-V+F$. \end{df} \begin{df}[Molecule of couples]\label{defmole} Let $\mathcal Q$ be a nontrivial couple; we will define a directed graph $\mathbb M=\mathbb M(\mathcal Q)$ associated with $\mathcal Q$ as follows. The atoms are all the branching nodes $\mathfrak n\in\mathcal N$ of $\mathcal Q$. For any two atoms $\mathfrak n_1$ and $\mathfrak n_2$, we connect them by a bond if either (i) one of them is the parent of the other, or (ii) a child of $\mathfrak n_1$ is paired to a child of $\mathfrak n_2$ as leaves. In case (i) we label this bond by PC, and place a label P at the parent atom, and place a label C at the child atom; in case (ii) we label this bond by LP. Note that one atom $v$ may have multiple P and C labels coming from different bonds $\ell\sim v$.
We fix the direction of each bond as follows. Any LP bond should go from the atom whose paired child has $-$ sign to the one whose paired child has $+$ sign. Any PC bond should go from the P atom to the C atom if the C atom has $-$ sign as a branching node in $\mathcal Q$, and should go from the C atom to the P atom if the C atom has $+$ sign.
For any atom $v\in\mathbb M(\mathcal Q)$, let $\mathfrak n=\mathfrak n(v)$ be the corresponding branching node in $\mathcal Q$. For any bond $\ell\sim v$, define also $\mathfrak m=\mathfrak m(v,\ell)$ such that (i) if $\ell$ is PC with $v$ labeled C, then $\mathfrak m=\mathfrak n$; (ii) if $\ell$ is PC with $v$ labeled P, then $\mathfrak m$ is the branching node corresponding to the other endpoint of $\ell$ (which is a child of $\mathfrak n$); (iii) if $\ell$ is LP then $\mathfrak m$ is the leaf in the leaf pair defining $\ell$ that is a child of $\mathfrak n$. \end{df} \begin{rem}\label{remlable} The molecule $\mathbb M(\mathcal Q)$ is actually a \emph{labeled molecule} because of the labels LP and PC on bonds (and P and C on atoms). This feature is specific to molecules coming from a couple. Below we will not be too strict in distinguishing a molecule (which is just a directed graph) from a labeled molecule, but this difference does sometimes occur (see e.g. Remark \ref{twistexplain} (a)). \end{rem} \begin{prop}\label{moleproperty} For any nontrivial couple $\mathcal Q$ with order $n$, the directed graph $\mathbb M=\mathbb M(\mathcal Q)$ defined in Definition \ref{defmole} is a molecule. It has $n$ atoms and $2n-1$ bonds, in particular it is connected and has either two atoms of degree $3$ or one atom of degree $2$, with the remaining atoms all having degree $4$.
For any atom $v$, let $\mathfrak n=\mathfrak n(v)$, then the values of $\mathfrak m(v,\ell)$ where $\ell\sim v$ form a subset of $\{\mathfrak n,\mathfrak n_1,\mathfrak n_2,\mathfrak n_3\}$ where $\mathfrak n_j$ are children of $\mathfrak n$. When $v$ has degree $4$ the equality holds, and when $v$ has degree $2$ or $3$, some of the nodes in $\{\mathfrak n,\mathfrak n_1,\mathfrak n_2,\mathfrak n_3\}$ will not correspond to a bond $\ell$. \end{prop} \begin{proof} For connectivity see Proposition 9.4 of \cite{DH21}. The rest follows directly from definitions. \end{proof} \begin{prop}\label{recover} Given a molecule $\mathbb M$ with $n$ atoms as in Definition \ref{defmole0}, the number of couples $\mathcal Q$ (if any) such that $\mathbb M(\mathcal Q)=\mathbb M$ is at most $C^n$. \end{prop} \begin{proof} See Proposition 9.6 of \cite{DH21}. \end{proof} \begin{df}[Decoration of molecules]\label{decmole} Given a molecule or pseudomolecule $\mathbb M$ (Definition \ref{defmole}), suppose we also fix the vectors $c_v\in\mathbb Z_L^d$ for each $v\in\mathbb M$ such that $c_v=0$ when $v$ has degree $4$, then we can define a $(c_v)$-\emph{decoration} (or just a decoration) of $\mathbb M$ to be a set of vectors $(k_\ell)$ for all bonds $\ell\in\mathbb M$, such that $k_\ell\in\mathbb Z_L^d$ and \begin{equation}\label{decmole1}\sum_{\ell\sim v}\zeta_{v,\ell}k_\ell=c_v \end{equation} for each atom $v\in\mathbb M$. Here the sum is taken over all bonds $\ell\sim v$, and $\zeta_{v,\ell}$ equals $1$ if $\ell$ is outgoing from $v$, and equals $-1$ otherwise. For each such decoration and each atom $v$, define also that
\begin{equation}\label{defomegadec}\Gamma_v=\sum_{\ell\sim v}\zeta_{v,\ell}|k_\ell|^2. \end{equation}
Suppose $\mathbb M=\mathbb M(\mathcal Q)$ comes from a nontrivial couple $\mathcal Q$, and for $k\in\mathbb Z_L^d$, we define a $k$-decoration of $\mathbb M$ to be a $(c_v)$-decoration where $(c_v)$ is given by \begin{equation}\label{molecv} c_v=\left\{ \begin{aligned}&0,&\textrm{if }&v\textrm{ has degree }2\textrm{ or }4,\\ +&k,&\textrm{if }&v\textrm{ has out-degree }2\textrm{ and in-degree }1,\\ -&k,&\textrm{if }&v\textrm{ has out-degree }1\textrm{ and in-degree }2. \end{aligned} \right. \end{equation} Given any $k$-decoration of $\mathcal Q$ in the sense of Definition \ref{defdec}, define a $k$-decoration of $\mathbb M(\mathcal Q)$ such that $k_\ell=k_{\mathfrak m(v,\ell)}$ for an endpoint $v$ of $\ell$. It is easy to verify that this $k_\ell$ is well-defined (i.e. does not depend on the choice of $v$), and it gives a one-to-one correspondence between $k$-decorations of $\mathcal Q$ and $k$-decoration of $\mathbb M(\mathcal Q)$. Moreover for such decorations we have \begin{equation}\label{molegammav} \Gamma_v=\left\{ \begin{aligned}&0,&\textrm{if }&v\textrm{ has degree }2,\\ -&\zeta_{\mathfrak n(v)}\Omega_{\mathfrak n(v)},&\textrm{if }&v\textrm{ has degree }4,\\
-&\zeta_{\mathfrak n(v)}\Omega_{\mathfrak n(v)}+|k|^2,&\textrm{if }&v\textrm{ has out-degree }2\textrm{ and in-degree }1,\\
-&\zeta_{\mathfrak n(v)}\Omega_{\mathfrak n(v)}-|k|^2,&\textrm{if }&v\textrm{ has out-degree }1\textrm{ and in-degree }2. \end{aligned} \right. \end{equation}
Finally, given $\beta_v\in\mathbb R$ for each $v\in\mathbb M$ and $k_\ell^0\in\mathbb Z_L^d$ for each $\ell\in\mathbb M$, we define a decoration $(k_\ell)$ to be \emph{restricted by} $(\beta_v)$ and/or $(k_\ell^0)$, if we have $|\Gamma_v-\beta_v|\leq \delta^{-1}L^{-2\gamma}$ for each $v$ and/or $|k_\ell-k_\ell^0|\leq 1$ for each $\ell$. \end{df} \begin{rem}\label{moleremark} As stated in Remark \ref{nonresrem}, for simplification, we will assume there is no degenerate case (i.e. $k_2\in\{k_1,k_3\}$) in any $k$-decoration of any couple $\mathcal Q$ we are considering. In particular, we may assume that no two siblings are paired as leaves in $\mathcal Q$, and more generally there are no two sibling nodes such that the leaves of the subtrees rooted at them are completely paired. In particular, there is no degree $2$ atom or self-connecting bond in the associated molecule $\mathbb M=\mathbb M(\mathcal Q)$, as well as the skeleton $\mathcal Q_{\mathrm{sk}}$ defined in Proposition \ref{skeleton} below (so they must have two atoms of degree $3$ with the remaining atoms having degree $4$, due to Proposition \ref{moleproperty}). These may be violated if there is a degenerate case, but the latter is easily addressed, see Section \ref{extradegen}. \end{rem} \subsection{Regular couples and regular trees} Recall the following definitions of regular couples and regular trees in \cite{DH21}. \begin{df}\label{defmini} A \emph{$(1,1)$-mini couple} is a couple formed by two trees of order $1$ with no siblings paired. A \emph{mini tree} is a saturated tree of order $2$, again with no siblings paired; see Figure \ref{fig:mini}. Note that if $\mathcal Q$ is a $(1,1)$-mini couple or a couple formed by a mini tree with the trivial tree (called a $(2,0)$-mini couple in \cite{DH21}), then $\mathbb M(\mathcal Q)$ is exactly one triple bond.
For any couple $\mathcal Q$ we can define two operations: operation $A$ where a leaf pair is replaced by a $(1,1)$-mini couple, and operation $B$ where a node is replaced by a mini tree, see Figure \ref{fig:oper}. Then, we define a couple $\mathcal Q$ to be \emph{regular} if it can be formed, starting from the trivial couple $\times$, by operations $A$ and $B$. We also define a saturated paired tree $\mathcal T$ to be a \emph{regular tree}, if $\mathcal T$ forms a regular couple with the trivial tree $\bullet$. Clearly the order of any regular couple and regular tree must be even. \end{df} \begin{figure}\label{fig:mini}
\end{figure} \begin{figure}\label{fig:oper}
\end{figure} \begin{prop}\label{skeleton} For any couple $\mathcal Q$ there is a unique couple $\mathcal Q_{\mathrm{sk}}$, which is \emph{prime} in the sense that it does not contain any $(1,1)$-mini couple or mini tree, such that $\mathcal Q$ is constructed from $\mathcal Q_{\mathrm{sk}}$ in a unique way, by replacing each leaf pair with a regular couple, and each branching node with a regular tree. This $\mathcal Q_{\mathrm{sk}}$ is called the \emph{skeleton} of $\mathcal Q$; see Figure \ref{fig:skeleton} for an illustration. The molecule $\mathbb M(\mathcal Q_{\mathrm{sk}})$ does not contain a triple bond, and $\mathcal Q$ is regular if and only if $\mathcal Q_{\mathrm{sk}}$ is trivial. Moreover, the number of couples $\mathcal Q$ with order $n$ and fixed skeleton $\mathcal Q_{\mathrm{sk}}$ is at most $C^n$.
More generally, let $\mathcal Q_0$ be any couple (not necessarily prime); one may form a couple $\mathcal Q$ by replacing each leaf pair $(\mathfrak l,\mathfrak l')$ with a regular couple $\mathcal Q^{(\mathfrak l,\mathfrak l')}$ and each branching node $\mathfrak m$ with a regular tree $\mathcal T^{(\mathfrak m)}$. We shall denote the collection of all these $\mathcal Q^{(\mathfrak l,\mathfrak l')}$ and $\mathcal T^{(\mathfrak m)}$ by $\mathscr A$, and write $\mathcal Q\sim(\mathcal Q_0,\mathscr A)$. Define $n(\mathscr A)$ to be the total order of regular couples and regular trees in $\mathscr A$; we may use $\mathscr B$ etc. to denote suitable sub-collections of $\mathscr A$, and $n(\mathscr B)$ etc. are defined similarly. \begin{figure}\label{fig:skeleton}
\end{figure} \end{prop} \begin{proof} See Proposition 4.14 and Remark 4.15 of \cite{DH21}. The molecule $\mathbb M(\mathcal Q_{\mathrm{sk}})$ does not have a triple bond, because $\mathcal Q_{\mathrm{sk}}$ is a prime couple. \end{proof} \subsection{Blocks}\label{deffuncgroup} We next define the notion of blocks (and hyper-blocks), which is an important class of atomic groups that occur in our proof. \begin{df}[Blocks]\label{defblock}Given a molecule $\mathbb M$, an atomic group $\mathbb B\subset\mathbb M$ is called a \emph{block}, if all atoms in $\mathbb B$ have degree $4$ within $\mathbb B$, except for exactly two atoms $v_1$ and $v_2$ (called \emph{joints} of the block), each of which has out-degree $1$ and in-degree $1$ (hence total degree $2$) within $\mathbb B$, see Figure \ref{fig:block}. Define $\sigma(\mathbb B)$ as the number of bonds between $v_1$ and $v_2$. Note that $\sigma(\mathbb B)\in\{0,1,2\}$, and $\sigma(\mathbb B)=2$ if and only if $\mathbb B$ is a double bond. Moreover, we define a \emph{hyper-block} $\mathbb H$ to be the atomic group formed by adding one bond between the two joints $v_1$ and $v_2$ of a block $\mathbb B$ (we shall call this $\mathbb H$ the \emph{adjoint} of $\mathbb B$), and define $\sigma(\mathbb H)=\sigma(\mathbb B)+1$.
If two blocks share one common joint and no other common atom, then their union (or concatenation) is either a block or a hyper-block (depending on whether the two other joints of the two blocks are connected by a bond), see Figure \ref{fig:blockconc}. Note that a hyper-block cannot be concatenated with another block or hyper-block in this way. In general any finitely many (at least two) blocks can be concatenated to form a new block $\mathbb B$, or a new hyper-block $\mathbb H$, in which case we must have $\sigma(\mathbb B)=0$ and $\sigma(\mathbb H)=1$. \end{df}
\begin{figure}\label{fig:block}
\end{figure}
\begin{figure}\label{fig:blockconc}
\end{figure} \begin{lem}\label{disjointlem} Let $\mathbb M$ be a molecule. Suppose $\mathbb A,\mathbb B\subset\mathbb M$, each of them is a block or a hyper-block, and $\mathbb A\not\subset\mathbb B$, $\mathbb B\not\subset\mathbb A$ and $\mathbb A\cap\mathbb B\neq\varnothing$.
Let $a_1$ and $a_2$ be the joints of $\mathbb A$, and $b_1$ and $b_2$ be the joints of $\mathbb B$. Suppose further that (i) $\mathbb B\backslash\{b_1,b_2\}$ is connected, and (ii) for any $v\in\mathbb B\backslash\{b_1,b_2\}$, the subset $\mathbb B\backslash\{v\}$ is either connected, or has two connected components containing $b_1$ and $b_2$ respectively, and (iii) the same holds for $\mathbb A$.
Then $\mathbb A$ and $\mathbb B$ are both blocks, and exactly one of the three following scenarios happens: (a) $\mathbb A$ and $\mathbb B$ share two common joints and no other common atom, and $\sigma(\mathbb A)=\sigma(\mathbb B)=1$, (b) $\mathbb A$ and $\mathbb B$ share one common joint and no other common atom, and can be concatenated like in Definition \ref{defblock}; (c) $\mathbb A$ is formed by concatenating two blocks $\mathbb C_0$ and $\mathbb C_1$, and $\mathbb B$ is formed by concatenating $\mathbb C_1$ with another block $\mathbb C_2$ (where $\mathbb C_0\cap\mathbb C_2=\varnothing$). \end{lem} \begin{proof} (1) Suppose $\mathbb A$ and $\mathbb B$ share two common joints, say $a_1=b_1$ and $a_2=b_2$. If a third atom $u\in\mathbb A\cap\mathbb B$, since $\mathbb A\not\subset\mathbb B$, there must exist another atom $v\in\mathbb A\backslash\mathbb B$. Since $\mathbb A\backslash\{a_1,a_2\}$ is connected by assumption (i), we can find a path from $u$ to $v$ that remains in $\mathbb A$ but does not include either $a_1$ or $a_2$. However we have $u\in\mathbb B$ and $v\not\in\mathbb B$, so any path from $u$ to $v$ must include either $b_1$ or $b_2$, contradiction. This tells us that $\mathbb A\cap\mathbb B=\{a_1,a_2\}$. In this case there must be one (and exactly one) bond between $a_1$ and $a_2$, so $\sigma(\mathbb A)=\sigma(\mathbb B)=1$ and we are in scenario (a). In fact, if $\sigma(\mathbb A)=\sigma(\mathbb B)=0$, then $a_1$ has two bonds connecting to atoms in $\mathbb A$ and two other bonds connecting to atoms in $\mathbb B$, and same for $a_2$. Therefore \emph{every} atom in $\mathbb A\cup\mathbb B$ will have degree $4$ (including $a_1$ and $a_2$), which contradicts the definition of molecule. The other cases are treated similarly.
(2) Suppose $\mathbb A$ and $\mathbb B$ share no common joint. Choose $u\in\mathbb A\cap\mathbb B$ and $v\in\mathbb A\backslash\mathbb B$, the same argument in (1) implies that either $b_1$ or $b_2$ must be an \emph{interior} (i.e. non-joint) atom of $\mathbb A$. Similarly, either $a_1$ or $a_2$ must be an interior atom of $\mathbb B$. However, these four atoms cannot be all interior atoms because otherwise every atom in $\mathbb A\cup\mathbb B$ will again have degree $4$. By symmetry, we may assume that $a_1$ is an interior atom of $\mathbb B$, $b_1$ is an interior atom of $\mathbb A$, and $b_2\not\in\mathbb A$.
Now consider the atomic group $\mathbb A\backslash\{b_1\}$, which is the disjoint union of $\mathbb A\backslash\mathbb B$ and $(\mathbb A\cap\mathbb B)\backslash\{b_1\}$. If two atoms $u$ and $v$ from these two subsets are connected by a path in $\mathbb A\backslash\{b_1\}$, then we again have a contradiction because this path cannot include either $b_1$ or $b_2$. Using also assumption (ii), we know that $\mathbb A\backslash\mathbb B$ and $(\mathbb A\cap\mathbb B)\backslash\{b_1\}$ are two connected components of $\mathbb A\backslash\{b_1\}$, and $a_2\in\mathbb A\backslash\mathbb B$. It is now easy to see that $\mathbb C_0:=(\mathbb A\backslash\mathbb B)\cup\{b_1\}$ and $\mathbb C_1:=\mathbb A\cap\mathbb B$ are two blocks that are concatenated at the common joint $b_1$ to form $\mathbb A$. Now by switching $\mathbb A$ and $\mathbb B$ and arguing similarly, we can see that $\mathbb C_2=(\mathbb B\backslash\mathbb A)\cup\{a_1\}$ is also a block, and is concatenated with $\mathbb C_1$ at the common joint $a_1$ to form $\mathbb B$. Therefore, we are in scenario (c).
(3) Finally, suppose $\mathbb A$ and $\mathbb B$ share only one common joint, say $a_1=b_1$. If $\mathbb A\cap\mathbb B=\{a_1\}$, then clearly we are in scenario (b). If not, then there is a second atom $u\in\mathbb A\cap\mathbb B$. By repeating the arguments in (1) and (2), we know that $a_2$ is an interior atom of $\mathbb B$ and $b_2$ is an interior atom of $\mathbb A$. Then all atoms in $\mathbb A\cup\mathbb B$ except $a_1$ will have degree $4$, thus $a_1$ can only have degree $2$ (in-degree $1$ and out-degree $1$, as total in-degree must equal total out-degree), which means that $a_1$ has two bonds connecting to atoms in $\mathbb A\cap\mathbb B$. Now we can apply the same argument in (2) and conclude that $\mathbb A\backslash\mathbb B$ and $(\mathbb A\cap\mathbb B)\backslash\{b_2\}$ are two connected components of $\mathbb A\backslash\{b_2\}$, and $a_2\in\mathbb A\backslash\mathbb B$. But we already know $a_2$ is an interior atom of $\mathbb B$, which is impossible. This contradiction completes the proof. \end{proof} \subsubsection{Blocks in a couple} We now discuss the relative position of a block $\mathbb B\subset\mathbb M(\mathcal Q)$ in a couple $\mathcal Q$. \begin{prop}\label{block_clcn} Let $\mathcal Q$ be a couple and $\mathbb B\subset\mathbb M(\mathcal Q)$ be a block with two joints $v_1$ and $v_2$, and let $\mathfrak u_j=\mathfrak n(v_j)$. \begin{enumerate}[{(1)}] \item Then (up to symmetry) exactly one of the following two scenarios happens. 
\begin{itemize} \item \emph{\bf{(CL) or ``cancellation'' blocks:}} There is a child $\mathfrak u_{11}$ of $\mathfrak u_1$ and two children $\mathfrak u_{21},\mathfrak u_{22}$ of $\mathfrak u_2$, such that (i) $\mathfrak u_{11}$ has the same sign as $\mathfrak u_1$, $\mathfrak u_{21}$ has sign $+$ and $\mathfrak u_{22}$ has sign $-$, (ii) $\mathfrak u_2$ is a descendant of $\mathfrak u_1$ but not of $\mathfrak u_{11}$, and (iii) all the leaves in the set $\mathcal Q[\mathbb B]$ are completely paired, where $\mathcal Q[\mathbb B]$ denotes all nodes that are descendants of $\mathfrak u_1$ but not of $\mathfrak u_{11},\mathfrak u_{21}$ or $\mathfrak u_{22}$ (in particular $\mathfrak u_1\in\mathcal Q[\mathbb B]$ and $\mathfrak u_{11},\mathfrak u_{21},\mathfrak u_{22}\not\in\mathcal Q[\mathbb B]$). See Figure \ref{fig:couples_cl}. \item \emph{\bf{(CN) or ``connectivity'' blocks:}} There is a child $\mathfrak u_{11}$ of $\mathfrak u_1$ and $\mathfrak u_{21}$ of $\mathfrak u_2$, such that (i) $\mathfrak u_{11}$ has the same sign as $\mathfrak u_1$ and $\mathfrak u_{21}$ has the same sign as $\mathfrak u_2$, (ii) $\mathfrak u_2$ is either a descendant of $\mathfrak u_{11}$ or not a descendant of $\mathfrak u_1$ (similar for $\mathfrak u_1$), and (iii) all the leaves in the set $\mathcal Q[\mathbb B]$ are completely paired, where $\mathcal Q[\mathbb B]$ denotes all the nodes that are descendants of $\mathfrak u_1$ but not of $\mathfrak u_{11}$, and all the nodes that are descendants of $\mathfrak u_2$ but not of $\mathfrak u_{21}$ (in particular $\mathfrak u_1,\mathfrak u_2\in\mathcal Q[\mathbb B]$ and $\mathfrak u_{11},\mathfrak u_{21}\not\in\mathcal Q[\mathbb B]$). See Figure \ref{fig:couples_cn}. 
\end{itemize} \item For (CL) blocks we can define a new couple $\mathcal Q^{\mathrm{sp}}$ by removing all nodes $\mathfrak m\in\mathcal Q[\mathbb B]\backslash\{ \mathfrak u_1\}$, and turning $\mathfrak u_{11},\mathfrak u_{21}$ and $\mathfrak u_{22}$ into the three new children of $\mathfrak u_1$ with corresponding subtrees attached; here the position of $\mathfrak u_{11}$ as a child of $\mathfrak u_1$ remains the same as in $\mathcal Q$, and the positions of $\mathfrak u_{21}$ and $\mathfrak u_{22}$ as children of $\mathfrak u_1$ are determined by their signs. Then, the molecule $\mathbb M^{\mathrm{sp}}=\mathbb M(\mathcal Q^{\mathrm{sp}})$ is formed from $\mathbb M$ by merging all the atoms in $\mathbb B$ (including two joints) into one single atom. We call this operation going from $\mathcal Q$ to $\mathcal Q^{\mathrm{sp}}$ \emph{splicing}. \item For (CN) blocks, if we remove from $\mathbb M(\mathcal Q)$ \emph{any set of disjoint (CN) blocks} in $\mathcal Q$, where by removing a block we mean removing all bonds $\ell\in\mathbb B$, then the resulting molecule is still \emph{connected} (though it no longer comes from a couple). This remains true if we remove also a (CL) block, provided \emph{that both joints of this (CL) block have degree $3$. Note that there is at most one such block due to Proposition \ref{moleproperty}; for simplicity we will call it a \emph{root block}.} \end{enumerate} \end{prop} \begin{figure}\label{fig:couples_cl}
\end{figure} \begin{figure}\label{fig:couples_cn}
\end{figure} \begin{proof} For any $v\in\mathbb B\backslash\{v_1,v_2\}$, there is a unique bond $\ell\sim v$ such that $\mathfrak m(v,\ell)=v$; let $v^+$ be the other endpoint of $\ell$, then $\mathfrak n(v^+)$ is just the parent of $\mathfrak n(v)$ in $\mathcal Q$. Consider the path $v\to v^+\to v^{++}\to\cdots$, then it either stays in $\mathbb B$ or reaches the joints $v_1$ or $v_2$ at some point, due to the structure of $\mathbb B$. However, if it stays in $\mathbb B$ then eventually it will reach one of the roots of $\mathcal Q$, which is impossible because the roots have degree $3$ as atoms. Therefore it must reach $v_1$ or $v_2$, which means that for any atom $v\in\mathbb B$, $\mathfrak n(v)$ must be \emph{a descendant of either $\mathfrak u_1$ or $\mathfrak u_2$}.
Now consider $v_1$; there are two possibilities: (a) there is an atom $v_1^+\in\mathbb B$ such that $\mathfrak n(v_1^+)$ is the parent of $\mathfrak u_1$ (as explained above), or (b) there is no such atom $v_1^+\in\mathbb B$. For $v_2$ there are similarly these two possibilities. If case (a) holds for $v_2$ then by the same proof above, we know that $\mathfrak u_2$ is a descendant of $\mathfrak u_1$; since $\mathfrak u_1$ and $\mathfrak u_2$ cannot be descendants of each other, by symmetry we have only two cases: either (a) holds for $v_2$ and (b) holds for $v_1$, or (b) holds for both $v_1$ and $v_2$. Below we define $\ell_1$ and $\ell_2$ as the two bonds connecting $v_1$ to atoms in $\mathbb B$, and similarly define $\ell_3$ and $\ell_4$ corresponding to $v_2$.
(1) Suppose (a) holds for $v_2$ and (b) holds for $v_1$. In particular $\mathfrak u_2$ is a descendant of $\mathfrak u_1$, and $\mathfrak m(v_1,\ell_j)\,(j\in\{1,2\})$ are two children of $\mathfrak u_1$; let $\mathfrak u_{11}$ be the other child of $\mathfrak u_1$. Similarly, $\mathfrak m(v_2,\ell_j)\,(j\in\{3,4\})$ are $\mathfrak u_2$ and one child of $\mathfrak u_2$, let $\mathfrak u_{21}$ and $\mathfrak u_{22}$ be the two other children of $\mathfrak u_2$. Clearly $\mathfrak u_{11}$ must have the same sign as $\mathfrak u_1$ and $\mathfrak u_{21}$ must have opposite sign with $\mathfrak u_{22}$, because $\ell_1$ and $\ell_2$ have opposite directions, and the same for $\ell_3$ and $\ell_4$. We will assume $\mathfrak u_{21}$ has sign $+$ and $\mathfrak u_{22}$ has sign $-$.
Now we claim that $v\in\mathbb B\backslash\{v_1\}$ if and only if $\mathfrak n(v)\in\mathcal Q[\mathbb B]\backslash\{\mathfrak u_1\}$. In fact, if $v\in\mathbb B\backslash\{v_1\}$ then first $\mathfrak n(v)$ is a descendant of $\mathfrak u_1$ as shown above; second, consider the path $\mathfrak n(v)\to \mathfrak n(v^+)\to \mathfrak n(v^{++})\to\cdots\to \mathfrak u_1$, then the node immediately before $\mathfrak u_1$ must be $\mathfrak n(w)$ for some $w\in\mathbb B\backslash\{v_1\}$ and thus cannot be $\mathfrak u_{11}$ by definition, hence $\mathfrak n(v)$ is not a descendant of $\mathfrak u_{11}$; third, if the above path contains $\mathfrak u_2$, then the node immediately before $\mathfrak u_2$ must be $\mathfrak n(w)$ for some $w\in\mathbb B\backslash\{v_1,v_2\}$ and thus cannot be $\mathfrak u_{21}$ or $\mathfrak u_{22}$ by definition, hence $\mathfrak n(v)$ is not a descendant of $\mathfrak u_{21}$ or $\mathfrak u_{22}$ either.
Conversely, if $\mathfrak n(v)\neq \mathfrak u_1$ is a descendant of $\mathfrak u_1$ but not of $\mathfrak u_{11},\mathfrak u_{21}$ or $\mathfrak u_{22}$, then the path $\mathfrak n(v)\to \mathfrak n(v^+)\to \mathfrak n(v^{++})\to\cdots$ must end at $\mathfrak u_1$, and the node immediately before $\mathfrak u_1$ must not be $\mathfrak u_{11}$. Thus this node must be $\mathfrak n(w)$ for some $w\in\mathbb B\backslash\{v_1\}$, and the $v^{+\cdots +}$ atoms involved in this path must all be in $\mathbb B$ unless this path contains $\mathfrak u_2$. But if $\mathfrak u_2$ belongs to this path, then the node immediately before it must not be $\mathfrak u_{21}$ or $\mathfrak u_{22}$, so it must also be $\mathfrak n(w)$ for some $w\in\mathbb B\backslash\{v_1,v_2\}$, and again all the $v^{+\cdots +}$ atoms involved in this path must be in $\mathbb B$. In any case we have $v\in\mathbb B$, so our claim is true.
Now with the above claim, it is easy to see that all the leaves in $\mathcal Q[\mathbb B]$ must be completely paired, and these leaf pairs exactly correspond to all LP bonds in $\mathbb B$. It is also clear that, merging $\mathbb B$ to a single atom corresponds to removing all nodes $\mathfrak m\in\mathcal Q[\mathbb B]\backslash\{ \mathfrak u_1\}$, and the resulting molecule is exactly $\mathbb M(\mathcal Q^{\mathrm{sp}})$ for the resulting couple $\mathcal Q^{\mathrm{sp}}$.
(2) Suppose (b) holds for both $v_1$ and $v_2$. In particular $\mathfrak m(v_1,\ell_j)\,(j\in\{1,2\})$ are two children of $\mathfrak u_1$, let the other child of $\mathfrak u_1$ be $\mathfrak u_{11}$. Similarly define $\mathfrak u_{21}$, then $\mathfrak u_{11}$ must have the same sign as $\mathfrak u_1$ and $\mathfrak u_{21}$ has the same sign as $\mathfrak u_2$, again due to the directions of the bonds $\ell_j$. Moreover, if $\mathfrak u_2$ is a descendant of $\mathfrak u_1$, then in the path $\mathfrak n(v_2)\to\mathfrak n(v_2^+)\to\cdots\to \mathfrak u_1$, the second node does not belong to $\mathbb B$, and neither does any subsequent terms; thus the node immediately before $\mathfrak u_1$ \emph{cannot} be $\mathfrak n(w)$ for any $w\in\mathbb B$, so it must be $\mathfrak u_{11}$, which means that $\mathfrak u_2$ is a descendant of $\mathfrak u_{11}$ (actually $\mathfrak u_2$ also cannot equal $\mathfrak u_{11}$ because otherwise we would have an extra bond between $v_1$ and $v_2$, turning $\mathbb B$ into a hyper-block). Now, by arguing similarly as in (1) we can show that $v\in\mathbb B\backslash\{v_1,v_2\}$ if and only if $\mathfrak n(v)\in\mathcal Q[\mathbb B]\backslash\{\mathfrak u_1,\mathfrak u_2\}$. This easily implies that all the leaves in $\mathcal Q[\mathbb B]$ are completely paired.
Next we prove the preservation of connectivity after removing any set of disjoint type (CN) blocks. In fact, we may define the molecule $\mathbb M(\widetilde{\mathcal Q})$ for generalized couples $\widetilde{\mathcal Q}$ formed by two \emph{arbitrary trees} which are not necessarily ternary trees (plus that we only keep the pairing structure but ignore the signs of nodes and directions of bonds), similar to Definition \ref{defmole}.
In this regard, removing a type (CN) block amounts to removing all nodes $\mathfrak m\in\mathcal Q[\mathbb B]\backslash\{\mathfrak u_1,\mathfrak u_2\}$. What remains is a generalized couple formed by two trees in which $\mathfrak u_{11}$ is the only child of $\mathfrak u_1$ and $\mathfrak u_{21}$ is the only child of $\mathfrak u_2$ (the fact that $\mathfrak u_1$ and $\mathfrak u_2$ each has only one child in the new generalized couple, corresponds to the fact that $v_1$ and $v_2$ each has degree $2$ in the new molecule). This can be extended to the removal of multiple disjoint type (CN) blocks, and the resulting molecule is $\mathbb M(\widetilde{\mathcal Q})$ where $\widetilde{\mathcal Q}$ is a generalized couple formed by two trees such that each branching node has either $1$ or $3$ children. However, the corresponding molecule $\mathbb M(\widetilde{\mathcal Q})$ is still connected, because each node can be connected to the root of its tree by using PC bonds, and there exists at least one LP bond between the two trees since the number of leaves in each tree is odd. This completes the proof.
Finally, let $\mathbb B$ be a root (CL) block, i.e. both joints of $\mathbb B$ have degree $3$. Then, in the notation of Proposition \ref{block_clcn}, we must have (up to symmetry) that $\mathfrak u_1$ is the root of one tree in $\mathcal Q$, and a child of $\mathfrak u_2$ (say $\mathfrak u_{21}$) is paired with the root of the other tree as leaves. Thus we have a couple $\widetilde{\mathcal Q}$ rooted at $\mathfrak u_{11}$ and $\mathfrak u_{22}$, and removing $\mathbb B$ reduces $\mathbb M(\mathcal Q)$ to a molecule $\widetilde{\mathbb M}$ which equals $\mathbb M(\widetilde{\mathcal Q})$ plus two extra single bonds. Clearly $\widetilde{\mathbb M}$ is connected, as is the result of removing from it any number of (CN) blocks in $\mathbb M(\widetilde{\mathcal Q})$. This completes the proof. \end{proof} \begin{cor}\label{blockchainprop} Let $\mathcal Q$ be a couple and $\mathbb B\subset\mathbb M(\mathcal Q)$ be a block or hyper-block that is concatenated by at least two blocks $\mathbb B_j\,(1\leq j\leq m)$ as in Definition \ref{defblock}, where $m\geq 2$. Then at most one $\mathbb B_j$ can be a (CN) block. If $\mathbb B$ is a block and all $\mathbb B_j$ are (CL) blocks, then $\mathbb B$ is a (CL) block. If $\mathbb B$ is a block and there is one (CN) block $\mathbb B_j$, then after doing splicing at all other (CL) blocks, this $\mathbb B$ becomes a single (CN) block $\mathbb B_j$. \end{cor} \begin{proof} Let the joints of $\mathbb B_j$ be $v_j$ and $v_{j+1}$ for $1\leq j\leq m$. Recall the possibilities (a) and (b) defined in the proof of Proposition \ref{block_clcn}, which are stated for any joint $v$ of any block $\mathbb B_j\subset\mathbb M(\mathcal Q)$. If some $\mathbb B_j$ is a (CN) block, then as in the proof of Proposition \ref{block_clcn}, (b) must happen for both joints $v_j$ and $v_{j+1}$ relative to the block $\mathbb B_j$. Thus (a) must happen for the joint $v_j$ relative to $\mathbb B_{j-1}$, and $\mathbb B_{j-1}$ is a (CL) block. 
Moreover, (b) must happen for $v_{j-1}$ relative to $\mathbb B_{j-1}$, and hence (a) must happen for $v_{j-1}$ relative to $\mathbb B_{j-2}$, and so on. Of course we can also start with $v_{j+1}$ and proceed with $\mathbb B_{j+1}$ etc., and altogether we know that all blocks other than $\mathbb B_j$ must be (CL) blocks. If $\mathbb B$ is a block, then after we splice at all the other (CL) blocks, this $\mathbb B_j$ should remain unperturbed, as a (CN) block.
If $\mathbb B$ is a block and all $\mathbb B_j$ are (CL) blocks, then for each block $\mathbb B_j$, (a) must happen at one of its joints, say $v_j$ (if it is $v_{j+1}$ then the proof is the same by going in the other direction). Then (b) must happen for the joint $v_j$ relative to $\mathbb B_{j-1}$, and (a) must happen for $v_{j-1}$ relative to $\mathbb B_{j-1}$ and so on. In the end (a) must happen for $v_1$ relative to $\mathbb B_1$ (and hence relative to $\mathbb B$), so $\mathbb B$ is a (CL) block. \end{proof} \section{Vines and twists}\label{secvine} \subsection{Vines}\label{subsecvine} We are now ready to introduce the notion of \emph{vines} which are the special kind of blocks that are of fundamental importance in our proof. \begin{df}[Vines]\label{defvine}\emph{Vines}\footnote{The nomenclature comes from the shapes of the blocks drawn in Figure \ref{fig:vines}.} are defined as the blocks (I)--(VIII) drawn in Figure \ref{fig:vines}. We also define the notion of \emph{ladders} as drawn in Figure \ref{fig:vines}. For each ladder we also require that each pair of two parallel single bonds must have opposite directions, and define its \emph{length} to be the number of double bonds in it minus one. We refer to vines (I)--(II) as \emph{bad vines}, vines (III)--(VIII) as \emph{normal vines}. Note that $\sigma(\mathbb V)=0$ for all vines $\mathbb V$ except vines (V) and vines (I) (see Definition \ref{defblock}), for which $\sigma(\mathbb V)=1$ and $\sigma(\mathbb V)=2$ respectively.
Define \emph{hyper-vines} (or HV for short) to be the hyper-blocks that are adjoints of vines, as in Definition \ref{defblock}. We also define \emph{vine-chains} (or VC), resp. \emph{hyper-vine-chains} (or HVC), to be the blocks, resp. hyper-blocks, that are formed by concatenating finitely many vines as in Definition \ref{defblock} (these vines are called \emph{ingredients}). Note that a single vine is viewed as a VC, but an HV is not viewed as an HVC. It is easy to verify that assumptions (i) and (ii) in Lemma \ref{disjointlem} hold for any HV, VC or HVC. For simplicity, we will refer to any HV, VC or HVC as \emph{vine-like objects}.
Note that, if the molecule $\mathbb M=\mathbb M(\mathcal Q)$ comes from a couple, then any vine could be a (CL) or (CN) vine depending on whether it is a (CL) or (CN) block. \end{df}
\begin{figure}\label{fig:vines}
\end{figure} \begin{rem}\label{remilia} We make a few remarks about the illustrations in Figure \ref{fig:vines}: \begin{enumerate}[{(a)}] \item In each vine, the two joints are colored blue; the bonds at the joints are drawn with directions, indicating that the two bonds at each joint must have opposite directions. \item The other bonds are drawn without directions, meaning they are arbitrary, as long as requirements of a molecule are met (each non-joint atom has in-degree $2$ and out-degree $2$). \item In each vine, we may insert a ladder between each pair of parallel bonds that are drawn as dashed lines (distinguished by different colors). \item In the ladder, some bonds are drawn with directions, indicating that each pair of two parallel single bonds must have opposite directions. \end{enumerate} \end{rem} \subsubsection{Bad (CL) vines in a couple}\label{propfamily} We need to study the relative position of (CL) vines (I), and the part of (CL) vines (II) near one of its joints, in a couple. \begin{prop}\label{molecpl} Consider a (CL) vine $\mathbb V\subset\mathbb M(\mathcal Q)$ with joints $v_1$ and $v_2$. Let $\mathfrak u_j=\mathfrak n(v_j)$, by Proposition \ref{block_clcn} we may assume $\mathfrak u_2$ is a descendant of $\mathfrak u_1$, and also specify two children $\mathfrak u_{21}$ and $\mathfrak u_{22}$ of $\mathfrak u_2$ that have signs $+$ and $-$ respectively; let $\mathfrak u_{23}$ be the other child of $\mathfrak u_2$, note that $\mathfrak u_{23}$ has the same sign as $\mathfrak u_2$. \begin{enumerate}[{(1)}] \item If $\mathbb V$ is vine (II), then $v_2$ is connected to two atoms $v_3$ and $v_4$ by single bonds, while $v_3$ and $v_4$ are connected by a double bond. Let $\mathfrak u_j=\mathfrak n(v_j)$, then (up to symmetry) exactly one of the following five scenarios happens. See Figure \ref{fig:block_mole}. 
\begin{enumerate}[{(a)}] \item Vine (II-a): $\mathfrak u_2$ is a child of $\mathfrak u_4$, and $\mathfrak u_{23}$ is paired with one child $\mathfrak u_0$ of $\mathfrak u_3$ as leaves, and the other two children of $\mathfrak u_4$ are paired with the other two children of $\mathfrak u_3$ as leaves. Here neither $\mathfrak u_3$ nor $\mathfrak u_4$ is a descendant of the other, but they have a common ancestor, namely $\mathfrak u_1$.
\item Vine (II-b): $\mathfrak u_2$ is a child of $\mathfrak u_3$, and $\mathfrak u_{23}$ is paired with one child $\mathfrak u_0$ of $\mathfrak u_4$ as leaves, and the other two children of $\mathfrak u_3$ are paired with the other two children of $\mathfrak u_4$ as leaves. Here neither $\mathfrak u_4$ nor $\mathfrak u_3$ is a descendant of the other, but they have a common ancestor, namely $\mathfrak u_1$.
\item Vine (II-c): $\mathfrak u_4$ is a child of $\mathfrak u_3$ and $\mathfrak u_2$ is a child of $\mathfrak u_4$. One of the other two children of $\mathfrak u_3$ is paired with one of the other children of $\mathfrak u_4$ as leaves, and the remaining child $\mathfrak u_0$ of $\mathfrak u_3$ is paired with $\mathfrak u_{23}$ as leaves. Here $\mathfrak u_3$ is a descendant of $\mathfrak u_1$.
\item Vine (II-d): $\mathfrak u_2$ and $\mathfrak u_4$ are two children of $\mathfrak u_3$, and $\mathfrak u_{23}$ is paired with one child $\mathfrak u_0$ of $\mathfrak u_4$ as leaves, and the remaining child of $\mathfrak u_3$ is paired with another child of $\mathfrak u_4$ as leaves. Here $\mathfrak u_3$ is a descendant of $\mathfrak u_1$.
\item Vine (II-e): $\mathfrak u_2$ is a child of $\mathfrak u_3$, and $\mathfrak u_4=\mathfrak u_{23}$. The other two children of $\mathfrak u_3$ are paired with two of the children of $\mathfrak u_4$ as leaves. Here $\mathfrak u_3$ is a descendant of $\mathfrak u_1$. \end{enumerate}
\item If $\mathbb V$ is vine (I), then exactly one of the following two scenarios happens. See Figure \ref{fig:block_mole}. \begin{enumerate}[{(a)}] \item Vine (I-a): $\mathfrak u_2$ is the left or right child of $\mathfrak u_1$, and $\mathfrak u_{23}$ is paired to the middle child $\mathfrak u_0$ of $\mathfrak u_1$ as leaves. \item Vine (I-b): $\mathfrak u_2$ is the middle child of $\mathfrak u_1$, and $\mathfrak u_{23}$ is paired to the left or right child $\mathfrak u_0$ of $\mathfrak u_1$ as leaves. \end{enumerate} \end{enumerate} \begin{figure}\label{fig:block_mole}
\end{figure} For simplicity, below we will call a (CL) vine $\mathbb V$ \emph{core} if it is bad and not vine (II-e), and \emph{non-core} if it is normal or vine (II-e). \end{prop} \begin{proof} We examine the labels of each bond between $(v_2,v_3,v_4)$ using Definition \ref{defmole}. Note that, since $\mathbb V$ is a (CL) vine, one of the bonds connecting $v_2$ to $v_3$ and $v_4$ must be labeled PC with $v_2$ labeled by C; moreover, some configurations are not possible (for example, the two bonds between $v_3$ and $v_4$ cannot both be labeled PC) due to basic properties of the couple $\mathcal Q$ (for example two branching nodes cannot be the parent of each other). By a simple enumeration and symmetry, we find that there is only one case for vine (I) and five cases for vine (II), as drawn in Figure \ref{fig:block_mole}.
For each case of labels, one can apply the definition of the label to deduce that the relation between $(\mathfrak u_2,\mathfrak u_3,\mathfrak u_4)$ has to be as in the corresponding figures. For example, for vine (II-a), by the PC bond between $v_2$ and $v_4$, we know that $\mathfrak u_2$ must be a child of $\mathfrak u_4$. By the two LP bonds between $v_3$ and $v_4$, we know that the two other children of $\mathfrak u_4$ must be paired with two children of $\mathfrak u_3$ as leaves. By the LP bond between $v_2$ and $v_3$, we know that the remaining child of $\mathfrak u_3$ must be paired to a child of $\mathfrak u_2$ as leaves, and this child of $\mathfrak u_2$ must be $\mathfrak u_{23}$, which is drawn in blue. Clearly neither $\mathfrak u_3$ nor $\mathfrak u_4$ can be a descendant of the other, but they have a common ancestor $\mathfrak u_1$, so we arrive at the illustration of Vine (II-a) in Figure \ref{fig:block_mole}. The other cases are treated similarly. \end{proof} \begin{rem}\label{molecplrem} We make a few remarks about the illustrations in Figure \ref{fig:block_mole}. \begin{enumerate}[{(a)}] \item The labels of the bonds in the molecule are indicated as in Definition \ref{defmole}. Note that cases (II-a) and (II-b) are actually symmetric, but we prefer to state them as two cases for convenience of showing the cancellation between them, see Definition \ref{twist}. \item The relative positions of nodes may vary (for example, for vines (II-a) and (II-b) in Figure \ref{fig:block_mole}, $\mathfrak u_2$ might be the middle child of $\mathfrak u_3$ or $\mathfrak u_4$ instead of the right child), but the descriptions in Proposition \ref{molecpl} must be met (for example the blue child of $\mathfrak u_2$, which is $\mathfrak u_{23}$, must have the same sign as $\mathfrak u_2$). \item The nodes represented by hollow dots instead of solid dots are called \emph{free children}. 
These include $\mathfrak u_{21}$ and $\mathfrak u_{22}$ (\emph{except} vine (II-e)), and either a child of $\mathfrak u_4$ for vines (II-c) and (II-d), or a child of $\mathfrak u_1$ for vines (I-a) and (I-b). \item For each free child we also draw the subtree rooted at it, and indicate it by suitable $\mathcal T_j$, as drawn in Figure \ref{fig:block_mole}. These are used in Definition \ref{twist} below. \item Note that the distinction between vines (I)--(VIII) only involves the structure of the molecule $\mathbb M$ (as a directed graph), but the distinction between (CL) and (CN) vines, as well as families (II-a)--(II-e) etc., is intrinsic to the structure of the \emph{couple} $\mathcal Q$; for instance, it does not make sense to talk about (CL) vines or vines (II-e) if $\mathbb M$ does not have the form $\mathbb M(\mathcal Q)$. \end{enumerate} \end{rem} \subsection{Twists} By exploiting the structure of bad (CL) vines in a couple, as described in Proposition \ref{molecpl}, we can define the operation of \emph{twisting}, which captures the cancellation between such vines. \begin{df}[Twists]\label{twist} Let $\mathcal Q$ be a given couple with the corresponding molecule $\mathbb M(\mathcal Q)$. \begin{enumerate}[{(1)}] \item Given a core (CL) vine $\mathbb V\subset\mathbb M(\mathcal Q)$ as in Proposition \ref{molecpl}, let $v_j$ and $\mathfrak u_j$ for $1\leq j\leq 4$ be as in that proposition. Then, we shall define a new couple $\mathcal Q'$, which we call a \emph{unit twist} of $\mathcal Q$, as follows.
First, in $\mathcal Q'$, let any possible parent-child relation, as well as any possible children pairings, between $\mathfrak u_3$ and $\mathfrak u_4$, be exactly the same as in $\mathcal Q$. Next, let the structure of $\mathcal Q'$ \emph{excluding the subtrees rooted at $\mathfrak u_3$ and $\mathfrak u_4$ (or $\mathfrak u_1$ for vine (I))}, be exactly the same as $\mathcal Q$. Moreover, consider the \emph{free children} in Figure \ref{fig:block_mole} (as in Remark \ref{molecplrem}); we require that the positions of the two free children $\mathfrak u_{21}$ and $\mathfrak u_{22}$ (as children of $\mathfrak u_2$), as well as the positions of the two subtrees (namely $\mathcal T_1$ and $\mathcal T_2$) rooted at them, be \emph{switched}\footnote{They are switched because the sign of $\mathfrak u_2$ is changed (see Remark \ref{twistexplain}); if we locate $\mathfrak u_{21}$ as the child of $\mathfrak u_{2}$ other than $\mathfrak u_{23}$ that has sign $+$ (and same for $\mathfrak u_{22}$), then this remains the same for both couples.} in $\mathcal Q'$ compared to $\mathcal Q$. For the other free child (if it exists), we require that its position (as a child of $\mathfrak u_1$ or $\mathfrak u_4$) and the subtree (namely $\mathcal T_3$) rooted at it, be exactly the same in $\mathcal Q'$ as in $\mathcal Q$. Then, it is easy to see that there are exactly two options to insert $\mathfrak u_2$, one as a child of $\mathfrak u_3$, and the other as a child of $\mathfrak u_4$ (for vine (I), the two options are children of $\mathfrak u_1$ that has the same or opposite sign with $\mathfrak u_1$). One of these two choices leads to $\mathcal Q$, and we define the couple given by the other choice as $\mathcal Q'$. Clearly $\mathcal Q'$ is prime iff $\mathcal Q$ is. \item In the same way, start with a collection of (CL) vines $\mathbb V_j\subset\mathbb M(\mathcal Q)\,(0\leq j\leq q-1)$, such that any two are either disjoint or only share one common joint and no other common atom (i.e. 
the union of all $\mathbb V_j$ equals the disjoint union of some VC and HVC). Then, we call any block $\mathcal Q'$ a \emph{twist} of $\mathcal Q$, if $\mathcal Q'$ can be obtained from $\mathcal Q$ by performing the unit twist operation at a subset of these blocks, which only contains core vines. In particular, for any given $\mathcal Q$ and $\mathbb V_j$, the number of possible twists is a power of two, and at most $2^q$. \end{enumerate} \end{df} Since the notion of twisting is of vital importance in our proof (especially in Section \ref{reduct1}), we will make several remarks below explaining Definition \ref{twist} in more detail. \begin{rem}\label{explaintwist} We discuss an example of the (unit) twist operation in Definition \ref{twist}. Suppose $\mathbb V$ is vine (II-c) or (II-d) in Figure \ref{fig:block_mole}. Then we have that: \begin{enumerate}[{(a)}] \item The node $\mathfrak u_4$ is the left child of $\mathfrak u_3$, and the middle child of $\mathfrak u_3$ is paired with the right child of $\mathfrak u_4$ as leaves. \item The left child of $\mathfrak u_4$ is a free child with subtree $\mathcal T_3$. The left and middle children of $\mathfrak u_2$ are the two free children $\mathfrak u_{21}$ and $\mathfrak u_{22}$ (or $\mathfrak u_{22}$ and $\mathfrak u_{21}$), with the subtrees rooted at $\mathfrak u_{21}$ and $\mathfrak u_{22}$ being $\mathcal T_1$ and $\mathcal T_2$ respectively. \item $\mathfrak u_2$ is a child of $\mathfrak u_3$ (or $\mathfrak u_4$), and the right child $\mathfrak u_{23}$ of $\mathfrak u_2$ is paired to a child of $\mathfrak u_4$ (or $\mathfrak u_3$) as leaves. \end{enumerate}
Now by Definition \ref{twist}, all these properties must hold in both $\mathcal Q$ and $\mathcal Q'$; also the structure of $\mathcal Q$ and $\mathcal Q'$, excluding the subtree rooted at $\mathfrak u_3$, must be the same. This leaves only two possibilities: either $\mathfrak u_2$ is \emph{the middle child} of $\mathfrak u_4$ and $\mathfrak u_{23}$ is paired to \emph{the right child} of $\mathfrak u_3$ as leaves, or $\mathfrak u_2$ is \emph{the right child} of $\mathfrak u_3$ and $\mathfrak u_{23}$ is paired to \emph{the middle child} of $\mathfrak u_4$ as leaves. These are exactly vines (II-c) and (II-d) in in Proposition \ref{molecpl}. Note that for vines (II-c) $\mathfrak u_{22}$ is the left child of $\mathfrak u_2$ and $\mathfrak u_{21}$ is the middle child, while for vines (II-d) $\mathfrak u_{21}$ is the left child and $\mathfrak u_{22}$ is the middle child, which is consistent with the description in Definition \ref{twist}.
In the same way, we can see that performing one unit twist operation \emph{exactly switches vines (I-a), (II-a), (II-c) vines with vines (I-b), (II-b), (II-d) vines}, respectively. \end{rem} \begin{rem}\label{twistexplain} Throughout the proof below, for any fixed (CL) vine $\mathbb V$, we always adopt the notations $(\mathfrak u_1,\mathfrak u_2,\mathfrak u_{11},\mathfrak u_{21},\mathfrak u_{22})$ as in Proposition \ref{block_clcn}; for bad (CL) vines we also adopt the notations $(\mathfrak u_3,\mathfrak u_4,\mathfrak u_{23},\mathfrak u_0)$ as in Proposition \ref{molecpl}, whenever applicable. The following useful facts are easily verified from Definition \ref{twist}. They are stated for unit twists but can be extended to general twists. \begin{enumerate}[{(a)}] \item Let $\mathcal Q$ and $\mathcal Q'$ be unit twists of each other at a bad (CL) vine $\mathbb V\subset\mathbb M(\mathcal Q)$, then $\mathbb M(\mathcal Q)$ and $\mathbb M(\mathcal Q')$ are the same as directed graphs. They also have the same labelings of bonds, except at the atom $v_2$ (see Figure \ref{fig:block_mole}). \item Continuing (a), the only difference at $v_2$ is that the labels of the two bonds connecting $v_2$ to atoms in $\mathbb V$ are switched (one label is PC with $v_2$ labeled C and the other label is LP). \item Moreover, if we do splicing (as defined in Proposition \ref{block_clcn}) for $\mathcal Q$ and the (CL) vine $\mathbb V$, or for $\mathcal Q'$ and the same (CL) vine $\mathbb V$ (as shown in (a) above), then the two resulting \emph{couples}, defined as $\mathcal Q^{\mathrm{sp}}$ and $(\mathcal Q')^{\mathrm{sp}}$, are exactly the same. \item Finally, the values of $\zeta_{\mathfrak u_j}$ for any branching node $\mathfrak u_j\,(j\neq 2)$ are the same for $\mathcal Q$ and $\mathcal Q'$, while the values of $\zeta_{\mathfrak u_2}$ are the opposite for $\mathcal Q$ and $\mathcal Q'$. \end{enumerate} \end{rem} \begin{figure}\label{fig:twist_dec}
\end{figure} \begin{rem} \label{dectwist} We make another simple observation regarding decorations of couples and their twists. Let $\mathcal Q$ be a couple and $\mathcal Q'$ be formed from $\mathcal Q$ by a unit twist (this is readily generalized to arbitrary twists). Then, the $k$-decorations of $\mathcal Q$ are in one-to-one correspondence with $k$-decorations of $\mathcal Q'$, where the values of $k_\mathfrak m$ for any branching node or leaf $\mathfrak m$ are the same in both cases, but one \emph{switches} the values of $k_{\mathfrak u_2}$ and $k_{\mathfrak u_{23}}$ in both cases, see Figure \ref{fig:twist_dec}.
Let $\mathcal Q^{\mathrm{sp}}=(\mathcal Q')^{\mathrm{sp}}$ be the couple formed from $\mathcal Q$ (or $\mathcal Q'$) by doing splicing, then for any $k$-decoration of $\mathcal Q$ and the corresponding $k$-decoration of $\mathcal Q'$ defined above, the decorations of $\mathcal Q^{\mathrm{sp}}$ \emph{inherited} from them are the same; here inheriting means that the value of $k_\mathfrak m$ is kept the same for any $\mathfrak m$, whether it is viewed as a node of $\mathcal Q$ or $\mathcal Q^{\mathrm{sp}}$. This notion also applies to any subset of couples or molecules, or when a couple or molecule is reduced by repeated splicing (or merging) of vines, and will be used frequently below. \end{rem} \begin{df}\label{twistrep} Note that the unit twist operation in Definition \ref{twist} only changes the structure of the set $\mathcal Q[\mathbb V]$ defined in Proposition \ref{block_clcn} (which is part of a ternary tree, obtained by taking the subtree rooted at $\mathfrak u_1$ and removing the subtrees rooted at $\mathfrak u_{11}$, $\mathfrak u_{21}$ and $\mathfrak u_{22}$), and does not affect the rest of the couple $\mathcal Q$, so we can view it as a unit twist for $\mathcal Q[\mathbb V]$.
We may define another operation on $\mathcal Q[\mathbb V]$ which we call \emph{flipping}, where we flip the signs of $\mathfrak u_{1}$ (and $\mathfrak u_{11}$), and switch the two subtrees rooted at the two other children nodes of $\mathfrak u_1$, together with leaf pairings, see Figure \ref{fig:flip}. Note that unit twisting is an operation on $\mathcal Q[\mathbb V]$, defined only for core (CL) vines $\mathbb V$ (Proposition \ref{molecpl}), that can be canonically extended to the couple. Flipping, however, is an operation on $\mathcal Q[\mathbb V]$, defined for all (CL) vines $\mathbb V$, that in general cannot be canonically extended to the couple. However, \emph{if $\mathbb V$ is concatenated with another (CL) vine $\mathbb V_1$ above it}, then flipping at $\mathbb V$ can be extended to a couple operation, which is \emph{unit twisting} at the vine $\mathbb V_1$. \begin{figure}\label{fig:flip}
\end{figure}
Now, given a (CL) vine $\mathbb V$, there are $O(C^n)$ possible structures for $\mathcal Q[\mathbb V]$, where $n$ is the number of branching nodes in $\mathcal Q[\mathbb V]\backslash\{\mathfrak u_1\}$. We define two of them to be equivalent, if one can be formed from the other by flipping, and (if $\mathbb V$ is a core (CL) vine) unit twisting. We call each equivalence class a \emph{code}, denoted by $\mathtt{cod}$. Note that for a core (CL) vine $\mathbb V$, each code contains exactly four elements, as shown in Figure \ref{fig:flip}; they are uniquely determined by the signs $\mathtt{sgn}:=\zeta_{\mathfrak u_1}$ and $\mathtt{ind}:=\zeta_{\mathfrak u_2}$. For other (CL) vines $\mathbb V$, each code contains exactly two elements; they are uniquely determined by $\mathtt{sgn}$, and the value of $\mathtt{ind}$ is determined by the code.
Let $\mathcal Q$ be a couple with a core (CL) vine $\mathbb V\subset\mathbb M(\mathcal Q)$, then by Definition \ref{twist} and the above discussions, we know that $\mathcal Q$ is in one-to-one correspondence with the quadruple $(\mathcal Q^{\mathrm{sp}}, \mathtt{cod}, \mathfrak n, \mathtt{ind})$, where $\mathtt{cod}$ and $\mathtt{ind}$ are as above, and $\mathfrak n$ is the branching node in $\mathcal Q^{\mathrm{sp}}$ that corresponds to the $\mathfrak u_1$ node of $\mathcal Q[\mathbb V]$ in $\mathcal Q$ (note that $\mathtt{sgn}$ is determined by $\mathcal Q_{\mathrm{sp}}$ and $\mathfrak n$). We shall write this as $\mathcal Q\leftrightarrow (\mathcal Q^{\mathrm{sp}}, \mathtt{cod}, \mathfrak n, \mathtt{ind})$, and note that making a unit twist for $\mathcal Q$ corresponds to changing the value of $\mathtt{ind}$ only. If $\mathbb V$ is non-core then $\mathcal Q$ is still uniquely determined by the above quadruple, except that $\mathtt{ind}$ is now determined by $\mathtt{cod}$. \end{df} \subsection{Full twists} Finally, we extend the notion of twists to general couples by adding back the regular couples $\mathcal Q^{(\mathfrak l,\mathfrak l')}$ and regular trees $\mathcal T^{(\mathfrak m)}$ in Proposition \ref{skeleton}. \begin{df}[Full twists]\label{twistgen} Let $\mathcal Q$ be a couple, $\mathcal Q_{\mathrm{sk}}$ be its skeleton, and write $\mathcal Q\sim (\mathcal Q_{\mathrm{sk}},\mathscr A)$ as in Proposition \ref{skeleton}. \begin{enumerate}[{(1)}] \item Fix one core (CL) vine $\mathbb V\subset\mathbb M(\mathcal Q_{\mathrm{sk}})$. Now consider another couple $\mathcal Q'$, such that $(\mathcal Q')_{\mathrm{sk}}$ either equals $\mathcal Q_{\mathrm{sk}}$, or is a unit twist of it as in Definition \ref{twist}. In the unit twist case, consider the branching node $\mathfrak u_2$ (in the notation of Proposition \ref{molecpl}), which occurs in both $\mathcal Q_{\mathrm{sk}}$ and $(\mathcal Q')_{\mathrm{sk}}$. 
Consider also the leaf pair $(\mathfrak u_{23},\mathfrak u_0)$ involving the non-free child $\mathfrak u_{23}$ of $\mathfrak u_2$, which again occurs in both $\mathcal Q_{\mathrm{sk}}$ and $(\mathcal Q')_{\mathrm{sk}}$, see Figure \ref{fig:block_mole}. Apart from these, any other branching node and leaf pair is exactly the same in the two couples $\mathcal Q_{\mathrm{sk}}$ and $(\mathcal Q')_{\mathrm{sk}}$.
Now, if $(\mathcal Q')_{\mathrm{sk}}$ is a unit twist of $\mathcal Q_{\mathrm{sk}}$, we say $\mathcal Q'\sim((\mathcal Q')_{\mathrm{sk}},\mathscr A')$ is a \emph{full unit twist} of $\mathcal Q$, if (i) for any branching node $\mathfrak m\neq\mathfrak u_2$ (or any leaf pair $(\mathfrak l,\mathfrak l')\neq(\mathfrak u_{23},\mathfrak u_0)$), the regular trees $\mathcal T^{(\mathfrak m)}\in \mathscr A$ and $(\mathcal T')^{(\mathfrak m)}\in\mathscr A'$ (or the regular couples $\mathcal Q^{(\mathfrak l,\mathfrak l')}\in\mathscr A$ and $(\mathcal Q')^{(\mathfrak l,\mathfrak l')}\in\mathscr A'$) are the same, and (ii) the regular trees $\mathcal T^{(\mathfrak u_2)}\in\mathscr A$ and $(\mathcal T')^{(\mathfrak u_2)}\in\mathscr A'$, and the regular couples $\mathcal Q^{(\mathfrak u_{23},\mathfrak u_0)}\in\mathscr A$ and $(\mathcal Q')^{(\mathfrak u_{23},\mathfrak u_0)}\in\mathscr A'$, satisfy that $n(\mathcal T^{(\mathfrak u_2)})+n(\mathcal Q^{(\mathfrak u_{23},\mathfrak u_0)})=n((\mathcal T')^{(\mathfrak u_2)})+n((\mathcal Q')^{(\mathfrak u_{23},\mathfrak u_0)})$. If $(\mathcal Q')_{\mathrm{sk}}=\mathcal Q_{\mathrm{sk}}$, the definition is modified in the obvious way. \item In general, for any set of (CL) vines $\mathbb V_j\subset\mathbb M(\mathcal Q_{\mathrm{sk}})$ as in Definition \ref{twist}, such that any two are either disjoint or only share one common joint and no other common atom, we define $\mathcal Q'$ to be a \emph{full twist} of $\mathcal Q$, if $\mathcal Q'$ can be constructed from $\mathcal Q$ by performing some full unit twist at each $\mathbb V_j$ that is core. \end{enumerate} \end{df} \begin{rem}\label{fulltwistrep} Let $\mathcal Q_0$ be a couple and $\mathscr A$ be a collection of regular couples and regular trees as in Proposition \ref{skeleton}. 
Suppose $\mathbb V\subset\mathbb M(\mathcal Q_{0})$ is a (CL) vine, let $\mathcal Q^{\mathrm{sp}}$ be the result of splicing $\mathcal Q_{0}$ at $\mathbb V$, then we have $\mathcal Q_{0}\leftrightarrow(\mathcal Q^{\mathrm{sp}}, \texttt{cod},\mathfrak{n},\texttt{ind})$, with notations as in Definition \ref{twistrep}. Therefore, we also have the one-to-one correspondence $(\mathcal Q_{0},\mathscr A)\leftrightarrow(\mathcal Q^{\mathrm{sp}}, \texttt{cod},\mathfrak{n},\texttt{ind},\mathscr B,\mathscr A^{\mathrm{sp}})$, where $\mathscr B$ is the sub-collection of $\mathscr A$ that involves $\mathcal Q^{(\mathfrak l,\mathfrak l')}$ and $\mathcal T^{(\mathfrak m)}$ at nodes $\mathfrak l,\mathfrak m\in\mathcal Q_{0}[\mathbb V]\backslash\{\mathfrak u_1\}$, and $\mathscr A^{\mathrm{sp}}$ is the corresponding $\mathscr A$ collection for $\mathcal Q^{\mathrm{sp}}$, which is the same as the sub-collection $\mathscr A\backslash\mathscr B$ that involves $\mathcal Q^{(\mathfrak l,\mathfrak l')}$ and $\mathcal T^{(\mathfrak m)}$ at nodes $\mathfrak l,\mathfrak m\not\in\mathcal Q_{0}[\mathbb V]\backslash\{\mathfrak u_1\}$.
If $\mathcal Q$ is a couple with skeleton $\mathcal Q_{\mathrm{sk}}$, then we have
\section{$\mathcal K_\mathcal Q$ estimates for regular couples and regular trees}\label{regular} \subsection{Asymptotics and cancellations for $\mathcal K_\mathcal Q$} In this subsection we study expressions $\mathcal K_\mathcal Q$ associated with regular couples $\mathcal Q$ (as well as similar expressions for regular trees). The main results are stated as follows. \begin{prop}\label{regcpltreeasymp} Let $\mathcal Q$ be a regular couple of order $2n$, and $\mathcal K_\mathcal Q(t,s,k)$ be defined as in (\ref{defkq}). Then, $\mathcal K_\mathcal Q(t,s,k)$ extends as a smooth function in $k$ and admits the decomposition $\mathcal K_\mathcal Q=(\mathcal K_\mathcal Q)_{\mathrm{app}}+\mathscr R$, where the remainder $\mathscr R$ satisfies the bound
\begin{equation}\label{remainderbd}\sup_{|\rho|\leq 40d}\|\partial_k^\rho \mathscr R\|_{X_{\mathrm{loc}}^{\eta,40d}}\lesssim(C^+\delta)^nL^{-\gamma_1+2\eta}, \end{equation} where $\gamma_1$ is defined as in (\ref{othergamma}). The main term $(\mathcal K_\mathcal Q)_{\mathrm{app}}$ equals the sum of at most $2^n$ terms of form \begin{equation}\label{kqterms}(\mathcal K_\mathcal Q)_{\mathrm{app}}(t,s,k)=\sum \delta^n\cdot\Jc(t,s)\cdot\mathcal M(k),\end{equation} where each of these terms satisfies the estimate
\begin{equation}\label{kqtermsest}\|\Jc\|_{X_{\mathrm{loc}}^{1-\eta}}\leq (C^+)^n,\quad \sup_{|\rho|\leq 40d}|\partial_k^\rho \mathcal M(k)|\lesssim (C^+)^n\langle k\rangle^{-40d}. \end{equation} In particular we also have
\begin{equation}\label{kqbd}\sup_{|\rho|\leq 40d}\|\partial_k^\rho\mathcal K_\mathcal Q\|_{X_{\mathrm{loc}}^{\eta,40d}}\lesssim(C^+\delta)^n. \end{equation}
Now let $\mathcal T$ be a regular tree of order $2n$. For $0\leq s<t\leq 1$, define $\mathcal K_\mathcal T^*=\mathcal K_\mathcal T^*(t,s,k)$ in the same way as (\ref{defkq}) but with a few differences: the $k$-decoration $\mathscr E$ is replaced by $\mathscr D$, the product $\prod_{\mathfrak l}$ is only taken over leaves $\mathfrak l$ of $+$ sign \emph{different} from the lone leaf, and the domain $\mathcal E$ is replaced by \begin{equation}\label{defdomaind*}\mathcal D^*=\big\{t[\mathcal N]:t_{(\mathfrak l_*)^p}>s;\,\,0<t_{\mathfrak n'}<t_\mathfrak n<t,\mathrm{\ whenever\ }\mathfrak n'\mathrm{\ is\ a\ child\ node\ of\ }\mathfrak n\big\}, \end{equation} where $(\mathfrak l_*)^p$ is the parent node of the lone leaf $\mathfrak l_*$. Then we have the decomposition $\mathcal K_\mathcal T^*=(\mathcal K_\mathcal T^*)_{\mathrm{app}}+\mathscr R^*$, and the main term $(\mathcal K_\mathcal T^*)_{\mathrm{app}}$ equals the sum of at most $2^n$ terms of form \begin{equation}\label{kt*terms}(\mathcal K_\mathcal T^*)_{\mathrm{app}}(t,s,k)=\sum \delta^n\cdot\Jc^*(t,s)\cdot\mathcal M^*(k).\end{equation} The bounds satisfied by $\mathscr R^*$, $\Jc^*$, $\mathcal M^*$ and $\mathcal K_\mathcal T^*$ are the same as in (\ref{remainderbd}), (\ref{kqtermsest}) and (\ref{kqbd}) above, except that the norm $X_{\mathrm{loc}}^{\eta,40d}$ is replaced by $X_{\mathrm{loc}}^{\eta,0}$, and the factor $\langle k\rangle^{-40d}$ on the right hand side of (\ref{kqtermsest}) is replaced by $1$. Finally, we have the simple identities (also for the $(\cdots)_{\mathrm{app}}$ variants) \begin{equation}\label{conjugatekq} \mathcal K_{\overline{\mathcal Q}}(t,s,k)=\overline{\mathcal K_\mathcal Q(s,t,k)},\quad\mathcal K_{\overline{\mathcal T}}^* (t,s,k)=\overline{\mathcal K_\mathcal T^*(t,s,k)}. \end{equation} \end{prop} \begin{prop}\label{regcpltreesum} For any regular couple $\mathcal Q$, let $(\mathcal K_\mathcal Q)_{\mathrm{app}}$ be defined as in (\ref{kqterms}). 
Then for any $0\leq t\leq 1$, we have \begin{equation}\label{matchn}\sum_{n(\mathcal Q)=2n}(\mathcal K_\mathcal Q)_{\mathrm{app}}(t,t,k)=\mathcal M_n(t,k), \end{equation} where the summation is taken over all regular couples $\mathcal Q$ of order $2n$, and the right hand side is defined in (\ref{wketaylor2}). \end{prop} \begin{prop}\label{regcpltreecancel} Recall $(\mathcal K_\mathcal Q)_{\mathrm{app}}$ defined in (\ref{kqterms}) and $(\mathcal K_\mathcal T^*)_{\mathrm{app}}$ defined in (\ref{kt*terms}). Then for any $0\leq s<t\leq 1$, we have that \begin{equation}\label{realcancel}\sum_{n(\mathcal Q)+n(\mathcal T)=2n}(\mathcal K_\mathcal Q)_{\mathrm{app}}(t,s,k)\cdot\overline{(\mathcal K_\mathcal T^*)_{\mathrm{app}}(t,s,k)}, \end{equation} where the sum is taken over all regular couples $\mathcal Q$ and regular trees $\mathcal T$ with $+$ sign that have total order $2n$, is a \emph{real valued} function of $(t,s,k)$. \end{prop} \subsection{Proof of Propositions \ref{regcpltreeasymp} and \ref{regcpltreecancel}} In this subsection we will prove Propositions \ref{regcpltreeasymp} and \ref{regcpltreecancel}. Note that Proposition \ref{regcpltreesum} follows from the exact same calculations in Subsection 7.4 of \cite{DH21}, so we don't repeat it here.
\subsubsection{Proof of Proposition \ref{regcpltreeasymp}} First, (\ref{conjugatekq}) is obvious by definition in (\ref{defkq}). The proof of other results goes along the same lines as the parallel results in \cite{DH21} (Propositions 6.7, 7.4--7.7). In fact, the only difference between the results here and in \cite{DH21} is an improved bound for the remainder in \eqref{remainderbd}, as well as the bound for an improved norm $\|\Jc(t,s)\|_{X^{1-\eta}}$ in \eqref{kqtermsest} compared to the estimate on the $X^{\frac19}$ norm in \cite{DH21} (the results for $\mathcal K_\mathcal T^*$ are similar). We shall explain below how these estimates follow by combining the analysis in \cite{DH21} with new estimates improving Section 6 of \cite{DH21}. We remark that the analysis in Sections 5 and 7 of \cite{DH21} is independent of the chosen scaling law, and as such carries over to the current setting. The analysis in Section 6 of \cite{DH21} is dependent on the scaling law, and we will provide the corresponding alternative bounds here, which carries over to the full range of scaling laws and provides the improved estimate on the remainder.
As such, we only need to prove \eqref{remainderbd} and the first part of \eqref{kqtermsest}. The latter follows from the fact that $(\mathcal K_\mathcal Q)_{\mathrm{app}}$ is zero unless $\mathcal Q$ is a dominant couple (cf. Proposition 7.4 of \cite{DH21}), and in which case $\Jc_\mathcal Q(t,s)$ is an explicit homogeneous polynomial in the variables $t,s$ and $\min(t,s)$. The desired $X^{1-\eta}$ bound then follows. We now focus on the remainder estimate in \eqref{remainderbd}.
We start by recalling some notation for regular couples: From equation \eqref{defkq}, we may write $$\mathcal K_\mathcal Q(t,s,k)=\bigg(\frac{\delta}{2L^{d-\gamma}}\bigg)^n\zeta(\mathcal Q)\sum_\mathscr E\epsilon_\mathscr E\cdot\mathcal B_\mathcal Q(t, s, \delta L^{2\gamma}\Omega[\mathcal N])\cdot\prod_{\mathfrak l\in\mathcal L}^{(+)}n_{\mathrm{in}}(k_\mathfrak l), $$ where \begin{equation}\label{defcoefb2}\mathcal B_\mathcal Q(t,s,\alpha[\mathcal N])=\int_\mathcal E\prod_{\mathfrak n\in\mathcal N}e^{\zeta_\mathfrak n \pi i\alpha_\mathfrak n t_\mathfrak n}\,\mathrm{d}t_\mathfrak n, \end{equation} Here, $\mathcal E$ is defined in \eqref{defdomaine}. Given a regular couple $\mathcal Q$, a natural pairing exists between the branching nodes in $\mathcal N$ (cf. Proposition 4.3 and 4.8 in \cite{DH21}). We shall fix a choice of $\mathcal N^{ch}\subset \mathcal N$ (here $ch$ for ``choice''), which contains exactly one branching node in each pair. As such, for any decoration $\mathscr E$ of $\mathcal Q$, we must have $\zeta_{\mathfrak n'}\Omega_{\mathfrak n'}=-\zeta_\mathfrak n\Omega_\mathfrak n$ for any pair $\{\mathfrak n,\mathfrak n'\}$ of branching nodes. This allows us to define $\widetilde{\mathcal B}_\mathcal Q=\widetilde{\mathcal B}_\mathcal Q(t,s,\alpha[\mathcal N^{ch}])$ by \begin{equation}\label{deftildeb}\widetilde{\mathcal B}_\mathcal Q(t,s,\alpha[\mathcal N^{ch}])=\mathcal B_\mathcal Q(t,s,\alpha[\mathcal N]),\end{equation} assuming that $\alpha[\mathcal N\backslash\mathcal N^{ch}]$ is defined such that $\zeta_{\mathfrak n'}\alpha_{\mathfrak n'}=-\zeta_\mathfrak n\alpha_\mathfrak n$ for each pair $\{\mathfrak n,\mathfrak n'\}$.
Now Proposition 5.1 of \cite{DH21} shows that, if $\mathcal Q$ is a regular couple of order $2n$, then the function $\widetilde{\mathcal B}_\mathcal Q\big(t,s,\alpha[\mathcal N^{ch}]\big)$ is the sum of at most $2^{n}$ terms. For each term there exists a subset $Z\subset\mathcal N^{ch}$, such that this term has form \begin{equation}\label{maincoef1} \prod_{\mathfrak n\in Z}\frac{\chi_\infty(\alpha_\mathfrak n)}{\zeta_\mathfrak n\pi i\alpha_\mathfrak n}\cdot\int_{\mathbb R^2}\mathcal C\big(\lambda_1,\lambda_2,\alpha[\mathcal N^{ch}\backslash Z]\big)e^{\pi i(\lambda_1t+\lambda_2s)}\,\mathrm{d}\lambda_1\mathrm{d}\lambda_2 \end{equation} for $t,s\in[0,1]$, where $\chi_\infty$ is as in Section \ref{norms}. In (\ref{maincoef1}) the function $\mathcal C$ satisfies the estimate
\begin{equation}\label{maincoef2}\int \langle \max (\lambda_1,\lambda_2)\rangle^{1-\frac{\eta}{8}}\big|\partial_\alpha^\rho\mathcal C\big(\lambda_1,\lambda_2,\alpha[\mathcal N^{ch}\backslash Z]\big)\big|\,\mathrm{d}\alpha[\mathcal N^{ch}\backslash Z]\mathrm{d}\lambda_1\mathrm{d}\lambda_2\leq C^n(2|\rho|)! \end{equation} for any multi-index $\rho$, as well as
\begin{equation}\label{maincoef2.5}\int \langle \max (\lambda_1,\lambda_2)\rangle^{\frac{\eta}{4}}\cdot\max_{\mathfrak n\in \mathcal N^{ch}\backslash Z}\langle \alpha_\mathfrak n\rangle^{1-\frac{\eta}{2}}\big|\mathcal C\big(\lambda_1,\lambda_2,\alpha[\mathcal N^{ch}\backslash Z]\big)\big|\,\mathrm{d}\alpha[\mathcal N^{ch}\backslash Z]\mathrm{d}\lambda_1\mathrm{d}\lambda_2\leq C^n. \end{equation}We will denote the $(\lambda_1,\lambda_2)$ integral in (\ref{maincoef1}) by $\widetilde{\mathcal B}_{\mathcal Q,Z}=\widetilde{\mathcal B}_{\mathcal Q,Z}(t,s,\alpha[\mathcal N^{ch}\backslash Z])$, so we have \begin{equation}\label{maincoef3}\widetilde{\mathcal B}_\mathcal Q(t,s,\alpha[\mathcal N^{ch}])=\sum_{Z\subset\mathcal N^{ch}}\prod_{\mathfrak n\in Z}\frac{\chi_\infty(\alpha_\mathfrak n)}{\zeta_\mathfrak n\pi i\alpha_\mathfrak n}\cdot \widetilde{\mathcal B}_{\mathcal Q,Z}(t,s,\alpha[\mathcal N^{ch}\backslash Z]).\end{equation} We should remark here that the powers on the weights $\langle \max (\lambda_1,\lambda_2)\rangle$ and $\max_{\mathfrak n\in \mathcal N^{ch}\backslash Z}\langle \alpha_\mathfrak n\rangle$ are stated differently in Proposition 5.1 of \cite{DH21}, but a careful inspection of the proof in Section 5 of \cite{DH21} shows that the bounds in \eqref{maincoef2} and \eqref{maincoef2.5} actually hold as well. In fact, this relies on the fact that the corresponding integrals appearing in the proof of Proposition 5.1 of \cite{DH21} have a form like
\[\begin{split}&\int_\mathbb R \langle \xi\rangle^{p}\bigg|\frac{\chi_\infty(\zeta_1+\epsilon\alpha_{1})}{\zeta_1+\epsilon\alpha_{1}}\bigg|\cdot\bigg|\frac{\chi_\infty(\zeta_2+\epsilon\alpha_{1})}{\zeta_2+\epsilon\alpha_{1}}\bigg|\,\mathrm{d}\alpha_{1}\qquad\textrm{or}\\
&\int_{\mathbb R^2}\langle \xi\rangle^p \bigg|\frac{\chi_\infty(\zeta_1+\epsilon_2\alpha_{2}-\epsilon_1\alpha_1)}{\zeta_1+\epsilon_2\alpha_{2}-\epsilon_1\alpha_1}\bigg|\cdot\bigg|\frac{\chi_\infty(\zeta_2+\epsilon_1\alpha_1)}{\zeta_2+\epsilon_1\alpha_1}\bigg|\cdot\bigg|\frac{\chi_\infty(\zeta_3+\epsilon_2\alpha_{2})}{\zeta_3+\epsilon_2\alpha_{2}}\bigg|\,\mathrm{d}\alpha_1\mathrm{d}\alpha_{2} \end{split} \] which are bounded \emph{for all} $0<p<1$; here $\epsilon,\epsilon_j\in\{\pm1\}$ and $\xi$ is one of the denominators ($\zeta_1+\epsilon\alpha_{1}$ or $\zeta_2+\epsilon\alpha_{1}$ in the first expression, and $\zeta_1+\epsilon_2\alpha_{2}-\epsilon_1\alpha_1$ or $\zeta_2+\epsilon_1\alpha_1$ or $\zeta_3+\epsilon_2\alpha_{2}$ in the second).
As a consequence, we can write \begin{equation}\label{KQZdef} \begin{split} \mathcal K_{\mathcal Q}(t, s, k)&=\sum_{Z\subset \mathcal N^{ch}} \mathcal K_{\mathcal Q, Z}(t, s, k),\\ \mathcal K_{\mathcal Q, Z}(t, s, k)&=\bigg(\frac{\delta}{2L^{d-\gamma}}\bigg)^n\zeta(\mathcal Q)\sum_\mathscr E\epsilon_\mathscr E\cdot\prod_{\mathfrak n\in Z}\frac{\chi_\infty(\delta L^{2\gamma}\Omega_\mathfrak n)}{\zeta_\mathfrak n\pi i\cdot\delta L^{2\gamma}\Omega_\mathfrak n}\cdot \widetilde{\mathcal B}_{\mathcal Q,Z}(t,s,\delta L^{2\gamma}\Omega[\mathcal N^{ch}\backslash Z])\cdot\prod_{\mathfrak l\in\mathcal L}^{(+)}n_{\mathrm{in}}(k_\mathfrak l), \end{split} \end{equation} The remainder $\mathscr R$ appears upon approximating the sum in $\mathcal K_{\mathcal Q, Z}(t, s, k)$ with an integral. This approximation, along with the estimates on the remainder $\mathscr R$, is done in Section 6 of \cite{DH21} (See Propositions 6.1 and 6.7). It is here that the argument becomes dependent on the chosen scaling law. Indeed, we will provide below the alternative propositions that will allow to prove the claimed bound for $\mathscr R$ in \eqref{remainderbd}.
\begin{prop}\label{approxnt} Consider the following expression \begin{equation}\label{defi}I:=\sum_{(x_1,\cdots,x_n)}\sum_{(y_1,\cdots,y_n)}W(x_1,\cdots,x_n,y_1,\cdots,y_n)\cdot\Psi(L^{2\gamma} \delta\langle x_1,y_1\rangle,\cdots,L^{2\gamma} \delta\langle x_n,y_n\rangle)\end{equation} where $(x_1,\cdots,x_n,y_1,\cdots,y_n)\in(\mathbb Z_L^d)^{2n}$. Assume there is a (strict) partial ordering $\prec$ on $\{1,\cdots,n\}$, and that the followings hold for the functions $W$ and $\Psi$:
(1) The function $W$ satisfies the bound (here $\widehat{W}$ denotes the Fourier transform in $(\mathbb R^d)^{2n}$)
\begin{equation}\label{propertyw1}\|\widehat{W}\|_{L^1}+\|\widehat{\partial W}\|_{L^1}\leq (C^+)^n. \end{equation}
(2) This $W$ is supported in the set\begin{equation}\label{propertyw2}
E:=\big\{(x_1,\cdots,x_n,y_1,\cdots,y_n):|\widetilde{x_j}-a_j|,\,|\widetilde{y_j}-b_j|\leq \lambda_j,\,\forall 1\leq j\leq n\big\}, \end{equation} where $1\leq\lambda_j\leq (\log L)^4$ are constants, $a_j$ and $b_j$ are constant vectors. Each $\widetilde{x_j}$ is a linear function that equals either $x_j$, or $x_j\pm x_{j'}$ or $x_j\pm y_{j'}$ for some $j'\prec j$, similarly each $\widetilde{y_j}$ equals either $y_j$, or $y_j\pm x_{j''}$ or $y_j\pm y_{j''}$ for some $j''\prec j$.
(3) For some set $J\subset\{1,\cdots,n\}$, the function $\Psi$ has the expression \begin{equation}\label{propertypsi1}\Psi(\Omega_1,\cdots,\Omega_n)=\prod_{j\in J}\frac{\chi_\infty(\Omega_j)}{\Omega_j}\cdot\Psi_1(\Omega[J^c]), \end{equation} where $\chi_\infty$ is as in Section \ref{norms}, and for any $\rho$ we have \begin{equation}\label{propertypsi2}
\|\partial^\rho\Psi_1\|_{L^1}\leq C^n(4|\rho|)!,\,\, \big\|\max_{j\in J^c}\langle \Omega_j\rangle^{1-\frac{\eta}{2}}\cdot\Psi_1\big\|_{L^1}\leq C^n. \end{equation}
Assume $n\leq (\log L)^3$. Then we have
\begin{multline}\label{conclusion1}\bigg|I-L^{2dn}\int_{(\mathbb R^d)^{2n}}W(x_1,\cdots,x_n,y_1,\cdots,y_n)\cdot\Psi(L^{2\gamma} \delta \langle x_1,y_1\rangle_\beta,\cdots,L^{2\gamma} \delta\langle x_n,y_n\rangle_\beta)\,\mathrm{d}x_1\cdots\mathrm{d}x_n\mathrm{d}y_1\cdots\mathrm{d}y_n\bigg|\\\leq (\lambda_1\cdots\lambda_n)^C(C^+L^{2d-2\gamma}\delta^{-1})^{n} L^{-\gamma_1+\eta}. \end{multline} Moreover, defining \begin{multline}\label{intappr}I_{\mathrm{app}}=(L^{2d-2\gamma}\delta^{-1})^{n}\int\Psi_1\mathrm{d}\Omega[J^c]\cdot\int_{(\mathbb R^d)^{2n}}W(x_1,\cdots,x_n,y_1,\cdots,y_n)\\\times\prod_{j\in J}\frac{1}{\langle x_j,y_j\rangle_\beta}\prod_{j\not\in J}\boldsymbol{\delta}(\langle x_j,y_j\rangle_\beta)\mathrm{d}x_1\cdots\mathrm{d}x_n\mathrm{d}y_1\cdots\mathrm{d}y_n, \end{multline} where the singularities $1/\langle x_j,y_j\rangle_\beta$ are treated using the Cauchy principal value, we have
\begin{equation}\label{conclusion2}|I_{\mathrm{app}}|\leq (\lambda_1\cdots\lambda_n)^C(C^+L^{2d-2\gamma}\delta^{-1})^{n},\quad |I-I_{\mathrm{app}}|\leq(\lambda_1\cdots\lambda_n)^C(C^+L^{2d-2\gamma}\delta^{-1})^{n} L^{-\gamma_1+\eta}. \end{equation} \end{prop}
We shall prove this proposition by relying on a series of Lemmas formulated and proved below. In what follows, we assume that $1\leq \lambda \leq (\log L)^4$ and use the notation $e(z)=e^{2\pi i z}$. The first lemma replaces Lemma 6.2 of \cite{DH21}.
\begin{lem}\label{NTSP}
Suppose $\Phi:\mathbb R\times \mathbb R^d \times \mathbb R^d \to \mathbb C$ is a function satisfying the bounds
\begin{equation}\label{NTphibounds}
\sup_{s, x, y}|\partial_x^\alpha \partial_y^\beta \Phi(s,x, y)|\leq D
\end{equation}
for all multi-indices $|\alpha|, |\beta|\leq 10d$. Then we have:
(1) The following bound
\begin{equation}\label{NTSPest0}
\int_{\mathbb R}\left|\int_{\mathbb R^{2d}} \chi_0\big(\frac{x-a}{\lambda}\big)\chi_0\big(\frac{y-b}{\lambda}\big)\Phi(s,x, y)\cdot e(\xi \cdot x+\zeta\cdot y+s\langle x, y\rangle) \, \mathrm{d}x \mathrm{d}y \right|\,\mathrm{d}s \lesssim D\lambda^{2d}
\end{equation} holds uniformly in $(\xi, \zeta, a, b)\in \mathbb R^{4d}$.
(2) Suppose, in addition, that $\Phi$ is supported on the set $|s|\lesssim L$. Then, there holds \begin{equation}\label{NTSPest} \begin{split}
\int_{\mathbb R} \bigg|\sum_{0\neq(g,h)\in \mathbb Z^{2d}}\int_{\mathbb R^{2d}} \chi_0\big(\frac{x-a}{\lambda}\big)\chi_0\big(\frac{y-b}{\lambda}\big)\Phi(s, x, y)\cdot e[(Lg+\xi) \cdot x +(Lh +\zeta)\cdot y+s\langle x, y\rangle]\bigg|\,\mathrm{d}s\\
\lesssim D\lambda^{2d} L^{-1+2\eta} (1+|\xi|+|\zeta|). \end{split} \end{equation} uniformly in $(a, b)\in \mathbb R^{2d}$. In particular, we have \begin{equation}
\int_{\mathbb R}\bigg|\sum_{(g,h) \in \mathbb Z^{2d}} \int_{\mathbb R^{2d}} \chi_0\big(\frac{x-a}{\lambda}\big)\chi_0\big(\frac{y-b}{\lambda}\big)\Phi(s, x, y)\cdot e[(Lg+\xi) \cdot x +(Lh +\zeta)\cdot y+s\langle x, y\rangle]\bigg|\,\mathrm{d}s
\lesssim D\lambda^{2d}\label{NTSPest2} \end{equation}
uniformly in $(\xi, \zeta,a,b)\in \mathbb R^{4d}$. \end{lem}
\begin{proof}
With no loss of generality we assume $D=1$. Part (1) follows easily by translating in $x$ and $y$, and then applying the stationary phase lemma to estimate the $(x, y)$ integral by $\langle s\rangle^{-d}\lambda^{2d}$. To prove part (2), we define $Q=L^{\eta}(\lambda+|s|)$, and split the left-hand side of \eqref{NTSPest} into two parts $A$ and $B$ defined as follows:
$\bullet$ \emph{Part A: where $|Lg+\xi+sb|\gtrsim Q$ or $|Lh+\zeta+sa|\gtrsim Q$}. For this part, we will fix the variables $(g, h, s)$ and integrate by parts in $(x,y)$ as follows: Defining $z=x+\frac{Lh+\zeta}{s}$, $w=y+\frac{Lg+\xi}{s}$, $a'=a+\frac{Lh+\zeta}{s}$, and $b'=b+\frac{Lg+\xi}{s}$, then up to a unimodular constant independent of the variables, we can write the integral in $(x,y)$ as \begin{multline*} \int_{\mathbb R^{2d}} \chi_0\big(\frac{z-a'}{\lambda}\big)\chi_0\big(\frac{w-b'}{\lambda}\big)\Phi\bigg(s, z-\frac{Lh+\zeta}{s}, w-\frac{Lg+\xi}{s}\bigg)e\big(s (z\cdot w)\big) \, \mathrm{d}z\mathrm{d}w\\ =e(s a'\cdot b')\int_{\mathbb R^{2d}} \chi_0\big(\frac{u}{\lambda}\big)\chi_0\big(\frac{v}{\lambda}\big)\Phi\bigg(s, u+a'-\frac{Lh+\zeta}{s}, v+b'-\frac{Lg+\xi}{s}\bigg)e\big(s (u\cdot v)\big)e\big(s a'\cdot v+sb'\cdot u\big) \, \mathrm{d}u\mathrm{d}v. \end{multline*}
Integrating by parts many times, using the last oscillatory factor and the fact that either $|sa'|$ or $|sb'|\geq Q=L^\eta(\lambda+|s|)$, we obtain that the contribution of Part A is better than acceptable.
$\bullet$ \emph{Part B: where $|Lg+\xi+sb|$ and $|Lh+\zeta+sa|\ll Q$.} Recall that since $|s|\lesssim L$ and $\lambda\lesssim L^\eta$, we have that $Q\lesssim L^{1+\eta}$, and hence for fixed $s$ the number of choices of $(g,h)$ in the sum in this part B is $L^{O(\eta)}$. We further split this part into two: Part B1 in which the $s$ integral is over $|s|\leq L^{\frac56}$ and part B2 for which $L^{\frac56}\lesssim |s|\lesssim L$. For B2, we simply use stationary phase again in the $(x,y)$ integral to estimate it by $\lambda^{2d} |s|^{-d}$, so the contribution of this part is bounded by $$
|(\textrm{contribution\ of\ B2})|\lesssim \lambda^{2d}L^{O(\eta)}\int_{L^{\frac56}\lesssim |s|\lesssim L}|s|^{-d}\mathrm{d}s\lesssim \lambda^{2d}L^{C\eta -\frac{5}{6}(d-1)}\ll \lambda^{2d} L^{-1}. $$ Moving back to $B1$, we notice that in this case $Q\leq L^{\eta+\frac{5}{6}}\ll L$, which implies that, for each fixed $s$, there is at most one element in the whole sum over $(g, h)\neq 0$. Now we split the $s$ integral into dyadic pieces, and rewrite the contribution of B1 as $$
\sum_{K\in 2^\mathbb N,K\leq L^{\frac56}} \int_{|s|\sim K}\bigg|\sum_{(g, h)\neq 0}\int_{\mathbb R^{2d}}\chi_0\big(\frac{x-a}{\lambda}\big)\chi_0\big(\frac{y-b}{\lambda}\big)\Phi(s, x, y)\cdot e[(Lg+\xi) \cdot x +(Lh +\zeta)\cdot y+s\langle x, y\rangle]\bigg|\,\mathrm{d}s. $$
Let $Y=\max(\langle a\rangle, \langle b\rangle)$. For the part of the dyadic sum with $K\ll LY^{-1}$, notice that since $\max(|Lg+\xi+sb|,|Lh+\zeta+sa|)\ll Q\ll L$ and $(g, h) \neq 0$, we must have that $|\xi|+|\zeta|\gtrsim L$. Therefore, if we just use stationary phase again in the $(x,y)$ integral, we can estimate the sum over $K$ by $$
\sum_{K\in 2^\mathbb N,K\leq L^{\frac56}} \int_{|s|\sim K\ll LY^{-1}}\lambda^{2d}\langle s\rangle^{-d} \mathrm{d}s\lesssim \lambda^{2d}\lesssim \lambda^{2d}L^{-1}(|\xi|+|\zeta|), $$
as needed. This leaves us with the sum over $K\gtrsim LY^{-1}$. Assume with no loss of generality that $|a^1|\sim Y\gg 1$. Since $|Lh+\zeta+sa|\ll Q$, we have that $\{\frac{\zeta^1+s a^1}{L}\} \ll QL^{-1}$ where $\{\cdot\}$ denotes the fractional part. Since we also have that $\frac{\zeta^1+s a^1}{L}$ belongs to an interval of size $\frac{KY}{L}$, it follows that $\frac{\zeta^1+s a^1}{L}$ belongs to a set of measure $\lesssim \frac{KYQ}{L^2}$. Hence, $s$ belongs to a set of measure $\lesssim \frac{KQ}{L}$. As a result, recalling that for fixed $s$ the sum over $(g,h)$ has at most one element and that the integral in $(x,y)$ is $\lesssim \lambda^{2d}K^{-d}$ by stationary phase, we can estimate $$
\int_{|s|\sim K}\bigg|\sum_{(g, h)\neq 0}\int_{\mathbb R^{2d}}\chi_0\big(\frac{x-a}{\lambda}\big)\chi_0\big(\frac{y-b}{\lambda}\big)\Phi(s, x, y)\cdot e[(Lg+\xi) \cdot x +(Lh +\zeta)\cdot y+s\langle x, y\rangle]\bigg|\lesssim \lambda^{2d}\frac{KQ}{L} K^{-d}, $$ which sums in $K$ to give, $$ \sum_{K\in 2^\mathbb N,K\leq L^{\frac56}} \lambda^{2d} L^\eta (\lambda +K){K^{-d+1}}{L}^{-1}\lesssim \lambda^{2d} L^{-1+2\eta}. $$ This finishes the proof. \end{proof}
The following lemma replaces Lemma 6.4 of \cite{DH21}.
\begin{lem}\label{lem:minorarcs}
Suppose that $\Phi(s,x,y): \mathbb R\times \mathbb R^d \times \mathbb R^d\to \mathbb C$ is a function satisfying (\ref{NTphibounds}).
(1) If $\Phi$ is supported on $|s|< L^{2\gamma}$, then the following bound holds uniformly in $(a, b, \xi, \zeta)\in \mathbb R^{4d}$: \begin{equation}\label{NTlem21}
\int_{\mathbb R}\bigg|\sum_{(x, y)\in \mathbb Z^{2d}_L} \Phi(s, x, y) \chi_0\big(\frac{x-a}{\lambda}\big)\chi_0\big(\frac{y-b}{\lambda}\big)e(x\cdot \xi+y\cdot \zeta+s \langle x, y\rangle)\bigg|\mathrm{d}s \lesssim D \lambda^{4d} L^{2d}. \end{equation}
(2) If $\Phi(s,x,y)$ is supported on the set $L\lesssim |s|$, then the following improved estimate holds uniformly in $(a, b, \xi,\zeta)\in\mathbb R^{4d}$: for $P>\eta^{-2}$, \begin{equation}\label{NTlem22}
\int_{\mathbb R}\big\langle\frac{s}{\delta L^{2\gamma}}\big\rangle^{-P}\bigg|\sum_{(x, y)\in \mathbb Z^{2d}_L} \Phi(s, x, y) \chi_0\big(\frac{x-a}{\lambda}\big)\chi_0\big(\frac{y-b}{\lambda}\big)e(x\cdot \xi+y\cdot \zeta+s \langle x, y\rangle)\bigg|\mathrm{d}s \lesssim D \lambda^{4d} L^{2d-2(d-1)(1-\gamma)+\eta}. \end{equation} \end{lem}
\begin{proof}
Part (1) is implied by Lemma 6.4 of \cite{DH21} (which even extends to $\Phi$ supported on $|s|< L^2$). So we focus on part (2); if $\gamma<1/2$ then (\ref{NTlem22}) is trivial due to the factor $\langle s\delta^{-1}L^{-2\gamma}\rangle^{-P}$, so we will assume $\gamma\geq 1/2$. Recall that $\langle x,y\rangle=\sum_{j=1}^d x^j y^j$ where $x^j, y^j\in \mathbb Z_L$. We make the change of variables
$$
L^{-1}p^j={x^j+y^j}, \qquad L^{-1}q^j={x^j-y^j}, \qquad p^j\equiv q^j \pmod 2.
$$ The sum in $(x^j,y^j)\in \mathbb Z_L^{2}$ then becomes the linear combination of four sums, which are taken over $(p^j, q^j)\in \mathbb Z^{2}$, or $(p^j, q^j)\in 2\mathbb Z\times\mathbb Z$, or $(p^j, q^j)\in \mathbb Z\times 2\mathbb Z$, or $(p^j, q^j)\in (2\mathbb Z)^2$. We will only consider the first sum, and it will be obvious from the proof that the other sums are estimated similarly. Define \[ \Upsilon (s,z, w)=\Phi\big(s, \frac{z+w}{2},\frac{z-w}{2}\big)\chi_0\big(\frac{z+w-2a}{2\lambda}\big)\chi_0\big(\frac{z-w-2b}{2\lambda}\big),\] which has all derivatives in $(z,w)$ up to order $10d$ uniformly bounded, and is supported in the set $\{g^j\leq Lz^j\leq g^j+2\lambda L,\,h^j\leq Lw^j\leq h^j+2\lambda L\}$, where $(g^j,h^j)\in\mathbb Z^2$ are determined by $(a,b)$.
Now, by possibly redefining $(s,\xi,\zeta)$, we need to show that the function
\begin{align*}
B(\xi, \zeta)&=\int_{\mathbb R}\langle s/\delta L^{2\gamma}\rangle^{-P} \bigg|\sum_{(p, q)\in \mathbb Z^{2d}} \Upsilon\big(s, pL^{-1}, qL^{-1}\big)e\big[sL^{-2}(|p|^2-|q|^2)+p\cdot \xi+q\cdot \zeta\big]\bigg| \, \mathrm{d}s\\
&=\int_{\mathbb R} \langle s/\delta L^{2\gamma}\rangle^{-P}\bigg|\sum_{(p, q)\in \mathbb Z^{2d}} \Upsilon\big(s,pL^{-1}, qL^{-1}\big)\prod_{j=1}^d e\big[sL^{-2} (p^j)^2 +p^j \xi^j]\cdot e[-sL^{-2} (q^j)^2 +q^j \zeta^j\big]\bigg| \, \mathrm{d}s
\end{align*}
satisfies the bounds in \eqref{NTlem22} when $\Upsilon$ is supported on $|s|\gtrsim L$, noting that in the above sum we must have $p^j\in[g^j,g^j+20\lambda L]$ and $q^j\in[h^j,h^j+20\lambda L]$. Now, recall the Gauss sums $G_h(s,r,n)$ and $G_h(s,r,x)$ defined by \begin{equation}\label{huagausssum}G_h(s, r,n)=\sum_{p=h}^{h+n}e(s p^2+rp), n \in \mathbb N; \qquad \mathrm{and} \qquad G_h(s, r,x)=G_h(s, r, \lfloor x\rfloor), x\in \mathbb R_+,\end{equation} where $\lfloor x\rfloor$ is the floor function, and notice that since $\partial_x G_h(s, r; x)=\sum_{p\in \mathbb N}e(s (h+p)^2+r(h+p)) \boldsymbol{\delta}(x-p)$, we can write
\begin{align*}
B(\xi, \zeta)&=\int_{\mathbb R} \langle s/\delta L^{2\gamma}\rangle^{-P}\bigg|\int_{(u, v)\in \mathbb R_+^{2d}} \Upsilon\big(s, (u+g)L^{-1}, (v+h)L^{-1}\big)\prod_{j=1}^d \partial_{u^j}G_{g^j}( sL^{-2}, \xi^j , u^j) \\&\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\,\,\times\partial_{v^j}G_{h^j}(- sL^{-2}, \zeta^j, v^j) \, \mathrm{d}u \mathrm{d}v\bigg| \, \mathrm{d}s\\
&\leq L^{-2d}\int_{\mathbb R}\langle s/\delta L^{2\gamma}\rangle^{-P}\int_{(u, v)\in \mathbb R_+^{2d}} \big| D^\alpha\Upsilon\big(s,(u+g)L^{-1}, (v+h)L^{-1}\big)\big|\\&\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\times\prod_{j=1}^d\big|G_{g^j}( sL^{-2}, \xi^j , u^j) G_{h^j}(- sL^{-2}, \zeta^j, v^j)\big| \, \mathrm{d}u\mathrm{d}v\mathrm{d}s,
\end{align*}
where $g=(g^1,\cdots,g^d)$ etc., and $D^\alpha \Upsilon$ is obtained from $ \Upsilon$ by taking one derivative in each of the variables $u_j, v_j$ (and hence has the same support properties). By rescaling in $s$, we obtain that
\begin{align*}
B(\xi, \zeta)&\lesssim L^{-2d+2}\int_{(u,v) \in \Omega}\int_{|s|\geq L^{-1}} \langle s\delta^{-1} L^{2-2\gamma}\rangle^{-P}\prod_{j=1}^d \bigg|G_{g^j}( s, \xi^j , u^j) G_{h^j}(- s, \zeta^j, v^j) \bigg| \, ds \mathrm{d}u \mathrm{d}v\\
\end{align*}
where $\Omega$ is a set in $\mathbb R^{2d}$ of volume $\lesssim (\lambda L)^{2d}$. We will be done then once we show that, uniformly in $|u|, |v|\lesssim \lambda L$, there holds $$
\int_{|s|\geq L^{-1}} \langle s\delta^{-1} L^{2-2\gamma}\rangle^{-P}\prod_{j=1}^d \bigg|G_{g^j}( s, \xi^j , u^j) G_{h^j}(- s, \zeta^j, v^j) \bigg| \, ds \lesssim \lambda^{2d} L^{2d-2} L^{-(d-1)(2-2\gamma)+\eta}. $$
First note that using the trivial bound $|G_{h_j}|\lesssim L$, and by our choice of $P\geq \eta^{-2}$, it is enough to show that $$
\int_{L^{-1} \leq |s|\lesssim L^{2\gamma-2+\eta/(10d)}} \prod_{j=1}^d \bigg|G_{g^j}( s, \xi^j , u^j) G_{h^j}(- s, \zeta^j, v^j) \bigg| \, ds \lesssim\lambda^{2d} L^{2d-2} L^{-(d-1)(2-2\gamma)+\eta}, $$ which is implied by proving that, uniformly in $(g, \xi, u)$ satisfying $0<u\lesssim \lambda L$, there holds \begin{equation}\label{refinedHua}
\int_{L^{-1} \leq |s|\lesssim L^{2\gamma-2+\eta/(10d)}} \bigg|G_{g}( s, \xi , u) \bigg|^{2d} \, ds \lesssim \lambda^{2d} L^{2d-2} L^{-(d-1)(2-2\gamma)+\eta}. \end{equation}
For this, let $I$ be the interval of integration above, and let us assume that $u \in [K, 2K)$ for some dyadic integer $K\lesssim \lambda L$. We will rely on the Gauss lemma for Gauss sums in \eqref{huagausssum} which gives that if $|s-\frac{a}{q}|\leq \frac{1}{qK}$ for some $0\leq a <q\leq K$ with $\gcd(a, q)=1$, then
$$
|G_g(s, \xi, u)|\lesssim \frac{K\log L}{\sqrt q \left(1+K|s-\frac{a}{q}|^{1/2}\right)}. $$ Here we recall that $s\in [0,1]$ can be approximated by such a rational number in the above fashion by Dirichlet's Approximation Theorem. Let us assume that $q\in [B, 2B)$ for some dyadic integer $B$ with $1\leq B \leq K$ and that $s\in I_{a/q, n, B}:=\frac{a}{q}\pm [\frac{1}{2^{n}BK}, \frac{1}{2^{n-1}BK})$ with $1\leq 2^n\lesssim \frac{K}{B}$ (with the obvious modification for $2^n\sim\frac{K}{B}$), so that $$ I=\bigcup_{B\leq K} \bigcup_{1\leq 2^n \lesssim \frac{K}{B}} \bigcup_{q\in [B, 2B)} \bigcup_{0\leq a < q,\gcd(a, q)=1}I_{a/q, n, B}. $$
First note that if $a=0$, then $|G_g|\lesssim |s|^{-1/2}\log L\leq L^{\frac{1}{2}}\log L$ on $I_{0, n, B}$, which gives a bound that is much better than \eqref{refinedHua}. Otherwise, we have $|G_g(s, \xi, u)|\lesssim K^{\frac{1}{2}}2^{n/2}\log L$ on $I_{a/q, n, B}$. Also, note that since $s\lesssim L^{2\gamma-2+\eta/(10d)}$ on $I$, we must have that $B\sim q \gtrsim L^{2-2\gamma-\eta/(10d)}$ for any $I_{a/q, n, B}$ with $a\neq 0$, as well as $a\leq qL^{2\gamma-2+\eta/(10d)}$. As a result, we obtain that \begin{align*}
\int_I \bigg|G_{g}( s, \xi , u) \bigg|^{2d}\,\mathrm{d}s &\lesssim L^{d-1+\eta}+\sum_{L^{2-2\gamma-\eta/(10d)}\leq B \leq K}\sum_{1\leq 2^n \lesssim \frac{K}{B}} \sum_{q\in [B, 2B)} \sum_{1\leq a < q L^{2\gamma-2+\eta/(10d)},\gcd(a, q)=1}\frac{(K2^n\log L)^d}{2^n BK}\\ &\lesssim L^{d-1+\eta}+L^{2\gamma-2+\eta/(10d)}\sum_{L^{2-2\gamma-\eta/(10d)}\leq B \leq K}\sum_{1\leq 2^n \lesssim KB^{-1}} B K^{d-1}2^{n(d-1)}(\log L)^d \\&\lesssim L^{d-1+\eta}+K^{2(d-1)}L^{(d-1)(2\gamma-2)+\eta}, \end{align*} which gives the result since $K \leq \lambda L$. \end{proof} \begin{lem}\label{NTintlem} Suppose $\Phi(x,y)$ satisfies \eqref{NTphibounds} without $s$. Let $\Omega(x,y)=\langle x, y\rangle$ and $\mu:=\delta L^{2\gamma}$.
(1) Suppose $\psi$ is a function such that $\|\psi\|_{L^1(\mathbb R)}\leq D$, then
\begin{equation}\label{NTintlem1}
\left|\mu^{-1}\int_{\mathbb R^{2d}}\psi(\mu \Omega) \chi_0\big(\frac{x-a}{\lambda}\big)\chi_0\big(\frac{y-b}{\lambda}\big)\Phi(x,y) e(x\cdot\xi+y\cdot \zeta) \, \mathrm{d}x\mathrm{d}y\right|\lesssim D \lambda^{2d}
\end{equation}
uniformly in $(a, b, \xi, \zeta)\in \mathbb R^{4d}$. The same holds if $\psi(\mu\Omega)\Phi(x,y)$ is replaced by $\Psi(\mu\Omega,x,y)$ where $\Psi=\Psi(u,x,y)$ satisfies $\big\|\sup_{x,y}|\partial_x^\alpha\partial_y^\beta\Psi|\big\|_{L_u^1}\leq D$ for all multi-indices $|\alpha|,|\beta|\leq 10d$.
(2) Suppose further that $\| \langle y\rangle^{1-\eta} \psi\|_{L^1(\mathbb R)}\leq D$, then
\begin{align}
&\left|\mu^{-1}\int_{\mathbb R^{2d}}\psi(\mu \Omega) \chi_0\big(\frac{x-a}{\lambda}\big)\chi_0\big(\frac{y-b}{\lambda}\big) \Phi(x,y)e(x\cdot\xi+y\cdot \zeta) \, \mathrm{d}x\mathrm{d}y-\right.\label{NTintlem2}\\
&\qquad \qquad \left.\left(\int \psi\right)\int_{\mathbb R^{2d}}\boldsymbol{\delta}(\Omega) \chi_0\big(\frac{x-a}{\lambda}\big)\chi_0\big(\frac{y-b}{\lambda}\big)\Phi(x,y) e(x\cdot\xi+y\cdot \zeta) \,\mathrm{d}x\mathrm{d}y\right|\lesssim D \lambda^{2d}\mu^{1-2\eta}(1+|\xi|+|\zeta|),\nonumber
\end{align}
uniformly in $(a, b)\in \mathbb R^{2d}$.
\end{lem} \begin{proof} The proof is the same as that of Lemma 6.5 of \cite{DH21} (with the weight in (2) replaced by $\langle y\rangle^{1-\eta}$ which does not affect the proof). We omit the details. \end{proof} \begin{proof}[Proof of Proposition \ref{approxnt}] The proof follows now exactly as the proof of Proposition 6.1 in \cite{DH21} but using Lemma \ref{NTSP}, \ref{lem:minorarcs}, and \ref{NTintlem} to replace Lemma 6.2, 6.4, and 6.5 respectively in \cite{DH21}. \end{proof} With Proposition \ref{approxnt} in hand, we can apply it to the sum in \eqref{KQZdef} exactly as is done in the proof of Proposition 6.7 of \cite{DH21}: First, for any $\mathfrak n\in\mathcal N^{ch}$ we define $x_\mathfrak n=k_{\mathfrak n_1}-k_\mathfrak n$ and $y_{\mathfrak n}=k_\mathfrak n-k_{\mathfrak n_3}$, so we have $\Omega_\mathfrak n=2( x_\mathfrak n \cdot y_\mathfrak n)$ by (\ref{res}). As explained in \cite{DH21}, the linear mapping \begin{equation*}(x_\mathfrak n,y_\mathfrak n)_{\mathfrak n\in\mathcal N^{ch}}\leftrightarrow (k_{\mathfrak l_1},\cdots, k_{\mathfrak l_{2n}})\end{equation*} is volume preserving and preserves the lattice $(\mathbb Z_L^d)^{2n}$, where $(k_{\mathfrak l_j})$ are the decorations of some $2n$ leaf pairs (out of the $2n+1$ pairs in total) in the $k$-decoration $\mathscr E$. 
We can then rewrite the sum in \eqref{KQZdef} as \begin{equation}\label{newsum1}\sum_{(x_\mathfrak n,y_\mathfrak n):\mathfrak n\in\mathcal N^{ch}}\epsilon\cdot\prod_{\mathfrak n\in Z}\frac{\chi_\infty(2\delta L^{2\gamma}( x_\mathfrak n\cdot y_\mathfrak n))}{2\delta L^{2\gamma}( x_\mathfrak n \cdot y_\mathfrak n)}\cdot\widetilde{\mathcal B}_{\mathcal Q,Z}(t,s,2\delta L^{2\gamma}( x_\mathfrak n \cdot y_\mathfrak n):\mathfrak n\in\mathcal N^{ch}\backslash Z)\cdot W(x[\mathcal N^{ch}],y[\mathcal N^{ch}]),\end{equation} where $\epsilon=\epsilon_\mathscr E$ (which depends only on $(x_\mathfrak n,y_\mathfrak n)$) and $W(x[\mathcal N^{ch}],y[\mathcal N^{ch}])=\prod_{j=1}^{2n+1}n_{\mathrm{in}}(k_{\mathfrak l_j})$, with each $k_{\mathfrak l_j}$ and $k_{2n+1}:=\pm k\pm k_{\mathfrak l_{2m+1}}\cdots\pm k_{\mathfrak l_{2n}}$ equaling $k$ plus some linear combination of $(x_\mathfrak n,y_\mathfrak n)$.
The first thing to notice from \eqref{newsum1} is that $\mathcal K_{\mathcal Q, Z}$ is clearly smooth in $k$, as any derivative in $k$ falls on the $W$ function. This allows to write \eqref{newsum1}, as well as its derivatives in $k$ up to order $40d$, in the form \eqref{defi} as explained in details in the proof of Proposition 6.7 of \cite{DH21}, which allows us to apply Proposition \ref{approxnt}. Consequently, we obtain that if $\mathcal Q$ be a regular couple of order $2n$ where $n\leq N^3$, then we have $\mathcal K_\mathcal Q(t,s,k)=\sum_{Z\subset \mathcal N^{ch}}\mathcal K_{\mathcal Q,Z}(t,s,k)$, where \begin{equation}\label{KQZapprox} \begin{split} \mathcal K_{\mathcal Q,Z}(t,s,k)&=\left(\mathcal K_{\mathcal Q,Z}\right)_{\textrm{app}}(t,s,k)+\mathscr R_{\mathcal Q, Z},\\ \left(\mathcal K_{\mathcal Q,Z}\right)_{\textrm{app}}(t,s,k)&=2^{-2n}\delta^n\zeta(\mathcal Q)\prod_{\mathfrak n\in Z}\frac{1}{\zeta_\mathfrak n \pi i}\cdot\Jc \widetilde B_{\mathcal Q, Z}(t, s)\cdot\mathcal M_{\mathcal Q,Z}(k),\qquad \textrm{where}\\ \Jc \widetilde B_{\mathcal Q, Z}(t, s)&=\int\widetilde{\mathcal B}_{\mathcal Q,Z}\big(t,s,\alpha[\mathcal N^{ch}\backslash Z]\big)\,\mathrm{d}\alpha[\mathcal N^{ch}\backslash Z]. \end{split} \end{equation} Here the error term $\mathscr R_{\mathcal Q, Z}$ satisfies \eqref{remainderbd} for each $(\mathcal Q, Z)$, by examining the gain of power in each error term occurring in Lemmas \ref{NTSP}, \ref{lem:minorarcs}, and \ref{NTintlem}, and noticing that $\gamma_1:=\min(2\gamma,1,2(d-1)(1-\gamma))$. The exact expression $\mathcal M_{\mathcal Q,Z}(k)$, which is provided in Proposition 6.7 of \cite{DH21}, is not needed here, but we shall need some of its properties which are recalled below. We refer the reader to \cite{DH21} for the complete details, which are the same in our case here. 
This finishes the proof of Proposition \ref{regcpltreeasymp} (for couples, but the results for regular trees $\mathcal T$ are proved similarly using the fact that a regular tree forms a regular couple with the trivial tree, see Proposition 6.10 of \cite{DH21}). \subsubsection{Proof of Proposition \ref{regcpltreecancel}} We start by recalling some facts concerning the structure of regular and dominant couples from Section 4 of \cite{DH21}. A \emph{regular chain} is a saturated paired tree, obtained by repeatedly applying operation $B$ (attaching one of the mini trees in Figure \ref{fig:minitree} at either a branching node or the lone leaf, as described in Definition \ref{defmini}), starting from the trivial tree $\bullet$. A \emph{regular double chain} is a couple consisting of two regular chains (where, of course, the lone leaves of the two regular chains are paired). It can also be obtained by repeatedly applying operation $B$ at either a branching node or a lone leaf, starting from the trivial couple $\times$. The order of a regular chain $\mathcal T$ is always an even number $2m$. The $2m$ branching nodes are naturally ordered by parent-child relation; denote them by $\mathfrak n_j\,(1\leq j\leq 2m)$ from top to bottom. A \emph{dominant chain} is a special case of regular chain in which the application of operation $B$ is only done at the lone leaves. In this case, we can group the branching nodes $\mathfrak n \in \mathcal N$ from top to bottom as pairs $(\mathfrak n_{2j-1}, \mathfrak n_{2j})$, $1\leq j \leq m$, which are exactly the branching nodes of the mini tree attached in the $j$-th application of operation $B$. Note that the signs of the nodes $\mathfrak n_{2j-1}$ are all the same. \begin{figure}
\caption{Six possibilities of mini trees (Definition \ref{defmini}).}
\label{fig:minitree}
\end{figure}
The structure theorem of regular couples states that, for any nontrivial regular couple $\mathcal Q\neq \times$, there exists a regular couple $\mathcal Q_0\neq\times$ which is either a $(1,1)$-mini couple or a regular double chain, such that $\mathcal Q$ is formed by replacing each leaf pair in $\mathcal Q_0$ with a regular couple (cf. Proposition 4.8 of \cite{DH21}). In the first case, we call the couple type 1 and in the second we call it type 2. For type 2 regular couples $\mathcal Q$, if we require that the couple replacing the lone leaf pair of $\mathcal Q_0$ is trivial or has type 1, then this $\mathcal Q_0$ is unique (cf. Proposition 4.10 of \cite{DH21}).
\emph{Dominant couples} are a special class of regular couples defined inductively as follows. First the trivial couple $\times$ is dominant. Suppose $\mathcal Q\neq\times$, let $\mathcal Q_0$ be uniquely determined as explained above, and let $\mathcal Q_k\,(k\geq 1)$ be the regular couples in $\mathcal Q$ replacing leaf pairs in $\mathcal Q_0$. Then we define $\mathcal Q$ to be dominant, if (i) $\mathcal Q_0$ is either a $(1,1)$-mini couple or a regular double chain formed by two \emph{dominant} chains, and (ii) each regular couple $\mathcal Q_k$ is dominant. For a dominant couple of type 2, we enumerate the couples $\mathcal Q_k$ replacing leaf pairs as follows: $\mathcal Q_0$ is formed of two dominant chains $\mathcal T_0^+$ and $\mathcal T_0^-$; each of $\mathcal T_0^\pm$ is formed by attaching $m^\pm$ mini trees (from root to lone-leaf) at the nodes $\mathfrak n_{2j-1}$ ($1\leq j \leq m^\pm$). Look at the $j$-th mini tree in $\mathcal T_0^\pm$, we define the dominant couple replacing the pair of \emph{red} leaves in Figure \ref{fig:minitree} by $\mathcal Q_{j,+,1}$, and define the regular couple in $\mathcal Q$ replacing the pair of \emph{green} leaves in Figure \ref{fig:minitree} by $\mathcal Q_{j,+,2}$. Then, for the dominant couple $\mathcal Q$, we have \begin{equation}\label{newn*}\mathcal N=\bigg(\bigcup_{j,\epsilon,\iota}\mathcal N_{j,\epsilon,\iota}\bigg)\cup\mathcal N_{lp}\cup \big\{\mathfrak n_1^+,\cdots,\mathfrak n_{2m^+}^+\big\}\cup\big\{\mathfrak n_1^-,\cdots,\mathfrak n_{2m^-}^-\big\} \end{equation} and we define \begin{equation}\label{newnch}\mathcal N^{ch}=\bigg(\bigcup_{j,\epsilon,\iota}\mathcal N_{j,\epsilon,\iota}^{ch}\bigg)\cup\mathcal N_{lp}^{ch}\cup\big\{\mathfrak n_{2j-1}^+:1\leq j \leq m^+\big\}\cup\big\{\mathfrak n_{2j-1}^-:1\leq j \leq m^-\big\}. 
\end{equation} Here in (\ref{newn*}) and (\ref{newnch}), the couples $\mathcal Q_{j,\epsilon,\iota}$, where $\epsilon\in\{\pm\}$ and $\iota\in\{1,2\}$, are the ones described above, and $\mathcal N_{j,\epsilon,\iota}$ (and $\mathcal N_{j,\epsilon,\iota}^{ch}$) are defined correspondingly; similarly for $\mathcal Q_{lp}$, $\mathcal N_{lp}$ and $\mathcal N_{lp}^{ch}$. The notation for a dominant couple of type 1 is much simpler as we denote by $\mathcal Q_1, \mathcal Q_2, \mathcal Q_3$ the three couples replacing the three leaf pairs (from left to right) of a $(1,1)$-mini couple. Here, we set $\mathcal N^{ch}=\mathcal N_1^{ch}\cup\mathcal N_2^{ch}\cup\mathcal N_3^{ch}\cup\{\mathfrak r\}$ where $\mathfrak r$ is the root with $+$ sign.
Motivated by \eqref{KQZdef} and still recalling \cite{DH21}, we define below the notion of \emph{special subsets} $Z$ of $\mathcal N^{ch}$ for a dominant couple $\mathcal Q$. The pair $\mathscr Q:=(\mathcal Q,Z)$ will be called an \emph{enhanced dominant couple}, on which we also define an equivalence relation $\sim$ between two enhanced dominant couples $\mathscr Q=(\mathcal Q, Z)$ and $\mathscr Q'=(\mathcal Q', Z')$, as follows. First $\varnothing$ is a special subset and the enhanced trivial couple $(\times,\varnothing)$ is only equivalent to itself.
Next, if $\mathscr Q$ is a dominant couple of type 1, then $Z$ is special if and only if $Z=Z_1\cup Z_2\cup Z_3$ (i.e. $\mathfrak r$ is \emph{not} in $Z$) where $Z_j\subset \mathcal N_j^{ch}$ is special. If we denote by $\mathscr Q_j=(\mathcal Q_j,Z_j)$ the three enhanced dominant couples defining $\mathscr Q$, and similarly for $\mathscr Q'$, we say $\mathscr Q\sim\mathscr Q'$ if and only if $\mathscr Q_j\sim\mathscr Q_j'$ for $1\leq j\leq 3$.
Now let $\mathscr Q$ and $\mathscr Q'$ be as before, but suppose $\mathcal Q$ and $\mathcal Q'$ have type $2$. Let $\mathcal Q_0$ be associated with $\mathcal Q$ as explained above, and similarly for $\mathcal Q'$ (same for the other objects appearing below). Suppose the two regular chains of $\mathcal Q_0$ have order $2m^+$ and $2m^-$ respectively, and let the branching nodes in $\mathcal Q_0$ be $\mathfrak n_a^\pm(1\leq a\leq 2m^\pm)$. In the construction of $\mathcal Q_0$, at each node $\mathfrak n^{\pm}_{2j-1}$ ($1\leq j \leq n$) one of the six mini trees (Figure \ref{fig:minitree}) is attached, and $\mathfrak n_{2j}^\pm$ is the other branching node of this mini tree. We define a set $Z\subset \mathcal N^{ch}$ to be special if and only if \begin{equation}\label{unionz}Z=\bigg(\bigcup_{j,\epsilon,\iota}Z_{j,\epsilon,\iota}\bigg)\cup Z_{lp}\cup\big\{\mathfrak n_{2j-1}^+:j\in Z^+\big\}\cup\big\{\mathfrak n_{2j-1}^-:j\in Z^-\big\} \end{equation} for some special subsets $Z_{j,\epsilon,\iota}\subset \mathcal N_{j,\epsilon,\iota}^{ch}$ and $Z_{lp}\subset\mathcal N_{lp}^{ch}$, and some subsets $Z^\pm\subset \{1,\cdots,m^\pm\}$. Similar representations are defined for $\mathscr Q'$. For $\epsilon\in\{\pm\}$ and each $1\leq j\leq m^\epsilon$, consider the tuple $(\mathtt{I}_{j,\epsilon},\mathtt{c}_{j,\epsilon},\mathscr X_{j,\epsilon,1},\mathscr X_{j,\epsilon,2})$. Here $\mathtt{I}_{j,\epsilon}=1$ if $j\in Z^\epsilon$ and $\mathtt{I}_{j,\epsilon}=0$ otherwise, $\mathtt{c}_{j,\epsilon}\in\{1,2,3\}$ is the \emph{first digit of} the code of the mini tree attached at the node $\mathfrak n_{2j-1}$. Moreover $\mathscr X_{j,\epsilon,\iota}$ is the equivalence class of the enhanced dominant couple $\mathscr Q_{j,\epsilon,\iota}=(\mathcal Q_{j,\epsilon,\iota},Z_{j,\epsilon,\iota})$ for $\iota\in\{1,2\}$, and let $\mathscr Y$ be the equivalence class of the enhanced dominant couple $\mathscr Q_{lp}=(\mathcal Q_{lp},Z_{lp})$.
We now define $\mathscr Q\sim\mathscr Q'$, if and only if (i) $m^++m^-=(m^+)'+(m^-)'$, and (ii) the tuples coming from $\mathcal Q_0$ (there are total $m^++m^-$ of them) form \emph{a permutation of} the corresponding tuples coming from $\mathcal Q_0'$ (there are total $(m^+)'+(m^-)'$ of them), and (iii) $\mathscr Y=\mathscr Y'$. Finally, note that if $\mathscr Q=(\mathcal Q,Z)$ and $\mathscr Q'=(\mathcal Q',Z')$ are equivalent then $n(\mathcal Q)=n(\mathcal Q')$ and $|Z|=|Z'|$. When $\mathscr Q\sim\mathscr Q'$ with $Z=Z'=\varnothing$, we also say that $\mathcal Q\sim\mathcal Q'$.
Similarly, we can define the notions of dominant trees $\mathcal T$, special subsets $Z$, enhanced dominant trees $\mathscr T:=(\mathcal T, Z)$, and equivalence relations among them, similar to type 2 dominant couples above, except that there is only one dominant chain, and so there is no lone pair couple $\mathcal Q_{lp}$. In particular, if we denote by $\mathcal T_0$ the dominant chain of order $2m$ such that $\mathcal T$ is obtained by replacing leaf pairs of $\mathcal T_0$ by dominant couples, then the equivalence class of the enhanced dominant tree $\mathscr T$ is determined by specifying $m$ tuples $(\mathtt{I}_{j},\mathtt{c}_{j},\mathscr X_{j,1},\mathscr X_{j,2})$ exactly as defined above.
With this notation in hand, we can recount the main results in Section 7 of \cite{DH21}, which carry over verbatim to our setting here: \begin{itemize} \item If $\mathcal Q$ is a regular, but not dominant couple, then $\Jc \widetilde{\mathcal B}_{\mathcal Q, Z}(t, s)=0$. This means that the sum in \eqref{realcancel} is only over dominant couples. This is Proposition 7.4 in \cite{DH21}. The same holds for the sum over the regular trees $\mathcal T$ in \eqref{realcancel}, which is only over dominant trees. \item Let $\mathcal Q$ be a dominant couple. Then $\mathcal K_{\mathcal Q}(t, s,k)=\sum_{Z}\mathcal K_{\mathcal Q, Z}(t,s,k)$ where the sum is over special subsets $Z\subset\mathcal Q^{ch}$ as defined above and $\mathcal K_{\mathcal Q, Z}(t,s,k)$ is defined in \eqref{KQZapprox}. The function $\Jc\widetilde{\mathcal B}_{\mathcal Q,Z}(t,s)$ is independent of $Z$ and may be denoted $\Jc\widetilde{\mathcal B}_{\mathcal Q}(t,s)$. Moreover, these functions satisfy some explicit recurrence relation, described as follows. First $\Jc\widetilde{\mathcal B}_\mathcal Q(t,s)\equiv 1$ for the trivial couple (Proposition 7.5 of \cite{DH21}). If $\mathcal Q$ has type $1$, then it is formed from the $(1,1)$-mini couple by replacing its three leaf pairs by dominant couples $\mathcal Q_j\,(1\leq j\leq 3)$. In this case, we have \begin{equation} \label{recurtype1} \Jc\widetilde{\mathcal B}_{\mathcal Q}(t,s)=2\int_0^{\min(t,s)}\prod_{j=1}^3\Jc\widetilde{\mathcal B}_{\mathcal Q_j}(\tau,\tau)\,\mathrm{d}\tau. \end{equation} In particular $\Jc\widetilde{\mathcal B}_{\mathcal Q}=\Jc\widetilde{\mathcal B}_{\mathcal Q}(\min(t,s))$ is a function of $\min(t,s)$ for type $1$ dominant couples $\mathcal Q$. Finally, if $\mathcal Q$ has type $2$, then $\mathcal Q$ is formed from a regular double chain $\mathcal Q_0$, which consists of two dominant chains, by replacing each leaf pair in $\mathcal Q_0$ with a dominant couple. 
Using the notations described above for the structure of $\mathcal Q$ and $\mathcal Q_0$ in this case, we have \begin{multline} \label{recurtype2} \Jc\widetilde{\mathcal B}_{\mathcal Q}(t,s)=\int_{t>t_1>\cdots>t_{m^+}>0}\int_{s>s_1>\cdots >s_{m^-}>0}\prod_{j=1}^{m^+}\Jc\widetilde{\mathcal B}_{\mathcal Q_{j,+,1}}(t_j,t_j)\Jc\widetilde{\mathcal B}_{\mathcal Q_{j,+,2}}(t_j,t_j)\\\times\prod_{j=1}^{m^-}\Jc\widetilde{\mathcal B}_{\mathcal Q_{j,-,1}}(s_j,s_j)\Jc\widetilde{\mathcal B}_{\mathcal Q_{j,-,2}}(s_j,s_j)\cdot\Jc\widetilde{\mathcal B}_{\mathcal Q_{lp}}(\min(t_{m^+},s_{m^-}))\prod_{j=1}^{m^+}\mathrm{d}t_j\prod_{j=1}^{m^-}\mathrm{d}s_j. \end{multline} Here we understand that $t_0=t$ and $s_0=s$.
\item Let $\mathscr Q=(\mathcal Q,Z)$ be an enhanced dominant couple. Let $\mathcal M_{\mathscr Q}(k)=\mathcal M_{\mathcal Q,Z}(k)$ be defined as in (\ref{KQZapprox}). Then, the expression $\mathcal M_\mathscr Q(k)$ is real-valued and depends only on the equivalence class $\mathscr X$ of $\mathscr Q$, so we can denote it by $\mathcal M_\mathscr X(k)$, (cf. Proposition 7.7 of \cite{DH21}).
\item Similarly, for a dominant tree $\mathcal T$, and with the notation described above, we have \begin{align} (\mathcal K^*_{\mathcal T})_{\mathrm{app}}(t,s,k)&=\sum_{Z\textrm{\ special}}(\mathcal K^*_{\mathcal T,Z})_{\mathrm{app}}(t,s,k) \nonumber \\ (\mathcal K^*_{\mathscr T})_{\mathrm{app}}(t,s,k)&=(\mathcal K^*_{\mathcal T,Z})_{\mathrm{app}}(t,s,k)=2^{-2n}\delta^n\zeta(\mathcal T)\prod_{\mathfrak n\in Z}\frac{1}{\zeta_\mathfrak n \pi i}\cdot\Jc \widetilde B^*_{\mathcal T}(t, s)\cdot\mathcal M_{\mathscr T}^*(k),\nonumber\\ \Jc \widetilde{\mathcal B}^*_{\mathcal T}(t,s)&=\int_{t>t_1>\cdots>t_{m^*}>s}\prod_{j=1}^{m^*}\Jc\widetilde{\mathcal B}_{\mathcal Q_{j,+,1}}(t_j,t_j)\Jc\widetilde{\mathcal B}_{\mathcal Q_{j,+,2}}(t_j,t_j)\prod_{j=1}^{m^*}\mathrm{d}t_j, \label{recurtypetree} \end{align} and $\mathcal M_{\mathscr T}^*(k)$ is real-valued and depends only on the equivalence class of the enhanced dominant tree $(\mathcal T, Z)$. \end{itemize}
We are now finally ready to give the proof of \eqref{realcancel}. In fact, we shall split this sum into subsets and show that the sum of each subset is real. To define these subsets, we first notice that \begin{equation}\label{realcancel2} \sum_{n(\mathcal Q)+n(\mathcal T)=2n}(\mathcal K_\mathcal Q)_{\mathrm{app}}(t,s,k)\cdot\overline{(\mathcal K^*_\mathcal T)_{\mathrm{app}}(t,s,k)}=\sum_{n(\mathscr Q)+n(\mathscr T)=2n} (\mathcal K_{\mathscr Q})_{\mathrm{app}}(t,s,k)\cdot\overline{(\mathcal K^*_{\mathscr T})_{\mathrm{app}}(t,s,k)} \end{equation}
where the sum is now over enhanced dominant couples $\mathscr Q$ of order $n(\mathscr Q):=n(\mathcal Q)$ and enhanced dominant trees $\mathscr T$ of order $n(\mathscr T):=n(\mathcal T)$. Recall that each $\mathscr Q$ belongs to an equivalence class that is completely determined by specifying $m^++m^-$ tuples $(\mathtt{I}_{j,\epsilon},\mathtt{c}_{j,\epsilon},\mathscr X_{j,\epsilon,1},\mathscr X_{j,\epsilon,2})$ ($\epsilon=\pm$) and an equivalence class $\mathscr Y$ for the enhanced dominant couple $\mathscr Q_{lp}=(\mathcal Q_{lp}, Z_{lp})$. Note that $\mathscr Q$ is of type 1 if and only if $m^++m^-=0$, in which case $\mathscr Y$ is the equivalence class of the enhanced dominant couple $\mathscr Q$. Similarly, $\mathscr T$ belongs to an equivalence class of enhanced dominant trees that is completely determined by specifying $m^*$ tuples $(\mathtt{I}_{j,*},\mathtt{c}_{j,*},\mathscr X_{j,*,1},\mathscr X_{j,*,2})$.
Suppose we fix the value $n=m^++m^-+m^*$, fix a collection of $n$ tuples $(\mathtt{I}_{j},\mathtt{c}_{j},\mathscr X_{j,1},\mathscr X_{j,2})$, and fix an equivalence class $\mathscr Y$, and then sum in \eqref{realcancel2} \emph{only for $(\mathscr Q, \mathscr T)$ that belong to equivalence classes formed from this collection}. We will show that this sum is real valued, which then completes the proof. Denote by $\boldsymbol{\mathfrak A}$ all possible (enhanced dominant) couple-tree pairs $(\mathscr Q, \mathscr T)$ that belong to equivalence classes formed from this given collection. Note that for any $(\mathscr Q, \mathscr T)\in \boldsymbol{\mathfrak A}$, the total order $n(\mathcal Q)+n(\mathcal T)$, the sum $|Z|+|Z^*|$ of the cardinalities of the special subsets $Z$ for $\mathcal Q$ and $Z^*$ for $\mathcal T$, and the product $\zeta(\mathcal Q) \zeta(\mathcal T)$ are all the same. Moreover, the product $\mathcal M_{\mathscr Q}(k) \mathcal M_{\mathscr T}^*(k)$ is also the same for all enhanced dominant $(\mathscr Q, \mathscr T) \in \boldsymbol{\mathfrak A}$ since it is a product of factors determined by the tuples $(\mathtt{I}_{j},\mathtt{c}_{j},\mathscr X_{j,1},\mathscr X_{j,2})$ and the equivalence class $\mathscr Y$ (see Proposition 7.9 of \cite{DH21}). Hence, for some real-valued function $\mathfrak C(k)$ we have \begin{equation}\label{realcancel4} \sum_{(\mathscr Q, \mathscr T) \in \boldsymbol{\mathfrak A}} (\mathcal K_{\mathscr Q})_{\mathrm{app}}(t,s,k)\cdot\overline{(\mathcal K^*_{\mathscr T})_{\mathrm{app}}(t,s,k)}=\mathfrak C(k)\sum_{(\mathscr Q, \mathscr T) \in \boldsymbol{\mathfrak A}}\prod_{\mathfrak n \in Z}\frac{1}{\zeta_\mathfrak n \pi i}\prod_{\mathfrak n \in Z^*}\Jc \widetilde B_{\mathcal Q}(t,s)\frac{-1}{\zeta_\mathfrak n \pi i} \Jc \widetilde B^*_{\mathcal T}(t,s). \end{equation}
We will show that this quantity is real, by showing that the sum vanishes unless $Z\cup Z^*$ is empty, in which case reality follows from the evident reality of $\Jc\widetilde{\mathcal B}_{\mathcal Q}(t,s)$ and $\Jc\widetilde{\mathcal B}^*_{\mathcal T}(t,s)$. To see this, let us assume that $|Z|+ |Z^*|\neq 0$ for $(\mathscr Q, \mathscr T) \in \boldsymbol{\mathfrak A}$ (recall that this value is the same for all $(\mathscr Q, \mathscr T) \in \boldsymbol{\mathfrak A}$), and consider the sum on the right hand side of \eqref{realcancel4}. We first note that from Proposition 7.8 of \cite{DH21}, if $\mathscr X$ is an equivalence class of enhanced dominant couples $(\widetilde{\mathcal Q}, \widetilde Z)$, with $\widetilde{Z}\neq \varnothing$, then \begin{equation} \mathcal{G}_{\mathscr X}:=\sum_{\widetilde{\mathscr Q}=(\widetilde{\mathcal Q},\widetilde Z)\in\mathscr X}\bigg(\prod_{\mathfrak n\in \widetilde Z}\frac{1}{\zeta_\mathfrak n\pi i}\bigg)\cdot \Jc\widetilde{\mathcal B}_{\widetilde{\mathcal Q}}(t,t)=0. \end{equation}
As a result, by \eqref{recurtype1}--\eqref{recurtypetree}, the sum in \eqref{realcancel4} is a linear combination of factors of the form $\mathcal{G}_{\mathscr X_{j,1}}\mathcal{G}_{\mathscr X_{j, 2}}\mathcal{G}_{\mathscr Y}$ (possibly with different arguments/variables), hence it vanishes unless all the equivalence classes $\mathscr X_{j, 1}, \mathscr X_{j,2}$, and $\mathscr Y$ have empty special subsets (recall that $|Z|$ is constant on an equivalence class). Moreover, since $|Z|+|Z^*|$ is constant over $\boldsymbol{\mathfrak A}$, we may replace the factors $\frac{1}{\zeta_\mathfrak n \pi i}$ in \eqref{realcancel4} by $\zeta_\mathfrak n$ and prove the vanishing of the resulting expression \begin{equation}\label{realcancel3} \mathcal{G}_{\boldsymbol{\mathfrak A}}(t,s)=\sum_{(\mathscr Q, \mathscr T) \in \boldsymbol{\mathfrak A}}\prod_{\mathfrak n \in Z}\zeta_\mathfrak n \Jc \widetilde{\mathcal B}_{\mathcal Q}(t,s) \prod_{\mathfrak n \in Z^*}(-\zeta_\mathfrak n) \Jc \widetilde{\mathcal B}^*_{\mathcal T}(t,s). \end{equation}
If $(\mathscr Q, \mathscr T) \in \boldsymbol{\mathfrak A}$, then we have a decomposition $n=m^++m^-+m^*$ where $2m^+, 2m^-, 2m^*$ are the orders of three dominant chains $\mathcal T_0^+, \mathcal T_0^-, \mathcal T_0^*$ associated to the dominant couple $\mathcal Q$ and dominant tree $\mathcal T$, as well as a division of the $n$ tuples $(\mathtt{I}_j,\mathtt{c}_j,\mathscr X_{j,1},\mathscr X_{j,2})$ into three groups: one with $m^+$ elements denoted by $(\mathtt{I}_{j,+},\mathtt{c}_{j,+},\mathscr X_{j,+,1},\mathscr X_{j,+,2})$ ($1\leq j \leq m^+$), one with $m^-$ elements $(\mathtt{I}_{j,-},\mathtt{c}_{j,-},\mathscr X_{j,-,1},\mathscr X_{j,-,2})$ ($1\leq j\leq m^-$), and one with $m^*$ elements $(\mathtt{I}_{j,*},\mathtt{c}_{j,*},\mathscr X_{j,*,1},\mathscr X_{j,*,2})$ ($1\leq j \leq m^*$). Moreover, since $\mathtt{c}_{j,\epsilon}$ are just the \emph{first digits} of the codes of the mini trees appearing in the structure of $\mathcal Q$ and $\mathcal T$, the corresponding \emph{second digits} can be arbitrary (and $\widetilde{\mathcal B}_{\mathcal Q}$ and $\widetilde{\mathcal B}^*_{\mathcal T}$ do not depend on this second digit), which results in a $2^n$ factor. 
Putting together, if we sum over all possible $(\mathscr Q, \mathscr T) \in \boldsymbol{\mathfrak A}$---which means summing over all possible decompositions of $n=m^++m^-+m^*$ and permutations of the tuples, and then summing over all possible $\mathcal Q_{j,\epsilon,\iota}$ and $\mathcal Q_{lp}$---we would get \begin{equation} \label{splitformula0} \begin{aligned} \mathcal{G}_{\boldsymbol \mathfrak A}(t)&=2^n\sum_{m^++m^-+m^*=n}\sum_{(\mathscr A_1,\cdots, \mathscr A_{m^+},\mathscr B_1,\cdots,\mathscr B_{m^-}, \mathscr C_1, \cdots, \mathscr C_{m^*})}\int_{t>t_1>\cdots >t_{m^+}>0}\int_{s>s_1>\cdots>s_{m^-}>0}\int_{t>u_1>\cdots >u_{m^*}>s}\\&\times \prod_{j=1}^{m^-}(-1)^{\mathtt{I}_{j,-}'} \prod_{j=1}^{m^*}(-1)^{\mathtt{I}_{j,*}'}\prod_{j=1}^{m^+}\mathscr M(\mathscr A_j)(t_j)\prod_{j=1}^{m^-}\mathscr M(\mathscr B_j)(s_j)\prod_{j=1}^{m^*}\mathscr M(\mathscr C_j)(u_j)\cdot \mathcal{G}_{\mathscr Y}(\min(t_{m^+},s_{m^-}))\\ &\qquad\,\, \qquad\qquad \qquad\qquad \qquad\qquad \qquad \qquad \qquad\qquad \qquad\times\mathrm{d}t_1\cdots\mathrm{d}t_{m^+}\mathrm{d}s_1\cdots\mathrm{d}s_{m^-}\mathrm{d}u_1\cdots\mathrm{d}u_{m^*}. \end{aligned} \end{equation} Here in (\ref{splitformula0}) the sum is taken over all permutations $(\mathscr A_1,\cdots, \mathscr A_{m^+},\mathscr B_1,\cdots,\mathscr B_{m^-}, \mathscr C_1, \ldots, \mathscr C_{m^*})$ of the tuples $(\mathtt{I}_j,\mathtt{c}_j,\mathscr X_{j,1},\mathscr X_{j,2})$. Moreover $\mathtt{I}_{j,-}'$ and $\mathtt{I}_{j, *}'$ represent the first (``$\mathtt{I}$") component of $\mathscr B_j$ and $\mathscr C_j$, the function $\mathscr M(\mathscr A_j)$ equals $\mathcal{G}_{\mathscr X_{j,+,1}'}\cdot\mathcal{G}_{\mathscr X_{j,+,2}'}$ where $(\mathscr X_{j,+,1}',\mathscr X_{j,+,2}')$ represents the last two (``$\mathscr X$") components of $\mathscr A_j$, and $\mathscr M(\mathscr B_j)$ and $\mathscr M(\mathscr C_j)$ are defined similarly. 
Arranging all the time variables in decreasing order, and renaming them as $t>v_1>v_2 >\ldots> v_n>0$, we can write \begin{align*} \mathcal{G}_{\boldsymbol{\mathfrak A}}(t)&=2^n\sum_{(\mathscr D_1, \ldots, \mathscr D_n)}\int_{t>v_1>\ldots>v_n>0} \prod_{j=1}^n\mathscr M(\mathscr D_j)(v_j) \mathcal{G}_{\mathscr Y}\left(\min(v_n, s) \right)\\ &\qquad \sum_{A_1\cup A_2\cup A_3=[1,n]} \prod_{j\in A_2}(-1)^{\mathtt{I}_{j}'}\mathbf{1}_{v_j<s} \prod_{j\in A_3}(-1)^{\mathtt{I}_{j}'}\mathbf{1}_{v_j>s} \quad \mathrm{d}v_1\cdots\mathrm{d}v_{n}, \end{align*} where the first sum is over all permutations $(\mathscr D_1, \ldots, \mathscr D_n)$ of the tuples $(\mathtt{I}_j,\mathtt{c}_j,\mathscr X_{j,1},\mathscr X_{j,2})$, and the second sum is over all partitions of the set $[1,n]=\{1, 2, \ldots, n\}$ into three subsets $A_1, A_2,A_3$, and $\mathtt{I}_j'$ is the first (``$\mathtt{I}$'') component of $\mathscr D_j$. Now, notice that \[ \sum_{(A_1,A_2,A_3)} \prod_{j\in A_2}(-1)^{\mathtt{I}_{j}'}\mathbf{1}_{v_j<s} \prod_{j\in A_3}(-1)^{\mathtt{I}_{j}'}\mathbf{1}_{v_j>s}=\prod_{j=1}^n\left(1+(-1)^{\mathtt{I}_{j}'}\mathbf{1}_{v_j<s}+(-1)^{\mathtt{I}_{j}'}\mathbf{1}_{v_j>s}\right)=\prod_{j=1}^n\left(1+(-1)^{\mathtt{I}_{j}'}\right). \] If $Z\cup Z^*\neq\varnothing$, then at least one of the $\mathtt{I}_{j}'$ equals $1$, so the above product vanishes and hence $\mathcal{G}_{\boldsymbol{\mathfrak A}}(t)$ also vanishes. This finishes the proof. \subsection{Reduction to prime couples}\label{primered} Using Proposition \ref{regcpltreeasymp}, we can reduce the expression $\mathcal K_\mathcal Q$ for any couple $\mathcal Q$, defined in (\ref{defkq}), to a similar expression associated with the skeleton $\mathcal Q_{\mathrm{sk}}$ of $\mathcal Q$, in the same way as in Section 8.1 of \cite{DH21}.
Recall from (\ref{defkq}) that \begin{equation}\label{bigformula}\mathcal K_\mathcal Q(t,t,k)=\bigg(\frac{\delta}{2L^{d-\gamma}}\bigg)^n\zeta(\mathcal Q)\sum_{\mathscr E}\int_\mathcal E\epsilon_\mathscr E\prod_{\mathfrak n\in \mathcal N} e^{\zeta_\mathfrak n\pi i\cdot\delta L^{2\gamma}\Omega_\mathfrak n t_\mathfrak n}\,\mathrm{d}t_\mathfrak n{\prod_{\mathfrak l\in\mathcal L}^{(+)}n_{\mathrm{in}}(k_\mathfrak l)}, \end{equation} where $n$ is the order of $\mathcal Q$, $\mathcal E$ is the domain defined in (\ref{defdomaine}), $\mathscr E$ is a $k$-decoration and other objects are defined as before, all associated to the couple $\mathcal Q$. By repeating the arguments in Subsection 8.1 of \cite{DH21}, using also the notation $\mathscr A$ in Proposition \ref{skeleton}, we get \begin{multline}\label{bigformula2}\mathcal K_\mathcal Q(t,t,k)=\mathcal K_{(\mathcal Q_{\mathrm{sk}},\mathscr A)}(t,t,k):=\bigg(\frac{\delta}{2L^{d-\gamma}}\bigg)^{n_0}\zeta(\mathcal Q_{\mathrm{sk}})\sum_{\mathscr E_{\mathrm{sk}}}\int_{\mathcal E_{\mathrm{sk}}}\epsilon_{\mathscr E_{\mathrm{sk}}}\prod_{\mathfrak n\in \mathcal N_{\mathrm{sk}}} e^{\zeta_\mathfrak n\pi i\cdot\delta L^{2\gamma}\Omega_\mathfrak n t_\mathfrak n}\,\mathrm{d}t_\mathfrak n\\\times{\prod_{\mathfrak l\in\mathcal L_{\mathrm{sk}}}^{(+)}\mathcal K_{\mathcal Q^{(\mathfrak l,\mathfrak l')}}(t_{\mathfrak l^p},t_{(\mathfrak l')^p},k_\mathfrak l)}\prod_{\mathfrak m\in\mathcal N_{\mathrm{sk}}}\mathcal K_{\mathcal T^{(\mathfrak m)}}^*(t_{\mathfrak m^p},t_{\mathfrak m},k_\mathfrak m), \end{multline} where $n_0$ is the order of $\mathcal Q_{\mathrm{sk}}$, $\mathcal E_{\mathrm{sk}}$ is the domain defined in (\ref{defdomaine}) but with $s=t$, and $\mathscr E_{\mathrm{sk}}$ is a $k$-decoration, the other objects are as before but associated to the couple $\mathcal Q_{\mathrm{sk}}$. 
Here in (\ref{bigformula2}), the first product is taken over all leaf pairs $(\mathfrak l,\mathfrak l')$ where $\mathfrak l$ has sign $+$, the second product is taken over all branching nodes $\mathfrak m$, and $\mathfrak m^p$ represents the parent of $\mathfrak m$ (if $\mathfrak m$ is the root of a tree then $t_{\mathfrak m^p}$ should be replaced by $t$).
Note that, using Proposition \ref{regcpltreeasymp}, we can decompose \begin{equation}\label{inputdecomp}\mathcal K_{\mathcal Q^{(\mathfrak l,\mathfrak l')}}=(\mathcal K_{\mathcal Q^{(\mathfrak l,\mathfrak l')}})_{\mathrm{app}}+\mathscr R_{\mathcal Q^{(\mathfrak l,\mathfrak l')}},\quad \mathcal K_{\mathcal T^{(\mathfrak m)}}^*=(\mathcal K_{\mathcal T^{(\mathfrak m)}}^*)_{\mathrm{app}}+\mathscr R_{\mathcal T^{(\mathfrak m)}}^*, \end{equation} where the leading term $(\mathcal K_{\mathcal Q^{(\mathfrak l,\mathfrak l')}})_{\mathrm{app}}$ and the remainder $\mathscr R_{\mathcal Q^{(\mathfrak l,\mathfrak l')}}$ satisfy the bounds (\ref{remainderbd})--(\ref{kqbd}), and similarly $(\mathcal K_{\mathcal T^{(\mathfrak m)}}^*)_{\mathrm{app}}$ and $\mathscr R_{\mathcal T^{(\mathfrak m)}}^*$ satisfy the slightly modified bounds as in Proposition \ref{regcpltreeasymp}. The leading terms also satisfy the exact equalities (\ref{conjugatekq})--(\ref{realcancel}).
\section{$\mathcal K_\mathcal Q$ estimates for vines}\label{funcgroup} \subsection{Two general estimates for sum-integrals} Before getting to the needed estimates for vines, we first state two general results about expressions that are sums of time-oscillatory integrals.
\begin{lem}\label{sumintest1} Fix $\gamma\leq \frac45-\eta$. For $m\geq 1$, consider the output variables $e,f,g,h,x_0\in\mathbb Z_L^d$ and $t\in[0,1]$, and parameters $\lambda_0,\lambda_j,\mu_j\in\mathbb R\,(1\leq j\leq m)$. Assume each of $(e,f,g,h,x_0)$ is restricted to a fixed unit ball, and $e-f=h-g:=r\neq 0$ and $|r|\lesssim L^{-\gamma-\eta}$. Let the input variables be $x_j,y_j\in\mathbb Z_L^d\,(1\leq j\leq m)$ and $t_0,t_j,s_j\in[0,1]\,(1\leq j\leq m)$; denote ${\textbf{x}}=(x_0,\cdots,x_m,y_1,\cdots,y_m)$.
Suppose there exist alternative variables $k_j\,(1\leq j\leq 2m+1)$ and $\ell_j\,(1\leq j\leq 2m+1)$, where we write ${\textbf{k}}=(k_1,\cdots,k_{2m+1})$ and $\boldsymbol{\ell}=(\ell_1,\cdots,\ell_{2m+1})$, such that: (i) we have ${\textbf{k}}=T_1{\textbf{x}}+{\textbf{h}}_1$ and $\boldsymbol{\ell}=T_2{\textbf{x}}+{\textbf{h}}_2$ for some matrices $T_j$ and some constant vectors ${\textbf{h}}_j$ depending only on $(e,f,g,h)$, such that all coefficients of $T_1,\,T_1^{-1},\,T_2$ are integers $\lesssim 1$; (ii) for each $1\leq p\leq 2m+1$ there exists $q<p$ such that $\ell_{p}\pm\ell_{q}$ is an integer linear combination of $(k_j)$ and $(e,f,g,h)$ with absolute value sum $\lesssim \Lambda_p$, such that $\Lambda_1\cdots \Lambda_{2m+1}\lesssim C^m$; (iii) any component of the vector ${\textbf{x}}$ (such as $x_j$ or $y_j$) is the sum or difference of two variables, each of which is a component of the vector ${\textbf{k}}$ or $\boldsymbol{\ell}$ or $(e,f,g,h)$. Now define \begin{multline}\label{sumintI1}\mathcal I=\mathcal I(x_0,e,f,g,h,t):=\sum_{(x_j,y_j):1\leq j\leq m}\prod_{j=1}^{2m+1}\mathcal K_j(k_j)\prod_{j=1}^{2m+1}\mathcal K_j^*(\ell_j)\\\times \int_{\mathcal D} \prod_{j=1}^m e^{\pi i \cdot\delta L^{2\gamma}(t_j-s_j)(x_j\cdot y_j)}\cdot\prod_{j=0}^m e^{\pi i\cdot\delta L^{2\gamma}t_j(r\cdot \zeta_j)}\cdot\prod_{j=0}^me^{\pi i\lambda_j t_j}\prod_{j=1}^m e^{\pi i\mu_js_j}\prod_{j=1}^m\mathrm{d}t_j\mathrm{d}s_j \cdot\mathrm{d}t_0. \end{multline} Here in (\ref{sumintI1}), $\mathcal D$ is the domain of $(t_0,t_j,s_j)$ defined by the following conditions: (i) $t_j,s_j>0$ for any $j$, and $0<t_0<t$; (ii) any fixed collection of inequalities of the form $s_i<s_j$, $t_i<t_j$, $s_i<t_j$ or $t_i<s_j$. For each $j$, we allow $\mathcal K_j$ and $\mathcal K_j^*$ to depend on the parameters $(\lambda_0, \lambda_j, \mu_j)_{1\leq j \leq m}$ and assume \begin{equation}
\label{propertyKj}\sup_{|\alpha|\leq 40d}\sup_{k}\langle k\rangle^{40d}|\partial^\alpha\mathcal K_j(k)|\lesssim 1,\qquad \sup_{|\alpha|\leq 40d}\sup_{\ell}|\partial^\alpha\mathcal K_j^*(\ell)|\lesssim 1. \end{equation} Moreover $\zeta_j=a_jx_j+b_jy_j+c_jr$ for $a_j,b_j,c_j\in\{0,\pm1\}$, and if $j=0$, then $y_0$ should be replaced by one of $(e,f,g,h)$. Finally, assume one of the following: \begin{enumerate}[{(a)}] \item The conditions in the definition of $\mathcal D$ include either $s_1<t_0<t_1$ or $t_1<t_0<s_1$, \item The summand-integrand in $\mathcal I$ contains an extra factor of $r$, in addition to (\ref{sumintI1}), \item The summand-integrand in $\mathcal I$ contains an extra factor
of $|t_1-s_1|^{1-\eta}$. \item For at least one $j$ with $1\leq j\leq 2m+1$, one of the two bounds in \eqref{propertyKj} is replaced by $L^{-\gamma}$. \end{enumerate} Then, uniformly in $(\lambda_0,\lambda_j,\mu_j)$ and in the choice of the unit balls containing $(e,f,g,h,x_0)$ described above, and with the norm taken in $(x_0,e,f,g,h,t)$, we have that
\begin{equation}\label{FourierboundI}\|\mathcal I\|_{X_{\mathrm{loc}}^{2\eta^2,0}}\lesssim (C^+\delta^{-1})^{m}L^{2m(d-\gamma)-\gamma-\eta^2}. \end{equation} \end{lem} \begin{proof}
\underline{\emph{Step 1: Preparations.}} Using the $\langle k\rangle^{-40d}$ decay of $\mathcal K_j(k)$, we can localize $k_j$ into sets of the form $|k_j-a_j^*|\leq 1$, where $a_j^*\in \mathbb Z^d$ and sum the obtained estimate in $a_j^*$ at the end. Once this set of $a_j^*$ is fixed, we notice that there are $O(\Lambda_1^d)$ choices for the integer part of the coordinates of $\ell_1$. Similarly, once we fix all those integer parts, there are $O(\Lambda_2^d)$ choices for the integer part of the coordinates of $\ell_2$, and so on. As a result, at the expense of a multiplicative factor of $(\Lambda_1 \ldots \Lambda_{2m+1})^d\lesssim C^m$, we may fix points $b_j^*\in \mathbb Z^d$ so that $|\ell_j-b_j^*|\leq 1$. Consequently, since each $x_j,y_j\,(j\geq 1)$ is the difference of some $k_j$ and $\ell_j$, we may assume that for some fixed $a_j, b_j$ we have $|x_j-a_j|\leq 1$ and $|y_j-b_j|\leq 1$ (where $a_j$ and $b_j$ may depend on the unit balls containing $(e,f,g,h,x_0)$). Finally, if $(e,f,g,h,x_0)$ is fixed, we may express one of the $k_j$ (for example $k_{2m+1}$) as an affine linear combination of the others. This allows us to define the change of variables $ (x_1,\cdots,x_m, y_1,\cdots,y_m) \leftrightarrow (k_1, \ldots, k_{2m})$ which is affine and volume preserving, and preserves the lattice $\mathbb Z^d$ (at most up to an absolute constant).
Now set $W(x_1,\cdots,x_m, y_1,\cdots,y_m):=\prod_{j=1}^{2m+1}(\mathcal K_j(k_j)\mathcal K_j^*(\ell_j))$, which is a function that satisfies the same conditions \eqref{propertyw1} and \eqref{propertyw2} (with $\widetilde x_j=x_j$ and $\widetilde y_j=y_j$) of Proposition \ref{approxnt} in the variables $(x_j, y_j)_{1\leq j\leq m}$. In fact, \eqref{propertyw1} holds if $W$ is regarded as a function of $(k_j)_{1\leq j \leq 2m}$, and thus it also holds for $(x_j, y_j)_{1\leq j\leq m}$ due to the properties of the affine linear transform. As such, by expanding $W$ in terms of its Fourier transform, we can replace this function by $\prod_{j=1}^m e^{\pi i \xi_j \cdot x_j} e^{\pi i \rho_j \cdot y_j}$, with one extra weight $\langle\xi_j\rangle^{-1}$ or $\langle \rho_j\rangle^{-1}$ if needed. We thus reduce to \begin{equation}\label{IexpProof1} \begin{aligned} \mathcal I&=\int_0^t\Jc\,\mathrm{d}t_0,\qquad\mathrm{where}\\ \Jc&=\int_{\mathcal D_1}e^{\pi i\cdot\delta L^{2\gamma}t_0(r\cdot\zeta_0)}\cdot\prod_{j=0}^me^{\pi i\lambda_j t_j}\prod_{j=1}^m e^{\pi i\mu_js_j}\prod_{j=1}^m\mathrm{d}t_j\mathrm{d}s_j\\ &\times\prod_{j=1}^m\sum_{(x_j,y_j)}\chi_0(x_j-a_j)\chi_0(y_j-b_j)e^{\pi i\cdot\delta L^{2\gamma}(t_j-s_j)(x_j\cdot y_j)}\cdot e^{\pi i\cdot\delta L^{2\gamma}t_j(r\cdot\zeta_j)}\cdot e^{\pi i(\xi_j\cdot x_j+\rho_j\cdot y_j)} \end{aligned} \end{equation}
for some set $\mathcal D_1$ depending on $t_0$. Below we first study $\Jc$, which we will bound uniformly in all variables and parameters $(\lambda_0,\lambda_j,\mu_j,e,f,g,h,x_0,t_0)$.
\underline{\emph{Step 2: Continuous approximation.}} We shall neglect all the exponential factors in $(t_j,s_j)$ outside the sums in $(x_j, y_j)$ in (\ref{IexpProof1}) and take absolute values outside this sum. This allows us to enlarge the domain $\mathcal D_1$ (but still keeping the restriction in (a) if applicable) and factorize the whole expression $\Jc$ into a product of $m$ terms $\mathcal M_j$ for $1\leq j\leq m$. We will focus on the $\mathcal M_1$ factor, as the others are similar and easier. This factor then reads \begin{equation*}
\mathcal M_1:=\int_{\mathcal D_2}\bigg|\sum_{(x_1, y_1) \in \mathbb Z^{2d}_L}\chi_0({x_1-a_1})\chi_0({y_1-b_1})e^{\pi i \left[x_1\cdot\xi_1+y_1\cdot \rho_1+ \delta L^{2\gamma}(t_1-s_1)(x_1\cdot y_1)+\delta L^{2\gamma}t_1r\cdot \zeta_1\right]}\bigg|\,\mathrm{d}t_1\mathrm{d}s_1 \end{equation*}
where $\mathcal D_2=[0,1]^2$ with the extra restriction $s_1<t_0<t_1$ or $t_1<t_0<s_1$ in (a) if applicable. We may change the variables $(t_1, s_1)\to(t_1, u_1)$ where $u_1=\delta L^{2\gamma}(t_1-s_1)$ and split the integral into two regions: where $|u_1|> L$ and where $|u_1|\leq L$. Denote the contributions of those two regions by $A$ and $B$ respectively.
For term $A$, since $L\leq |u_1| \lesssim L^{2\gamma}$, we have \begin{align*}
|A|\lesssim& (\delta L^{2\gamma})^{-1}\int_0^1\int_{L}^{\delta L^{2\gamma}}\left|\sum_{(x_1, y_1) \in \mathbb Z^{2d}_L}\chi_0({x_1-a_1})\chi_0({y_1-b_1})e^{\pi i \left[x_1\cdot\xi_1+y_1\cdot \rho_1+ u_1(x_1\cdot y_1)+\delta L^{2\gamma}t_1r\cdot \zeta_1\right]}\right|\mathrm{d}u_1\mathrm{d}t_1\\ \lesssim& (\delta L^{2\gamma})^{-1} L^{2d-2(d-1)(1-\gamma)+\eta}\lesssim (\delta L^{2\gamma})^{-1} L^{2d-\gamma-\eta}, \end{align*} where we have used Lemma \ref{lem:minorarcs} and the fact that $\gamma<\frac{4}{5}-\eta$.
As for $B$, we do Poisson summation in the variables $(x_1, y_1)$ and obtain that $B\leq B_1+B_2$, where \begin{align*}
B_1&=L^{2d}(\delta L^{2\gamma})^{-1}\int_{\mathbb R^2}\bigg|\sum_{(g', h')\in \mathbb Z^{2d}\setminus \{0\}}\int_{\mathbb R^d\times \mathbb R^{d}} \Phi(t_1, u_1, x-a_1, y-b_1) \\
&\qquad \qquad \qquad \qquad \qquad \qquad \times e^{\pi i \left[x\cdot(\xi_1-Lg')+y\cdot (\rho_1-Lh')+ u_1(x\cdot y)+\delta L^{2\gamma}t_1r\cdot \zeta_1\right]}\,\mathrm{d}x\mathrm{d}y\bigg|\,\mathrm{d}t_1\mathrm{d}u_1,\\
B_2&=L^{2d}(\delta L^{2\gamma})^{-1}\int_{\mathbb R^2} \bigg|\int_{\mathbb R^d\times \mathbb R^{d}}\Phi(t_1, u_1, x-a_1, y-b_1) e^{\pi i \left[x\cdot\xi_1+y\cdot \rho_1+ u_1(x\cdot y)+\delta L^{2\gamma}t_1r\cdot \zeta_1\right]} \,\mathrm{d}x\mathrm{d}y\bigg|\,\mathrm{d}t_1\mathrm{d}u_1. \end{align*}
Here $\Phi(t_1, u_1, x-a_1, y-b_1)= \mathbf 1_{\mathcal D_2}\left(t_1, t_1- (\delta L^{2\gamma})^{-1}u_1\right)\mathbf 1_{|u_1|\leq L}\cdot \chi_0({x-a_1})\chi_0({y-b_1})$ is a function satisfying \eqref{NTphibounds} uniformly in $t_1$. Note that $B_1$ is bounded using Lemma \ref{NTSP} by $$
L^{2d}(\delta L^{2\gamma})^{-1} (1+|\xi_1+\epsilon\cdot\delta L^{2\gamma}r|+|\rho_1+\epsilon'\cdot\delta L^{2\gamma}r|) \cdot L^{-1+2\eta} $$
where we integrate in $t_1$ trivially. Moreover, due to the proof of Lemma \ref{NTSP}, the bound can be improved to $L^{2d}(\delta L^{2\gamma})^{-1}\cdot L^{-1+2\eta}$, \emph{unless} $|\xi_1+\epsilon\cdot\delta L^{2\gamma}r|+|\rho_1+\epsilon'\cdot\delta L^{2\gamma}r|\gtrsim L$. In the latter case, since $|r|\lesssim L^{-\gamma-\eta}$, we must have $|\xi_1|+|\rho_1|\gtrsim L$. Therefore we can gain the power $L^{-1+2\eta}$ using the $\langle\xi_1\rangle^{-1}$ or $\langle \rho_1\rangle^{-1}$ as above.
Now we are left with $B_2$. By stationary phase, the integral in $(x,y)$ is bounded by $\langle u_1\rangle^{-d}$, so trivially $|B_2|\lesssim \delta^{-1}L^{2(d-\gamma)}$ in the absence of extra restrictions (this applies for example to $\mathcal M_j$ for $j>1$). For $j=1$ we have one of the assumptions (a)--(c) available, which will lead to further power gains. In fact, since $|r|\lesssim L^{-\gamma-\eta}$, case (b) already implies the gain $L^{-\gamma-\eta}$ which suffices for (\ref{FourierboundI}); for (c), the extra factor $|t_1-s_1|^{1-\eta}= (\delta L^{2\gamma})^{-1+\eta} |u_1|^{1-\eta}$ can be transformed to a gain of $(\delta L^{2\gamma})^{-1+\eta}\ll L^{-\gamma-\eta}$ since $\langle u_1\rangle^{-d}|u_1|^{1-\eta}$ is still integrable in $u_1$. Finally, in case (a), we have that $s_1\leq t_0\leq t_1$ or $t_1\leq t_0 \leq s_1$, which means that $|t_1-t_0|\leq |t_1-s_1|=(\delta L^{2\gamma})^{-1} |u_1|$. Thus, integrating in $t_1$ with fixed $u_1$ gives an extra factor of $(\delta L^{2\gamma})^{-1} |u_1|$ in estimating $B_2$, which also gives the gain $(\delta L^{2\gamma})^{-1}$.
Summing up, in all cases we have proved that $|\mathcal M_1|\lesssim \delta^{-1}L^{2(d-\gamma)-\gamma-\eta}$ and $|\mathcal M_j|\lesssim \delta^{-1}L^{2(d-\gamma)}$ for $j>1$, which then implies $|\Jc|\lesssim (C^+\delta^{-1})^m L^{2m(d-\gamma)-\gamma-\eta}$ uniformly in $(e,f,g,h,x_0)$ and $t_0$.
\underline{\emph{Step 3: Going from $\Jc$ to $\mathcal I$.}} Recall that $\mathcal I$ is defined as in (\ref{IexpProof1}) and $\Jc=\Jc(t_0)$ is a function valued in the Banach space $\mathfrak X:=L_{(e,f,g,h,x_0)}^\infty$ with $\|\Jc\|_{L^\infty}\lesssim (C^+\delta^{-1})^m L^{2m(d-\gamma)-\gamma-\eta}:=\mathcal A$. Now it suffices to prove that $\|\mathcal I\|_{X_{\mathrm{loc}}^{2\eta^2,0}}\lesssim L^{100d\eta^2}\mathcal A$. Note that, if we insert suitable time cutoff functions into the definition of $\mathcal I$ in (\ref{IexpProof1}), then a simple integration by parts with (\ref{IexpProof1}) gives
\begin{equation}\label{JtoIbound}\|\widehat{\mathcal I}(\xi)\|_{\mathfrak X}\lesssim \langle \xi\rangle^{-1}\bigg\|\int_\mathbb R\chi(t_0) \Jc(t_0)e^{i\xi t_0}\,\mathrm{d}t_0\bigg\|_{\mathfrak X}+\langle \xi\rangle^{-2}\|\Jc\|_{L^\infty}\lesssim \langle \xi\rangle^{-1}\|\Jc\|_{L^\infty},\end{equation} which immediately implies (\ref{FourierboundI}) if we restrict to $|\xi|\leq L^{50d}$.
Now if instead $|\xi|\geq L^{50d}$, then we may replace the $L_{(e,f,g,h,x_0)}^\infty$ norm by the $L_{(e,f,g,h,x_0)}^1$ norm, and then sum over $(e,f,g,h,x_0)$ (which has $\lesssim L^{5d}$ choices due to the support assumption). Then we are dealing with the expression $\mathcal I$ (or $\Jc$) with the values of $(e,f,g,h,x_0)$ fixed, so the value of $\alpha:=\pi (\delta L^{2\gamma}(r\cdot\zeta_0)+\lambda_0)$ is also fixed. Integrating by parts in $t_0$ in (\ref{JtoIbound}) again, we get
\begin{equation}\label{JtoIbound2}\|\widehat{\mathcal I}(\xi)\|_{\mathfrak X}\lesssim\langle \xi\rangle^{-1}(\langle \xi-\alpha\rangle^{-1}+\langle \xi\rangle^{-1})\cdot\|\widetilde{\Jc}\|_{L^\infty},\end{equation} where $\widetilde{\Jc}=(\partial_{t_0}-i\alpha)\Jc$. By (\ref{IexpProof1}), this $\widetilde{\Jc}$ is defined similarly to $\Jc$, but without the $e^{\pi i\cdot\delta L^{2\gamma}t_0(r\cdot\zeta_0)}$ and $e^{\pi i\lambda_0t_0}$ factors and with one of the $t_j$ or $s_j$ variables fixed. Going back to the argument in Step 2, we can then estimate at most one $\mathcal M_j$ trivially (say $|\mathcal M_j|\lesssim L^{2d}$) while the other $\mathcal M_j$ are still bounded as above, and gain a power $\langle \xi\rangle^{-1/2}\lesssim L^{-25d}$ from (\ref{JtoIbound2}), which completes the proof. \end{proof} \begin{lem}\label{sumintest2} Fix $\gamma\leq \frac12$ and $m\geq 0$, and consider the same setting as in Lemma \ref{sumintest1}, but with the following differences. First, there are three more vector variables which we call $(u_1,u_2,u_3)$ in addition to $(x_0,x_j,y_j)$; accordingly, there are three more time variables $(\tau_1,\tau_2,\tau_3)$ in addition to $(t_0,t_j,s_j)$ and three more parameters $(\sigma_1,\sigma_2,\sigma_3)$ in addition to $(\lambda_0,\lambda_j,\mu_j)$. There are also three more alternative variables $(k_{2m+2},k_{2m+3},k_{2m+4})$ in addition to $(k_j)$ and $(\ell_{2m+2},\ell_{2m+3},\ell_{2m+4})$ in addition to $(\ell_j)$. Let the definitions of ${\textbf{x}}$, ${\textbf{k}}$ and $\boldsymbol{\ell}$ include these extra variables; then they satisfy the same assumptions as in Lemma \ref{sumintest1}.
Then $\mathcal I$ is defined as in (\ref{sumintI1}), but we also sum over the new $u_j$ variables and integrate over the new $\tau_j$ variables. The domain $\mathcal D$ is defined in the same way as in Lemma \ref{sumintest1} (except we include the new $\tau_j$ variables). The summand-integrand in $\mathcal I$ is the same as in (\ref{sumintI1}), and the bounds in (\ref{propertyKj}) are also the same, except that (i) we also include the new $k_j$ and $\ell_j$ variables and the corresponding $\mathcal K_j$ and $\mathcal K_j^*$ functions that also satisfy (\ref{propertyKj}), and (ii) we also include the extra factors \[e^{\pi i\cdot \delta L^{2\gamma}(\tau_1-\tau_3)(u_1\cdot u_2)},\quad e^{\pi i\cdot \delta L^{2\gamma}(\tau_2-\tau_3)\Lambda},\quad e^{\pi i\cdot \delta L^{2\gamma}\tau_3(r\cdot\xi)},\quad e^{\pi i\sigma_j\tau_j}\,(1\leq j\leq 3),\] where $\Lambda\in\{u_1\cdot u_3,u_3\cdot (u_1+u_2-u_3)\}$, and $\xi=au_1+bu_2+cu_3+dr$ with $a,b,c,d\in\{0,\pm1\}$. Finally, we do not require one of the scenarios (a)--(d) to happen as in Lemma \ref{sumintest1}.
Then, uniformly in $(\lambda_0,\lambda_j,\mu_j,\sigma_j)$ and in the choice of the unit balls containing $(e,f,g,h,x_0)$ described above, and with the norm taken in $(x_0,e,f,g,h,t)$, we have that
\begin{equation}\label{FourierboundI2}\|\mathcal I\|_{X_{\mathrm{loc}}^{2\eta^5,0}}\lesssim (C^+\delta^{-1})^{m+2}L^{(2m+4)(d-\gamma)-d+\eta^4}. \end{equation} \end{lem}
\begin{proof}We perform the same reduction procedure as in the proof of Lemma \ref{sumintest1}. By restricting each $x_j,y_j$ and $u_j$ to a unit ball, expanding $W$ (which is the product of all the $\mathcal K_j$ and $\mathcal K_j^*$ factors) using Fourier integral, writing $\mathcal I$ as an integral of $\Jc$ in $t_0$, and enlarging the time integration domain, we can reduce to $m+1$ expressions, which we denote by $\mathcal M_j\,(1\leq j\leq m)$ and $\mathcal M_*$. In fact, the expressions $\mathcal M_j$ are exactly the same as in the proof of Lemma \ref{sumintest1}, and satisfy the same bounds $|\mathcal M_j|\lesssim \delta^{-1}L^{2(d-\gamma)}$; therefore it suffices to study $\mathcal M_*$, which has the expression
\begin{multline}\mathcal M_*:=\int_{[0,1]^3}\bigg|\sum_{(u_1,u_2,u_3)\in\mathbb Z_L^{3d}}\chi_0(u_1-c_1)\chi_0(u_2-c_2)\chi_0(u_3-c_3)\\
\times e^{\pi i\left[u_1\cdot\nu_1+u_2\cdot\nu_2+u_3\cdot\nu_3+\delta L^{2\gamma}(\tau_1-\tau_3)(u_1\cdot u_2)+\delta L^{2\gamma}(\tau_2-\tau_3)\Lambda+\delta L^{2\gamma}\tau_3(r\cdot\xi)\right]}\bigg|\,\mathrm{d}\tau_1\mathrm{d}\tau_2\mathrm{d}\tau_3, \end{multline} with fixed vectors $\nu_j$. By integrating trivially in $\tau_3$, defining $\theta_j=\delta L^{2\gamma}(\tau_j-\tau_3)$ for $1\leq j\leq 2$ with fixed $\tau_3$, and applying Poisson summation as in the proof of Lemma \ref{sumintest1} above, we can reduce to \begin{multline}\label{IexpProof3}
\mathcal M_*\leq(\delta L^{2\gamma})^{-2}L^{3d}\sup_{\tau_3}\int_{|\theta_j|\lesssim \delta L^{2\gamma}}\bigg|\sum_{(f_1, f_2, f_3)\in \mathbb Z^{3d}} \int_{\mathbb R^{3d}}\prod_{j=1}^3\chi(u_j-c_j)e^{\pi i u_j \cdot(\nu_j-Lf_j)}\\
\times e^{\pi i\cdot \left[\theta_1(u_1\cdot u_2)+\theta_2\Lambda+\rho\cdot\xi\right]}\,\mathrm{d}u_1\mathrm{d}u_2\mathrm{d}u_3\bigg|\,\mathrm{d}\theta_1\mathrm{d}\theta_2,\end{multline}
where $\rho:=\delta L^{2\gamma}\tau_3 \cdot r$. To control this last expression, note that $|\theta_1|, |\theta_2|\lesssim L$ due to the assumption $\gamma\leq \frac12$. For fixed $(\theta_1,\theta_2)$, the phase function $$ \Phi(u_1, u_2, u_3)=\sum_{j=1}^3 u_j \cdot(\nu_j-Lf_j) + \theta_1(u_1\cdot u_2)+\theta_2\Lambda+\rho\cdot\xi $$
satisfies $|\nabla_{u_j}\Phi|\geq |Lf_j-C_j|-O(L)$ for some fixed $C_j$ (which may depend on $\theta_j$), therefore for all but $O(1)$ values of $f_j$, the integral in $(u_1,u_2,u_3)$ can be controlled trivially by integrating by parts. For these $O(1)$ values of $f_j$, a simple stationary phase argument yields that the integral in $(u_1,u_2,u_3)$ is bounded by $\min(\langle \theta_1\rangle^{-d},\langle \theta_2\rangle^{-d})$, which is integrable in $(\theta_1,\theta_2)$ for $d\geq 3$. This implies that $\mathcal M_*\lesssim \delta^{-2}L^{3d-4\gamma}$, and combining with the bounds for other $\mathcal M_j$ implies that
\[|\Jc|\lesssim (C^+\delta^{-1}L^{2(d-\gamma)})^m\cdot C^+\delta^{-2}L^{3d-4\gamma}.\] Then, repeating the last part of the proof in Lemma \ref{sumintest1} and noticing that we are allowed to lose $L^{\eta^4}$ here, we can easily deduce (\ref{FourierboundI2}). \end{proof} \begin{cor}\label{sumintest3} Fix $\gamma\leq 1/2$. Consider the following setting, which is basically a ``concatenation'' of Lemma \ref{sumintest2}: the output variables are denoted $(e,f,g,h,x_0^0,t)$, with $(e,f,g,h,x_0^0)$ each in a fixed unit ball. The input variables are $(x_0^q,x_j^q,y_j^q,u_1^q,u_2^q,u_3^q)$ and $(t_0^q,t_j^q,s_j^q,\tau_1^q,\tau_2^q,\tau_3^q)$ where $0\leq q<Q$ and $1\leq j\leq m_q$ (but excluding $x_0^0$), with $m_0+\cdots +m_{Q-1}=m$. The parameters are $(\lambda_0^q,\lambda_j^q,\mu_j^q,\sigma_1^q,\sigma_2^q,\sigma_3^q)$ as above. The alternative variables are ${\textbf{k}}=(k_1,\cdots,k_{2m+4Q})$ and $\boldsymbol{\ell}=(\ell_1,\cdots,\ell_{2m+4Q})$, and ${\textbf{x}}=(x_0^0,x_0^q, x_j^q, y_j^q, u_1^q, u_2^q, u_3^q)_{0\leq q<Q,1\leq j\leq m_q}$ (including $x_0^0$) satisfy the same properties as in Lemma \ref{sumintest2} and Lemma \ref{sumintest1}, with $\Lambda_1\cdots\Lambda_{2m+4Q}\lesssim C^{m+Q}$ in assumption (ii) of Lemma \ref{sumintest1}. The expression $\mathcal I$ is defined as in (\ref{sumintI1}) with summation and integration in all input variables (vector and time, see above). 
The domain $\mathcal D$ is defined in the same way but involves all the time variables, and the summand-integrand in $\mathcal I$ includes the following factors: \begin{itemize} \item Functions $\mathcal K_j(k_j)$ and $\mathcal K_j^*(\ell_j)$ for $1\leq j\leq 2m+4Q$ that each satisfies (\ref{propertyKj}); \item All the factors $e^{\pi i(\cdots)}$ occurring in Lemma \ref{sumintest1} and \ref{sumintest2} \emph{except $e^{\pi i\cdot\delta L^{2\gamma}t_0(r\cdot \zeta_0)}$}, for all $0\leq q<Q$ (such as $e^{\pi i\cdot\delta L^{2\gamma}(t_j^q-s_j^q)(x_j^q\cdot y_j^q)}$, $e^{\pi i(\lambda_j^qt_j^q+\mu_j^qs_j^q)}$, $e^{\pi i\cdot \delta L^{2\gamma}(\tau_1^q-\tau_3^q)(u_1^q\cdot u_2^q)}$, $e^{\pi i\cdot\delta L^{2\gamma}t_j^q(r\cdot \zeta_j^q)}$ for $j\neq 0$); \item Extra factors of $e^{\pi i\cdot\delta L^{2\gamma}t_0^q(r\cdot\zeta_0^q)}$ for $0\leq q<Q$, where $\zeta_0^0$ equals $x_0^0$ plus or minus a vector in $(e,f,g,h)$, and $\zeta_0^q\,(1\leq q<Q)$ is an arbitrary linear combination of the $(x,y,u)$ variables and $(e,f,g,h)$. \end{itemize} Then, uniformly in $(\lambda,\mu,\sigma)$ parameters and in the choice of the unit balls containing $(e,f,g,h,x_0^0)$ described above, and with the norm taken in $(x_0^0,e,f,g,h,t)$, we have that
\begin{equation}\label{FourierboundI3}\|\mathcal I\|_{X_{\mathrm{loc}}^{2\eta^5,0}}\lesssim (C^+\delta^{-1})^{m+2Q}L^{(2m+4Q)(d-\gamma)-d+\eta^4}. \end{equation} \end{cor} \begin{proof} The proof is almost the same as that of Lemma \ref{sumintest2}. We write $\mathcal I$ as an integral of $\Jc$ in $t_0^0$, and use the same arguments as in the proof of Lemma \ref{sumintest1} and \ref{sumintest2} to reduce the $X_{\mathrm{loc}}^{2\eta^5,0}$ norm of $\mathcal I$ to the $L^\infty$ norm of $\Jc$ with $L^{\eta^4}$ loss. Then, in estimating $\Jc$ we may make the same reductions, sum in $x_0^q$ variables and integrate in $t_0^q$ variables trivially, and then reduce to the same $\mathcal M_j^q$ and $\mathcal M_*^q$ quantities as occurred in the proof of Lemma \ref{sumintest2}. These quantities are then estimated in the same way, noticing that the linear phases $e^{\pi i\cdot\delta L^{2\gamma}t_0^q(r\cdot\zeta_0^q)}$ do not affect any part of the proof. Putting everything together, and noticing that the summation in each $x_0^q$ variable leads to a factor of $L^d$, this proves (\ref{FourierboundI3}). \end{proof} \subsection{Application to vines} Using Lemmas \ref{sumintest1}--\ref{sumintest2}, Definition \ref{twistgen} and Remark \ref{fulltwistrep}, we can prove the estimates regarding the part of the expression $\mathcal K_{(\mathcal Q_{\mathrm{sk}},\mathscr A)}$ in (\ref{bigformula2}) where we only sum and integrate over the variables corresponding to a bad or normal vine $\mathbb V$ in $\mathcal Q_{\mathrm{sk}}$. \subsubsection{Estimates for vines}\label{vinesubset} Given a couple $\mathcal Q_0$ (not necessarily prime) and a collection $\mathscr A$ of regular trees and regular couples as in Proposition \ref{skeleton}, let $\mathcal Q\sim(\mathcal Q_0,\mathscr A)$. 
Fix also a (CL) vine $\mathbb V\subset\mathbb M(\mathcal Q_0)$, then we can write $(\mathcal Q_0,\mathscr A)\leftrightarrow(\mathcal Q^{\mathrm{sp}}, \mathtt{cod},\mathfrak{n},\mathtt{ind},\mathscr B,\mathscr A^{\mathrm{sp}})$ by Remark \ref{fulltwistrep}. We shall fix $(\mathcal Q^{\mathrm{sp}}, \mathtt{cod},\mathfrak{n},\mathscr A^{\mathrm{sp}})$ and let $\mathtt{ind}$ and $\mathscr B$ vary as in Remark \ref{fulltwistrep}; in particular $\mathtt{sgn}$ is fixed as in the notation of Definition \ref{twistrep}.
Define $n_1$ to be the number of branching nodes in $\mathcal Q_0[\mathbb V]\backslash\{\mathfrak u_1\}$, and $n_2=n_1+n(\mathscr B)$. Consider the formula (\ref{bigformula2}) but with $\mathcal Q_{\mathrm{sk}}$ replaced by $\mathcal Q_0$ (and associated notations changed accordingly, like $\mathscr E_{\mathrm{sk}}$ replaced by $\mathscr E_0$ etc.). For later purposes, we also fix a set $W\subset\mathcal Q_0[\mathbb V]$ containing $\mathfrak u_1$, and a Banach space valued function $Z=Z(x_0,k_{\mathfrak u_1},k_{\mathfrak u_{11}},k_{\mathfrak u_{21}},k_{\mathfrak u_{22}},t_{\mathfrak u_1},t_{\mathfrak u_{21}},t_{\mathfrak u_{22}},t_{\mathfrak u_2})$. Define the expression $\mathcal K_{(\mathtt{sgn},\mathtt{ind},\mathscr B)}^{(\mathbb V,Z,W)}$ similar to $\mathcal K_{(\mathcal Q_0,\mathscr A)}$ in (\ref{bigformula2}), but that: \begin{enumerate}[{(a)}] \item We replace the power $(\delta/(2L^{d-\gamma}))^{n_0}$ (where $n_0$ is the order of $\mathcal Q_0$) by $(\delta/(2L^{d-\gamma}))^{n_1}$; \item We replace the factor $\zeta(\mathcal Q_0)$ by the product of $i\zeta_\mathfrak n$ where $\mathfrak n$ runs over all branching nodes in $\mathcal Q_0[\mathbb V]\backslash\{\mathfrak u_1\}$; \item In the summation $\sum_{\mathscr E_0}(\cdots)$, we only sum over the variables $k_\mathfrak n$ for $\mathfrak n\in\mathcal Q_0[\mathbb V]\backslash\{\mathfrak u_1\}$ (including branching nodes and leaves), and treat the other $k_\mathfrak n$ variables as fixed. We also replace $\epsilon_{\mathscr E_0}$ by the product of factors on the right hand side of (\ref{defcoef}), but only for $\mathfrak n\in W$. In particular, we have that $k_{\mathfrak u_{21}}\neq k_{\mathfrak u_{22}}$. \item In the integral $\int_{\mathcal E_0}(\cdots)$, we only integrate over the variables $t_\mathfrak n$ for all branching nodes $\mathfrak n\in\mathcal Q_0[\mathbb V]\backslash\{\mathfrak u_1\}$, and treat the other $t_\mathfrak n$ variables as parameters. 
\item In the first product $\prod_{\mathfrak n\in\mathcal N_0}(\cdots)$, we only include those factors where $\mathfrak n\in\mathcal Q_0[\mathbb V]$; in the products $\prod_{\mathfrak m\in\mathcal L_0}^{(+)}(\cdots)$ and $\prod_{\mathfrak m\in\mathcal N_0}(\cdots)$, we only include those factors where $\mathfrak m\in\mathcal Q_0[\mathbb V]\backslash\{\mathfrak u_1\}$. \item We include the function $Z$ as a factor in the summand-integrand, where in the place of $x_0$ we plug in $k_{\mathfrak u_2}$ if $\mathtt{ind}=-$, and plug in $k_{\mathfrak u_{23}}$ otherwise. \end{enumerate}
Note that $\mathcal K_{(\mathtt{sgn},\mathtt{ind},\mathscr B)}^{(\mathbb V,Z,W)}$ depends on $(\mathbb V,Z,W)$ and $(\mathtt{ind},\mathscr B)$, and depends on $(\mathcal Q^{\mathrm{sp}},\mathfrak n)$ \emph{only via $\mathtt{sgn}$}. Its output variables are $(k_{\mathfrak u_1},k_{\mathfrak u_{11}},k_{\mathfrak u_{21}},k_{\mathfrak u_{22}})$ and time variables $(t_{\mathfrak u_1},t_{\mathfrak u_{21}},t_{\mathfrak u_{22}})$. Since $k_{\mathfrak u_1}-k_{\mathfrak u_{11}}=\pm(k_{\mathfrak u_{21}}-k_{\mathfrak u_{22}})$, we may write this function as \begin{equation}\label{eqnx0'}\mathcal K_{(\mathtt{sgn},\mathtt{ind},\mathscr B)}^{(\mathbb V,Z,W)}=\mathcal K_{(\mathtt{sgn},\mathtt{ind},\mathscr B)}^{(\mathbb V,Z,W)}(x_0',k_{\mathfrak u_{21}},k_{\mathfrak u_{22}},t_{\mathfrak u_1},t_{\mathfrak u_{21}},t_{\mathfrak u_{22}}),\end{equation} where $x_0'$ is replaced by $k_{\mathfrak u_1}$ if $\mathtt{sgn}=-$, and by $k_{\mathfrak u_{11}}$ otherwise. \begin{rem}\label{remkv} Suppose the function $Z$ \emph{does not depend on $(k_{\mathfrak u_1},k_{\mathfrak u_{11}})$.} If we fix $(\mathbb V,Z,W,\mathtt{ind},\mathscr B)$ and flip $\mathtt{sgn}$ (which corresponds to flipping $\mathcal Q_0[\mathbb V]$), then the function $\mathcal K_{(\mathtt{sgn},\mathtt{ind},\mathscr B)}^{(\mathbb V,Z,W)}$, \emph{written exactly as in (\ref{eqnx0'}), does not change.} In fact, consider the decorations of $\mathcal Q_0[\mathbb V]$ and its flipping, see Figure \ref{fig:flip}, where \emph{the values of $k_{\mathfrak u_1}$ and $k_{\mathfrak u_{11}}$ are switched}. If we write down the whole expression $\mathcal K_{(\mathtt{sgn},\mathtt{ind},\mathscr B)}^{(\mathbb V,Z,W)}$, which is (\ref{bigformula2}) modified by (a)--(f) above, in these two cases, we can verify that they match exactly term by term, hence the results are the same. \end{rem} \begin{prop}\label{estbadvine} Suppose $\mathbb V$ is a \emph{bad} (CL) vine. 
In the above setting, let $(\mathcal Q^{\mathrm{sp}}, \mathtt{cod},\mathfrak{n},\mathscr A^{\mathrm{sp}})$ and $(Z,W)$ be fixed, then for $\theta\in\{\eta^5,0\}$ we have \begin{equation}\label{vinebound1}
\bigg\|e^{-\pi i\cdot\delta L^{2\gamma}t_{\mathfrak u_1}\Gamma}\sum_{\mathtt{ind},\mathscr B}\mathcal K_{(\mathtt{sgn},\mathtt{ind},\mathscr B)}^{(\mathbb V,Z,W)}(x_0',k_{\mathfrak u_{21}},k_{\mathfrak u_{22}},t_{\mathfrak u_1},t_{\mathfrak u_{21}},t_{\mathfrak u_{22}})\bigg\|_{Y_{\mathrm{loc}}^{\theta}}\lesssim (C^+\sqrt{\delta})^{n_2}L^{-\eta^2}\|Z\|_{Y_{\mathrm{loc}}^{\theta}}.
\end{equation} Here $\Gamma:=\zeta_{\mathfrak u_{11}}|k_{\mathfrak u_{11}}|^2+\zeta_{\mathfrak u_{21}}|k_{\mathfrak u_{21}}|^2+\zeta_{\mathfrak u_{22}}|k_{\mathfrak u_{22}}|^2-\zeta_{\mathfrak u_1}|k_{\mathfrak u_1}|^2$, and the summation in (\ref{vinebound1}) is taken over all choices of $(\mathtt{ind},\mathscr B)$ as in Remark \ref{fulltwistrep}. More precisely, if $\mathbb V$ is vine (II-e) then $\mathtt{ind}$ and $\mathscr B$ are fixed; if $\mathbb V$ is not vine (II-e) then $\mathtt{ind}\in\{\pm\}$ and $\mathscr B$ is such that $\mathcal Q^{(\mathfrak l,\mathfrak l')}$ and $\mathcal T^{(\mathfrak m)}$ are fixed when $\mathfrak m\neq\mathfrak u_2$ and $(\mathfrak l,\mathfrak l')\neq (\mathfrak u_{23},\mathfrak u_0)$, and the value of $n(\mathcal Q^{(\mathfrak u_{23},\mathfrak u_0)})+n(\mathcal T^{(\mathfrak u_2)})$ is also fixed. \end{prop}
\begin{proof} The proof is elaborate due to the many cases and arguments involved, so we divide it into several steps. We assume throughout that $\|Z\|_{Y_{\mathrm{loc}}^\theta}=1$.
\underline{\it Step 1: Reductions.} By definition, for each fixed $(\mathtt{ind},\mathscr B)$, the summand on the left hand side of \eqref{vinebound1} takes the form (we omit dependence on $(\mathbb V,Z,W,\mathtt{sgn})$ for convenience, same below) \begin{multline}\label{vineboundBfix} \widetilde \mathcal K_{(\mathtt{ind},\mathscr B)}= \bigg(\frac{\delta}{2L^{d-\gamma}}\bigg)^{n_1}\zeta[\mathbb V] \sum_{\mathscr E [\mathbb V]}e^{\pi i \widetilde{\Gamma}t_{\mathfrak u_1}} \epsilon_{\mathscr E[\mathbb V]}\cdot\int_{ \mathcal E[\mathbb V]} Z(x_0,k_{\mathfrak u_1},k_{\mathfrak u_{11}},k_{\mathfrak u_{21}},k_{\mathfrak u_{22}},t_{\mathfrak u_1},t_{\mathfrak u_{21}},t_{\mathfrak u_{22}},t_{\mathfrak u_2})\\\times \prod_{\mathfrak n\in \mathcal N[\mathbb V]} e^{\zeta_\mathfrak n\pi i\cdot\delta L^{2\gamma}\Omega_\mathfrak n t_\mathfrak n}\,{\prod_{\mathfrak l\in \mathcal L[\mathbb V]}^{(+)}\mathcal K_{\mathcal Q^{(\mathfrak l,\mathfrak l')}}(t_{\mathfrak l^p},t_{(\mathfrak l')^p},k_\mathfrak l)}\prod_{\mathfrak m\in\mathcal N[\mathbb V]}\mathcal K_{\mathcal T^{(\mathfrak m)}}^*(t_{\mathfrak m^p},t_{\mathfrak m},k_\mathfrak m)\,\mathrm{d}t_{\mathfrak m}.
\end{multline} Here, we define $\zeta [\mathbb V]$ as the product of $i\zeta_\mathfrak n$ where $\mathfrak n$ runs over all branching nodes in $\mathcal Q_0[\mathbb V]\backslash\{\mathfrak u_1\}$; similarly define $\mathscr E[\mathbb V], \epsilon_{ \mathscr E[\mathbb V]},\mathcal E[\mathbb V]$ and $ \mathcal N[\mathbb V],\mathcal L[\mathbb V]$ as in the definition of $\mathcal K_{(\mathtt{sgn},\mathtt{ind},\mathscr B)}^{(\mathbb V,Z,W)}$ above (In particular $\mathfrak u_1\not\in\mathcal N[\mathbb V]$). Let $\mathfrak u_{12}$ and $\mathfrak u_{13}$ be the two children of $\mathfrak u_{1}$ other than $\mathfrak u_{11}$, note that $$\widetilde \Gamma:=\delta L^{2\gamma}\left(\zeta_{\mathfrak u_{12}}|k_{\mathfrak u_{12}}|^2+\zeta_{\mathfrak u_{13}}|k_{\mathfrak u_{13}}|^2-\zeta_{\mathfrak u_{21}}|k_{\mathfrak u_{21}}|^2-\zeta_{\mathfrak u_{22}}|k_{\mathfrak u_{22}}|^2\right)=-\delta L^{2\gamma}\sum_{\mathfrak n\in\mathcal N[\mathbb V]} \zeta_\mathfrak n \Omega_\mathfrak n, $$
since each factor $\zeta_\mathfrak n |k_{\mathfrak n}|^2$ with $\mathfrak n\in\mathcal Q_0[\mathbb V] \setminus\{\mathfrak u_1\}$ appears twice in the sum with opposite signs except for $\mathfrak u_{12}$ and $\mathfrak u_{13}$. Thus, we can rewrite \eqref{vineboundBfix} as \begin{multline}\label{vineboundBfix2} \widetilde \mathcal K_{(\mathtt{ind},\mathscr B)}= \bigg(\frac{\delta}{2L^{d-\gamma}}\bigg)^{n_1}\zeta[\mathbb V] \sum_{\mathscr E [\mathbb V]}\epsilon_{\mathscr E[\mathbb V]}\cdot \int_{\mathcal E[\mathbb V]} Z(x_0,k_{\mathfrak u_1},k_{\mathfrak u_{11}},k_{\mathfrak u_{21}},k_{\mathfrak u_{22}},t_{\mathfrak u_1},t_{\mathfrak u_{21}},t_{\mathfrak u_{22}},t_{\mathfrak u_2})\\\times \prod_{\mathfrak n\in \mathcal N[\mathbb V]} e^{\zeta_\mathfrak n\pi i\cdot\delta L^{2\gamma}\Omega_\mathfrak n (t_\mathfrak n-t_{\mathfrak u_1})}\,{\prod_{\mathfrak l\in \mathcal L[\mathbb V]}^{(+)}\mathcal K_{\mathcal Q^{(\mathfrak l,\mathfrak l')}}(t_{\mathfrak l^p},t_{(\mathfrak l')^p},k_\mathfrak l)}\prod_{\mathfrak m\in \mathcal N[\mathbb V]}\mathcal K_{\mathcal T^{(\mathfrak m)}}^*(t_{\mathfrak m^p},t_{\mathfrak m},k_\mathfrak m)\,\mathrm{d}t_{\mathfrak m}. \end{multline}
\underline{\it Step 2: Parametrization for vines (II).} We start by reparametrizing the expression of $\widetilde{\mathcal K}$ in \eqref{vineboundBfix2}. For this, we will need the information contained in Figure \ref{fig:couples_cl} and Figure \ref{fig:vineAnnotated} (A) for vine (II). From the latter figure, we denote by $v_1, v_2, v_{2j+1}, v_{2j+2}\,(1\leq j\leq m)$ the $2m+2$ atoms forming the vine with joints at $v_1, v_2$, such that $v_{2j+1}$ is connected by a double bond to $v_{2j+2}$. We denote by $\mathfrak u_{1}, \mathfrak u_2, \ldots, \mathfrak u_{2m+2}$ the corresponding branching nodes in the couple, which are the branching nodes of $\mathcal Q_0[\mathbb V]$. Given a decoration $(k_{\mathfrak n})$ for $\mathfrak n\in \mathcal Q_0[\mathbb V]$, we get a decoration of the bonds of the vine $\mathbb V$ as explained in Definition \ref{decmole}. Note that there is a total of $n_1=2m+1$ branching nodes in $\mathcal Q_0[\mathbb V] \setminus \{\mathfrak u_1\}$, and as a result a total of $2m+1$ leaf pairs (recall that $\mathfrak u_{11}, \mathfrak u_{21}, \mathfrak u_{22}\not\in\mathcal Q_0[\mathbb V]$). We will denote the decoration of leaf pairs by $\{k_j: 1\leq j \leq 2m+1\}$, and that of the branching nodes $\mathfrak u_2, \ldots, \mathfrak u_{2m+2}$ by $\{\ell_j: 1\leq j \leq 2m+1\}$. Those two sets decorate the bonds in vine $\mathbb V$ as is shown in Figure \ref{fig:vineAnnotated} (A), where we denote by $a_j, b_j$ the decoration of double bonds between $v_{2j+1}$ and $v_{2j+2}$, by $c_j$ that between $v_{2j+1}$ and $v_{2j+3}$, and by $d_{j}$ that between $v_{2j+2}$ and $v_{2j+4}$ for $1\leq j \leq m-1$ (with $c_0$ and $d_0$ decorating the two bonds connecting $(v_2, v_3)$ and $(v_2, v_4)$ respectively, and similarly for $c_m$ and $d_m$ decorating the bonds connected to $v_1$ from $v_{2m+1}$ and $v_{2m+2}$). 
Finally, we also define $\zeta_j=+1$ if bond decorated by $c_j$ is outgoing from $v_{2j+1}$ for $1\leq j \leq m$ or from $v_2$ for $j=0$, and $\zeta_j=-1$ otherwise. Note that specifying $\zeta_j$ completely specifies the directions of all the bonds in $\mathbb V$ except for the horizontal double bonds in Figure \ref{fig:vineAnnotated} (A).
\begin{figure}
\caption{(A) is the annotation of the decoration of a Vine (II) as referred to in Step 3 of the proof of Proposition \ref{estbadvine}. (B)--(D) are the annotations of parts of Vine (III), (IV) and (V)--(VIII) respectively as referred to in Step 1 of the proof of Proposition \ref{estnormalvine}.}
\label{fig:vineAnnotated}
\end{figure}
From this decoration of the bonds and using Definition \ref{decmole}, we have that \[ \zeta_j (c_j -d_j)=\zeta_{j-1}(c_{j-1}-d_{j-1}), \quad 1\leq j \leq m, \] and we call this common value $r$. Note that $r=k_{\mathfrak u_{21}}-k_{\mathfrak u_{22}}$ is fixed and is nonzero due to Section \ref{vinesubset} (c). Moreover, we have from \eqref{molegammav} that \begin{equation}\label{zetaOmegasum} \zeta_{\mathfrak u_{2j+1}}\Omega_{\mathfrak u_{2j+1}}+\zeta_{\mathfrak u_{2j+2}}\Omega_{\mathfrak u_{2j+2}}=-\Gamma_{v_{2j+1}}-\Gamma_{v_{2j+2}}=\pm 2r\cdot(c_{j-1}-c_j\mathrm{\ or\ }c_{j-1}-d_j). \end{equation} As a result of this, we can define new variables $x_0, x_j , y_j$ ($1\leq j \leq m$) such that (a) we have $x_0\in \{c_0, d_0\}$ which is specified as in part (f) of Section \ref{vinesubset}, (b) each of $(x_j, y_j)$ is the difference of two vectors among $(a_j, b_j, c_j, c_{j-1})$ for $1\leq j \leq m$, (c) for $1\leq j\leq m$ we have \[\zeta_{\mathfrak u_{2j+2}}\Omega_{\mathfrak u_{2j+2}}=-\Gamma_{v_{2j+2}}= 2x_j \cdot y_j,\quad\zeta_{\mathfrak u_{2j+1}}\Omega_{\mathfrak u_{2j+1}}=-2x_j\cdot y_j +2r\cdot \mu_j\] where $\mu_j=\alpha_j x_j+\beta_j y_j+\theta_j r$ for some $\alpha_j, \beta_j, \theta_j \in \{0, \pm 1\}$ with $\alpha_j^2+\beta_j^2\neq 0$, and (d) for $j=0$ we have that \[\zeta_{\mathfrak u_{2}}\Omega_{\mathfrak u_{2}}=r\cdot \mu_0\] where $\mu_0=\alpha_0 x_0+\beta_0 y_0+\theta_0 r$ for some $\alpha_0, \beta_0, \theta_0 \in \{0, \pm 1\}$ and with $y_0 \in \{k_{\mathfrak u_{21}}, k_{\mathfrak u_{22}}\}$.
As a result of all this, we get a set of variables $(x_0, x_j, y_j)_{1\leq j \leq m}$ to replace the variables $k_{\mathfrak u_j}$ for $\mathfrak u_j\in \mathcal Q_0[\mathbb V]$ (of which there are $2m+1$ corresponding to leaf pairs $(k_\mathfrak l)_{\mathfrak l\in \mathcal L[\mathbb V]}$, and another $2m+1$ corresponding to branching nodes $(k_\mathfrak m)_{\mathfrak m\in \mathcal N[\mathbb V]}$). If we write $\boldsymbol{x}=(x_0, x_j, y_j)_{1\leq j \leq m}$, ${\boldsymbol{k}}=(k_\mathfrak l)_{\mathfrak l \in \mathcal L[\mathbb V]}$ and $\boldsymbol{\ell}=(k_{\mathfrak n})_{\mathfrak n \in \mathcal N[\mathbb V]}$, then we have that: (i) ${\boldsymbol{k}}=T_1{\boldsymbol{x}}+{\boldsymbol{h}}_1$ and $\boldsymbol{\ell}=T_2{\boldsymbol{x}}+{\boldsymbol{h}}_2$ for some matrices $T_j$ and some constant vectors ${\boldsymbol{h}}_j$ depending only on $(k_{\mathfrak u_1},k_{\mathfrak u_{11}}, k_{\mathfrak u_{21}}, k_{\mathfrak u_{22}})$, such that all coefficients of $T_1,\,T_1^{-1},\,T_2$ are integers $\lesssim 1$; (ii) for each component $k_\mathfrak m$ ($\mathfrak m\in \mathcal N[\mathbb V]$) of $\boldsymbol{\ell}$, there exists $k_{\mathfrak m'}$, where $\mathfrak m'$ is a descendent of $\mathfrak m$, such that $k_{\mathfrak m}\pm k_{\mathfrak m'}$ is an integer linear combination of $(k_\mathfrak l)$ and $(k_{\mathfrak u_1},k_{\mathfrak u_{11}}, k_{\mathfrak u_{21}}, k_{\mathfrak u_{22}})$ with absolute value sum $\lesssim \Lambda_p$, such that $\Lambda_1\cdots \Lambda_{2m+1}\lesssim C^m$; (iii) any component of ${\boldsymbol{x}}$ is the sum or difference of two variables, each of which is a component of either ${\boldsymbol{k}}$ or $\boldsymbol{\ell}$. Here the proof of (i) and (iii) and obvious, and (ii) follows from Lemma 6.6 of \cite{DH21}.
With the above reparametrization, \eqref{vineboundBfix2} can also be written as \begin{equation}\label{vineboundBfix3} \begin{aligned} \widetilde \mathcal K_{(\mathtt{ind},\mathscr B)}&= \bigg(\frac{\delta}{2L^{d-\gamma}}\bigg)^{2m+1}\zeta[\mathbb V] \sum_{(x_0,x_j,y_j):1\leq j\leq m}\epsilon_{\mathscr E[\mathbb V]}\int_{\mathcal E[\mathbb V]} e^{-\pi i\cdot\delta L^{2\gamma} (t_{\mathfrak u_1}-t_{\mathfrak u_2})(r\cdot \mu_0)}\\&\times \prod_{j=1}^m e^{\pi i\cdot\delta L^{2\gamma} [(t_{\mathfrak u_{2j+2}}-t_{\mathfrak u_{2j+1}})x_j \cdot y_j-(t_{\mathfrak u_1}-t_{\mathfrak u_{2j+1}})(r\cdot \mu_j)]}\cdot \prod_{\mathfrak l\in \mathcal L[\mathbb V]}^{(+)}\mathcal K_{\mathcal Q^{(\mathfrak l,\mathfrak l')}}(t_{\mathfrak l^p},t_{(\mathfrak l')^p},k_\mathfrak l)\\ &\times Z(x_0,k_{\mathfrak u_1},k_{\mathfrak u_{11}},k_{\mathfrak u_{21}},k_{\mathfrak u_{22}},t_{\mathfrak u_1},t_{\mathfrak u_{21}},t_{\mathfrak u_{22}},t_{\mathfrak u_2})\prod_{\mathfrak m\in \mathcal N[\mathbb V]}\mathcal K_{\mathcal T^{(\mathfrak m)}}^*(t_{\mathfrak m^p},t_{\mathfrak m},k_\mathfrak m)\,\mathrm{d}t_{\mathfrak m}. \end{aligned} \end{equation} This expression will not be used in Step 3, but will be used in Step 4 below. Moreover, once we confirm $r\neq 0$, we will replace $\epsilon_{\mathscr E[\mathbb V]}$ factor by $1$, since $\epsilon_{\mathscr E[\mathbb V]}\neq 1$ corresponds to the case where some $x_j$ or $y_j$ equals $0$ or some other fixed constant, which means we can sum over them trivially, gain a power using $L^d\ll L^{2(d-\gamma)}$, and treat the rest of the variables using the same arguments below.
\underline{\it Step 3: A counting argument.} We now treat the case where $\mathbb V$ is vine (II), and $\gamma>\frac45-\eta$ or $|r|>L^{-\gamma-\eta}$. In this case we do not need the cancellation structure, and as such we shall estimate each $\widetilde{\mathcal K}_{(\mathtt{ind},\mathscr B)}$ by itself, and use that there are $O(C^{n_2})$ elements in the sum over $(\mathscr B, \mathtt{ind})$.
Start with the expression of $\widetilde \mathcal K_{(\mathtt{ind},\mathscr B)}$ in \eqref{vineboundBfix2}. At this point, suppose we restrict each of $(k_{\mathfrak u_1},k_{\mathfrak u_{11}},k_{\mathfrak u_{21}},k_{\mathfrak u_{22}})$ to a unit ball as in the $Y_{\mathrm{loc}}^\theta$ in (\ref{vinebound1}). We may repeat the arguments in the proof of Lemma \ref{sumintest1}, using (i)--(iii) above, to also restrict $x_0$ and each $(x_j,y_j)$, as well as $k_\mathfrak n$ for all nodes $\mathfrak n\in\mathcal Q_0[\mathbb V]$, to a unit ball; all these unit balls will be fixed throughout the rest of the proof. As such we may assume $Z\in X^{\theta, 0}$, and henceforth expand it as a Fourier integral, so that the $Z$ function in \eqref{vinebound1} is replaced by the product \begin{equation}\label{defztilde}e^{\pi i \lambda_{\mathfrak u_1}t_{\mathfrak u_1}}e^{\pi i \lambda_{\mathfrak u_{21}}t_{\mathfrak u_{21}}}e^{\pi i \lambda_{\mathfrak u_{22}} t_{\mathfrak u_{22}}}e^{\pi i \lambda_{\mathfrak u_2} t_{\mathfrak u_2}}\cdot \widetilde{Z}(x_0,k_{\mathfrak u_1},k_{\mathfrak u_{11}},k_{\mathfrak u_{21}},k_{\mathfrak u_{22}})\end{equation} where $\widetilde{Z}$ is a bounded function. Note that by doing this we can also exploit the weights of form $\max(\langle \lambda_{\mathfrak u_1}\rangle,\langle\lambda_{\mathfrak u_{21}}\rangle,\langle\lambda_{\mathfrak u_{22}}\rangle,\langle\lambda_{\mathfrak u_2}\rangle)^{-\theta}$ whenever needed. 
In the same way, we can expand all the $\mathcal K_\mathcal Q$ and $\mathcal K_{\mathcal T}^*$ functions as time Fourier integrals to obtain a linear combination of functions \[(C^+\delta)^{n(\mathscr B)/2}\cdot \prod_{\mathfrak n\in \mathcal N[\mathbb V]}e^{\pi i\vartheta_\mathfrak n t_\mathfrak n}\cdot\prod_{\mathfrak l\in \mathcal L[\mathbb V]}^{(+)}\langle k_\mathfrak l\rangle^{-40d}\cdot\mathcal X(k[\mathcal Q_{0}[\mathbb V]])\] for different choices of $\vartheta[\mathcal N[\mathbb V]]$, with the coefficient being a weighted $L^1$ integrable function with a suitable weight, and where $\mathcal X$ is a bounded function of all the $k_\mathfrak m$ variables. Below we may fix one choice of $(\vartheta[\mathcal N[\mathbb V]])$; by doing so we may also exploit the weight $(\max_{\mathfrak n\in \mathcal N[\mathbb V]}\langle \vartheta_\mathfrak n \rangle)^{-\eta}$ whenever needed. As a result, we can write $\widetilde{\mathcal K}_{(\mathtt{ind},\mathscr B)}$ in \eqref{vineboundBfix2} as a linear combination of \begin{equation}\label{newbexpr} (C^+\delta)^{n(\mathscr B)/2} \bigg(\frac{\delta}{2L^{d-\gamma}}\bigg)^{n_1}\sum_{\mathscr E} e^{\pi i (\lambda_{\mathfrak u_{21}}t_{\mathfrak u_{21}}+\lambda_{\mathfrak u_{22}}t_{\mathfrak u_{22}}+\widetilde \lambda_{\mathfrak u_{1}} t_{\mathfrak u_{1}})}\mathcal B\big(t_{\mathfrak u_1},t_{\mathfrak u_{21}}, t_{\mathfrak u_{22}},\alpha[\mathcal N[\mathbb V]]\big)\cdot\widetilde{\mathcal X}(k[\mathcal Q_{0}[\mathbb V]]) \end{equation} where $\alpha_{\mathfrak m}=\delta L^{2\gamma}\zeta_\mathfrak m\Omega_{\mathfrak m}+\vartheta_\mathfrak m$ for $\mathfrak m\in \mathcal N[\mathbb V]$, and \[ \mathcal B\big(t_{\mathfrak u_1},t_{\mathfrak u_{21}}, t_{\mathfrak u_{22}},\alpha[\mathcal N[\mathbb V]]\big)=\int_{\mathcal E[\mathbb V]} \prod_{\mathfrak n\in \mathcal N[\mathbb V]} e^{\pi i \alpha_\mathfrak n (t_\mathfrak n-t_{\mathfrak u_1})} \, \mathrm{d}t_{\mathfrak n}, \] where $\widetilde \lambda_{\mathfrak u_1}$ is a shift of 
$\lambda_{\mathfrak u_1}$ by elements of $\vartheta[ \mathcal N[\mathbb V]]$, and we also allow a shift of $\alpha_{\mathfrak u_2}$ by $\lambda_{\mathfrak u_{2}}$. Also, $\widetilde \mathcal X(k[\mathcal Q_{0}[\mathbb V]])$ is a bounded function that is localized in a fixed unit size box for each of its variables. Fixing the integer parts of $\alpha_\mathfrak m$ as $\sigma_{\mathfrak m}$ (each of which having at most $L^{10d}$ possibilities), by summing over all these integer parts, we can bound the $(t_{\mathfrak u_1}, t_{\mathfrak u_{21}}, t_{\mathfrak u_{22}})$ Fourier transform of \eqref{newbexpr} by \begin{multline}\label{afterloc7.3}
(C^+\delta)^{n(\mathscr B)/2} \bigg(\frac{\delta}{2L^{d-\gamma}}\bigg)^{n_1} \sum_{\sigma[ \mathcal N[\mathbb V]]}\sup_{|\alpha_\mathfrak m-\sigma_\mathfrak m|\leq 1}|\widehat{\mathcal B}(\tau_{\mathfrak u_1}+\widetilde{\lambda}_{\mathfrak u_1},\tau_{\mathfrak u_{21}}+\lambda_{\mathfrak u_{21}}, \tau_{\mathfrak u_{22}}+\lambda_{\mathfrak u_{22}},\alpha[\mathcal N[\mathbb V]])| \\\times\sup_{\sigma[\mathcal N[\mathbb V]]}\sum_{\mathscr E}^*\widetilde{\mathcal X}(k[\mathcal Q_{0}[\mathbb V]]) \end{multline}
where the sum $\sum^*$ satisfies the additional localization that $|\alpha_{\mathfrak m}-\sigma_{\mathfrak m}|\leq 1$ for each $\mathfrak m\in \mathcal N[\mathbb V]$. The result \eqref{vinebound1} will follow from the following two estimates: \begin{align}
&\bigg\|\max(\langle\tau_{\mathfrak u_1}\rangle,\langle\tau_{\mathfrak u_{21}}\rangle,\langle\tau_{\mathfrak u_{22}}\rangle)^{\eta^5}\sum_{\sigma[\mathcal N[\mathbb V]]}\sup_{|\alpha_\mathfrak m-\sigma_\mathfrak m|\leq 1}|\widehat{\mathcal B}(\tau_{\mathfrak u_1},\tau_{\mathfrak u_{21}}, \tau_{\mathfrak u_{22}},\alpha[\mathcal N[\mathbb V]])|\bigg\|_{L_{\tau,\alpha}^1}\lesssim C^m (\log L)^{2m+1} L^{4\eta^5},\label{twoestbadv2}\\ &\sup_{(k_{\mathfrak u_1},k_{\mathfrak u_{11}},k_{\mathfrak u_{21}},k_{\mathfrak u_{22}})}\sup_{\sigma[ \mathcal N[\mathbb V]]}\sum_{\mathscr E}^*\widetilde{\mathcal X}(k[\mathcal Q_{0}[\mathbb V]])\leq(C^+\delta^{-1})^{m+1}L^{(2m+1)(d-\gamma)-5\eta}.\label{twoestbadv1} \end{align} Those two estimates are enough to give \eqref{vinebound1}, since shifts of $\widetilde{\lambda}_{\mathfrak u_1}$, $\lambda_{\mathfrak u_{21}}$ and $\lambda_{\mathfrak u_{22}}$ can be absorbed by the weights $\max(\langle \lambda_{\mathfrak u_1}\rangle,\langle\lambda_{\mathfrak u_{21}}\rangle,\langle\lambda_{\mathfrak u_{22}}\rangle,\langle\lambda_{\mathfrak u_2}\rangle)^{-\theta}$ and $(\max_{\mathfrak n\in \mathcal N[\mathbb V]}\langle \vartheta_\mathfrak n \rangle)^{-\eta}$.
To prove \eqref{twoestbadv2}, we note that $\mathcal B(t_{\mathfrak u_1},t_{\mathfrak u_{21}},t_{\mathfrak u_{22}})=\mathcal C(t_{\mathfrak u_1}-\max(t_{\mathfrak u_{21}},t_{\mathfrak u_{22}}))$ for some function $\mathcal C$ by changing time variables, and by Lemma \ref{timeFourierlem} (1) we can bound $\widehat{\mathcal C}(\tau,\alpha[\mathcal N[\mathbb V]])$ by
\[|\widehat{\mathcal C}(\tau,\alpha[ \mathcal N[\mathbb V]])|\lesssim\langle \tau-\gamma\rangle^{-10}
\prod_{\mathfrak m\in \mathcal N[\mathbb V]} \langle q_\mathfrak m\rangle^{-1}, \] where $(q_\mathfrak m)_{\mathfrak m \in \mathcal N[\mathbb V]}$ is obtained from $\alpha[\mathcal N[\mathbb V]]$ by an invertible (actually lower triangular) linear transformation with integer coefficients, and $\gamma$ is the sum of at most two $q_\mathfrak m$ variables. Since each $\sigma_\mathfrak m$ has at most $L^{10d}$ choices, we can easily sum over all $(\sigma_\mathfrak m)$, equivalently all $(q_\mathfrak m)$, and apply Lemma \ref{timeFourierlem} (2) to transform from $\mathcal C$ to $\mathcal B$, and get \eqref{twoestbadv2}.
To prove \eqref{twoestbadv1}, we upper bound $\widetilde{\mathcal X}$ by $1$, which reduces the sum into a counting problem for $k[\mathcal Q_0[\mathbb V]]$ satisfying all the decoration and localization assumptions as before, and in addition the condition that $\delta L^{2\gamma}\Omega_\mathfrak n$ for each $\mathfrak n\in\mathcal N[\mathbb V]$ belongs to an interval of length $1$. By the reparametrization in Step 2 above, this is equivalent to counting the number of choices for the variables $(x_0, x_j, y_j)_{1\leq j \leq m}$, each restricted to a fixed unit ball, such that each of $(r\cdot x_0, x_j\cdot y_j, r\cdot \mu_j)$ $(1\leq j \leq m)$ is restricted to an interval of length $O(\delta^{-1}L^{-2\gamma})$.
Now if $\gamma\geq\frac45-\eta$, we can bound the number of choices of $x_0$ by $O(\delta^{-1}L^{d-\gamma+(1-\gamma)})$ using Lemma \ref{basiccount} (1), and the number of choices for each $(x_j,y_j)$ by $\delta^{-2} L^{2(d-\gamma)-(1-\gamma)-20\eta}$ using Lemma \ref{basiccount0} (2). This implies \eqref{twoestbadv1}, noticing that $m\geq 1$ for vines (II). If $\gamma<\frac45-\eta$ and $|r|\gtrsim L^{-\gamma-\eta}$, then we can bound the number of choices of $x_0$ by $O(\delta^{-1}L^{d-\gamma+2\eta})$ using Lemma \ref{basiccount} (1), and the number of choices for each $(x_j,y_j)$ by $\delta^{-2} L^{2(d-\gamma)-20\eta}$ using Lemma \ref{basiccount0} (2), which again implies \eqref{twoestbadv1}.
\underline{\it Step 4: The cancellation argument.} We now treat the case where $\mathbb V$ is vine (II), $\gamma\leq \frac45-\eta$ and $|r|\leq L^{-\gamma-\eta}$. In this case we need to rely on the cancellation happening in the sum over $(\mathscr B, \mathtt{ind})$ of the terms $\widetilde \mathcal K_{(\mathtt{ind},\mathscr B)}$ defined in \eqref{vineboundBfix3}. For this cancellation to manifest itself, we need to utilize the couple structure near $\mathfrak u_2$, which is depicted in Figure \ref{fig:block_mole}.
We start with the easiest case, which is that of vine (II-e). In this case, we will estimate each element of the sum over $(\mathscr B, \mathtt{ind})$ separately and use that there are $O(C^{n_2})$ elements in this sum. At this point we can repeat the arguments in Step 3 to localize each of the $(x_0,x_j,y_j)$ and $k_\mathfrak n$ variables to a fixed unit ball, and consequently expand $Z$ into (\ref{defztilde}) and each $\mathcal K_{\mathcal Q^{(\mathfrak l,\mathfrak l')}}$ and $\mathcal K_{\mathcal T^{(\mathfrak m)}}^*$ as time Fourier integrals, to reduce (\ref{vineboundBfix3}) to at most $O(C^m)$ expressions of form \begin{multline*}
\bigg(\frac{\delta}{2L^{d-\gamma}}\bigg)^{2m+1}(C^+\delta)^{n(\mathscr B)/2}\sum_{|x_0-a_0|\leq 1} e^{\pi i (\lambda_{\mathfrak u_{21}}t_{\mathfrak u_{21}}+ \lambda_{\mathfrak u_{22}} t_{\mathfrak u_{22}}+(\lambda_{\mathfrak u_1}+\gamma_1)t_{\mathfrak u_1})}\\\times \widetilde{Z}(x_0,k_{\mathfrak u_1},k_{\mathfrak u_{11}},k_{\mathfrak u_{21}},k_{\mathfrak u_{22}})\cdot\mathcal I\left(x_0, k_{\mathfrak u_1}, k_{\mathfrak u_{11}}, k_{\mathfrak u_{21}}, k_{\mathfrak u_{22}}, t_*\right). \end{multline*} Here $a_0$ is a fixed vector, $\gamma_1$ is a linear combination of $\lambda_{\mathfrak u_2}$ and the Fourier variables occurring in the expansions of $\mathcal K_\mathcal Q^{(\mathfrak l, \mathfrak l')}$ and $\mathcal K^*_{\mathcal T^{(\mathfrak m)}}$ as above, and $\mathcal I$ is an expression in the form \eqref{sumintI1}, which can be obtained after defining the new time variables $t_j=t_{\mathfrak u_1}-t_{\mathfrak u_{2j+1}}$, $s_j=t_{\mathfrak u_1}-t_{\mathfrak u_{2j+2}}$ for $1\leq j \leq m$, and $t_0=t_{\mathfrak u_1}-t_{\mathfrak u_2}$. The domain of integration after the change of variables is the same as described in Lemma \ref{sumintest1}, with $t$ replaced by $t_*:=t_{\mathfrak u_1}-\max(t_{\mathfrak u_{21}},t_{\mathfrak u_{22}})$. Moreover, due to the structure of the couple in vine (II-e) (see Figure \ref{fig:block_mole}), namely the fact that $\mathfrak u_2$ is a parent of $\mathfrak u_4$ and child of $\mathfrak u_3$, we have the additional condition (a) in Lemma \ref{sumintest1}. As a result, by \eqref{FourierboundI}, we have that $$
\|\mathcal I\|_{X_{\mathrm{loc}}^{2\eta^5, 0}(t_*)}\lesssim (C^+\delta^{-1})^{m}L^{2m(d-\gamma)-\gamma-\eta^2}. $$ After summing over $x_0$, applying Lemma \ref{timeFourierlem} (2), and including the factors $(\delta/(2L^{d-\gamma}))^{2m+1}$ and $(C^+\delta)^{n(\mathscr B)/2}$ etc., this implies (\ref{vinebound1}). Note that the shifts $(\lambda_{\mathfrak u_{21}},\lambda_{\mathfrak u_{22}},\lambda_{\mathfrak u_1}+\gamma_1)$ are again absorbed by the weights.
Now we turn to vines (II-a)--(II-d). We will consider the case of a pair of couples with vines (II-a) and (II-b) that are twists of each other; the case of vines (II-c) and (II-d) are similar. If $\mathscr B$ is fixed and we sum over $\mathtt{ind}\in\{0,1\}$, then starting from (\ref{vineboundBfix3}), and recalling Figure \ref{fig:block_mole} (assuming without loss of generality that $\mathfrak u_4$ has positive sign), we get the expression \begin{equation}\label{step4sumind} \begin{aligned} \sum_{\mathtt{ind}}\widetilde \mathcal K_{(\mathtt{ind},\mathscr B)}&= \bigg(\frac{\delta}{2L^{d-\gamma}}\bigg)^{2m+1}\zeta[\mathbb V^{(a)}]\sum_{(x_0,x_j,y_j):1\leq j\leq m} \int_{ \mathcal E^*[\mathbb V]}\prod_{j=2}^{2m+2} \mathrm{d}t_{\mathfrak u_j}\\ &\times \prod_{j=1}^m e^{\pi i\cdot\delta L^{2\gamma} (t_{\mathfrak u_{2j+2}} -t_{\mathfrak u_{2j+1}})x_j \cdot y_j}e^{\pi i\cdot\delta L^{2\gamma} (t_{\mathfrak u_{2j+1}}-t_{\mathfrak u_1})(r\cdot \mu_j)} \cdot e^{\pi i\cdot\delta L^{2\gamma} (t_{\mathfrak u_2}-t_{\mathfrak u_1})(r\cdot (k_{\mathfrak u_{22}}-x_0))}\\&\times Z(x_0,k_{\mathfrak u_1},k_{\mathfrak u_{11}},k_{\mathfrak u_{21}},k_{\mathfrak u_{22}},t_{\mathfrak u_1},t_{\mathfrak u_{21}},t_{\mathfrak u_{22}},t_{\mathfrak u_2})\cdot \prod_{\mathfrak l\in\mathcal L[\mathbb V]\setminus \{\mathfrak l^*\}}^{(+)}\mathcal K_{\mathcal Q^{(\mathfrak l,\mathfrak l')}}(t_{\mathfrak l^p},t_{(\mathfrak l')^p},k_\mathfrak l)\\ &\times \prod_{\mathfrak m\in \mathcal N[\mathbb V]\setminus \{\mathfrak u_2\}}\mathcal K_{\mathcal T^{(\mathfrak m)}}^*(t_{\mathfrak m^p},t_{\mathfrak m},k_\mathfrak m)\left[\mathcal M^{(a)}(t_{\mathfrak u_2}, t_{\mathfrak u_3}, t_{\mathfrak u_4},x_0, y_0)-\mathcal M^{(b)}(t_{\mathfrak u_2},t_{\mathfrak u_3}, t_{\mathfrak u_4}, x_0,y_0)\right]. 
\end{aligned} \end{equation} Here, we denote by $\zeta[\mathbb V^{(a)}]$ the $\zeta[\mathbb V]$ for the couple with vine (II-a), which is the negative of the (II-b) couple, and by $\mathcal E^*[\mathbb V]$ the domain of integration derived from $\mathcal E[\mathbb V]$ by removing the condition $t_{\mathfrak u_4}>t_{\mathfrak u_2}$ in case (II-a) and $t_{\mathfrak u_3}>t_{\mathfrak u_2}$ in case (II-b). Moreover $x_0$ equals $k_{\mathfrak u_{23}}$ in case (II-a) and equals $k_{\mathfrak u_2}$ in case (II-b), and $y_0$ is defined to be the other element. We also denoted by $\mathfrak l^*$ the positive leaf in the leaf pair $(\mathfrak u_{23}, \mathfrak u_0)$ (using the notation in Figure \ref{fig:block_mole}), and introduced \begin{align*} &\mathcal M^{(a)}(t_{\mathfrak u_2}, t_{\mathfrak u_3},t_{\mathfrak u_4}, x_0, y_0)=\mathbf{1}_{t_{\mathfrak u_2}<t_{\mathfrak u_4}}(t_{\mathfrak u_4})\overline{\mathcal K_{\overline{\mathcal Q_a}}}(t_{\mathfrak u_3},t_{\mathfrak u_2},x_{0})\cdot\mathcal K_{\mathcal T_a}^*(t_{\mathfrak u_4},t_{\mathfrak u_2},y_0)\\ &\mathcal M^{(b)}(t_{\mathfrak u_2}, t_{\mathfrak u_3},t_{\mathfrak u_4}, x_0, y_0)=\mathbf{1}_{t_{\mathfrak u_2}<t_{\mathfrak u_3}}(t_{\mathfrak u_3})\cdot\mathcal K_{\mathcal Q_b}(t_{\mathfrak u_4},t_{\mathfrak u_2},y_0)\cdot\overline{\mathcal K_{\overline{\mathcal T_b}}^*}(t_{\mathfrak u_3},t_{\mathfrak u_2},x_{0}), \end{align*} where we denoted by $\mathcal Q^a$ and $\mathcal T^a$ (resp. $\mathcal Q^b, \mathcal T^b$) the couples $\mathcal Q^{(\mathfrak u_{23}, \mathfrak u_0)}$ and $\mathcal T^{(\mathfrak u_2)}$ (resp. $\mathcal Q^{(\mathfrak u_0, \mathfrak u_{23})}$ and ${\mathcal T^{(\mathfrak u_2)}}$). Recall also that $y_0=x_0\pm r$ which holds for both cases (II-a) and (II-b). This puts us in the position to apply Lemma \ref{sumintest1} to conclude. 
In fact, the difference $\mathcal M^{(a)}-\mathcal M^{(b)}$ leads to a sum involving one of the following assumptions or terms: \begin{enumerate} \item The assumption $t_{\mathfrak u_4}< t_{\mathfrak u_2}<t_{\mathfrak u_3}$ or $t_{\mathfrak u_3}< t_{\mathfrak u_2}<t_{\mathfrak u_4}$. \item Factors $\mathscr R_{\mathcal Q}=\mathcal K_{\mathcal Q}-(\mathcal K_{\mathcal Q})_{\textrm{app}}$ or $\mathscr R^*_{\mathcal T}=\mathcal K^*_{\mathcal T}-(\mathcal K^*_{\mathcal T})_{\textrm{app}}$ replacing at least one of the $\mathcal K_{\mathcal Q}$ or the $\mathcal K^*_{\mathcal T}$ in \eqref{step4sumind}, for $\mathcal Q\in\{\overline{\mathcal Q_a},\mathcal Q_b\}$ and $\mathcal T\in\{\mathcal T_a,\overline{\mathcal T_b}\}$; here we use Proposition \ref{regcpltreeasymp}.
\item Factors \[\overline{\Jc(t_{\mathfrak u_3}, t_{\mathfrak u_2})} \big(\Jc^*(t_{\mathfrak u_4}, t_{\mathfrak u_2})- \Jc^*(t_{\mathfrak u_3}, t_{\mathfrak u_2})\big)\quad \textrm{or}\quad\big(\Jc(t_{\mathfrak u_4}, t_{\mathfrak u_2})-\Jc(t_{\mathfrak u_3}, t_{\mathfrak u_2})\big)\overline{\Jc^*(t_{\mathfrak u_3}, t_{\mathfrak u_2})},\] which equals $|t_{\mathfrak u_3}-t_{\mathfrak u_4}|^{1-\eta}$ multiplied by a weighted Fourier $L^1$ function; this comes from expanding $(\mathcal K_\mathcal Q)_{\mathrm{app}}$ and $(\mathcal K_\mathcal T^*)_{\mathrm{app}}$ as in \eqref{kqterms}.
\item Factors $(\mathcal K_{\mathcal Q})_{\mathrm{app}}(\cdot,\cdot,y_0)-(\mathcal K_{\mathcal Q})_{\mathrm{app}}(\cdot,\cdot,x_0)$ and $(\mathcal K_{\mathcal T}^*)_{\mathrm{app}}(\cdot,\cdot,x_{0})-(\mathcal K_{\mathcal T}^*)_{\mathrm{app}}(\cdot,\cdot,y_{0})$, for some $\mathcal Q$ and $\mathcal T$, with the time variables being the same in both functions, which is bounded by $|r|$ using that $|x_0-y_0|=r$. \item The leading factor of the form $$ \left[ \overline{(\mathcal K_{\mathcal Q_0}})_{\mathrm{app}}(t_{\mathfrak u_3},t_{\mathfrak u_2},x_0)(\mathcal K_{\mathcal T_0}^*)_{\mathrm{app}}(t_{\mathfrak u_3},t_{\mathfrak u_2},x_{0})-(\mathcal K_{\mathcal Q_0'})_{\mathrm{app}}(t_{\mathfrak u_3},t_{\mathfrak u_2},x_0)\overline{(\mathcal K_{\mathcal T_0'}^*)_{\mathrm{app}}}(t_{\mathfrak u_3},t_{\mathfrak u_2},x_{0})\right]. $$ \end{enumerate}
Note that the contribution of the last term (5) vanishes after summing over $\mathscr B$, due to Lemma \ref{regcpltreecancel}; therefore we are left with terms (1)--(4). For each term, we may argue as in the case of vine (II-e) above, where we insert them back into \eqref{step4sumind}, then localize each vector to a fixed unit ball, expand $Z$ into (\ref{defztilde}), and expand all the $\mathcal K_{\mathcal Q^{(\mathfrak l, \mathfrak l')}}$ and $\mathcal K_{\mathcal T^{(\mathfrak m)}}$ (and $\mathscr R_{\mathcal Q}$ and $\mathscr R_{\mathcal T}^*$ if present) as time Fourier integrals using Proposition \ref{regcpltreeasymp}, to reduce to at most $O(C^m)$ expressions of form \begin{multline}
\bigg(\frac{\delta}{2L^{d-\gamma}}\bigg)^{2m+1}(C\delta)^{n(\mathscr B)/2}\sum_{|x_0-a_0|\leq 2} e^{\pi i( \lambda_{\mathfrak u_{21}}t_{\mathfrak u_{21}}+\lambda_{\mathfrak u_{22}} t_{\mathfrak u_{22}}+(\lambda_{\mathfrak u_1}+\gamma_1)t_{\mathfrak u_1})} \\\times \widetilde{Z}(x_0,k_{\mathfrak u_1},k_{\mathfrak u_{11}},k_{\mathfrak u_{21}},k_{\mathfrak u_{22}})\cdot\mathcal I\left(x_0, k_{\mathfrak u_1}, k_{\mathfrak u_{11}}, k_{\mathfrak u_{21}}, k_{\mathfrak u_{22}}, t_*\right). \end{multline} Here $a_0$ is a fixed vector, $\gamma_1$ is a linear combination of $\lambda_{\mathfrak u_2}$ and the Fourier variables occurring in the expansions of $\mathcal K_\mathcal Q^{(\mathfrak l, \mathfrak l')}$ and $\mathcal K^*_{\mathcal T^{(\mathfrak m)}}$ as above, and $\mathcal I$ is an expression in the form \eqref{sumintI1}, which can be obtained after defining the new time variables $t_j=t_{\mathfrak u_1}-t_{\mathfrak u_{2j+1}}$, $s_j=t_{\mathfrak u_1}-t_{\mathfrak u_{2j+2}}$ for $1\leq j \leq m$, and $t_0=t_{\mathfrak u_1}-t_{\mathfrak u_2}$. The domain of integration after the change of variables is the same as described in Lemma \ref{sumintest1}, with $t$ replaced by $t_*:=t_{\mathfrak u_1}-\max(t_{\mathfrak u_{21}},t_{\mathfrak u_{22}})$. Clearly, for each of the terms (1)--(4), one of the conditions (a)--(d) in Lemma \ref{sumintest1} will be satisfied, so we can apply \eqref{FourierboundI} to get that
$$
\|\mathcal I\|_{X_{\mathrm{loc}}^{2\eta^5, 0}(t_*)}\lesssim (C^+\delta^{-1})^{m}L^{2m(d-\gamma)-\gamma-\eta^2}. $$ After summing over $x_0$, applying Lemma \ref{timeFourierlem} (2), and including the factors $(\delta/(2L^{d-\gamma}))^{2m+1}$ and $(C^+\delta)^{n(\mathscr B)/2}$ etc., this implies (\ref{vinebound1}); note that the shifts $(\lambda_{\mathfrak u_{21}},\lambda_{\mathfrak u_{22}},\lambda_{\mathfrak u_1}+\gamma_1)$ are again absorbed by the weights.
\underline{\it Step 5: The vine (I) case.} We now treat the case where $\mathbb V$ is a (CL) vine (I), i.e. one double bond. Here we will apply the cancellation structure in Step 4, combined with the counting arguments in Step 3, but both will be in this extremely simple setting of one double bond. In fact, in this case we have $m=0$, so there is only one variable $x_0$ which we sum in. Moreover, an easy examination of Figure \ref{fig:block_mole} similar to Step 4 implies that, instead of the $\mathcal M^{(a)}-\mathcal M^{(b)}$ factor in (\ref{step4sumind}), we have the factor
\[\overline{\mathcal K_{\overline{\mathcal Q_a}}}(t_{\mathfrak u_1},t_{\mathfrak u_2},x_0)\mathcal K_{\mathcal T_a}^*(t_{\mathfrak u_1},t_{\mathfrak u_2},y_0)-\mathcal K_{\mathcal Q_b}(t_{\mathfrak u_1},t_{\mathfrak u_2},y_0)\overline{\mathcal K_{\overline{\mathcal T_b}}}(t_{\mathfrak u_1},t_{\mathfrak u_2},x_0),\]assuming $\mathfrak u_1$ has sign $+$ in Figure \ref{fig:block_mole}. Upon summing in $\mathscr B$ and applying Lemma \ref{regcpltreecancel}, this factor is bounded in $X_{\mathrm{loc}}^{\eta,0}$ by $|r|$ since $|x_0-y_0|=r$.
Then, we reduce to the counting problem as in Step 3. Note that the result corresponding to (\ref{twoestbadv2}) is proved in the same way; as for the one corresponding to (\ref{twoestbadv1}), we are reduced to a counting problem with only one variable $x_0$ involved, which is restricted to a unit ball and satisfies that $r\cdot x_0$ belongs to a fixed interval of length $\delta^{-1}L^{-2\gamma}$. By Lemma \ref{basiccount} (1) and using the extra factor $|r|$ exhibited above, we get the total contribution factor
\[(L^{d-1}+\delta^{-1}|r|^{-1}L^{d-2\gamma})\cdot |r|\lesssim L^{d-\gamma-\gamma_0+O(\eta)},\] which suffices for the proof of (\ref{vinebound1}) just as in Step 3.
The proof of Proposition \ref{estbadvine} is now finished. \end{proof}
\begin{prop}\label{estnormalvine} Suppose $\mathbb V$ is a \emph{normal} (CL) vine. Let $(\mathcal Q^{\mathrm{sp}}, \mathtt{cod},\mathfrak{n},\mathscr A^{\mathrm{sp}})$ and $(Z,W)$ be fixed as in Section \ref{vinesubset} and Proposition \ref{estbadvine}, and let $(\mathtt{ind},\mathscr B)$ also be fixed. Then, for any choice of $\theta\in\{\eta^5,0\}$ and in the same notations as in Proposition \ref{estbadvine}, we have \begin{equation}\label{vinebound2}
\big\|e^{-\pi i\cdot\delta L^{2\gamma}t_{\mathfrak u_1}\Gamma}\cdot\mathcal K_{(\mathtt{sgn},\mathtt{ind},\mathscr B)}^{(\mathbb V,Z,W)}(x_0',k_{\mathfrak u_{21}},k_{\mathfrak u_{22}},t_{\mathfrak u_1},t_{\mathfrak u_{21}},t_{\mathfrak u_{22}})\big\|_{Y_{\mathrm{loc}}^{\theta}}\lesssim(C^+\delta^{1/4})^{n_2}L^{\eta^4}\|Z\|_{Y_{\mathrm{loc}}^{\theta}}. \end{equation} Moreover, if $\mathbb V$ is replaced by a normal (CL) vine chain $\mathbb V\mathbb C$, define $\mathcal Q_0[\mathbb V\mathbb C]$ as in Proposition \ref{block_clcn} for the block $\mathbb V\mathbb C$ and let $\mathcal Q^{\mathrm{sp}}$ be the couple obtained by splicing the whole vine chain $\mathbb V\mathbb C$. Fix $(\mathcal Q^{\mathrm{sp}}, \mathfrak{n},\mathscr A^{\mathrm{sp}})$ and $(\mathtt{cod},\mathtt{ind}, \mathscr B)$ for each ingredient vine in the chain, and define the expression $\mathcal K_{(\mathtt{ind},\mathscr B)}^{(\mathbb V\mathbb C, Z, W)}$ in the same way as in Section \ref{vinesubset}. Then, for $\theta\in\{\eta^5,0\}$ we have \begin{equation}\label{vinebound3}
\big\|e^{-\pi i\cdot\delta L^{2\gamma}t_{\mathfrak u_1}\Gamma}\cdot\mathcal K_{(\mathtt{sgn},\mathtt{ind},\mathscr B)}^{(\mathbb V\mathbb C,Z,W)}(x_0',k_{\mathfrak u_{21}},k_{\mathfrak u_{22}},t_{\mathfrak u_1},t_{\mathfrak u_{21}},t_{\mathfrak u_{22}})\big\|_{Y_{\mathrm{loc}}^{\theta}}\lesssim(C^+\delta^{1/4})^{n_2}L^{\eta^4}\|Z\|_{Y_{\mathrm{loc}}^{\theta}}, \end{equation} where $n_2$ is the total number of branching nodes in $\mathcal Q_0[\mathbb V\mathbb C]\setminus \{\mathfrak u_1\}$ plus the sum of all the $n(\mathscr B)$, as defined in Section \ref{vinesubset}. \end{prop}
\begin{proof} The proof of this proposition goes along the same lines as that of Proposition \ref{estbadvine}, so we will just sketch the similar arguments and only elaborate on the differences, which are mainly in the reparametrization in Step 1 below. We start with the case of a single normal (CL) vine. We replace $Y_{\mathrm{loc}}^\theta$ by $Y^\theta$ and assume throughout $\|Z\|_{Y^\theta}=1$.
\underline{\it Step 1: Reparametrization.} Define the notations $\mathscr E[\mathbb V]$, $\mathcal N[\mathbb V]$ and $\mathcal L[\mathbb V]$ etc. as in the proof of Proposition \ref{estbadvine}. We first perform the reparametrization. By examining the form of the normal vine $\mathbb V$ in Figure \ref{fig:vines}, we see that it contains $n_1=2m+4$ atoms (excluding the joint atom $v_1$) for some $m$, which is equal to the cardinality of $\mathcal Q_0[\mathbb V]\setminus \{\mathfrak u_1\}$. These atoms are split into three groups: (a) the joint $v_2$, (b) the $m$ pairs of atoms connected by double bonds that belong to the ladders represented by colored dashed lines in Figure \ref{fig:vines} (for Vine (VII) we also include here the pair of atoms where all three ladders intersect), and (c) the three remaining atoms $v_1', v_2', v_3'$. At the level of the couple, $\mathcal Q_0[\mathbb V]\backslash\{\mathfrak u_1\}$ has $2m+4$ leaf pairs $(\mathfrak l, \mathfrak l')$, as well as $2m+4$ branching nodes $\mathfrak m$. We will replace the variables $k_\mathfrak l$ and $k_\mathfrak m$ (where $\mathfrak l\in\mathcal L[\mathbb V]$ and $\mathfrak m\in\mathcal N[\mathbb V]$) occurring in the decoration $\mathscr E$ by a new set of variables $(x_0, x_j, y_j, u_1, u_2, u_3)_{1\leq j \leq m}$ as follows.
By the same argument and notation as in Step 2 of the proof of Proposition \ref{estbadvine}, we have that $\Omega_{\mathfrak u_2}=2r\cdot\mu_0$ where $r=k_{\mathfrak u_{21}}-k_{\mathfrak u_{22}}$ is fixed and nonzero, and $\mu_0=\alpha_0x_0+\beta_0 y_0+\theta_0 r$ for some $\alpha_0, \beta_0, \theta_0 \in \{0,\pm 1\}$ with $y_0 \in \{k_{\mathfrak u_{21}}, k_{\mathfrak u_{22}}\}$, $x_0$ equals $k_{\mathfrak u_{23}}$ if $\mathfrak u_2$ has sign $+$ and $k_{\mathfrak u_2}$ otherwise, and $\alpha_0^2+\beta_0^2\neq 0$. Next, for each pair of atoms connected by a double bond in a ladder, the same argument as in Step 3 of the proof of Proposition \ref{estbadvine} shows that if $\mathfrak u$ and $\widetilde \mathfrak u$ are the branching nodes of $\mathcal Q_0[\mathbb V]$ corresponding to those two atoms, then $$ \zeta_{\mathfrak u} \Omega_{\mathfrak u}+\zeta_{\widetilde \mathfrak u}\Omega_{\widetilde \mathfrak u}=2\widetilde r \cdot \mu. $$ Here $\mu$ is the difference of two $k_\mathfrak n$ vectors corresponding to two of the four single bonds at this pair of atoms, and $\widetilde r\in \{0, r\}$ is the same for all pairs of atoms in the same ladder and is equal to $r$ for the ladders attached to the joints and zero otherwise. In Figure \ref{fig:vines} the ladders whose $\widetilde r$ value is 0 are colored in light blue, and we call such ladders \emph{zero-gap ladders} (note that for the pair of atoms where all three ladders intersect in vine (VII), the argument needs to be slightly adjusted but the result remains the same, with $\widetilde{r}=r$ in this case). As such, we can define $(x_j, y_j)\in \mathbb Z^{2d}$ such that $\zeta_{\mathfrak u} \Omega_{\mathfrak u}=2x_j \cdot y_j$, so that $\zeta_{\widetilde \mathfrak u}\Omega_{\widetilde \mathfrak u}=-2x_j\cdot y_j+2\widetilde r\cdot \mu$, and $\mu$ can be written as $a_jx_j+b_jy_j+c_j\widetilde r$ for $a_j,b_j,c_j\in\{0,\pm1\}$ and $a_j^2+b_j^2\neq 0$. 
It now remains to define the variables $(u_1, u_2, u_3)$. Here we only discuss vine (III) in detail below, as arguments in other cases are similar.
(1) For vine (III), using the notation in Figure \ref{fig:vineAnnotated} (B), we may denote $\omega_j=\zeta_{\mathfrak w_j}\Omega_{\mathfrak w_j}$ where $\mathfrak w_j$ is the node in $\mathcal Q_0[\mathbb V]$ corresponding to the atom $w_j$. In Figure \ref{fig:vineAnnotated} (B) and Figure \ref{fig:vines}, note that $e-g=f-h=\pm r$, and $(e,g)$ is determined by $x_0$ and some of the $(x_j,y_j)$ variables. Now, if the bonds decorated by $c$ and $d$ have opposite directions (say $c$ goes from $w_1$ to $w_3$, and $d$ goes from $w_3$ to $w_1$), we may define $(u_1,u_2,u_3)=(c-d,d-a,d-b)$. If the bonds decorated by $c$ and $d$ have the same direction (which has to go from $w_1$ to $w_3$), then we may define $(u_1,u_2,u_3)=(c-a,a-d,c-b)$. Then we have $\omega_1=2u_1\cdot u_2$, and $\omega_2$ equals $2u_1\cdot u_3$ or $2u_3\cdot (u_1+u_2-u_3)$ in the first and second cases, respectively. Moreover we have $\omega_1+\omega_2+\omega_3=\pm(|e|^2-|f|^2-|g|^2+|h|^2)=2r\cdot\xi$ where $\xi=au_1+bu_2+cu_3+dr$ with $a,b,c,d\in\{0,\pm1\}$. In any case, the variables $(u_1,u_2,u_3)$ determine $(a,b,c,d,f,h)$ and allow one to proceed with parametrizing the next ladder starting from $(f,h)$ by the rest of $(x_j,y_j)$ variables. The argument for Vine (IV) is similar, see Figure \ref{fig:vineAnnotated} (C).
(2) For Vines (V)--(VIII), the argument is again similar, and in fact much easier. In Figure \ref{fig:vineAnnotated} (D) and Figure \ref{fig:vines}, note that the two bonds going in and out of the triangle are both decorated by $g$ (which is determined by the $(x_0,x_j,y_j)$ variables), which means that the vector $r$ for vine (III) above is replaced by $\widetilde{r}=0$. In particular we have $\omega_1+\omega_2+\omega_3=0$ where $\omega_j=\zeta_{\mathfrak w_j}\Omega_{\mathfrak w_j}$. Then we argue as above, with $(u_1,u_2,u_3)=(a-e,b-e,a-c)$ if the bonds decorated by $a$ and $b$ have the same direction, and $(u_1,u_2,u_3)=(e-g,b-g,d-g)$ if they have opposite directions; the same results will then hold.
As a result, in all cases we can define $(u_1, u_2, u_3)$ so that the full decoration of the vine $\mathbb V$ and hence that of $\mathcal Q_0[\mathbb V]$ is completely determined by $(x_0, x_j, y_j, u_1, u_2, u_3)_{1\leq j \leq m}$. The factors $\zeta_{\mathfrak u_{2j+1}}\Omega_{\mathfrak u_{2j+1}}$ and $\zeta_{\mathfrak u_{2j+2}}\Omega_{\mathfrak u_{2j+2}}$ for $1\leq j \leq m$ are given by $2x_j \cdot y_j$ and $-2x_j \cdot y_j +2\widetilde {r}_j \cdot \mu_j$ where $\mu_j=a_jx_j+b_jy_j+c_jr$ with $a_j,b_j,c_j\in\{0,\pm1\}$ and $a_j^2+b_j^2\neq 0$, and $\widetilde {r}_j=r$ if $j\leq m_1$ and $\widetilde{r}_j=0$ otherwise. The remaining three branching nodes in $\mathcal Q_0[\mathbb V]$ have their resonance factors given by $2u_1 \cdot u_2$ and $\Lambda\in\{2u_1\cdot u_3,2u_3\cdot (u_1+u_2-u_3)\}$, and $-2u_1\cdot u_2-\Lambda+\widetilde r \cdot\xi$ where $\xi=au_1+bu_2+cu_3+dr$ with $a,b,c,d\in\{0,\pm1\}$ and $\widetilde{r}\in\{r,0\}$. In particular, the change of variables from $(k_\mathfrak n)_{\mathfrak n\in \mathcal Q_0[\mathbb V]}$ into $(x_0, x_j, y_j , u_1, u_2, u_3)$ satisfies the conditions stated in Lemma \ref{sumintest2}.
Now, with the reparametrization, we can argue in the same way as in Step 2 of the proof of Proposition \ref{estbadvine} to restrict each of the $(x_0,x_j,y_j,u_j)$ and $k_\mathfrak n$ variables to a fixed unit ball, and consequently expand $Z$ into (\ref{defztilde}). Moreover, once we confirm $r\neq 0$, we can get rid of the $\epsilon_{\mathscr E[\mathbb V]}$ factor in the same way as in Step 2 of the proof of Proposition \ref{estbadvine}. Next, by using the $(x_0,x_j,y_j,u_j)$ variables, we can reduce (\ref{vinebound2}) to estimating the expression \begin{align} \widetilde \mathcal K_{(\mathtt{ind},\mathscr B)}&= e^{\pi i (\lambda_{\mathfrak u_{21}}t_{\mathfrak u_{21}}+\lambda_{\mathfrak u_{22}} t_{\mathfrak u_{22}}+\lambda_{\mathfrak u_1}t_{\mathfrak u_1})} \bigg(\frac{\delta}{2L^{d-\gamma}}\bigg)^{n_1}\zeta[\mathbb V] \sum_{(x_0,x_j,y_j):1\leq j\leq m}\sum_{(u_1, u_2, u_3)}\int_{ \mathcal E[\mathbb V]}\nonumber\\ &\times e^{2\pi i \lambda_{\mathfrak u_2} t_{\mathfrak u_2}}e^{\pi i\cdot\delta L^{2\gamma} (t_{\mathfrak u_2}-t_{\mathfrak u_1})(r\cdot \mu_0)} \prod_{j=1}^{m} e^{\pi i\cdot\delta L^{2\gamma} (t_{\mathfrak u_{2j+2}} -t_{\mathfrak u_{2j+1}})x_j \cdot y_j}\prod_{j=1}^{m_1}e^{\pi i\cdot\delta L^{2\gamma} (t_{\mathfrak u_{2j+1}}-t_{\mathfrak u_1})(r\cdot \mu_j)} \nonumber\\ &\times e^{\pi i\cdot \delta L^{2\gamma}(t_{\mathfrak w_1}-t_{\mathfrak w_3})(u_1\cdot u_2)}e^{\pi i\cdot \delta L^{2\gamma}(t_{\mathfrak w_2}-t_{\mathfrak w_3})\Lambda} e^{\pi i\cdot \delta L^{2\gamma}t_{\mathfrak w_3}(\widetilde{r}\cdot\xi)} {\prod_{\mathfrak l\in \mathcal L[\mathbb V]}^{(+)}\mathcal K_{\mathcal Q^{(\mathfrak l,\mathfrak l')}}(t_{\mathfrak l^p},t_{(\mathfrak l')^p},k_\mathfrak l)}\nonumber\\ &\times\prod_{\mathfrak m\in \mathcal N[\mathbb V]}\mathcal K_{\mathcal T^{(\mathfrak m)}}^*(t_{\mathfrak m^p},t_{\mathfrak m},k_\mathfrak m)\cdot \widetilde{Z}(x_0,k_{\mathfrak u_1},k_{\mathfrak u_{11}},k_{\mathfrak u_{21}},k_{\mathfrak u_{22}})\prod_{j=1}^m 
\mathrm{d}t_{j}\mathrm{d}s_{j}\cdot\mathrm{d}t_{0}\mathrm{d}\tau_{1}\mathrm{d}\tau_{2}\mathrm{d}\tau_{3}.\label{gvineboundBfix2} \end{align}
\underline{\it Step 2: Case $\gamma>\frac12$: counting argument.} The argument here is very similar to Step 3 of the proof of Proposition \ref{estbadvine}, but with one additional ingredient. After expanding all the $\mathcal K_{\mathcal Q}$ and $\mathcal K^*_\mathcal T$ as time Fourier integrals, and localizing each $k_\mathfrak n\,(\mathfrak n \in \mathcal Q_0[\mathbb V])$ to a unit ball as in Step 3 of the proof of Proposition \ref{estbadvine}, we can reduce the estimate of (\ref{gvineboundBfix2}) to that of \eqref{afterloc7.3}. Recall here that $m=m_1+m_2$ where $m_2$ is the length of the zero-gap ladder in $\mathbb V$, and $m_1$ is the total length of other ladders. Now it suffices to prove the following two estimates (in fact they will allow us to gain a small power of $L$ in (\ref{vinebound2})): \begin{multline}\label{twoestgoodv2}
\bigg\|\max(\langle \tau_{\mathfrak u_1}\rangle,\langle \tau_{\mathfrak u_{21}}\rangle,\langle \tau_{\mathfrak u_{22}}\rangle)^{\eta^5}\sum_{\sigma[\mathcal N[\mathbb V]]}\sup_{|\alpha_\mathfrak m-\sigma_\mathfrak m|\leq 1}|\widehat \mathcal B(\tau_{\mathfrak u_1},\tau_{\mathfrak u_{21}}, \tau_{\mathfrak u_{22}},\alpha[ \mathcal N[\mathbb V]])|\bigg\|_{L_{\tau,\alpha}^1}\\\lesssim C^m \delta^{-(m+2)/2}L^{Cm_1\sqrt \delta} (\log L)^{C m_1}L^{4\eta^5}, \end{multline} \begin{equation}\label{twoestgoodv1} \sup_{(k_{\mathfrak u_1},k_{\mathfrak u_{11}},k_{\mathfrak u_{21}},k_{\mathfrak u_{22}})}\sup_{\sigma[\mathcal N[\mathbb V]]}\sum_{\mathscr E}^*\mathcal X(k[\mathcal Q_{0}[\mathbb V]])\leq (C^+\delta^{-1})^{m+2}L^{(2m+4)(d-\gamma)-5\eta (1+m_1)}. \end{equation} These two estimates are clearly enough to give \eqref{vinebound2} since the shifts can be absorbed by the weights as in the proof of Proposition \ref{estbadvine}. Moreover, \eqref{twoestgoodv2} follows by combining the arguments in Step 3 of the proof of Proposition \ref{estbadvine} with the ladder $L^1$ estimate proved in Proposition 10.1 of \cite{DH21} (but applied only to the zero-gap ladder, so the number of atoms not in this ladder is $O(m_1)$); we remark that while Proposition 10.1 of \cite{DH21} is proved for $L^\infty_t$ rather than $X^\theta$, it extends directly to the space $X^\theta$ by simply relying on Lemma \ref{timeFourierlem} instead of Lemma 10.2 in \cite{DH21}.
To prove \eqref{twoestgoodv1}, we reduce it to a counting problem as in the proof of Proposition \ref{estbadvine}. Here we are counting the number of choices for the variables $(x_0, x_j, y_j, u_1, u_2,u_3)_{1\leq j \leq m}$, each of which is in a fixed unit ball, such that each of $(r\cdot \mu_0, x_j\cdot y_j,u_1\cdot u_2,\Lambda)$ where $1\leq j \leq m$, and each of $r\cdot\mu_j$ where $1\leq j\leq m_1$, belongs to a fixed interval of length $O(\delta^{-1}L^{-2\gamma})$. Since $\gamma>1/2$, we know that the number of choices for $x_0$ is $O(\delta^{-1}L^{d-\gamma-(1-\gamma)})$ by Lemma \ref{basiccount} (1), that the number of choices for each $(x_j,y_j)\,(1\leq j\leq m_1)$ is $O(\delta^{-2}L^{2(d-\gamma)-10\eta})$ by Lemma \ref{basiccount0} (1), the number of choices for each $(x_j,y_j)\,(j>m_1)$ is $O(\delta^{-1}L^{2(d-\gamma)})$ by Lemma \ref{basiccount0} (1), and the number of choices for $(u_1,u_2,u_3)$ is $O(\delta^{-2}L^{3(d-\gamma)-(1-\gamma)-10\eta})$ by Lemma \ref{basiccount0} (3). Putting these together, this proves \eqref{twoestgoodv1}.
\underline{\it Step 3: Case $\gamma\leq \frac12$: Lemma \ref{sumintest2}.} Here, the argument is basically the same as in Step 4 of the proof of Proposition \ref{estbadvine}, except that we rely on Lemma \ref{sumintest2} instead of \ref{sumintest1}. We expand $\mathcal K_{\mathcal Q}$ and $\mathcal K^*_{\mathcal T}$ as time Fourier integrals to obtain an expression of the form \begin{multline}
\bigg(\frac{\delta}{2L^{d-\gamma}}\bigg)^{2m+4}(C\delta)^{n(\mathscr B)/2}\sum_{|x_0-a_0|\leq 1} e^{\pi i (\lambda_{\mathfrak u_{21}}t_{\mathfrak u_{21}}+\lambda_{\mathfrak u_{22}} t_{\mathfrak u_{22}}+(\lambda_{\mathfrak u_1}+\gamma_1)t_{\mathfrak u_1})}\\\times\widetilde{Z}(x_0,k_{\mathfrak u_1},k_{\mathfrak u_{11}},k_{\mathfrak u_{21}},k_{\mathfrak u_{22}})\mathcal I\left(x_0, k_{\mathfrak u_1}, k_{\mathfrak u_{11}}, k_{\mathfrak u_{21}}, k_{\mathfrak u_{22}}, t_*\right). \end{multline}
Here $a_0$ is a fixed vector, $\gamma_1$ is a linear combination of $\lambda_{\mathfrak u_2}$ and the Fourier variables occurring in the expansions of $\mathcal K_\mathcal Q^{(\mathfrak l, \mathfrak l')}$ and $\mathcal K^*_{\mathcal T^{(\mathfrak m)}}$ as above, and $\mathcal I$ is an expression in the form \eqref{sumintI1} but modified as in Lemma \ref{sumintest2}, which can be obtained after defining the new time variables $t_j=t_{\mathfrak u_1}-t_{\mathfrak u_{2j+1}}$, $s_j=t_{\mathfrak u_1}-t_{\mathfrak u_{2j+2}}$ for $1\leq j \leq m$, $t_0=t_{\mathfrak u_1}-t_{\mathfrak u_2}$, and $\tau_j=t_{\mathfrak u_1}-t_{\mathfrak w_j}$. The domain of integration after the change of variables is the same as described in Lemma \ref{sumintest2}, with $t$ replaced by $t_*:=t_{\mathfrak u_1}-\max(t_{\mathfrak u_{21}},t_{\mathfrak u_{22}})$. As a result, by \eqref{FourierboundI2}, we have that $$
\|\mathcal I\|_{X_{\mathrm{loc}}^{2\eta^5, 0}(t_*)}\lesssim (C^+\delta^{-1})^{m+2}L^{(2m+4)(d-\gamma)-d+\eta^4}. $$ After summing over $x_0$, applying Lemma \ref{timeFourierlem} (2), and including the factors $(\delta/(2L^{d-\gamma}))^{2m+4}$ and $(C^+\delta)^{n(\mathscr B)/2}$ etc., this implies (\ref{vinebound2}); note that the shifts $(\lambda_{\mathfrak u_{21}},\lambda_{\mathfrak u_{22}},\lambda_{\mathfrak u_1}+\gamma_1)$ are again absorbed by the weights.
\underline{\it Step 4: The Vine Chain case.} The proof for the vine chain $\mathbb V\mathbb C$ runs exactly as above, except that we apply Corollary \ref{sumintest3} instead of Lemma \ref{sumintest2} in Step 3. Indeed, by reparametrizing the whole vine chain $\mathbb V\mathbb C$ by going from bottom to top and using the same reparametrization in Step 1 for each ingredient vine, we can define the variables $(x_0^q, x_j^q, y_j^q, u_1^q, u_2^q, u_3^q)$ where $0\leq q<Q$ and $Q$ is the number of ingredient vines in $\mathbb V\mathbb C$. We then get an expression for $\mathcal K_{(\mathtt{sgn},\mathtt{ind},\mathscr B)}^{(\mathbb V\mathbb C, Z, W)}$ generalizing that in \eqref{gvineboundBfix2}. We treat the case when $\gamma>\frac12$ by reducing to a counting estimate as in Step 2 above, and treat the case $\gamma\leq \frac12$ using Corollary \ref{sumintest3} as in Step 3 above. This then completes the proof. \end{proof} \begin{rem}\label{extravar} In the setting of Propositions \ref{estbadvine} and \ref{estnormalvine} (described in Section \ref{vinesubset}) we have assumed that $Z$ is a function of $(x_0,k_{\mathfrak u_1},k_{\mathfrak u_{11}},k_{\mathfrak u_{21}},k_{\mathfrak u_{22}},t_{\mathfrak u_1},t_{\mathfrak u_{21}},t_{\mathfrak u_{22}},t_{\mathfrak u_2})$. In fact, we may allow $Z$ to depend on other variables (say denoted by $k[\mathcal U]$ and $t[\mathcal V]$) provided that they do not appear in the rest of the expression for $\mathcal K_{(\mathtt{sgn},\mathtt{ind},\mathscr B)}^{(\mathbb V,Z,W)}$; in this case, if we consider the norm $Y_{\mathrm{loc}}^\theta$ in all variables including $k[\mathcal U]$ and $t[\mathcal V]$, then (\ref{vinebound1}), (\ref{vinebound2}) and (\ref{vinebound3}) still hold with the same implicit constants.
This is because, in the process of the proof of Propositions \ref{estbadvine} and \ref{estnormalvine}, we have restricted each of the variables $(x_0,k_{\mathfrak u_1},k_{\mathfrak u_{11}},k_{\mathfrak u_{21}},k_{\mathfrak u_{22}})$ to a fixed unit ball. If $Z$ depends on $k[\mathcal U]$, then we may also restrict each variable in $k[\mathcal U]$ to a unit ball, which reduces the $Y_{\mathrm{loc}}^\theta$ bound to the $X_{\mathrm{loc}}^{\theta,0}$ bound. Then we simply view $Z$ as a function with value in the Banach space $L_{k[\mathcal U]}^\infty$, and apply Propositions \ref{estbadvine} and \ref{estnormalvine} to handle these extra $k[\mathcal U]$ variables. As for the extra time variables $t[\mathcal V]$, note that $\mathcal K_{(\mathtt{sgn},\mathtt{ind},\mathscr B)}^{(\mathbb V,Z,W)}$ is linear in $Z$ and thus commutes with taking time Fourier transforms in $t[\mathcal V]$. Let the Fourier dual of $t[\mathcal V]$ be $\xi[\mathcal V]$, then
\begin{equation}\label{sepvar}\|\mathcal Z\|_{X^{\eta^5,0}}\sim\int\big(\max_{\mathfrak n\in\mathcal V}\langle \xi_\mathfrak n\rangle\big)^{\eta^5}\|\mathcal F_{t[\mathcal V]}\mathcal Z(\cdot,\xi[\mathcal V])\|_{X^{0,0}}\,\mathrm{d}\xi[\mathcal V]+\int\|\mathcal F_{t[\mathcal V]}\mathcal Z(\cdot,\xi[\mathcal V])\|_{X^{\eta^5,0}}\,\mathrm{d}\xi[\mathcal V]\end{equation} for any $\mathcal Z=\mathcal Z(\cdot,t[\mathcal V])$, where the $\cdot$ represents variables other than $k[\mathcal U]$ and $t[\mathcal V]$, and the norm on the left hand side of (\ref{sepvar}) is the norm in all variables, while the norms on the right hand side are in the $\cdot$ variables only. Therefore, we can apply Propositions \ref{estbadvine} and \ref{estnormalvine} for each fixed $\xi[\mathcal V]$, and then integrate in these variables, to get the same results as in (\ref{vinebound1}), (\ref{vinebound2}) and (\ref{vinebound3}). \end{rem} \section{Reduction to counting estimates}\label{reduct1} \subsection{Preliminary setup} Recall the notions of vines and vine-chains (VC), hyper-vines (HV) and hyper-vine-chains (HVC), and ladders in Definition \ref{defvine}. \begin{lem}\label{vinechainlem} Define a \emph{double-vine}, or DV for short, to be the union of two vines (V), see Figure \ref{fig:vines}, that share two common joints and no other common atoms\footnote{This is not a vine-like object; see Definition \ref{defvine}.}. Then, for any molecule $\mathbb M$, there is a unique collection $\mathscr C$ of disjoint atomic groups, such that each atomic group in $\mathscr C$ is an HV, VC, HVC or DV, and any vine-like object in $\mathbb M$ is a subset of some atomic group in $\mathscr C$. \end{lem} \begin{proof} Consider all the maximal vine-like objects in $\mathbb M$, where maximal is in the sense that it is not a subset of any other vine-like object; let this collection be $\mathscr C_1$. We know that Lemma \ref{disjointlem} applies to any two of these objects. 
If $\mathbb A,\mathbb B\in\mathscr C_1$ and $\mathbb A\cap\mathbb B\neq\varnothing$, then we are in one of scenarios (a)--(c) of Lemma \ref{disjointlem}. However scenarios (b) and (c) are impossible, because $\mathbb D=\mathbb A\cup\mathbb B$ (in case (b)) or $\mathbb D=\mathbb C_0\cup\mathbb C_1\cup\mathbb C_2$ (in case (c)) would be a larger vine-like object that contains $\mathbb A$ and $\mathbb B$. We are then left with case (a), where $\sigma(\mathbb A)=\sigma(\mathbb B)=1$. This means that each of $\mathbb A$ and $\mathbb B$ must be one vine (V), and they share two common joints and no other common atoms, so their union is a DV. Moreover, in this case, neither $\mathbb A$ nor $\mathbb B$ can intersect with any other maximal vine-like object $\mathbb C$ (otherwise $\mathbb A$ (for example) would form another DV with $\mathbb C$, which leads to a $4$-regular component). Therefore, let $\mathscr C$ be obtained from $\mathscr C_1$ by replacing the two intersecting vines (V) with one DV, then it satisfies the requirement and clearly is unique. \end{proof} \begin{df}\label{defcong}Let $\mathcal Q$ be a couple with skeleton $\mathcal Q_{\mathrm{sk}}$, and let $\mathscr C$ be defined for the molecule $\mathbb M(\mathcal Q_{\mathrm{sk}})$ by Lemma \ref{vinechainlem}. Define a collection $\mathscr V$ of (CL) vines as follows: for each VC in $\mathscr C$ whose joints do not both have degree $3$, we include into $\mathscr V$ all its vine ingredients that are (CL) vines. For each HVC in $\mathscr C$ and each VC in $\mathscr C$ whose joints both have degree $3$, we include into $\mathscr V$ all but one of its vine ingredients, such that (a) if there is a (CN) vine then we only exclude this one, and (b) if all vines are (CL) vines then we only exclude the ``top" vine whose $\mathfrak u_1$ node is the ancestor of all other $\mathfrak u_1$ nodes (as branching nodes of $\mathcal Q_{\mathrm{sk}}$). We do not include anything in $\mathscr C$ from any HV or DV.
Since $\mathscr V$ satisfies the assumptions in Definition \ref{twistgen}, we shall define any couple $\mathcal Q'$ to be \emph{congruent} to $\mathcal Q$, if $\mathcal Q'$ is a full twist of $\mathcal Q$ with respect to $\mathscr V$. Clearly, performing a full twist does not affect the molecule $\mathbb M(\mathcal Q_{\mathrm{sk}})$ nor the choice of vines in $\mathscr V$, and congruence is an equivalence relation and preserves the order of \emph{each tree} in a couple. \end{df} \begin{df}\label{defdiff} Given any molecule $\mathbb M$ and block $\mathbb B\subset\mathbb M$, let the four bonds in $\mathbb B$ at the two joints be $\ell_1,\ell_2\in\mathbb B$ at one joint, and $\ell_3,\ell_4\in\mathbb B$ at the other. Then for any decoration $(k_\ell)$ we have $k_{\ell_1}-k_{\ell_2}=\pm(k_{\ell_3}-k_{\ell_4}):=r$. We call this vector the \emph{gap} of $\mathbb B$ relative to this decoration. Note that once the parameters $(c_v)$ of a decoration are fixed as in Definition \ref{decmole}, then this $r$ can be expressed as a function of the vectors $k_{\ell_j^*}$, where $\ell_j^*$ runs over all bonds connecting a given joint of $\mathbb B$ to atoms \emph{not} in $\mathbb B$. If $\mathbb B$ is concatenated by blocks $\mathbb B_j$, then all $\mathbb B_j$ must have the same gap as $\mathbb B$. For the hyper-block which is adjoint of $\mathbb B$, we define its gap to be the gap of $\mathbb B$. Note that the gap of a block can never be $0$ due to Remark \ref{nonresrem}.
More generally, if $v$ is an atom and $\ell_1,\ell_2\sim v$ are two bonds with opposite directions, then we define the \emph{gap} of the triple $(v,\ell_1,\ell_2)$ relative to a given decoration as $r:=k_{\ell_1}-k_{\ell_2}$. In particular the gap of any block or hyper-block equals a suitable gap at either of its joints. Next, for any ladder of length $\geq 1$ (see Definition \ref{defvine} and Figure \ref{fig:vines}), the difference $k_\ell-k_{\ell'}$ for any pair of parallel single bonds $(\ell,\ell')$ must be equal (up to a sign change), which we also define to be the \emph{gap} of the ladder. In particular, if $\mathbb V$ is a vine (or VC) with gap $r$, then for any ladder contained in $\mathbb V$ that is inserted between parallel dashed bonds of the same color in Figure \ref{fig:vines}, the gap of this ladder is either $\pm r$ or $0$. Finally, for all the gaps defined above, we say it is \emph{small gap} (or SG for short) if $|r|\leq L^{-\gamma+\eta}$ (including $r=0$), and \emph{large gap} (or LG) if $|r|> L^{-\gamma+\eta}$. \end{df} With the above preparations, we can reduce Propositions \ref{mainprop1}--\ref{mainprop4} to the following \begin{prop} \label{kqmainest1} We can define a value $\rho=\rho(\mathcal Q)$ associated to a non-regular couple $\mathcal Q$, which is an integer and $1\leq\rho\leq n$ (where $n$ is the order of $\mathcal Q$), such that (i) it takes the same value for $\mathcal Q$ in the same congruence class, (ii) the number of couples $\mathcal Q$ of order $n$ such that $\rho(\mathcal Q)=\rho$ is at most $(C\rho)!C^n$, and (iii) for any couple $\mathcal Q$ of order $n$, we have
\begin{equation}\label{kqmainest1-1}\bigg|\sum_{\mathcal Q'}\mathcal K_{\mathcal Q'}(t,t,k)\bigg|\lesssim\langle k\rangle^{-20d}(C^+\delta^{1/4})^n\cdot L^{-\eta^7\cdot\rho(\mathcal Q)}, \end{equation} where $\mathcal Q'$ runs over all couples congruent to $\mathcal Q$. \end{prop} \begin{proof}[Proof of Propositions \ref{mainprop1}--\ref{mainprop4} assuming Proposition \ref{kqmainest1}] Consider the sum on the left hand side of (\ref{mainest1}) and (\ref{mainest1.5}). The sum over all regular couples $\mathcal Q$ is taken care of by Propositions \ref{regcpltreeasymp} and \ref{regcpltreesum} (in particular by the bound (\ref{kqbd}) and the equality (\ref{matchn})), so we only need to consider the sum over non-regular couples $\mathcal Q$.
Note that by Definition \ref{twistgen}, if $\mathcal Q=(\mathcal T^+,\mathcal T^-)$ and $\mathcal Q'=((\mathcal T')^+,(\mathcal T')^-)$ are two congruent couples, then $n(\mathcal T^\pm)=n((\mathcal T')^\pm)$. Therefore both sums on the left hand side of (\ref{mainest1}) and (\ref{mainest1.5}), over non-regular couples $\mathcal Q$, can be written as a sum of subset sums, such that each subset sum has the form of the left hand side of (\ref{kqmainest1-1}). We then classify these subset sums according to the value $\rho(\mathcal Q)$, and apply Proposition \ref{kqmainest1} to get that
\begin{equation}\label{suminrho1}\bigg|\sum_{\mathcal Q}\mathcal K_\mathcal Q(t,t,k)\bigg|\lesssim (C^+\delta^{1/4})^m\sum_{\rho=1}^m(C\rho)!C^m L^{-\eta^7\rho}. \end{equation} Here in (\ref{suminrho1}) the value $m\in\{2n,2n+1\}$ is fixed, and the sum is either taken over all non-regular couples $\mathcal Q$ of order $m$, or taken over all non-regular couples $\mathcal Q=(\mathcal T^+,\mathcal T^-)$ with $n(\mathcal T^+)=n(\mathcal T^-)=m/2=n$. In either case, since $\rho\leq m$, we have \[(C\rho)^C\leq (Cm)^C\lesssim (\log L)^C\ll L^{\eta^9},\] so the $\rho$-sum on the right hand side of (\ref{suminrho1}) is easily bounded by $L^{-\eta^8}$, which then proves both (\ref{mainest1}) and (\ref{mainest1.5}). \end{proof} \subsection{Stage 1 reduction: Small gap (CL) vines}\label{stage1red} We start the proof of Proposition \ref{kqmainest1}. First, using (\ref{bigformula2}), we can rewrite the left hand side of (\ref{kqmainest1-1}) as \begin{multline}\label{bigformula2new}\mathrm{LHS\ of\ }(\ref{kqmainest1-1})=\sum_{(\mathcal Q_{\mathrm{sk}},\mathscr A)}\bigg(\frac{\delta}{2L^{d-\gamma}}\bigg)^{n_0}\zeta(\mathcal Q_{\mathrm{sk}})\sum_{\mathscr E_{\mathrm{sk}}}\int_{\mathcal E_{\mathrm{sk}}}\epsilon_{\mathscr E_{\mathrm{sk}}}\prod_{\mathfrak n\in \mathcal N_{\mathrm{sk}}} e^{\zeta_\mathfrak n\pi i\cdot\delta L^{2\gamma}\Omega_\mathfrak n t_\mathfrak n}\,\mathrm{d}t_\mathfrak n\\\times{\prod_{\mathfrak l\in\mathcal L_{\mathrm{sk}}}^{(+)}\mathcal K_{\mathcal Q^{(\mathfrak l,\mathfrak l')}}(t_{\mathfrak l^p},t_{(\mathfrak l')^p},k_\mathfrak l)}\prod_{\mathfrak m\in\mathcal N_{\mathrm{sk}}}\mathcal K_{\mathcal T^{(\mathfrak m)}}^*(t_{\mathfrak m^p},t_{\mathfrak m},k_\mathfrak m). 
\end{multline} Here the $\mathcal Q_{\mathrm{sk}}$ runs over all twists of a given prime couple, $\mathscr A$ runs over collections of regular trees and regular couples that satisfy a certain set of assumptions (see Definition \ref{twistgen}), and $\mathscr E_{\mathrm{sk}}$ runs over all $k$-decorations of $\mathcal Q_{\mathrm{sk}}$ (note that different choices of $\mathcal Q_{\mathrm{sk}}$ are twists of each other, so their decorations are in one-to-one correspondence as in Remark \ref{dectwist}). In particular, if $\mathscr C$ is defined by Lemma \ref{vinechainlem} for $\mathbb M(\mathcal Q_{\mathrm{sk}})$, then for each vine-like object in $\mathscr C$, we can specify whether it has SG or LG under the decoration (each such specification imposes a set of restrictions on the decoration). This reduces the left hand side of (\ref{kqmainest1-1}) to a superposition of at most $C^n$ terms, such that each vine-like object is specified to be either SG or LG in each term. Let $\mathscr V_0$ be the collection of (CL) vines in $\mathscr V$ (as in Definition \ref{defcong}) that are SG. Note that $\mathcal Q$ runs over a congruence class, which is defined by full twists at all vines in $\mathscr V$; if we strengthen the equivalence relation by allowing only full twists at all vines in $\mathscr V_0$, then the sum in $\mathcal Q$ can again be written as a superposition of at most $C^n$ terms, each of which is a sum over the new equivalence class. As such, we only need to consider one of these new terms, which we shall refer to as $\mathscr K$ below.
We define a new ordered collection $\mathscr U_0$ from $\mathscr V_0$, whose elements are \emph{bad (CL) vines and normal (CL) vine chains,} as follows. First organize vines in $\mathscr V_0$ into disjoint VC, then order these VC arbitrarily; for each VC, we divide it into \emph{units} and order them from bottom to top, where each unit is either a bad (CL) vine or a (longest) sub-VC formed by consecutive normal (CL) vines.
Now let $\mathcal Q_0=\mathcal Q_{\mathrm{sk}}$ and $\mathscr A_0=\mathscr A$ as in (\ref{bigformula2new}), then $\mathcal Q\sim(\mathcal Q_0,\mathscr A_0)$. Denote $\mathscr U_0$, with elements ordered as above, by $\mathscr U_0=\{\mathbb U_0,\cdots,\mathbb U_{q-1}\}$, and denote the $\mathfrak u_1,\mathfrak u_{23}$ nodes for $\mathbb U_j$ by $\mathfrak u_1^j,\mathfrak u_{23}^j$ etc. Define $\mathcal Q_{j+1}$ to be the result of splicing $\mathcal Q_j$ at $\mathbb U_j$ (so $\mathbb M(\mathcal Q_{j+1})$ is the result of merging the $\mathbb U_j$ in $\mathbb M(\mathcal Q_j)$ into a single atom), and let $\mathscr A_{j}$ be the $\mathscr A$ collection corresponding to $\mathcal Q_j$, and $\mathscr U_j:=\{\mathbb U_j,\cdots,\mathbb U_{q-1}\}$. As $\mathcal Q$ runs over all full twists of a given couple at vines in $\mathscr V_0$ (or $\mathscr U_0$), we know that $\mathcal Q_j$ runs over all twists of a given couple at vines in $\mathscr U_j$. Moreover, $\mathscr A_j$ runs over all collections of regular couples and regular trees, such that the value of $n(\mathcal Q^{(\mathfrak u_{23}^i,\mathfrak u_0^i)})+n(\mathcal T^{(\mathfrak u_2^i)})$ is fixed for all $i\geq j$ such that $\mathbb U_i$ is a core vine, and all other regular couples and regular trees are uniquely fixed.
By Remark \ref{fulltwistrep}, for each $j$ we have $(\mathcal Q_j,\mathscr A_j)\leftrightarrow(\mathcal Q_{j+1}, \texttt{cod}_j,\mathfrak{n}_j,\texttt{ind}_j,\mathscr B_j,\mathscr A_{j+1})$, with $(\texttt{cod}_j,\mathfrak{n}_j)$ etc. corresponding to $\mathbb U_j$ in $\mathcal Q_j$ (these matter only when $\mathbb U_j$ is a bad vine rather than a normal VC). By losing at most $C^n$ we may fix the codes $\texttt{cod}_j$ for each $j$. Moreover, the branching node $\mathfrak{n}_j$ for each $j$ is also uniquely fixed because it corresponds to the atom formed by merging $\mathbb U_i\,(0\leq i\leq j)$ in $\mathbb M(\mathcal Q_{\mathrm{sk}})$, and the molecule $\mathbb M(\mathcal Q_{\mathrm{sk}})$ (as a directed graph) does not vary when $\mathcal Q_{\mathrm{sk}}$ is twisted (note however that $\zeta_{\mathfrak n_j}$ may change if we twist at $\mathbb U_{j+1}$). As such, we know that $(\mathcal Q_j,\mathscr A_j)$ uniquely corresponds to the quadruple $(\mathcal Q_{j+1}, \mathscr A_{j+1},\texttt{ind}_j,\mathscr B_j)$. Let $\mathcal O_j$ be the set of all nodes in $\mathcal Q_{j}$ that do \emph{not} belong to any $\mathcal Q_{j}[\mathbb U_i]\backslash\{\mathfrak u_1^i\}$ for $i\geq j$. Now we prove, by induction in $j$, the following result: \begin{prop}\label{red1step} For each $j$, we have \begin{multline}\label{vinered} \mathscr K=(C^+\delta^{1/4})^{M_j}\sum_{(\mathcal Q_{j},\mathscr A_{j})}\bigg(\frac{\delta}{2L^{d-\gamma}}\bigg)^{n_{j}}\zeta(\mathcal Q_{j})\sum_{\mathscr E_{j}}\int_{\mathcal E_{j}}\epsilon_{\mathscr E_{j}}^*\prod_{\mathfrak n\in \mathcal N_{j}} e^{\zeta_\mathfrak n\pi i\cdot\delta L^{2\gamma}\Omega_\mathfrak n t_\mathfrak n}\,\mathrm{d}t_\mathfrak n\\\times\mathcal Z_{j}\cdot{\prod_{\mathfrak m\in\mathcal L_{j}}^{(+)}\mathcal K_{\mathcal Q^{(\mathfrak m,\mathfrak m_-)}}(t_{\mathfrak m^p},t_{\mathfrak m_-^p},k_\mathfrak m)}\prod_{\mathfrak m\in\mathcal N_{j}}\mathcal K_{\mathcal T^{(\mathfrak m)}}^*(t_{\mathfrak m^p},t_{\mathfrak m},k_\mathfrak m). 
\end{multline} Here $n_{j}$ is the order of $\mathcal Q_{j}$, $M_j=(n_0-n_j)+n(\mathscr A)-n(\mathscr A_j)$, and all symbols with subscript or superscript $j$ are associated with the couple $\mathcal Q_{j}$. The summation in $(\mathcal Q_j,\mathscr A_j)$ is taken as above: $\mathcal Q_j$ runs over all twists of a given couple at vines in $\mathscr U_j$. Moreover, $\mathscr A_j$ runs over all collections of regular couples and regular trees, such that the value of $n(\mathcal Q^{(\mathfrak u_{23}^i,\mathfrak u_0^i)})+n(\mathcal T^{(\mathfrak u_2^i)})$ is fixed for all $i\geq j$ such that $\mathbb U_i$ is a core vine, and all other regular couples and regular trees are uniquely fixed.
In (\ref{vinered}), the factor $\epsilon_{\mathscr E_j}^*$ is defined as in (\ref{defcoef}) but with the product containing only $\mathfrak n\in \mathcal W_j$, where $\mathcal W_j$ is a subset of $\mathcal N_j$; the function $\mathcal Z_j=\mathcal Z_j(x_0^j,k[\mathcal X_j],t[\mathcal Y_j],t_{\mathfrak u_2^j})$ where $\mathcal X_j$ and $\mathcal Y_j$ are two subsets of $\mathcal O_j$ (when $j=q$ there is no $x_0^j$ and $t_{\mathfrak u_2^j}$), and in the summation one replaces $x_0^j$ by $k_{\mathfrak u_2^j}$ if $\zeta_{\mathfrak u_2^j}=-$, and by $k_{\mathfrak u_{23}^j}$ otherwise. The objects $\mathcal W_j,\mathcal X_j,\mathcal Y_j,\mathcal Z_j$ etc. do not depend on the choice of $\mathcal Q_j$ or $\mathscr A_j$, and the function $\mathcal Z_j$ satisfies $\|\mathcal Z_j\|_{Y_{\mathrm{loc}}^{\eta^5}}\lesssim (C^+)^jL^{\Delta_j}$, where $\Delta_0=0$ and $\Delta_{j+1}-\Delta_{j}$ equals $-\eta^2$ or $\eta^4$ depending on whether $\mathscr U_j$ is a bad vine or a normal vine chain. Finally, in the support of $\mathcal Z_j\cdot\epsilon_{\mathscr E_j}^*$, the values of $k_\mathfrak n$ variables must be inherited from a $k$-decoration of $\mathcal Q_{\mathrm{sk}}$ that satisfies all the SG and LG assumptions specified above, as well as the non-degeneracy condition $\epsilon_{\mathscr E_{\mathrm{sk}}}\neq 0$. \end{prop} \begin{proof} For $j=0$, recall that we have fixed the term $\mathscr K$ by specifying whether each vine-like object in $\mathscr C$ has LG or SG; this is done by attaching factors to the expression in (\ref{bigformula2new}) which are indicator functions of differences of various $k_\mathfrak m$ in the decoration. Let the product of these functions be $\mathcal Z_{0}$, then it only depends on the $k_\mathfrak m$ variables for $\mathfrak m\in\mathcal O_0$ (see Definition \ref{defdiff}), and does not depend on any time variables $t_\mathfrak m$. Let $\mathcal W_0=\mathcal N_{\mathrm{sk}}$, then (\ref{vinered}) is true for $j=0$.
Suppose (\ref{vinered}) is true for $j$, we will prove it for $j+1$. Start with the expression $\mathscr K$ as in (\ref{vinered}), note that the summation
\[\sum_{(\mathcal Q_j,\mathscr A_j)}=\sum_{(\mathcal Q_{j+1},\mathscr A_{j+1})}\sum_{(\mathtt{ind}_j,\mathscr B_j)}\] as described above (recall that $\mathtt{cod}$ has been fixed). We now fix $(\mathcal Q_{j+1},\mathscr A_{j+1})$, and consider the part of the sum and integral in (\ref{vinered}) that involves only $(\mathtt{ind}_j,\mathscr B_j)$ and the $k_\mathfrak n$ and $t_\mathfrak n$ variables for $\mathfrak n\in\mathcal Q_j[\mathbb U_j]\backslash\{\mathfrak u_1^j\}$. Note also that $\mathcal Z_j=\mathcal Z_j(x_0^j,k[\mathcal X_j],t[\mathcal Y_j],t_{\mathfrak u_2^j})$; by inserting finitely many smooth time cutoff functions, we may formally write \[\mathcal Z_j=\mathcal Z_j(x_0^j,k_{\mathfrak u_1^j},k_{\mathfrak u_{11}^j},k_{\mathfrak u_{21}^j},k_{\mathfrak u_{22}^j},t_{\mathfrak u_1^j},t_{\mathfrak u_{21}^j},t_{\mathfrak u_{22}^j},t_{\mathfrak u_2^j},k[\mathcal U],t[\mathcal V]),\] for some $\mathcal U\subset\mathcal X_j\subset\mathcal O_j,\,\mathcal V\subset\mathcal Y_j\subset\mathcal O_j$ where $\{\mathfrak u_1^j,\mathfrak u_{11}^j,\mathfrak u_{21}^j,\mathfrak u_{22}^j\}\cap\mathcal U=\varnothing$ and $\{\mathfrak u_1^j,\mathfrak u_{21}^j,\mathfrak u_{22}^j\}\cap\mathcal V=\varnothing$.
Then, this part of summation and integration is exactly of the form \begin{equation}\label{subsummation}\sum_{(\mathtt{ind}_j,\mathscr B_j)}\mathcal K_{(\mathtt{ind}_j,\mathscr B_j)}^{(\mathbb U_j,Z,W)}\end{equation} defined as in (\ref{vinebound1}) in Section \ref{vinesubset}, where the set $W=\mathcal W_j\cap\mathcal Q_j[\mathbb U_j]$, the function \[\qquad Z(x_0^j,k_{\mathfrak u_1^j},k_{\mathfrak u_{11}^j},k_{\mathfrak u_{21}^j},k_{\mathfrak u_{22}^j},t_{\mathfrak u_1^j},t_{\mathfrak u_{21}^j},t_{\mathfrak u_{22}^j},t_{\mathfrak u_2^j})=\mathcal Z_j(x_0^j,k_{\mathfrak u_1^j},k_{\mathfrak u_{11}^j},k_{\mathfrak u_{21}^j},k_{\mathfrak u_{22}^j},t_{\mathfrak u_1^j},t_{\mathfrak u_{21}^j},t_{\mathfrak u_{22}^j},t_{\mathfrak u_2^j},k[\mathcal U],t[\mathcal V])\] with $k[\mathcal U]$ and $t[\mathcal V]$ viewed as parameters, and the sum over $(\mathtt{ind}_j,\mathscr B_j)$ is exactly as in Propositions \ref{estbadvine} and \ref{estnormalvine}. Here again we plug in $x_0^j=k_{\mathfrak u_{2}^j}$ if $\mathtt{ind}_j=\zeta_{\mathfrak u_2^j}=-$, and $x_0^j=k_{\mathfrak u_{23}^j}$ otherwise.
Now define $\mathcal W_{j+1}=\mathcal W_j\backslash\mathcal Q_j[\mathbb U_j]$ (where we identify branching nodes in $\mathcal Q_{j+1}$ with the corresponding ones in $\mathcal Q_j$), and \begin{equation}\label{inductzj} \begin{aligned}\mathcal Z_{j+1}&=\mathcal Z_{j+1}(k_{\mathfrak u_1^j},k_{\mathfrak u_{11}^j},k_{\mathfrak u_{21}^j},k_{\mathfrak u_{22}^j},t_{\mathfrak u_1^j},t_{\mathfrak u_{21}^j},t_{\mathfrak u_{22}^j},k[\mathcal U],t[\mathcal V])\\&:=(C^+\delta^{1/4})^{-n_*}\exp(-\pi i\cdot\delta L^{2\gamma}t_{\mathfrak u_1^j}\Gamma)\sum_{(\mathtt{ind}_j,\mathscr B_j)}\mathcal K_{(\mathtt{sgn}_j,\mathtt{ind}_j,\mathscr B_j)}^{(\mathbb U_j,Z,W)}(k_{\mathfrak u_1^j},k_{\mathfrak u_{11}^j},k_{\mathfrak u_{21}^j},k_{\mathfrak u_{22}^j},t_{\mathfrak u_1^j},t_{\mathfrak u_{21}^j},t_{\mathfrak u_{22}^j}) \end{aligned}\end{equation} as in (\ref{vinebound1}), where $\mathtt{sgn}_j=\zeta_{\mathfrak u_1^j}$, and $n_*$ equals the number of branching nodes in $\mathcal Q_j[\mathbb U_j]\backslash\{\mathfrak u_1^j\}$ plus $n(\mathscr B_j)$. Then, the expression $\mathscr K$ in (\ref{vinered}) can be reduced to the same expression (\ref{vinered}) with $j$ replaced by $j+1$ (note that $M_{j+1}=M_j+n_*$, and similarly $n(\mathcal Q_{j})-n(\mathcal Q_{j+1})$ equals the number of branching nodes in $\mathcal Q_j[\mathbb U_j]\backslash\{\mathfrak u_1^j\}$), with the new quantities $\mathcal W_{j+1}$ and $\mathcal Z_{j+1}$.
It remains to prove that the new expression $\mathcal Z_{j+1}$ verifies our assumptions. In addition to $k[\mathcal U]$ and $t[\mathcal V]$, the function $\mathcal Z_{j+1}$ also depends on the new variables $(k_{\mathfrak u_1^j},k_{\mathfrak u_{11}^j},k_{\mathfrak u_{21}^j},k_{\mathfrak u_{22}^j},t_{\mathfrak u_1^j},t_{\mathfrak u_{21}^j},t_{\mathfrak u_{22}^j})$; therefore we may define $\mathcal X_{j+1}=(\mathcal X_j\cup\{\mathfrak u_1^j,\mathfrak u_{11}^j,\mathfrak u_{21}^j,\mathfrak u_{22}^j\})\cap\mathcal O_{j+1}$ and $\mathcal Y_{j+1}=(\mathcal Y_j\cup\{\mathfrak u_1^j,\mathfrak u_{21}^j,\mathfrak u_{22}^j\})\cap\mathcal O_{j+1}$. Note that with such recursive definition of $\mathcal X_j$ and $\mathcal Y_j$, it is easy to verify that, if $\mathbb U_j$ is concatenated with $\mathbb U_{j+1}$, then $\{\mathfrak u_1^j,\mathfrak u_{11}^j\}\cap \mathcal X_j=\varnothing$; this is because $\mathfrak u_1^j=\mathfrak u_2^{j+1}$ and $\mathfrak u_{11}^j=\mathfrak u_{23}^{j+1}$, so these nodes belong to $\mathbb U_{j+1}$, and are thus not involved in any vine merged \emph{before} $\mathbb U_j$.
Now, if any of these new variables $(k_{\mathfrak u_1^j},k_{\mathfrak u_{11}^j},k_{\mathfrak u_{21}^j},k_{\mathfrak u_{22}^j},t_{\mathfrak u_1^j},t_{\mathfrak u_{21}^j},t_{\mathfrak u_{22}^j})$ coincides with $k_\mathfrak n$ or $t_\mathfrak n$ for any $\mathbb U\in\mathscr U_{j+1}$ and $\mathfrak n\in\mathcal Q_{j+1}[\mathbb U]\backslash\{\mathfrak u_1\}$ (where $\mathfrak u_1$ is associated with $\mathbb U$ as in Proposition \ref{block_clcn})--or otherwise the assumptions for $\mathcal Z_{j+1}$ is already verified--then $\mathbb U_j$ and $\mathbb U$ must share a common joint in $\mathbb M(\mathcal Q_j)$, so $\mathbb U=\mathbb U_{j+1}$ and it is concatenated with $\mathbb U_j$ as above. Consequently the function $\mathcal Z_j$, as well as $Z$ defined above, does not depend on the variables $k_{\mathfrak u_1^j}=k_{\mathfrak u_2^{j+1}}$ and $k_{\mathfrak u_{11}^j}=k_{\mathfrak u_{23}^{j+1}}$. By applying Remark \ref{remkv} to (\ref{inductzj}), we see that $\mathcal Z_{j+1}$ depends only on the vector variables $k[\mathcal X_{j+1}]$ and $x_0^{j+1}$ (which is $k_{\mathfrak u_1^j}=k_{\mathfrak u_2^{j+1}}$ if $\mathtt{ind}_{j+1}=\zeta_{\mathfrak u_2^{j+1}}=-$ and is $k_{\mathfrak u_{11}^j}=k_{\mathfrak u_{23}^{j+1}}$ otherwise), and that it does not depend on the choice of $\mathtt{ind}_{j+1}$ when regarded as a function. The $t_\mathfrak n$ variables are similar, as in this case $t_{\mathfrak u_1^j}=t_{\mathfrak u_2^{j+1}}$, so $\mathcal Z_{j+1}$ is also allowed to depend on $t_{\mathfrak u_2^{j+1}}$ and $t[\mathcal Y_{j+1}]$, as desired.
Finally, it is also clear by definition that the support of $\mathcal Z_{j+1}\cdot\epsilon_{\mathscr E_{j+1}}^*$ contains only those decorations that are inherited from decorations in the support of $\mathcal Z_j\cdot\epsilon_{\mathscr E_j}^*$, so it remains to prove that
\begin{equation}\label{inductineqZ}\|\mathcal Z_{j+1}\|_{Y_{\mathrm{loc}}^{\eta^5}}\lesssim L^{\Delta_{j+1}-\Delta_j} \|\mathcal Z_{j}\|_{Y_{\mathrm{loc}}^{\eta^5}},\end{equation} but this follows from Propositions \ref{estbadvine}, \ref{estnormalvine} and Remark \ref{extravar}. \end{proof} Define the couple $\mathcal Q_{\mathrm{sub}}$ to be the result of doing splicing at all vines in $\mathscr V_0$ (equivalently, all ingredient vines of all vine-like objects in $\mathscr U_0$) from $\mathcal Q_{\mathrm{sk}}$, so the molecule $\mathbb M(\mathcal Q_{\mathrm{sub}})$ is obtained by merging all the vines in $\mathscr V_0$ from $\mathbb M(\mathcal Q_{\mathrm{sk}})$. This whole splicing process will be called \emph{stage 1 reduction}. Using Proposition \ref{red1step}, we can reduce Proposition \ref{kqmainest1} to the following counting estimate: \begin{prop}\label{kqmainest2} Suppose we fix $k\in\mathbb Z_L^d$ and $k_\ell^0\in\mathbb Z_L^d$ for each bond $\ell$ of $\mathbb M(\mathcal Q_{\mathrm{sub}})$, and $\beta_v\in\mathbb R$ for each atom $v$ of $\mathbb M(\mathcal Q_{\mathrm{sub}})$. Consider all the maximal ladders in $\mathbb M(\mathcal Q_{\mathrm{sub}})$; assume they are $\mathcal L_j\,(1\leq j\leq q_{\mathrm{sub}})$ with length $z_j$, and fix a dyadic number $P_j\in[L^{-1},1]\cup\{0\}$ for each $j$. Define $m_{\mathrm{sub}}'$ as the number of atoms not in any of these maximal ladders, and $\rho_{\mathrm{sub}}=q_{\mathrm{sub}}+m_{\mathrm{sub}}'$.
Now consider all the $k$-decorations $(k_\ell)$ of $\mathbb M(\mathcal Q_{\mathrm{sub}})$, with the following additional requirements: \begin{enumerate}[{(i)}] \item This $k$-decoration $(k_\ell)$ is inherited from a $k$-decoration $\mathscr E_{\mathrm{sk}}$ of $\mathcal Q_{\mathrm{sk}}$ (and $\mathbb M(\mathcal Q_{\mathrm{sk}})$) that satisfies all the SG and LG assumptions specified in the proof above, as well as the non-degeneracy condition $\epsilon_{\mathscr E_{\mathrm{sk}}}\neq 0$.
\item The decoration is restricted by $(\beta_v)$ and $(k_\ell^0)$, i.e. $|\Gamma_v-\beta_v|\leq\delta^{-1}L^{-2\gamma}$ and $|k_\ell-k_\ell^0|\leq 1$ (see Definition \ref{decmole}), and the gap $r_j$ of each ladder $\mathcal L_j$ satisfies that $|r_j|\sim P_j$ (or $|r_j|\gtrsim P_j$ if $P_j=1$). \end{enumerate} Let $n_{\mathrm{sub}}$ be the order of $\mathcal Q_{\mathrm{sub}}$, let $\Delta_{\mathrm{sub}}$ be the end value $\Delta_j\,(j=q)$ in Proposition \ref{red1step}, and define $\mathfrak X_j=\min((\log L)^2,1+\delta L^{2\gamma}P_j)$ (so $1\leq \mathfrak X_j\lesssim (\log L)^2$). Then, the number $\mathfrak C$ of such restricted $k$-decorations is bounded by \begin{equation}\label{kqmainest2-1}\mathfrak C\leq (C^+\delta^{-1/2})^{n_{\mathrm{sub}}}L^{(d-\gamma)n_{\mathrm{sub}}}\cdot L^{-\Delta_{\mathrm{sub}}}L^{-\eta^6\rho_{\mathrm{sub}}}\prod_{j=1}^{q_{\mathrm{sub}}}\mathfrak X_j^{-z_j}. \end{equation} \end{prop} \begin{proof}[Proof of Proposition \ref{kqmainest1} assuming Proposition \ref{kqmainest2}] We start by defining $\rho(\mathcal Q)=\rho_{\mathrm{sub}}=q_{\mathrm{sub}}+m_{\mathrm{sub}}'$ where $q_{\mathrm{sub}}$ and $m_{\mathrm{sub}}'$ are defined as above. Clearly its value does not depend on the congruence class of $\mathcal Q$ (as $\mathcal Q_{\mathrm{sub}}$ does not), and for non-regular couples $\mathcal Q$, the couple $\mathcal Q_{\mathrm{sk}}$ is nontrivial, and so is $\mathcal Q_{\mathrm{sub}}$. This means that $1\leq\rho(\mathcal Q)\leq n$ (the latter inequality is trivial as $\rho_{\mathrm{sub}}=q_{\mathrm{sub}}+m_{\mathrm{sub}}'$ does not exceed $n_{\mathrm{sub}}\leq n$).
Next, once $\rho_{\mathrm{sub}}=\rho$ is fixed, the number of choices for $\mathbb M(\mathcal Q_{\mathrm{sub}})$ is clearly $\lesssim (C\rho)!C^n$ because this molecule must consist of at most $\rho$ ladders of total length $\leq n$, plus at most $\rho$ extra atoms. Then, going back from $\mathbb M(\mathcal Q_{\mathrm{sub}})$ to $\mathbb M(\mathcal Q_{\mathrm{sk}})$ again involves inserting finitely many VC with total number of atoms $\leq n$, which again has $\lesssim C^n$ choices, as a VC of given length $m$ has $\lesssim C^m$ possibilities, thus the number of choices for $\mathbb M(\mathcal Q_{\mathrm{sk}})$ is also $\lesssim (C\rho)!C^n$. By Propositions \ref{recover} and \ref{skeleton}, we know that the same bound holds for $\mathcal Q_{\mathrm{sk}}$ and $\mathcal Q$, and certainly also for congruence classes of $\mathcal Q$.
It suffices to prove (\ref{kqmainest1-1}). We only need to control a single term $\mathscr K$ as defined above, and using Proposition \ref{red1step}, we can start with the expression $\mathscr K$ in (\ref{vinered}) with $\mathcal Q_j$ replaced by $\mathcal Q_{\mathrm{sub}}$ (which is the final result of all the splicing described above), and all subscripts $(\cdots)_j$ replaced by $(\cdots)_{\mathrm{sub}}$. There is now no summation in $(\mathcal Q_{\mathrm{sub}}, \mathscr A_{\mathrm{sub}})$, as the $\mathscr U_0$ is reduced to the empty set in the end, and the choices of $\mathcal Q_{\mathrm{sub}}$ and $\mathscr A_{\mathrm{sub}}$ are fixed. For simplicity we will denote $(\rho_{\mathrm{sub}},q_{\mathrm{sub}},m_{\mathrm{sub}}')$ by $(\rho,q,m')$.
Due to the $\langle k_\mathfrak l\rangle^{-40d}$ decay for each leaf $\mathfrak l$, we may also restrict $k_\mathfrak l$ for each leaf $\mathfrak l$ to a unit ball in $\mathbb R^d$, say $|k_\mathfrak l-k_\mathfrak l^0|\leq 1$. By Lemma \ref{explem}, we can decompose the full expression into $\lesssim C^n$ terms, such that in each term, the variable $k_\mathfrak m$ belongs to a fixed unit ball not only for \emph{leaves} $\mathfrak m=\mathfrak l$, but also for \emph{all nodes} $\mathfrak m$. With this localization, we may use the $X_{\mathrm{loc}}^{\eta,40d}$, $X_{\mathrm{loc}}^{\eta,0}$ and $Y_{\mathrm{loc}}^{\eta^5}$ bounds for the factors $\mathcal K_{\mathcal Q^{(\mathfrak l,\mathfrak l')}}$, $\mathcal K_{\mathcal T^{(\mathfrak m)}}$ and $\mathcal Z$, see Propositions \ref{regcpltreeasymp} and \ref{red1step}, to reduce the product \[\mathcal Z_{\mathrm{sub}}\cdot{\prod_{\mathfrak l\in\mathcal L_{\mathrm{sub}}}^{(+)}\mathcal K_{\mathcal Q^{(\mathfrak l,\mathfrak l')}}(t_{\mathfrak l^p},t_{(\mathfrak l')^p},k_\mathfrak l)}\prod_{\mathfrak m\in\mathcal N_{\mathrm{sub}}}\mathcal K_{\mathcal T^{(\mathfrak m)}}^*(t_{\mathfrak m^p},t_{\mathfrak m},k_\mathfrak m)\] in (\ref{vinered}) to a linear combination of functions
\[L^{\Delta_{\mathrm{sub}}}(C^+\delta)^{n(\mathscr A_{\mathrm{sub}})/2}\cdot e^{\pi i\mu t}\cdot\prod_{\mathfrak n\in\mathcal N_{\mathrm{sub}}}e^{\pi i\lambda_\mathfrak n t_\mathfrak n}\cdot\prod_{\mathfrak l\in\mathcal L_{\mathrm{sub}}}^{(+)}\langle k_\mathfrak l\rangle^{-40d}\cdot\mathcal X(k[\mathcal Q_{\mathrm{sub}}])\] for different choices of $(\mu,\lambda[\mathcal N_{\mathrm{sub}}])$, with the coefficient being an $L^1$ integrable function of $(\mu,\lambda[\mathcal N_{\mathrm{sub}}])$, where $\mathcal X$ is a bounded function of all the $k_\mathfrak m$ variables. Below we may fix one choice of $(\mu,\lambda[\mathcal N_{\mathrm{sub}}])$ (in which our estimates will be uniform). To match Proposition \ref{kqmainest2}, we also identify the $q$ maximal ladders in $\mathbb M(\mathcal Q_{\mathrm{sub}})$ and fix a dyadic number $P_j\in[L^{-1},1]\cup\{0\}$ such that the gap of the $j$-th ladder $\mathcal L_j$ is $|r_j|\sim P_j$ (or $|r_j|\gtrsim P_j$ if $P_j=1$). This causes a loss of $(\log L)^q$ by summing over all choices of $P_j$, which will be ignored in view of the $L^{-\eta^6\rho}$ gain expected in (\ref{kqmainest2-1}).
Now consider such a term (we call it $\mathscr M$), rewrite it as \begin{multline}\label{expressionM}\mathscr M=e^{\pi i\mu t}\prod_{\mathfrak l\in\mathcal L_{\mathrm{sub}}}^{(+)}\langle k_\mathfrak l^0\rangle^{-40d}\cdot(C^+\delta^{1/4})^{M_{\mathrm{sub}}}(C^+\delta)^{n(\mathscr A_{\mathrm{sub}})/2}\bigg(\frac{\delta}{2L^{d-\gamma}}\bigg)^{n_{\mathrm{sub}}}\\\times L^{\Delta_{\mathrm{sub}}}\sum_{\mathscr E_{\mathrm{sub}}}\mathcal B(t,t,\alpha[\mathcal N_{\mathrm{sub}}])\mathcal Y(k[\mathcal Q_{\mathrm{sub}}]), \end{multline} where in (\ref{expressionM}), the sum is taken over all $k$-decorations $\mathscr E_{\mathrm{sub}}$ of $\mathcal Q_{\mathrm{sub}}$ restricted by some fixed $(k_\ell^0)$, $n_{\mathrm{sub}}$ is the order of $\mathcal Q_{\mathrm{sub}}$ and $M_{\mathrm{sub}}=(n_0-n_{\mathrm{sub}})+n(\mathscr A)-n(\mathscr A_{\mathrm{sub}})$ where $n_0$ is the order of $\mathcal Q_{\mathrm{sk}}$ and $\mathscr A$ and $\mathscr A_{\mathrm{sub}}$ are as in (\ref{bigformula2new}) and (\ref{vinered}). The function $\mathcal Y$ is uniformly bounded, and is supported on the $k$-decorations of $\mathcal Q_{\mathrm{sub}}$ that satisfy the above gap assumptions for ladders, and are inherited from $k$-decorations of $\mathcal Q_{\mathrm{sk}}$ that satisfy all the SG, LG and non-degeneracy assumptions specified above, due to Proposition \ref{red1step}. Finally, the $\mathcal B$ expression is defined as \begin{equation}\label{defintB}\mathcal B(t,t,\alpha[\mathcal N_{\mathrm{sub}}]):=\int_{\mathcal E_{\mathrm{sub}}}\prod_{\mathfrak n\in\mathcal N_{\mathrm{sub}}}e^{\pi i \alpha_\mathfrak n t_\mathfrak n}\,\mathrm{d}t_\mathfrak n \end{equation} and $\alpha_\mathfrak n:=\zeta_\mathfrak n\delta L^{2\gamma}\Omega_\mathfrak n+\lambda_\mathfrak n$. 
From (\ref{expressionM}), by first fixing the values of $\lfloor\alpha_\mathfrak n\rfloor:=\sigma_\mathfrak n\in\mathbb Z$ (where each $\sigma_\mathfrak n$ belongs to a fixed set of $\leq L^{10d}$ elements) and summing over all choices of $\sigma_\mathfrak n$, we can bound
\begin{multline}\label{omegasum}|\mathscr M|\lesssim\langle k\rangle^{-20d}\prod_{\mathfrak l\in\mathcal L_{\mathrm{sub}}}^{(+)}\langle k_\mathfrak l^0\rangle^{-20d}\cdot(C^+\delta^{1/4})^{M_{\mathrm{sub}}}(C^+\delta)^{n(\mathscr A_{\mathrm{sub}})/2}\bigg(\frac{\delta}{2L^{d-\gamma}}\bigg)^{n_{\mathrm{sub}}}\\\times L^{\Delta_{\mathrm{sub}}}\sum_{\sigma[\mathcal N_{\mathrm{sub}}]}\sup_{\alpha[\mathcal N_{\mathrm{sub}}]:|\alpha_\mathfrak n-\sigma_\mathfrak n|\leq 1}|\mathcal B(t,t,\alpha[\mathcal N_{\mathrm{sub}}])|\cdot\sup_{\sigma[\mathcal N_{\mathrm{sub}}]}\sum_{\mathscr E_{\mathrm{sub}}}1,
\end{multline} where the first summation in (\ref{omegasum}) is taken over all $\sigma[\mathcal N_{\mathrm{sub}}]$, and the second summation is taken over all $k$-decorations $\mathscr E_{\mathrm{sub}}$ that satisfy all the above assumptions, as well as $|\delta L^{2\gamma}\Omega_\mathfrak n\pm(\sigma_\mathfrak n-\lambda_\mathfrak n)|\leq 1$ for each node $\mathfrak n$.
Now, with fixed $\sigma[\mathcal N_{\mathrm{sub}}]$, consider the $k$-decoration $\mathscr E_{\mathrm{sub}}$ in the second summation in (\ref{omegasum}), and the corresponding $k$-decoration of $\mathbb M(\mathcal Q_{\mathrm{sub}})$ defined by Definition \ref{decmole}, then the latter decoration will have to satisfy all the requirements made in the statement of Proposition \ref{kqmainest2} (for some choice of $k_\ell^0$ and $\beta_v$). Thus, by (\ref{kqmainest2-1}), the second summation in (\ref{omegasum}) is bounded by \begin{equation}\label{2ndsumbound}\sup_{\sigma[\mathcal N_{\mathrm{sub}}]}\sum_{\mathscr E_{\mathrm{sub}}}1\lesssim(C^+\delta^{-1/2})^{n_{\mathrm{sub}}}L^{(d-\gamma)n_{\mathrm{sub}}}\cdot L^{-\Delta_{\mathrm{sub}}}L^{-\eta^6\rho}\prod_{j=1}^q\mathfrak X_j^{-z_j}.\end{equation} Then, if we can prove that the first summation is bounded by \begin{equation}\label{1stsumbound}
\sum_{\sigma[\mathcal N_{\mathrm{sub}}]}\sup_{\alpha[\mathcal N_{\mathrm{sub}}]:|\alpha_\mathfrak n-\sigma_\mathfrak n|\leq 1}|\mathcal B(t,t,\alpha[\mathcal N_{\mathrm{sub}}])|\lesssim (C^+\delta^{-1/4})^{n_{\mathrm{sub}}}\cdot L^{C\rho\sqrt{\delta}}\prod_{j=1}^q\mathfrak X_j^{z_j}\prod_{\mathfrak l\in\mathcal L_{\mathrm{sub}}}^{(+)}\langle k_\mathfrak l^0\rangle^{20d},
\end{equation} then putting together (\ref{omegasum}), (\ref{2ndsumbound}) and (\ref{1stsumbound}) (noticing also equalities like $n=n_0+n(\mathscr A)$ etc.), we can get \[|\mathscr M|\lesssim (C^+\delta^{1/4})^{n}\langle k\rangle^{-20d}L^{-\eta^7\rho},\] which then proves Proposition \ref{kqmainest1}.
Finally let us prove (\ref{1stsumbound}); of course we only need to prove it under the restriction on $(\sigma_\mathfrak n)$ that, there exists some $k$-decoration $\mathscr E_{\mathrm{sub}}$ satisfying all the previously specified assumptions, such that the corresponding quantities $\alpha_\mathfrak n=\zeta_\mathfrak n\delta L^{2\gamma}\Omega_\mathfrak n+\lambda_\mathfrak n$ satisfies $|\alpha_\mathfrak n-\sigma_\mathfrak n|\leq 1$. Then, this is basically a consequence of Proposition 10.1 of \cite{DH21}, but with some additional twists. Consider all the ladders $\mathcal L_j$ such that $1+\delta L^{2\gamma}P_j\leq (\log L)^2$, so $\mathfrak X_j=1+\delta L^{2\gamma}P_j$; assume these correspond to $1\leq j\leq p$ for some $1\leq p\leq q$. For each ladder $\mathcal L_j\,(j\leq p)$ and each pair of atoms $(v_1,v_2)$ connected by a double bond, we consider them together, as well as the corresponding branching nodes $\mathfrak n_j=\mathfrak n(v_j)$ in $\mathcal Q_{\mathrm{sub}}$. If the gap of this ladder is $|r_j|\sim P_j$, then it is easy to see that for some choice of $\pm$, we have that $\Omega_{\mathfrak n_1}\pm\Omega_{\mathfrak n_2}=r_j\cdot(k_{\mathfrak m}\pm k_{\mathfrak m'})$ for some fixed nodes $\mathfrak m$ and $\mathfrak m'$ depending on $v_1$ and $v_2$.
By Lemma \ref{explem}, we can decompose the set of all $k$-decorations into at most $C^{n_{\mathrm{sub}}}\prod_{\mathfrak l\in\mathcal L_{\mathrm{sub}}}^{(+)}\langle k_\mathfrak l^0\rangle^{10d}$ subsets such that whenever a $k$-decoration belongs to a fixed subset, and whenever $|r_j|\sim P_j$, the value of $r_j\cdot(k_{\mathfrak m}\pm k_{\mathfrak m'})$ must belong to a fixed interval of length $P_j$, which does not depend on the choice of $r_j$ or the decoration. This implies that $\sigma_{\mathfrak n_1}\pm\sigma_{\mathfrak n_2}$ belongs to a fixed interval of length $1+\delta L^{2\gamma}P_j$, and thus it has at most $O(1+\delta L^{2\gamma}P_j)$ choices. This means that, after losing a factor \[C^{n_{\mathrm{sub}}}\prod_{j=1}^p(1+\delta L^{2\gamma}P_j)^{z_j+1}=C^{n_{\mathrm{sub}}}\prod_{j=1}^p\mathfrak X_j^{z_j+1},\] we can assume the value of $\sigma_{\mathfrak n_1}\pm\sigma_{\mathfrak n_2}$ is fixed for each pair $(\mathfrak n_1,\mathfrak n_2)$ as above. Note that we may replace the above power $z_j+1$ of $\mathfrak X_j$ by $z_j$, as $\mathfrak X_j\leq (\log L)^2$ and $p\leq \rho_{\mathrm{sub}}$, so the loss $(\log L)^{Cp}$ here can be covered by the gain $L^{-\eta^6\rho_{\mathrm{sub}}}$ in (\ref{kqmainest2-1}).
Since the ladders are the same as the type II chains defined in \cite{DH21}, we can now run the same arguments as in the proof of Proposition 10.1 in \cite{DH21}, to get that \begin{multline}\label{1stsumbound2}
\sum_{\sigma[\mathcal N_{\mathrm{sub}}]}\sup_{\alpha[\mathcal N_{\mathrm{sub}}]:|\alpha_\mathfrak n-\sigma_\mathfrak n|\leq 1}|\mathcal B(t,t,\alpha[\mathcal N_{\mathrm{sub}}])|\\\lesssim C^{n_{\mathrm{sub}}}\prod_{j=1}^p\mathfrak X_j^{z_j}\prod_{\mathfrak l\in\mathcal L_{\mathrm{sub}}}^{(+)}\langle k_\mathfrak l^0\rangle^{10d}\cdot(C^+)^{n_{\mathrm{sub}}}\delta^{-n_{\mathrm{sub}}/4}L^{Cp\sqrt{\delta}}(\log L)^{Cp+m''}, \end{multline} where $m''$ is the number of atoms not in the ladders $\mathcal L_j\,(1\leq j\leq p)$, so in particular $m''=m'+2(z_{p+1}+\cdots +z_q)$; namely each ladder $\mathcal L_j\,(1\leq j\leq p)$ causes a loss of $L^{C\sqrt{\delta}}(\log L)^C$ and each remaining atom causes a loss of $\log L$, see for example (10.10) in Section 10.2 of \cite{DH21}. It is easily seen that (\ref{1stsumbound2}) is stronger than (\ref{1stsumbound}), using the fact that $\rho=q+m'\geq p+m'$. This completes the proof. \end{proof} \section{Reduction to large gap molecules}\label{reduct2} \subsection{Preliminary setup} We now start the proof of Proposition \ref{kqmainest2}. The idea is to perform operations to further reduce the molecule $\mathbb M(\mathcal Q_{\mathrm{sub}})$ to simpler molecules (this reduction will be called stage 2, compared to stage 1 reduction in Section \ref{stage1red}). In this process we will keep track of the corresponding counting estimate, as well as certain parameters of the molecule, especially the characteristics $\chi=E-V+F$, where $E$, $V$ and $F$ are the number of bonds, atoms, and components (Definition \ref{defmole0}).
Before introducing the stage 2 reduction operations, we first need to setup the properties of the molecule $\mathbb M(\mathcal Q_{\mathrm{sub}})$, compared to the original molecule $\mathbb M(\mathcal Q_{\mathrm{sk}})$ before stage 1 reduction, as well as the corresponding decorations. These are summarized in Proposition \ref{subpro} below. \begin{prop}\label{subpro} Suppose the molecule $\mathbb M(\mathcal Q_{\mathrm{sk}})$ is reduced to $\mathbb M(\mathcal Q_{\mathrm{sub}})$ via stage 1 reduction, as described in Section \ref{stage1red}. Let $\mathscr C$ be defined for $\mathcal Q_{\mathrm{sk}}$ as in Lemma \ref{vinechainlem}. Consider $k$-decoration $(k_\ell)$ of $\mathbb M(\mathcal Q_{\mathrm{sub}})$ that is inherited from a $k$-decoration $\mathscr E_{\mathrm{sk}}$ of $\mathcal Q_{\mathrm{sk}}$ (and $\mathbb M(\mathcal Q_{\mathrm{sk}})$), as in Proposition \ref{kqmainest2}. Then we have the following properties: \begin{enumerate}[{(a)}] \item Each SGHVC in $\mathscr C$ is reduced to an SGHV, and each SGHV remains unchanged. Each SGVC in $\mathscr C$ is reduced to an SG vine that is also a (CN) vine, or an SG vine that is also a root (CL) vine (see Proposition \ref{block_clcn}), or a single atom. Each DV in $\mathscr C$ remains unchanged, and still contains two vines (V). \item The molecule $\mathbb M(\mathcal Q_{\mathrm{sub}})$ is connected, there are only two atoms of degree $3$, and all other atoms have degree $4$. We define an atom $v$ in $\mathbb M(\mathcal Q_{\mathrm{sub}})$ to be a \emph{hinge atom}, if it is the atom that results from merging a SGVC in $\mathbb M(\mathcal Q_{\mathrm{sk}})$. These include the single atoms defined in (a), as well as some of the joints of the SGHV and SG vines defined in (a). \item Any degenerate atom (where there are two bonds $(\ell_1,\ell_2)$ at $v$ of opposite directions and $k_{\ell_1}=k_{\ell_2}$ in the decoration) must be a hinge atom, and any triple bond must have an endpoint that is a hinge atom. 
\item For any hinge atom $v$, we can find two bonds $(\ell_1,\ell_2)$ at $v$, so that in the original molecule $\mathbb M(\mathcal Q_{\mathrm{sk}})$ they are the two bonds at one joint of an SGVC in $\mathscr C$ that do \emph{not} belong to this VC (this VC is merged in Stage 1). Also the triple $(v,\ell_1,\ell_2)$ has SG and $k_{\ell_1}\neq k_{\ell_2}$. Moreover, if $v$ has degree $4$, then the other two bonds $(\ell_3,\ell_4)$ at $v$ satisfy the same properties as $(\ell_1,\ell_2)$ above. \item Any SG vine-like object that is not a subset of an object in (a), which also does \emph{not} contain a hinge atom, must be a subset of a LG vine-like object in $\mathscr C$. Note that it must be in one of the cases defined in Proposition \ref{subsetvc}. \item Any SG vine-like object (say $\mathbb U$) that is not a subset of an object in (a), which also \emph{contains} a hinge atom, \emph{must} contain a hinge atom $v$, such that either $v$ is an interior atom of $\mathbb U$, or $v$ is a joint of $\mathbb U$ and exactly one bond in $(\ell_1,\ell_2)$ belongs to $\mathbb U$, where $(\ell_1,\ell_2)$ is defined as in (d). \end{enumerate} \end{prop} \begin{proof} First, (a) directly follows from the definition of $\mathscr V_0$ and $\mathscr U_0$ in Section \ref{stage1red} and the definition of splicing operation in Proposition \ref{block_clcn}, using also Corollary \ref{blockchainprop}. Also (b) follows from Proposition \ref{moleproperty} and Remark \ref{moleremark} (note that degree $2$ atoms cannot be generated by splicing due to our treatment for root (CL) vines), and (c) follows from the fact that there is no degenerate atoms nor triple bonds in $\mathbb M(\mathcal Q_{\mathrm{sk}})$. As for (d), if $v$ is a hinge atom, since $v$ has degree $3$ or $4$, we must have in $\mathbb M(\mathcal Q_{\mathrm{sk}})$ an SGVC (say $\mathbb V\mathbb C$) with one of its joints, say $v_1$, having degree $4$. 
Let the two bonds $(\ell_1',\ell_2')$ at $v_1$ belong to $\mathbb V\mathbb C$ and the other two bonds $(\ell_1,\ell_2)$ at $v_1$ not belong to $\mathbb V\mathbb C$, then the triple $(v,\ell_1,\ell_2)$ in $\mathbb M(\mathcal Q_{\mathrm{sub}})$ is just the triple $(v_1,\ell_1,\ell_2)$ in $\mathbb M(\mathcal Q_{\mathrm{sk}})$ and must have SG as $\mathbb V\mathbb C$ has SG. If $d(v)=4$, then the other joint $v_2$ of $\mathbb V\mathbb C$ also has degree $4$ and we can repeat the above argument to show that $(\ell_3,\ell_4)$ satisfy the same properties as $(\ell_1,\ell_2)$.
Now let an SG vine-like object, say $\mathbb U$, be in $\mathbb M(\mathcal Q_{\mathrm{sub}})$ which is not a subset of an object in (a). If $\mathbb U$ does not contain a hinge atom, then the same $\mathbb U$ must exist in the original molecule $\mathbb M(\mathcal Q_{\mathrm{sk}})$ as a vine-like object, and is not changed in the process. By Lemma \ref{vinechainlem}, $\mathbb U$ must be the subset of an object in $\mathscr C$, and this object must be an LG vine-like object, since otherwise it will be involved in the splicing process and then $\mathbb U$ would be a subset of an object in (a)--(b). This proves (e).
Next, suppose $\mathbb U$ contains a hinge atom. If (f) does not hold, then $\mathbb U$ must contain one hinge atom (say $v_1$) as a joint, such that the bonds $(\ell_1,\ell_2)$ defined in (d) both belong to $\mathbb U$ (if not, then replace $(\ell_1,\ell_2)$ by $(\ell_3,\ell_4)$ defined in (d)). The other joint $v_2$ of $\mathbb U$ may also be a hinge atom; if so then again the bonds $(\ell_1',\ell_2')$ defined in (d) (corresponding to $v_2$) both belong to $\mathbb U$. Then the same $\mathbb U$ must exist in the original molecule $\mathbb M(\mathcal Q_{\mathrm{sk}})$ as an SGVC, and is not changed in the process. Suppose $v_1$ is formed by merging an SGVC called $\mathbb V\mathbb C_1$, and $v_2$ (if applicable) is formed by merging an SGVC called $\mathbb V\mathbb C_2$. Then in $\mathbb M(\mathcal Q_{\mathrm{sk}})$ one can concatenate $\mathbb U$ with $\mathbb V\mathbb C_1$ (and $\mathbb V\mathbb C_2$ if applicable) to form a larger SG vine-like object, which must be a subset of an SG vine-like object in $\mathscr C$. This implies that $\mathbb U$ must be a subset of an object in (a), which is a contradiction and proves (f). \end{proof} \begin{rem}\label{subcondition} Consider any $k$-decoration $(k_\ell)$ of $\mathbb M(\mathcal Q_{\mathrm{sub}})$, restricted by $(\beta_v)$ and $(k_\ell^0)$, that is inherited from a $k$-decoration $\mathscr E_{\mathrm{sk}}$ of $\mathcal Q_{\mathrm{sk}}$ (and $\mathbb M(\mathcal Q_{\mathrm{sk}})$), as in Proposition \ref{kqmainest2}. Then, the following conditions hold for this decoration: \begin{enumerate} \item[(i)] Those imposed by Proposition \ref{subpro},
\item[(ii)] The gaps $|r_j|\sim P_j$ for each maximal ladder $\mathcal L_j$, where $P_j$ are fixed as in Proposition \ref{kqmainest2}. \item[(iii)] By losing at most $C^{n_{\mathrm{sub}}}$, we may assume that each atom that is not an interior atom of an SG vine-like object in (a) and (e) of Proposition \ref{subpro} is fixed to be either SG or LG. For any SG atom $v$ we fix the corresponding bonds $(\ell_1,\ell_2)$ according to the following rules: for any hinge atom we must fix the bonds as in (d) of Proposition \ref{subpro}, for any joint of any SG vine-like object defined in (a) and (e) of Proposition \ref{subpro} we must fix the bonds $(\ell_1,\ell_2)$ to belong to the corresponding vine-like object (if both vines (V) in one DV are SG, then $(\ell_1, \ell_2)$ is chosen from only one of them), and for any endpoint of any triple bond in (c) we must fix the bonds using the endpoint that is a hinge atom. For all other SG atoms we may fix the bonds $(\ell_1, \ell_2)$ arbitrarily.
\item[(iv)] Finally, for each SG atom $v$, we fix its gap as $|r|\sim R_v$, where $R_v\in[L^{-1},L^{-\gamma+\eta}]$ is a dyadic number. Note that this condition leads to a loss of $(\log L)^p$, where $p$ is the number of SG atoms, which we will also treat in the proof below. \end{enumerate} \end{rem} \subsection{The cutting operation}\label{reductcut0} Now we define the basic operation in the stage 2 reduction, namely the \emph{cutting} operation, which involves dividing an atom of degree $3$ or $4$ into an atom of degree $2$ and another atom of degree $1$ or $2$. \begin{df}\label{defcut} Given a molecule $\mathbb M$ and an atom $v$ of degree $3$ or $4$. Suppose $v$ has two bonds $\ell_1$ and $\ell_2$ of opposite directions, then we may \emph{cut} the atom $v$ along the bonds $\ell_1$ and $\ell_2$, by replacing $v$ with two atoms $v_1$ and $v_2$, such that the endpoint $v$ for the bonds $\ell_1$ and $\ell_2$ is moved to $v_1$, and the endpoint $v$ for the other bond(s) is moved to $v_2$, see Figure \ref{fig:cut}. We also call this cut an $\alpha$- (resp. $\beta$-) cut, if it does not (resp. does) generate a new connected component, and accordingly we call the resulting atoms $\alpha$- or $\beta$- atoms. If we are also given a decoration, then we may define the gap of this cut to be $r:=k_{\ell_1}-k_{\ell_2}$. \begin{figure}
\caption{A cutting operation executed at a degree $4$ atom $v$, see Definition \ref{defcut}.}
\label{fig:cut}
\end{figure} \end{df} The stage 2 reduction, which will be applied to $\mathbb M(\mathcal Q_{\mathrm{sub}})$, involves cutting various atoms as in Definition \ref{defcut}, and possibly removing some connected components created in this process, until reaching a final molecule $\mathbb M_{\mathrm{fin}}$. At each step, let the molecule before and after the operation be $\mathbb M_{\mathrm{pre}}$ and $\mathbb M_{\mathrm{pos}}$, then a decoration $(k_\ell)$ of $\mathbb M_{\mathrm{pre}}$ naturally leads to a decoration of $\mathbb M_{\mathrm{pos}}$.
For any molecule $\mathbb M$ (which could be $\mathbb M_{\mathrm{pre}}$ or $\mathbb M_{\mathrm{pos}}$), consider the possible $(c_v)$-decorations of $\mathbb M$, also restricted by $(\beta_v)$ and $(k_\ell^0)$; we also assume that this decoration is inherited from a $k$-decoration of $\mathbb M(\mathcal Q_{\mathrm{sub}})$ that satisfies all assumptions in Remark \ref{subcondition}. Then we consider the number of such restricted decorations, take supremum over the parameters $(c_v,\beta_v,k_\ell^0)$, and define it to be $\mathfrak C$. In view of the right hand side of (\ref{kqmainest2-1}) and the logarithmic loss in Remark \ref{subcondition}, we also define \begin{equation}\label{defequan}\mathfrak A:=\mathfrak C\cdot L^{-(d-\gamma)\chi(\mathbb M)}(C^+\delta^{-1/2})^{-\chi(\mathbb M)}\prod_{j=1}^q\mathfrak X_j^{z_j}\cdot (\log L)^{Cp}, \end{equation} where $\chi(\mathbb M)$ is the characteristic of $\mathbb M$, $p$ is the number of remaining SG atoms in $\mathbb M$, and the other notations are under the setting of Proposition \ref{kqmainest2} but adapted to $\mathbb M$ instead of $\mathbb M(\mathcal Q_{\mathrm{sub}})$, for example $\mathcal L_j$ are the maximal ladders in $\mathbb M$. Define also $m'$ to be the number of atoms not in the maximal ladders, and $\rho=q+m'$ as in Proposition \ref{kqmainest2} before, and denote the product $\prod_{j=1}^q\mathfrak X_j^{z_j}$ in (\ref{defequan}) as $\mathfrak P$. The notations $(\mathfrak A,\mathfrak C,\mathfrak P)$ and $(\rho,q,m')$ will apply to all the molecules appearing in the rest of this paper, with possible subscripts matching those of $\mathbb M$ (so $\mathfrak A_{\mathrm{pre}}$ is defined for the molecule $\mathbb M_{\mathrm{pre}}$ etc.; sometimes for emphasis we may also write $\mathfrak A(\mathbb M)$ or $\mathfrak A(\mathbb M_{\mathrm{pre}})$ instead of $\mathfrak A$ and $\mathfrak A_{\mathrm{pre}}$).
We will prove, for each operation, an inequality of form \begin{equation}\label{defdev}\mathfrak A_{\mathrm{pre}}\lesssim \mathfrak D\cdot \mathfrak A_{\mathrm{pos}}\end{equation} for some quantity $\mathfrak D$, which we define to be the \emph{deviation} of this operation. Clearly, if we know the deviation in each operation step, and an upper bound for the counting problem associated with the final molecule $\mathbb M_{\mathrm{fin}}$, then we can deduce from this information an upper bound for the counting problem associated with $\mathbb M(\mathcal Q_{\mathrm{sub}})$ using (\ref{defequan}) and (\ref{defdev}).
In Section \ref{reductcut} we define the stage 2 reduction process, and study the deviation in each step; in Section \ref{largegapmole} and Section \ref{lgmole} we solve the counting problem associated with the final molecule $\mathbb M_{\mathrm{fin}}$, and complete the proof of Proposition \ref{kqmainest2}. \subsection{Stage 2 reduction: Cutting degree $3$ and $4$ atoms}\label{reductcut} Start with the molecule $\mathbb M(\mathcal Q_{\mathrm{sub}})$ and a restricted decoration $(k_\ell)$ satisfying the assumptions in Remark \ref{subcondition}. Recall the notion of $V,E,F$ and $\chi$ as in Definition \ref{defmole0}; define also $V_\alpha$ and $V_\beta$ to be the number of $\alpha$- and $\beta$-atoms, and use $\Delta$ to denote increments.
\emph{\underline{Step 1: removing SG vine-like objects.}} In Step 1, we collect all the SG vine-like objects defined in (a) and (e) of Proposition \ref{subpro}, including also the triple bonds in (c). Note that if both vines (V) in one DV in (a) are SG then we only collect one of them. For each of these objects $\mathbb U$, we cut the molecule at each joint, along the two bonds $(\ell_1,\ell_2)$ fixed as in Remark \ref{subcondition} (i.e. along the two bonds that belong to $\mathbb U$). This disconnects a VC, say $\mathbb V\mathbb C$ (which is $\mathbb U$ or $\mathbb U$ minus a bond in case of HV or HVC), from the rest of the molecule, and then we remove $\mathbb V\mathbb C$. The two joints then become degree $1$ or $2$ atoms, and we \emph{define them as $\alpha$-atoms}; we also label each of them by the dyadic number $R\in[L^{-1},L^{-\gamma+\eta}]$ such that $|r|\sim R$ for the gap $r$ of $\mathbb V\mathbb C$ (note that we must have $r\neq 0$).
\emph{\underline{Step 2: removing triangles.}} In Step 2, we consider the possible triangles $v_1v_2v_3$ in the molecule, such that there are bonds $\ell_j$ connecting $v_{j+1}$ and $v_{j+2}$ (where $v_4=v_1$ and $\ell_4=\ell_1$ etc.), and $(v_j,\ell_{j+1},\ell_{j+2})$ is an SG triple as fixed in Remark \ref{subcondition} for $j\in\{1,2\}$. Let $|k_{\ell_{j+1}}-k_{\ell_{j+2}}|\sim R_{j}$ for $j\in\{1,2\}$ with $R_j\in[L^{-1},L^{-\gamma+\eta}]$. If $\mathrm{deg }\,v_j \geq 3$, then we cut the molecule at each $v_j$ along the bonds $(\ell_{j+1},\ell_{j+2})$, which disconnects the triangle formed by $v_j$ and $\ell_j$ from the rest of the molecule, and remove the triangle. This leaves $3$ atoms $v_j$ of degree $1$ or $2$. We call $v_3$ a $\beta$-atom, and call $v_j\,(1\leq j\leq 2)$ an $\alpha$- (resp. $\beta$-) atom if it belongs to the same (resp. different) component with $v_3$, except when $v_1$ and $v_2$ are in the same component different from $v_3$, in which case we call $v_1$ an $\alpha$-atom and $v_2$ a $\beta$-atom. For any $\alpha$-atom $v_j$ we label it by the corresponding $R_j$.
\emph{\underline{Step 3: remaining SG cuts.}} In Step 3, we select each of the remaining SG atoms $v$ of degree $\geq 3$, and cut them along the designated bonds $(\ell_1,\ell_2)$ in Remark \ref{subcondition}, in arbitrary order. Note that each cut may be $\alpha$- or $\beta$-cut; we call the resulting atoms $\alpha$- or $\beta$-atoms, and label any $\alpha$-atom by the dyadic number $R\in[L^{-1},L^{-\gamma+\eta}]$, as in Definition \ref{defcut} (again $R\neq 0$ due to our choice of $(\ell_1,\ell_2)$).
\emph{\underline{Step 4: remaining $\beta$-cuts.}} After Step 3, there are now no more SG atoms left in the molecule. In Step 4, we look for all the possible degree $3$ or $4$ atoms where a $\beta$-cut is possible, and perform the corresponding $\beta$-cut, until this can no longer be done.
After all the cutting operations, the resulting graph will contain some $\alpha$-atoms. For each $\alpha$-atom $v$ and a given decoration we define an auxiliary number $\iota_v\in\{0,1\}$, such that $\iota_v=1$ if a cutting operation happened before the cutting at the atom $v$, such that the gaps $r,r'$ of these cuttings satisfy $0<|r\pm r'|\leq L^{-50\eta}R_v$; if no such cutting operation exists then define $\iota_v=0$. Note that the number of choices for all $(\iota_v)$ is at most $C^p$, which can be absorbed into the last factor on the right hand side of (\ref{defequan}). Therefore we may assume a choice of $(\iota_v)$ is fixed in the proof below. \begin{prop}\label{excessprop1} After each operation in Steps 1--4, we have that \begin{equation}\label{excesseqn2}\mathfrak D\lesssim L^{-\eta^3}\cdot\prod_{v}^{(\alpha)}L^{(\gamma+3\eta)/2-\kappa_v\eta}R_v\cdot L^{(2\gamma_0+5\eta^2)\Delta F-(\gamma_0+2\eta^2)\Delta V_\beta} \end{equation} for any $R_v\in[L^{-1},L^{-\gamma+\eta}]$, where the product is taken over all the newly created $\alpha$-atoms $v$, and $R_v$ denotes the label of $v$. The parameter $\kappa_v$ equals $0$ if $\iota_v=0$, and equals $50$ if $\iota_v\neq 0$. \end{prop}
\begin{proof} First assume $\iota_v=0$ for all $\alpha$-atoms $v$. We start with the simplest case (in Steps 3--4) where we cut at a single atom $v$. Note that, if this is an $\alpha$- (resp. $\beta$-) cut then we have $(\Delta\chi,\Delta F,\Delta V_\beta)=(-1,0,0)$ (resp. $(0,1,2)$). In the case of $\beta$-cut at $v$ along $(\ell_1,\ell_2)$, the value of $k_{\ell_1}-k_{\ell_2}$ is uniquely fixed (by summing the equation (\ref{decmole1}) over all atoms $v'\neq v$ that belongs to one component after making the cut). Similarly, using (\ref{defomegadec}), we know that the number of choices of $|k_{\ell_1}|^2-|k_{\ell_2}|^2$, up to distance $\delta^{-1}L^{-2\gamma}$, is $\lesssim n_{\mathrm{sub}}\leq (\log L)^{C}$. This means that $\mathfrak C_{\mathrm{pre}}\lesssim (\log L)^C\mathfrak C_{\mathrm{pos}}$, hence $\mathfrak D\lesssim(\log L)^{C}$ using (\ref{defequan}) and (\ref{defdev}), and noticing that the cut only changes the length of at most one maximal ladder by $O(1)$, using also $1\leq \mathfrak X_j\leq (\log L)^2$. Therefore (\ref{excesseqn2}) is true in this case. As for $\alpha$-cuts, let the corresponding gap $R$ be fixed, then the number of choices for $k_{\ell_1}-k_{\ell_2}$ is $\lesssim (RL)^d$; once $k_{\ell_1}-k_{\ell_2}$ is fixed (and $|k_{\ell_1}-k_{\ell_2}|\sim R$), the number of choices of $|k_{\ell_1}|^2-|k_{\ell_2}|^2$, up to distance $\delta^{-1}L^{-2\gamma}$, is $\lesssim 1+\delta RL^{2\gamma}$ because $k_{\ell_1}+k_{\ell_2}$ belongs to a fixed ball of radius $\sim 1$. This implies that \[\mathfrak D\lesssim (RL)^d(1+RL^{2\gamma})L^{-(d-\gamma)}(\log L)^C\lesssim L^{-\eta^3}\cdot L^{\gamma+3\eta}R^2,\] where the $(\log L)^C$ factor is similar to above, and the last inequality can be verified using $R\in[L^{-1},L^{-\gamma+\eta}]$ and $d\geq 3$. Since both new $\alpha$-atoms are labeled by $R$, this proves (\ref{excesseqn2}).
Next consider Step 2, for which we know $\Delta F\in\{0,1,2\}$, $\Delta V_\beta=\Delta F+1$, and $\Delta\chi=\Delta F-3$; moreover $\mathfrak D\lesssim L^{(d-\gamma)\Delta\chi}(\log L)^C\mathfrak F$, where $\mathfrak F$ is the number of choices for $(k_{\ell_1},k_{\ell_2},k_{\ell_3})$. If $\Delta F=2$, then similar to above, for \emph{each} $j$ we know that
\begin{multline}\label{triangle1}k_{\ell_j}-k_{\ell_{j+1}}\textrm{\ is uniquely fixed,\ and\ the\ number\ of\ choices}\\\textrm{for\ }|k_{\ell_j}|^2-|k_{\ell_{j+1}}|^2,\textrm{\ up\ to\ distance\ }\delta^{-1}L^{-2\gamma},\textrm{\ is\ }\lesssim(\log L)^{C}.\end{multline} By Lemma \ref{basiccount} (1) (where we also use that $R\geq L^{-1}$), we have \[\mathfrak D\lesssim \delta^{-1}\min(L^{d-2\gamma+1},L^d)\cdot L^{-(d-\gamma)}(\log L)^C\lesssim L^{-\eta^3}\cdot L^{\gamma_0+4\eta^2}\] using that $\gamma_0=\min(\gamma,1-\gamma)$, which proves (\ref{excesseqn2}). If $\Delta F=1$, then (\ref{triangle1}) still holds for \emph{some} $j$. Moreover, once $(k_{\ell_j},k_{\ell_{j+1}})$ is fixed, the number of choices for $k_{\ell_{j+2}}$ is at most $(RL)^d$ where $R$ is the label of the unique $\alpha$-atom. This implies that \[\mathfrak D\lesssim\delta^{-1} \min(L^{d-2\gamma+1},L^d)\cdot L^{-2(d-\gamma)}(RL)^d(\log L)^C\lesssim L^{-\eta^3}\cdot L^{\eta^2}L^{(\gamma+3\eta)/2}R\] using that $R\in[L^{-1},L^{-\gamma+\eta}]$ and $d\geq 3$, so (\ref{excesseqn2}) is again true. Now if $\Delta F=0$, then simply by using the gap assumptions we have $\mathfrak F\lesssim L^d(R_1L)^d(R_2L)^d$, hence \[\mathfrak D\lesssim L^{3d}(R_1R_2)^dL^{-3(d-\gamma)}(\log L)^C\lesssim L^{-\eta^3}\cdot L^{-\gamma_0-2\eta^2}L^{\gamma+3\eta}R_1R_2,\] again using that $R_1,R_2\in[L^{-1},L^{-\gamma+\eta}]$ and $d\geq 3$, which proves (\ref{excesseqn2}).
Now consider Step 1. By Proposition \ref{subsetvc}, we know that $\mathbb V\mathbb C$ is either a single vine, or is formed by two double bonds. First assume it is a single vine. For the operation we have $\Delta F=\Delta V_\beta=0$ by combining Proposition \ref{block_clcn}, Proposition \ref{subpro} (a) (e), and Proposition \ref{subsetvc} (and an easy verification for DV). If $\mathbb V\mathbb C$ contains $m$ atoms (including the joints) then it is easy to verify that $\Delta\chi=-m$; let the number of choices for $(k_\ell)$ for all bonds $\ell\in\mathbb V\mathbb C$ be $\mathfrak F$, then \begin{equation}\label{fbyg}\mathfrak D\lesssim \mathfrak F\cdot L^{-m(d-\gamma)}\cdot (\log L)^C\delta^{m/2}\prod_{1\leq j\leq q:\mathcal L_j\subset\mathbb V\mathbb C}\mathfrak X_j^{z_j},\end{equation} where the product is taken over all $j$ such that the maximal ladder $\mathcal L_j$ (see Proposition \ref{kqmainest2}) is a subset of $\mathbb V\mathbb C$ (so there are at most three such $j$, see Figure \ref{fig:vines}), and $z_j$ is the length of $\mathcal L_j$.
To calculate $\mathfrak F$, first fix $x_0=k_{\ell_1}$ and $y_0=k_{\ell_2}$, where $\ell_1$ and $\ell_2$ are the two bonds at one joint of $\mathbb V\mathbb C$ that belong to $\mathbb V\mathbb C$. These have $L^d(RL)^d$ choices since the gap of $\mathbb V\mathbb C$ is $|r|\sim R$. Then, we invoke the reparametrization introduced in the proof of Propositions \ref{estbadvine}--\ref{estnormalvine}, and define the new variables $(x_j,y_j)\,(1\leq j\leq m_1)$ if $\mathbb V\mathbb C$ is bad vine, or $(x_j,y_j)\,(1\leq j\leq \widetilde{m})$ and $(u_1,u_2,u_3)$ if $\mathbb V\mathbb C$ is normal vine, where $m=2\widetilde{m}+2$ for bad vine and $m=2\widetilde{m}+5$ for normal vine. In either case, since each $\Gamma_v$ belongs to a fixed interval of length $\delta^{-1}L^{-2\gamma}$, we know that each $(x_j,y_j)$ satisfies a system of form (\ref{basiccount01}), and $(u_1,u_2,u_3)$ satisfies a system of form (\ref{basiccount02}) with $r=x_0-y_0$ or $r=0$ (so $P=R$ or $P=0$) and $v=0$. Therefore, by applying Lemma \ref{basiccount0}, we get in either case that \[\mathfrak F\lesssim L^d(RL)^dL^{(m-2)(d-\gamma)}\delta^{-m/2}(\log L)^C\prod_{1\leq j\leq q:\mathcal L_j\subset\mathbb V\mathbb C}\mathfrak X_j^{-z_j},\] and hence $\mathfrak D\lesssim R^dL^{2\gamma}(\log L)^C\lesssim L^{-\eta^3}\cdot R^2L^{\gamma+3\eta}$, so (\ref{excesseqn2}) is true. Finally, if $\mathbb V\mathbb C$ is formed by two double bonds, then $\Delta\chi=-3$. We again first fix $(x_0,y_0)$, and then apply Lemma \ref{basiccount} (1) to get \[\mathfrak D\lesssim L^{-3(d-\gamma)}L^d(RL)^d(L^{d-1}+\delta^{-1}\min(R^{-1}L^{d-2\gamma},L^d))(\log L)^C\lesssim L^{-\eta^3}\cdot R^2L^{\gamma+3\eta},\] using that $R\in[L^{-1},L^{-\gamma+\eta}]$ and $d\geq 3$.
In the case where $\iota_v\neq 0$ for some $\alpha$-atom $v$, it is clear that the corresponding gap $r$ must belong to a fixed ball of radius $L^{-50\eta}R$ where $|r|\sim R$, and the number of possible choices of such balls is bounded by $(\log L)^C$. As such, the factor $(RL)^d$ in the above proof, which indicates the number of choices for this gap $r$, is replaced by $(\log L )^C(RL^{1-50\eta})^d$, and the other factors remain the same, therefore (\ref{excesseqn2}) is true with the improved $\kappa_v$. \end{proof} \subsection{The molecule $\mathbb M_{\mathrm{fin}}$}\label{largegapmole} Let the result of stage 2 reduction be $\mathbb M_{\mathrm{fin}}$. If it is not connected, let $\mathbb M_1$ be any of its components. Consider also a decoration of $\mathbb M_{\mathrm{fin}}$ inherited from a decoration $(k_\ell)$ of $\mathbb M(\mathcal Q_{\mathrm{sub}})$ as in Remark \ref{subcondition}. Then they have the following properties:
\begin{prop}\label{finalmole} There is only one component, which we call the \emph{odd component}, that contains two atoms of degree $1$ and $3$. All the other atoms have degree $2$ or $4$. The atoms of degree $1$ and $2$ are classified as $\alpha$-, or $\beta$-atoms. Each $\alpha$-atom $v$ is labeled by a dyadic number $R_v\in [L^{-1},L^{-\gamma+\eta}]$, such that if $v$ has two bonds $(\ell_1,\ell_2)$ then $|k_{\ell_1}-k_{\ell_2}|\sim R_v$ in the decoration; recall also $\iota_v$ and $\kappa_v$ introduced in Section \ref{reductcut} and Proposition \ref{excessprop1}. Atoms that are neither $\alpha$- nor $\beta$-atoms are called \emph{$\varepsilon$-atoms}; any $\varepsilon$-atom in $\mathbb M_{\mathrm{fin}}$ \emph{must} be LG in the decoration. Moreover $\mathbb M_{\mathrm{fin}}$ contains no triple bond.
Any even (i.e. non-odd) component has at least one $\beta$-atom. If an even component $\mathbb M_0$ is a cycle, then it is either a double bond (which is also vine (I)), or a cycle of length at least $4$, or a triangle with at most one $\alpha$-atom. If $\mathbb M_0$ is not a cycle, then all its $\alpha$- and $\beta$-atoms form several disjoint chains, such that each chain has two distinct $\varepsilon$-atoms at both ends. Finally, if any component $\mathbb M_0$ is a \emph{vine} (with two joints having degree $2$), then it \emph{must} be LG in the decoration. \label{proplg} \end{prop} \begin{proof} The total number of odd degree atoms is not changed under the cutting operation and removal of isolated components (which only contain even degree atoms). This value is $2$ initially (Proposition \ref{subpro}), so it will remain $2$. The two odd degree atoms have to be in the same component as any component must have an even number of odd degree atoms. The degree $1$ and $2$ atoms are classified as $\alpha$-, or $\beta$-atoms, and $(R_v,\iota_v,\kappa_v)$ for $\alpha$-atoms $v$ are defined as before. $\mathbb M_{\mathrm{fin}}$ contains no triple bond because any triple bond is destroyed in Step 1.
Now consider an even component $\mathbb M_0$ with only degree $2$ and $4$ atoms. Such a component can only be formed after a $\beta$-cut, so it will contain at least one $\beta$-atom. If it is a cycle, then it is either a double bond, or a triangle, or has length at least $4$. If it is a triangle, then it cannot have two or more $\alpha$-atoms, since any $\alpha$-atom must have SG in the original molecule $\mathbb M(\mathcal Q_{\mathrm{sub}})$, and any triangle with at least two SG atoms will be removed in Step 2. If $\mathbb M_0$ is not a cycle, then it has at least one degree $4$ (i.e. $\varepsilon$-) atom. For any degree $2$ atom, consider the longest chain of degree $2$ atoms containing it, which ends at two degree $4$ atoms $v_1$ and $v_2$ in both directions; they cannot coincide, otherwise we can perform a $\beta$-cut at this common atom according to Step 4 above.
Finally, each $\varepsilon$-atom must have LG by definition, and $|k_{\ell_1}-k_{\ell_2}|\sim R_v$ for any $\alpha$-atom $v$ of degree $2$ labeled by $R_v$. If any component $\mathbb M_1$ is an SG vine $\mathbb V$, then this vine $\mathbb V$ must exist in the original molecule $\mathbb M(\mathcal Q_{\mathrm{sub}})$, and is not changed in the process. But the SG vine $\mathbb V$ cannot be any vine occurring in (a) or (e) of Proposition \ref{subpro} (or any triple bond in (c)), because then it would be removed in Step 1. Therefore, $\mathbb V$ has to be a vine occurring in (f) of Proposition \ref{subpro}. However, since our reduction involves cutting the molecule at the hinge atom along the two bonds $(\ell_1,\ell_2)$, by (f) of Proposition \ref{subpro}, the vine $\mathbb V$ cannot remain intact after this cutting operation. This completes the proof. \end{proof} With all the above preparations, we can now reduce Proposition \ref{kqmainest2} to the following \begin{prop} \label{lgmolect} For each component $\mathbb M_0$ of the final molecule $\mathbb M_{\mathrm{fin}}$, define $(\mathfrak C_{0},\mathfrak A_0,\mathfrak P_0)$ and $\rho_0$ as in Section \ref{reductcut0} and (\ref{defequan}), but associated with $\mathbb M_0$ (here $p$ should be replaced by $0$ in (\ref{defequan}) as there are no more SG atoms in $\mathbb M_0$). Let also each $\alpha$-atom $v$ be labeled by the dyadic number $R_v$, then we have \begin{equation}\label{finalcount}\mathfrak A_0\lesssim \prod_v^{(\alpha)}L^{-(\gamma+3\eta)/2+\kappa_v\eta}R_v^{-1}\cdot L^{(\gamma_0+2\eta^2)V_\beta-(2\gamma_0+5\eta^2)G}\cdot L^{-\eta^5\rho_0}, \end{equation} where the product is taken over all $\alpha$-atoms $v$, and $V_\beta$ is the number of $\beta$-atoms in $\mathbb M_0$; moreover $G$ is $0$ or $1$ depending on whether $\mathbb M_0$ is an odd or even component. 
\end{prop} \begin{proof}[Proof of Proposition \ref{kqmainest2} assuming Proposition \ref{lgmolect}] Since (\ref{finalcount}) holds for each component $\mathbb M_0$, clearly it also holds for the union $\mathbb M_{\mathrm{fin}}$, if all the expressions and quantities on the right hand side are replaced by the ones corresponding to $\mathbb M_{\mathrm{fin}}$ (and $G$ replaced by $F-1$ where $F$ is the number of components of $\mathbb M_{\mathrm{fin}}$). Using (\ref{finalcount}) and the deviation bound (\ref{excesseqn2}) for each operation, we get that \begin{equation}\label{finalcount2}\mathfrak A_{\mathrm{sub}}\leq (C^+)^{n_{\mathrm{sub}}}\cdot L^{-\eta^5(\sigma+\rho_{\mathrm{fin}})-\eta^3\sigma/2},\end{equation} where $\mathfrak A_{\mathrm{sub}}$ and $\rho_{\mathrm{fin}}$ are defined as before, and $\sigma$ is the total number of operations that are done in Steps 1--4. It is easy to see that $\Delta_{\mathrm{sub}}\leq C\eta^4\sigma$ for the value $\Delta_{\mathrm{sub}}$ in Proposition \ref{kqmainest2}. This is because each SGVC described in Proposition \ref{subpro} (a) leads to a hinge atom as in Proposition \ref{subpro} (b) at which we perform a cut, so the number of these SGVC is at most $\sigma$, while by the definition of $\Delta_j$ in Proposition \ref{red1step} the contribution of each such SGVC to $\Delta_{\mathrm{sub}}$ is at most $\eta^4$ (or negative). Therefore, the term $L^{\eta^3\sigma/2}$ in (\ref{finalcount2}) takes care of the $L^{-\Delta_{\mathrm{sub}}}$ term in (\ref{kqmainest2-1}).
Comparing (\ref{finalcount2}) with (\ref{kqmainest2-1}), using also the definition (\ref{defequan}) and noticing the logarithmic loss in Remark \ref{subcondition}, it now suffices to prove that $\rho_{\mathrm{sub}}\leq C(\sigma+\rho_{\mathrm{fin}})$ for the quantity $\rho$ defined in Proposition \ref{kqmainest2}. However, the value of $\rho_{\mathrm{sub}}$ becomes $\rho_{\mathrm{fin}}$ after all the operations, and each operation changes the value of $\rho$ by at most $O(1)$ because each vine contains at most $O(1)$ maximal ladders and at most $O(1)$ atoms apart from these maximal ladders (which is clear from Figure \ref{fig:vines}), so it is clear that $|\rho_{\mathrm{sub}}-\rho_{\mathrm{fin}}|\leq C\sigma$, as desired. \end{proof} \section{Counting problem for large gap molecules}\label{lgmole}
\subsection{Preliminary setup}\label{lgmole1} We now start the proof of Proposition \ref{lgmolect}. We will first get rid of the expression on the right hand side of (\ref{finalcount}), and reduce Proposition \ref{lgmolect} to the following Propositions \ref{moleprop1}--\ref{moleprop4}. In these propositions, we always consider a connected molecule or pseudomolecule $\mathbb M$ (cf. Definition \ref{defmole0}), and a $(c_v)$-decoration $(k_\ell)$ of $\mathbb M$ which is also restricted by $(\beta_v)$ and $(k_\ell^0)$; however, \emph{in Propositions \ref{moleprop1}--\ref{moleprop4} only}, we will relax the definition of decorations by \emph{not} requiring $c_v=0$ for degree $4$ atoms $v$ as in Definition \ref{decmole}. For each atom $v\in\mathbb M$ we also assume the decoration is LG at $v$ (i.e. for any bonds $(\ell_1,\ell_2)$ of opposite directions at $v$ we have $|k_{\ell_1}-k_{\ell_2}|\geq L^{-\gamma+\eta}$).
Let $(V,E,\chi)$ etc. be associated with $\mathbb M$, and let $\mathfrak C$ and $(\rho,q,m')$ be defined for $\mathbb M$ as before. Note that, for a given ladder, the differences $k_{\ell}-k_{\ell'}$ for different pairs $(\ell,\ell')$ of parallel single bonds may not be the same as in Definition \ref{defdiff}, due to the relaxation of the assumption $c_v=0$ for degree $4$ atoms $v$. Therefore a ladder of length $z_j\geq 1$ will have $z_j$ different gaps $|r_{ji}|\sim P_{ji}\,(1\leq i\leq z_j)$; consequently we define the quantity $\mathfrak A$ as in (\ref{defequan}) but with $p$ replaced by $0$ and the $\mathfrak X_j^{z_j}$ factor in $\mathfrak P$ replaced by $\prod_{i=1}^{z_j}\min((\log L)^2,1+\delta L^{2\gamma}P_{ji})$ where $|r_{ji}|\sim P_{ji}$ for the $i$-th gap of the ladder $\mathcal L_j$ with the notations of Proposition \ref{kqmainest2}. Note that we may drop any $C^n$ factor below, since they can be absorbed into the definition of $\mathfrak A$. \begin{prop}\label{moleprop1} If $\mathbb M$ is a molecule as above, then we always have $\mathfrak A\lesssim 1$. \end{prop} \begin{prop}\label{moleprop2} If $\mathbb M$ is a molecule that contains no triple bond and $E=2V-1$, then $\mathfrak A\lesssim L^{-3\gamma_0/5}$; if $\mathbb M$ contains no triple bond and $E=2V-2>0$, then $\mathfrak A\lesssim L^{-\eta/3}$. Furthermore, if the number of atoms in $\mathbb M$ not of degree $4$ is $w$, \emph{and we also allow at most $w$ atoms to be SG in the decoration}, then we have $\mathfrak A\lesssim L^{-\eta^2\cdot\rho+C(w+1)}$. \end{prop} \begin{prop}\label{moleprop3} If $\mathbb M$ is a molecule that contains only single bonds, and has two degree $3$ atoms with all other atoms having degree $4$, then $\mathfrak A\lesssim L^{-\gamma_0-\eta/2}$. 
\end{prop} \begin{prop}\label{moleprop4} (1) If $\mathbb M$ is a molecule that has at most one triple bond and $E=2V-1$, and one cannot make a $\beta$-cut in $\mathbb M$ such that one of the new components has all atoms of degree $4$ except the newly formed $\beta$-atom which has degree $2$, and $\mathbb M$ is not formed by removing the two joints (with their bonds) of a vine (II) and adding one new bond between the two atoms connected to one of the joints, then $\mathfrak A\lesssim L^{-4\gamma_0/7}$.
(2) Suppose $\mathbb M$ is a $4$-regular pseudomolecule that contains at most two triple bonds, and is \emph{not} formed by removing the two joints of a vine and adding one new bond between the pair of atoms connected to each joint. Fix any bond $\ell$ in $\mathbb M$, then either $\mathfrak A(\mathbb M)\lesssim L^{-\eta/4}$ for $\mathbb M$, or $\mathfrak A(\mathbb M\backslash\{\ell\})\lesssim L^{-\gamma_0-\eta/4}$ for the molecule after removing $\ell$ (note that the exact case may depend on assumptions we impose on the decoration, which will be made clear in the proof). \end{prop} We start with Proposition \ref{moleprop1} as it is the simplest, and some ingredients in its proof will be reused later. \begin{proof}[Proof of Proposition \ref{moleprop1}] We will reduce the molecule $\mathbb M$ to the empty set by the following operations: \underline{Operation (a)} which consists of removing one atom (and all the bonds), and \underline{Operation (b)} which consists of removing two atoms $v_1$ of degree 3, and $v_2$ of degree 3 or 4, that are connected by a double bond, and all the bonds attached to them. Note that $\mathbb M$ is a molecule which does not have components of only degree $4$ atoms, so we can always assume that the removed atom has degree $\leq 3$ in any operation (a); assume also that in the whole process, we perform operation (a) only when (b) is not possible.
For each operation we will consider the deviation, i.e. the quantity $\mathfrak D$ such as $\mathfrak A_{\mathrm{pre}}\lesssim\mathfrak D\cdot\mathfrak A_{\mathrm{pos}}$ as in (\ref{defdev}), where the meaning of the quantities should be obvious. Note that each operation (a) does not affect any ladder of length at least one (since the removed atom cannot belong to such a ladder, or otherwise we should perform operation (b)), and hence does not affect the factor $\mathfrak P$. Moreover, if the removed atom $v$ has degree $r\in\{1,2,3\}$, then $0\leq \Delta F\leq r-1$ and $\Delta\chi=\Delta F-r+1$. By Lemma \ref{basiccount} (1)--(2), we see that \[\mathfrak D\lesssim\delta^{-1}L^{2(d-\gamma)}\cdot L^{-2(d-\gamma)}(C^+\delta^{-1/2})^{-2}\lesssim1\] if $\Delta\chi=-2$ (so $(r,\Delta F)=(3,0)$), and that \[\mathfrak D\lesssim\delta^{-1}L^{d-\gamma-\eta}\cdot L^{-(d-\gamma)}\lesssim L^{-\eta/2}\] if $\Delta\chi=-1$ (so $(r,\Delta F)\in\{(2,0),(3,1)\}$), and $\mathfrak D\lesssim1$ if $\Delta\chi=0$ (so $(r,\Delta F)\in\{(1,0),(2,1),(3,2)\}$). Here we have also used the fact that $R\gtrsim L^{-\gamma+\eta}$ in Lemma \ref{basiccount} (1) due to LG assumption; moreover if $\Delta F\geq 1$ then one of the values $k_\ell$ for bonds $\ell$ at $v$ will be uniquely fixed (if $\Delta F\geq 2$ then all $k_\ell$ will be uniquely fixed).
Now consider operation (b). Here, we have two cases. Either the operation does not affect any ladder of length at least one, or it can reduce the length of one such ladder by one. Now, in the former case we clearly have $\mathfrak D\lesssim 1$ as the operation (b) can be split into two operations (a). In the latter case, the operation removes a factor $\min((\log L)^2,1+\delta L^{2\gamma}P)$ from the product $\mathfrak P$, where $P\sim|k_{\ell}-k_{\ell'}|$ for the two single bonds $(\ell,\ell')$ at the two removed atoms. If both atoms have degree 3, then we have $\Delta\chi=-2$, and by Lemma \ref{basiccount} (3) we have \begin{multline}\label{laddersharp}\mathfrak D\lesssim \delta^{-1}L^{2(d-\gamma)}\cdot\max(\delta^{-1}L^{-\gamma_0},(\log L)^{-2},(1+\delta L^{2\gamma}P)^{-1})\\\times L^{-2(d-\gamma)}(C^+\delta^{-1/2})^{-2}\cdot \min((\log L)^2,1+\delta L^{2\gamma}P)\lesssim 1.\end{multline} If $v_2$ has degree 4, then either $\Delta\chi=-2$ and we have the same bound as in (\ref{laddersharp}), or $\Delta \chi=-3$ and a better bound holds using Lemma \ref{basiccount} (4). Therefore, in all cases we will have $\mathfrak A_{\mathrm{pre}}\lesssim\mathfrak A_{\mathrm{pos}}$. By choosing the constant $C^+$ in (\ref{defequan}) large enough we can assume $\mathfrak A_{\mathrm{pre}}\leq\mathfrak A_{\mathrm{pos}}$; this implies that we must have $\mathfrak A\leq 1$ in the beginning, since it trivially holds in the end when $\mathbb M$ has no bonds. This completes the proof. \end{proof} In the rest of this subsection we prove Proposition \ref{lgmolect} assuming Propositions \ref{moleprop2}--\ref{moleprop4}. \begin{proof}[Proof of Proposition \ref{lgmolect} assuming Propositions \ref{moleprop2}--\ref{moleprop4}] Let $\mathbb M_0$ be a component of $\mathbb M_{\mathrm{fin}}$ as in Proposition \ref{lgmolect}. If $\mathbb M_0$ is a double bond, then it must be LG and both atoms must be $\beta$-atoms, due to Proposition \ref{finalmole}. 
In this case we have $\mathfrak A_0\lesssim \delta^{-1}L^{d-\gamma-\eta}\cdot L^{-(d-\gamma)}\lesssim L^{-\eta/2}\lesssim L^{-\eta^5}\cdot L^{2(\gamma_0+2\eta^2)-(2\gamma_0+5\eta^2)}$ using Lemma \ref{basiccount} (1) and the LG condition, so (\ref{finalcount}) is true. If $\mathbb M_0$ is a triangle, then there are at least two $\beta$-atoms due to Proposition \ref{finalmole}. Moreover by Lemma \ref{basiccount} (1) we see that \[\mathfrak A_0\lesssim \delta^{-1}L^{d-\gamma+\gamma_0}\cdot L^{-(d-\gamma)}\lesssim L^{-3\eta^5}\cdot L^{3(\gamma_0+2\eta^2)-(2\gamma_0+5\eta^2)}\] if there is no $\alpha$-atom, and that \[\mathfrak A_0\lesssim (L^{d-1}+\delta^{-1}R^{-1}L^{d-2\gamma})L^{-(d-\gamma)}\lesssim L^{-3\eta^5}\cdot L^{-(\gamma+3\eta)/2}R^{-1}\cdot L^{2(\gamma_0+2\eta^2)-(2\gamma_0+5\eta^2)}\] if there is one $\alpha$-atom labeled by $R\in [L^{-1},L^{-\gamma+\eta}]$. In either case (\ref{finalcount}) is true. If $\mathbb M_0$ is a cycle of length $s\geq4$, then there is at least one $\beta$-atom. If there is no $\alpha$-atom the proof is the same as the triangle case; if there is at least one $\alpha$-atom labeled by $R$, then \[\mathfrak A_0\lesssim (L^{d-1}+\delta^{-1}R^{-1}L^{d-2\gamma})L^{-(d-\gamma)}\lesssim L^{-s\cdot \eta^5}L^{-(\gamma+3\eta)/2}R^{-1}\cdot (L^{(\gamma-5\eta)/2})^{s-2}L^{\gamma_0+2\eta^2}\cdot L^{-(2\gamma_0+5\eta^2)},\] which proves (\ref{finalcount}) since $L^{-(\gamma+3\eta)/2}R^{-1}\gtrsim L^{(\gamma-5\eta)/2}$ and $L^{\gamma_0+2\eta^2}\gtrsim L^{(\gamma-5\eta)/2}$ when $R\leq L^{-\gamma+\eta}$. This completes the case when $\mathbb M_0$ is a cycle.
Now assume $\mathbb M_0$ has at least one $\varepsilon$-atom. Thanks to Proposition \ref{moleprop2} we always have \begin{equation}\label{roughbd}\mathfrak A_0\lesssim L^{-\eta^2\rho_0+C(V_\alpha+V_\beta+1)}. \end{equation} Then we need to prove some other estimates to interpolate with (\ref{roughbd}). To achieve this we need to perform some operations on $\mathbb M_0$; these operations include Operation (a) and Operation (b) defined in the proof of Proposition \ref{moleprop1} above, as well as the new ones defined below.
\underline{Operation (c):} Remove a chain of $\alpha$- and $\beta$-atoms (as in Proposition \ref{finalmole}), which has two distinct $\varepsilon$-atoms at both its ends, and all bonds. By Lemma \ref{basiccount} (1) we have \begin{equation}\label{operc}\mathfrak D\lesssim1+\delta^{-1}R^{-1}L^{-\gamma}
\end{equation} for both $\alpha$- and $\beta$-atoms, where $R$ is such that $\max|r|\sim R$ for all the gaps $r$ at the $\alpha$- and $\beta$-atoms in this chain.
\underline{Operation (d):} Remove a chain of $\alpha$- and $\beta$-atoms, which has two distinct $\varepsilon$-atoms $v_1$ and $v_2$ at both its ends, and all bonds, and then add a new bond between $v_1$ and $v_2$ in the same direction as the chain. This operation does not change $\chi$, and we will show that $\mathfrak D\lesssim (\log L)^C$.
To see this, fix a decoration of the molecule $\mathbb M_{\mathrm{pre}}$ before operation. Let the two bonds in the chain at the two $\varepsilon$-atoms be $\ell_1$ and $\ell_2$ respectively, and the new bond added be $\ell_3$. Then $k_{\ell_1}-k_{\ell_2}$ and $|k_{\ell_1}|^2-|k_{\ell_2}|^2$ are fixed (the latter up to distance $\delta^{-1}L^{-2\gamma}$) in the decoration. For any integer $|g|\leq 3$, the value of $|k_{\ell_1}|^2-|k_{\ell_2}+g(k_{\ell_1}-k_{\ell_2})|^2$ is also fixed up to distance $O(1)\delta^{-1}L^{-2\gamma}$, thus we obtain a decoration for the molecule $\mathbb M_{\mathrm{pos}}$ after operation, by setting $k_{\ell_3}=k_{\ell_2}+g(k_{\ell_1}-k_{\ell_2})$. The LG condition for the new decoration is still satisfied, if we choose a suitable $g$, and weaken the LG condition to $|r|\geq L^{-\gamma+\eta}/10$ (if not, then by the pigeonhole principle we must have $|k_{\ell_1}-k_{\ell_2}|\leq L^{-\gamma+\eta}/10$, and thus the LG condition for $\mathbb M_{\mathrm{pre}}$ implies the weakened LG condition for $\mathbb M_{\mathrm{pos}}$). This implies that $\mathfrak C_{\mathrm{pre}}\lesssim\mathfrak C_{\mathrm{pos}}$ and hence $\mathfrak D\lesssim (\log L)^C$, because this operation affects at most $O(1)$ nodes in $O(1)$ ladders, and modifies $\mathfrak P$ by at most a $(\log L)^C$ factor.
\underline{Operation (e):} Suppose after operation (d), a triple bond forms between $v_1$ and $v_2$. If these two atoms have at most one external bond then we remove them and all bonds; otherwise, if they are connected to two $\varepsilon$-atoms $v_3\neq v_4$ by two single bonds, then we remove $(v_1,v_2)$ and all bonds, then add a new bond between $v_3$ and $v_4$, matching the directions of the removed single bonds.
By using the same arguments above (assigning a suitable $k_{\ell_2}+g(k_{\ell_1}-k_{\ell_2})$ to the new bond $\ell_3$, where $\ell_1$ and $\ell_2$ are the two single bonds connecting $v_3$ and $v_4$ to $v_1$ and $v_2$) and also using Lemma \ref{basiccount} (3) we can show that as a result of applying Operations (d) and (e) consecutively, \begin{equation}\label{opere}\mathfrak D\lesssim L^{-\gamma_0+200\eta},\end{equation} provided that $\max|r|\gtrsim L^{-\gamma-100\eta}$ for all the gaps $r$ at the $\alpha$- and $\beta$-atoms in this chain. Note that, after operations (d) and (e), we no longer require $c_v=0$ for degree $4$ atoms $v$ as in Definition \ref{decmole}.
We treat the remaining cases of Proposition \ref{lgmolect}. For an even component, note that $V_\beta\geq 1$ because the last cut that separates $\mathbb M_0$ from the other components must be a $\beta$-cut; moreover if $V_\beta=1$ then we must have $V_\alpha\geq 1$, since otherwise the only $\beta$-atom will have gap $r=0$ which is not possible.
(1) If $\mathbb M_0$ is the odd component, then we perform the same operations (a) and (b) as in the proof of Proposition \ref{moleprop1} above, but we only remove $\varepsilon$-atoms (even if they may become degree $1$ or $2$ in the process). Since the value of $\mathfrak A$ becomes $1$ after removing all $\varepsilon$-atoms (because the remaining $\alpha$- and $\beta$-atoms can only form finitely many chains for which $\chi=0$), and $\mathfrak D\lesssim 1$ for each step in the same way as in the proof of Proposition \ref{moleprop1}, we conclude that $\mathfrak A_0\lesssim 1$. Moreover, let $V_\alpha$ and $V_\beta$ be the number of $\alpha$- and $\beta$-atoms respectively; if $w:=V_\alpha+V_\beta=0$ then $E=2V-1$ and $\mathfrak A_0\lesssim L^{-3\gamma_0/5}$ by Proposition \ref{moleprop2}, and if $w>0$ then $\mathfrak A_0\lesssim 1$ and the right hand side of (\ref{finalcount}) is at least $L^{(\gamma_0-5\eta)w/2-\eta^5\rho_0}$ (note also that $G=0$ for the odd component). In either case (\ref{finalcount}) follows from an interpolation with (\ref{roughbd}).
(2) From now on we assume $\mathbb M_0$ is an even component. If $V_\beta\geq 3$, then we first choose any chain of $\alpha$- and $\beta$-atoms and perform operation (c). After this, the molecule will no longer have any component such that all $\varepsilon$-atoms have degree $4$, so we can perform operations (a) and (b) as in (1) above, to prove that $\mathfrak A_0\lesssim\delta^{-1}L^{\gamma_0}$ using also (\ref{operc}). As the right hand side of (\ref{finalcount}) is at least \[L^{\eta^2V_\alpha}\cdot L^{V_\beta(\gamma_0+2\eta^2)-2\gamma_0-5\eta^2-\eta^5\rho_0}\gtrsim L^{\gamma_0+\eta^2/2-\eta^5\rho_0}\cdot L^{(V_\alpha+V_\beta)\eta^2/10},\] we can interpolate (\ref{roughbd}) with the bound $\mathfrak A_0\lesssim\delta^{-1}L^{\gamma_0}$ to prove (\ref{finalcount}).
(3) If $V_\beta=2$ and $V_\alpha\geq 1$, or if $V_\beta=1$ and $V_\alpha\geq 3$, then we choose any chain containing at least one $\alpha$-atom and perform operation (c), and proceed as in (2) above. Let the label of the $\alpha$-atom in this chain be $R$, then by (\ref{operc}) we have $\mathfrak A_0\lesssim1+\delta^{-1}R^{-1}L^{-\gamma}$. Interpolating with (\ref{roughbd}) we get \begin{multline}\label{case3est}\mathfrak A_0\lesssim(1+\delta^{-1}R^{-1}L^{-\gamma})\cdot L^{-\eta^5\rho_0+C\eta^3(V_\alpha+1)}\\\lesssim L^{-\eta^5\rho_0}\cdot L^{-(\gamma+3\eta)/2}R^{-1}\cdot L^{V_\beta(\gamma_0+2\eta^2)-2\gamma_0-5\eta^2}\cdot L^{(V_\alpha-1)(\gamma-5\eta)/2}\end{multline} which is better than (\ref{finalcount}) and is easily verified when $V_\beta=2$ and $V_\alpha\geq 1$ or $V_\beta=1$ and $V_\alpha\geq 3$.
(4) If $V_\alpha=0$ and $V_\beta=2$, then choose all the chains of $\beta$-atoms and perform operation (d) to them. The resulting molecule $\mathbb M_1$ is $4$-regular and has at most two triple bonds. Therefore, by Proposition \ref{moleprop4} (2), we know that either $\mathfrak A_1\lesssim L^{-\eta/5}$ or $\mathbb M_1$ is formed by removing the two joints of a vine and adding one new bond between the pair of atoms connected to each joint. (note that, the number of choices for vector $k_\ell$ for a newly added bond $\ell$ is bounded by $\delta^{-1}L^{d-\gamma+\gamma_0}$ due to definition of operation (d) and Lemma \ref{basiccount} (1), so if $\mathfrak A\lesssim L^{-\gamma_0-\eta/4}$ for the molecule $\mathbb M_1\backslash\{\ell\}$ we also have $\mathfrak A_1\lesssim L^{-\eta/5}$). In the former case interpolating with (\ref{roughbd}) yields $\mathfrak A_0\lesssim L^{-\eta/5-\eta^5\rho_0}\lesssim L^{\eta^2-\eta^5\rho_0}$ which implies (\ref{finalcount}).
In the latter case, an enumeration shows that $\mathbb M_0$ must be a vine and is thus LG due to Proposition \ref{finalmole}, so we may now perform operation (c) to the chains of $\beta$-atoms in $\mathbb M_0$. For the resulting molecule $\mathbb M_1$ we have $\mathfrak A_1\lesssim 1$ by Proposition \ref{moleprop1}, and using also (\ref{operc}) and the LG assumption we get that $\mathfrak A_0\lesssim L^{-\eta/2}$, so (\ref{finalcount}) again follows by interpolation.
(5) If $V_\alpha=V_\beta=1$, then we choose the chain containing the $\alpha$-atom and perform operation (c), and choose any other possible chain and perform operation (d), to get a molecule $\mathbb M_1$ with only $\varepsilon$-atoms. Note that $\mathbb M_1$ has at most one triple bond, satisfies the $\beta$-cut assumption in Proposition \ref{moleprop4} (1) (which follows because one cannot make any $\beta$-cuts in $\mathbb M_0$) as well as $E=2V-1$, so by Proposition \ref{moleprop4} (1) we know either $\mathfrak A_1\lesssim L^{-4\gamma_0/7}$, or $\mathbb M_1$ is formed by removing the two joints of a vine (II) and adding one new bond between the two atoms connected to one of the joints. But the latter case is impossible, because then $\mathbb M_0$ has to be a vine (II) and thus has LG due to Proposition \ref{finalmole}, which is impossible as one of its joints is an $\alpha$-atom.
Let the $\alpha$-atom be labeled by $R$, then using (\ref{operc}) we get $\mathfrak A_0\lesssim (1+\delta^{-1}R^{-1}L^{-\gamma})L^{-4\gamma_0/7}$, and interpolating with (\ref{roughbd}) we get
\[\mathfrak A_0\lesssim(1+\delta^{-1}R^{-1}L^{-\gamma})L^{-4\gamma_0/7}\cdot L^{-\eta^5\rho_0+C\eta^3}\lesssim L^{-\eta^5\rho_0}\cdot L^{(\gamma_0+2\eta^2)-2\gamma_0-5\eta^2}\cdot L^{-(\gamma+3\eta)/2}R^{-1},\] which is easily proved using $R\leq L^{-\gamma+\eta}$. This proves (\ref{finalcount}).
(6) Finally suppose $V_\alpha=2$ and $V_\beta=1$. Here we will use the $\iota_v$ and $\kappa_v$ parameters defined in Section \ref{reductcut}. First, let the labels of the two $\alpha$-atoms $(v_1,v_2)$ be $R_1\geq R_2$; we choose the chain containing $v_1$ and perform operation (c), then choose the other chains and perform operation (d), to reduce to $\mathbb M_1$. By (\ref{operc}) and interpolation, we know that (\ref{finalcount}) is true as long as the inequality \begin{multline}\label{case6est} L^{-\eta^5\rho_0+C\eta^3}\cdot(1+\delta^{-1}R_1^{-1}L^{-\gamma})(\log L)^C\cdot \mathfrak A_1\\\lesssim L^{-\eta^5\rho_0}\cdot L^{-(\gamma+3\eta)+(\kappa_{v_1}+\kappa_{v_2})\eta}(R_1R_2)^{-1}\cdot L^{-(\gamma_0+3\eta^2)}\end{multline} holds. Since $\mathfrak A_1\lesssim 1$ and $R_j\lesssim L^{-\gamma+\eta}$, an easy calculation shows that (\ref{case6est}) is true if $\kappa_{v_1}+\kappa_{v_2}>0$, or if $\mathbb M_1$ has no triple bond (so Proposition \ref{moleprop2} implies that $\mathfrak A_1\lesssim L^{-3\gamma_0/5}$), or if $R_2\leq L^{-\gamma-50\eta}$.
Now suppose $\kappa_{v_1}+\kappa_{v_2}=0$, $\mathbb M_1$ has a triple bond, and $R_1\geq R_2\geq L^{-\gamma-50\eta}$. Note that an algebraic sum of the three gaps at the three $\alpha$- and $\beta$-atoms is equal to $0$, and none of the three gaps is $0$ itself, so by the definition of $(\iota_v,\kappa_v)$, we must have $|r|\gtrsim L^{-50\eta}R_2\geq L^{-\gamma-100\eta}$ for the gap $r$ at the $\beta$-atom. Then we may perform operation (e) instead of (d) in the last step at one of the chains not containing $v_1$, and reduce to a molecule $\mathbb M_2$. Clearly $\mathfrak A_2\lesssim 1$, and using (\ref{opere}) and interpolation gives that \[\mathfrak A_0\lesssim L^{-\eta^5\rho_0+C\eta^3}\cdot(1+\delta^{-1}R_1^{-1}L^{-\gamma})L^{-\gamma_0+200\eta},\] and this extra $L^{-\gamma_0}$ gain (with $L^{O(\eta)}$ loss) is more than enough to imply (\ref{finalcount}) as above. This finishes the proof of Proposition \ref{lgmolect}. \end{proof} \subsection{Proof of Proposition \ref{moleprop2}}\label{mole2proof} In this subsection we prove Proposition \ref{moleprop2}. Under the \emph{large gap} assumption, this proof relies on the steps and the algorithm that are almost identical to those defined in the proof of Proposition 9.10 in \cite{DH21} (see \cite{DH21}, Sections 9.3--9.4). For completeness, we have included the definitions and properties of these steps and algorithm, with suitable modifications adapted to the current scaling law, in Appendix \ref{appalg}.
With these preparations we can prove Proposition \ref{moleprop2}, by adopting the same arguments as in Section 9.5 of \cite{DH21}, which we present below. We start with the case when $E\in\{2V-1,2V-2\}$ (so $\mathbb M$ is a molecule), and $\mathbb M$ has no triple bond, and apply the algorithm described in Section \ref{alg}. The algorithm contains $O(n)$ operations, where $n$ is the size of the molecule, and in some cases we are making binary choices depending on properties of the decoration, leading to at most $C^n$ possibilities. Such $C^n$ factors are always negligible by choosing the constant $C^+$ in (\ref{defequan}); below we will fix one such possibility (and hence an operation sequence). Let $r_1$ be the total number of \emph{fine} operations, and $r_2$ be the total number of \emph{good} operations. Note that the change of any of the quantities we will study below, caused by any single operation we defined above, is at most $O(1)$. \subsubsection{Increments of $\nu$ and $V_3$}\label{incre1} First, note that operations (TB-1N) and (TB-2N) only occur once after (3S3-3G) or (3D3-3G) which are good operations, so the number of those is at most $Cr_2$. Let the number of (BR-N) where $d(v_1)=d(v_2)=3$ (see Proposition \ref{brprop}) be $z_1$, the number of other (BR-N) be $z_1'$. Let the number of (3S3-1N) be $z_2$, the number of (3R-1N) be $z_3$, the numbers of (2R-2F)--(2R-4F) be $z_4$, $z_5$ and $z_6$, and the number of (2R-1F) be $z_7$. By Propositions \ref{brprop}--\ref{2rprop}, we can examine the increment of $\nu$ in the whole process and get \begin{equation}\label{increeta}-2z_1-2z_1'-2z_2+2z_3-2z_5\geq -\nu_0-Cr_2, \end{equation} where $\nu_0\in\{0,-2\}$ is the initial value of $\nu$, and in the end $\nu=0$. In the same way, by examining the increment of $V_3$ we get \begin{equation}\label{increv3}-2z_1-z_1'+2z_2+2z_3+z_4\leq -V_{30}-Cr_2, \end{equation} where $V_{30}\geq 0$ is the initial value of $V_3$ and in the end $V_3=0$. 
Subtracting these two inequalities yields $z_1'+z_2+z_4+z_5\leq \nu_0-V_{30}+Cr_2$. In particular we have $z_1'+z_2+z_4+z_5\leq Cr_2$, and if $E=2V-1$ then $r_2\geq 1$. Note also that $z_6+z_7\leq r_1$ because (2R-1F) and (2R-4F) are fine. \subsubsection{The other operations} Next we will prove that $z_1+z_3\leq Cr_2$. By (\ref{increv3}) we have $z_3\leq z_1+Cr_2$, so we only need to prove $z_1\leq Cr_2$. Let $V_2^*$ be the number of degree 2 atoms with two single bonds. It is clear that $\Delta V_2^*=0$ for (3D3-1N), (3R-1N) and (2R-4F), and $\Delta V_2^*\geq 0$ for (2R-1F), and $\Delta V_2^*\geq 0$ for (BR-N) assuming $d(v_1)=d(v_2)=3$. Moreover, equality holds for (BR-N) if and only if the bridge removed is special. Therefore, with at most $Cr_2$ exceptions, all the bridges removed in (BR-N) are special.
Consider the increment of the number of special bonds, denoted by $\xi$. Clearly $\Delta\xi=0$ for (2R-1F) and (2R-4F); for (BR-N) which removes a special bridge, we can check that this operation cannot make any existing non-special bond special, so $\Delta\xi=-1$. Moreover, by our algorithm, whenever we perform (3R-1N), it is always assumed that the component contains no special bond after this step, so $\Delta\xi\leq 0$. Similarly, whenever we perform (3D3-1N) we are always in (3-b) or (3-c-iii) (or in (3-c-i) but then the next operation will be good). For (3-c-iii), $v_3$ and $v_4$ are the only two degree 3 atoms in the component after performing (3D3-1N), and they are not connected by a special bond (otherwise we are in (3-c-i)), so this step also does not create any special bond, hence $\Delta\xi\leq 0$.
Now let us consider operations (3D3-1N) occurring in (3-b). By our algorithm, if we also include the possible (3D3-2G), then such steps occur in the form of sequences which follow the type II chains in the molecule. For any operation in this sequence \emph{except} the last one, we must have $\Delta\xi=0$ (because in this case, after (3D3-1N), neither $v_3$ nor $v_4$ is connected to a degree 3 atom by a single bond). Moreover, if for the last one in the sequence we do have $\Delta\xi>0$, then immediately after this sequence we must have a good operation (because in this case, after we finish the sequence and move to (3-c), either $v_3$ or $v_4$ will have degree 3 instead of 4, so we must be in (3-c-ii)). Since the number of good operations is at most $r_2$, we know that the number of operations for which $\Delta\xi>0$ is at most $Cr_2$. Thus, considering the increment of $\xi$, we see that $z_1\leq Cr_2$. \subsubsection{Ladders}\label{lad1} Now we see that the number of steps \emph{different from} (3D3-1N) is at most $C(r_1+r_2)$. In particular the number of steps (3D3-1N) occurring in (3-c-i) and (3-c-iii) is also at most $C(r_1+r_2)$ because each of them must be followed by an operation different from (3D3-1N). As for the sequences of (3D3-1N) or (3D3-2G) occurring in (3-b), each sequence corresponds to a ladder, and each chain can be as long as $Cn$, but the number of chains must be at most $C(r_1+r_2)$ for the same reason. Note that some of the bonds in the ladders may not exist in the original base molecule, but the number of those bonds is again at most $C(r_1+r_2)$ because (3S3-3G) and (3D3-3G) are both good steps. Upon further dividing, we can find these (at most $C(r_1+r_2)$) ladders in the original molecule $\mathbb M$, such that the number of atoms not belonging to one of these ladders is at most $C(r_1+r_2)$. By the definition of $\rho$ (see Proposition \ref{kqmainest2}), we know that $\rho\leq C(r_1+r_2)$. 
\subsubsection{Conclusion} Now we can prove Proposition \ref{moleprop2}. First, if $E=2V-1$, then as shown in Section \ref{incre1} we must have $r_2\geq 1$, hence $\mathfrak A\lesssim L^{-3\gamma_0/5}$ by definition of good operations; here note that, for operation (3D3-1N), which is the only operation whose number is not controlled by $C(r_1+r_2)$, we do not have any logarithmic loss due to Proposition \ref{3d3prop}, so the possible logarithmic losses can be easily accommodated. In the same way, if $E=2V-2>0$, then the total number of operations is $\leq C(r_1+r_2)$ as shown above, and this total number must be positive if $E>0$, so we know $r_1+r_2\geq 1$ and hence $\mathfrak A\lesssim L^{-\eta/3}$ (again considering possible log losses).
Finally, suppose $\mathbb M$ has at most $w$ atoms not of degree $4$ and is allowed to have at most $w$ SG atoms. Then we may first remove each of the $w$ SG atoms, where for each operation we trivially have $\mathfrak D\lesssim L^C$. The resulting molecule still has at most $C(w+1)$ atoms not of degree $4$, which allows us to apply the algorithm described in Section \ref{alg}. All the arguments in Sections \ref{incre1}--\ref{lad1} still apply, if one allows remainders of size $C(w+1)$ (for example the values of $\nu_0$ and $V_{30}$ in (\ref{increeta}) and (\ref{increv3}) will both be $\leq C(w+1)$, etc.). In the end, using the definition of good and fine operations, we get that \[\mathfrak A\lesssim L^{C(w+1)}\cdot L^{-(r_1+r_2)\eta/3}\qquad\mathrm{and}\qquad \rho\leq C(r_1+r_2)+C(w+1),\] which clearly implies $\mathfrak A\lesssim L^{-\eta^2\rho+C(w+1)}$, as desired. \subsection{Proof of Proposition \ref{moleprop3}} In this subsection we prove Proposition \ref{moleprop3}. The proof involves a different procedure with the following operations, which also occur in other sections, but for simplicity we shall give them specific names that are used \emph{only in this subsection}. These include: (R), where we remove a degree $2$ or $3$ atom, (B), where we remove a bridge (in the sense of Section \ref{secbr}), and ($\beta$), where we perform a $\beta$-cut at a degree $4$ atom such that none of its bonds is a bridge. Note that removing a bridge does not affect whether or not any other bond is a bridge, and also does not create any new possibility of $\beta$-cut as in ($\beta$).
Start with the molecule $\mathbb M$ described in Proposition \ref{moleprop3}; note in particular that $\mathbb M$ has no ladders. We use $\mathbb M\rightarrow (3,3)[4]$ to indicate that $\mathbb M$ has two degree $3$ atoms and the other atoms have degree $4$ (similarly $\mathbb M\rightarrow (2)[3,4]$ means that $\mathbb M$ has one degree $2$ atom and the other atoms have degree $3$ and $4$, etc.). If $\mathbb M$ has a bridge, then we remove it by (B) to get two components $\mathbb M_1$ and $\mathbb M_2$. It is easy to check that either $\mathbb M_j\rightarrow (3,3)[4]$ or $\mathbb M_j\rightarrow (2)[4]$ for each $j$ (since the sum of degrees is always even), but if $\mathbb M_j\rightarrow (2)[4]$ then removing the degree $2$ atom by (R) also yields a molecule $\widetilde{\mathbb M_j}\rightarrow(3,3)[4]$. If $\mathbb M$ admits a $\beta$-cut as specified above, then we perform operation ($\beta$), then one of the resulting components will be $\rightarrow (2)[4]$, and the other satisfies $\mathfrak A\lesssim1$ by Proposition \ref{moleprop1}. As all the above operations satisfy $\mathfrak D\lesssim 1$ due to the LG assumption, we may always reduce to the case where $\mathbb M\rightarrow(3,3)[4]$ has no bridge and admits no $\beta$-cuts as above (so in particular removing any atom in $\mathbb M$ does not create any new component).
Now we remove one degree $3$ atom $v_1$ in $\mathbb M$ by (R), and denote the bonds by $(\ell_1,\ell_2,\ell_3)$. Consider the following possibilities after this operation: \begin{enumerate}[{(1)}]
\item If there is a degree $3$ atom $v_2$ with bonds $(\ell_4,\ell_5,\ell_6)$ such that only $\ell_4$ is a bridge, then by Lemma 9.14 of \cite{DH21}, for some $j\in\{1,2,3\}$ we must have $k_{\ell_4}\pm k_{\ell_j}$ equals constant and $|k_{\ell_4}|^2\pm|k_{\ell_j}|^2$ equals constant up to distance $n\delta^{-1}L^{-2\gamma}$ for some choice of $\pm$, where $n$ is the size of $\mathbb M$. Then we remove $(v_1,v_2)$ and all bonds; since this results in $\Delta F=1$ (recall $F$ is the number of components) and $\Delta\chi=-3$, using Lemma \ref{basiccount} (4), we see that this composition operation has $\mathfrak D\lesssim L^{-\gamma_0}(\log L)^C$. The same result holds if there is a degree $2$ atom $v_2$ with two bonds that are not bridges.
\item If there is a degree $4$ atom $v_2$ with no bridge at which a $\beta$-cut is possible, say along the bonds $(\ell_4,\ell_5)$ and $(\ell_6,\ell_7)$. Then by Lemma 9.14 of \cite{DH21}, for some choice of $\pm$ and $j\in\{1,2,3\}$ we have $k_{\ell_j}\pm k_{\ell_4}\pm k_{\ell_5}$ equals constant and $|k_{\ell_j}|^2\pm |k_{\ell_4}|^2\pm |k_{\ell_5}|^2$ equals constant up to distance $n\delta^{-1}L^{-2\gamma}$. Then we remove $(v_1,v_2)$ and all bonds; since $\Delta\chi=-4$, by using Lemma \ref{basiccount} (4) for $(k_{\ell_1},\cdots,k_{\ell_5})$ and Lemma \ref{basiccount} (1) plus LG assumption for $(k_{\ell_6},k_{\ell_7})$, we see that this composition operation has $\mathfrak D\lesssim L^{-\gamma_0-\eta}(\log L)^C$.
\item If there is a degree $4$ atom $v_2$ with bonds $(\ell_4,\cdots,\ell_7)$ such that exactly two bonds (say $\ell_4$ and $\ell_5$) are bridges (we may assume $\ell_4$ and $\ell_5$ are in opposite directions or the proof will be much easier using the better bounds in Lemma \ref{basiccount} (1)), then there are three components after removing $v_1$ (and all bonds) and $(\ell_4,\ell_5)$, denote them by $X_j\,(1\leq j\leq 3)$ with $v_2\in X_1$. Then, since $\ell_4$ and $\ell_5$ are not bridges before removing $v_1$, and one cannot make a $\beta$-cut before removing $v_1$, we see that the three other endpoints for $\ell_j\,(1\leq j\leq 3)$ must be in $X_j$ respectively. By Lemma 9.14 of \cite{DH21}, this implies that $k_{\ell_1}\pm k_{\ell_6}\pm k_{\ell_7}$ equals constant and $|k_{\ell_1}|^2\pm |k_{\ell_6}|^2\pm |k_{\ell_7}|^2$ equals constant up to distance $n\delta^{-1}L^{-2\gamma}$, for some choice of $\pm$. Then we remove $(v_1,v_2)$ and all bonds; since $\Delta\chi=-3$, by using Lemma \ref{basiccount} (4) we see that this composition operation has $\mathfrak D\lesssim L^{-\gamma_0}(\log L)^C$. \item If none of (1)--(3) holds, then we remove $v_1$ and all bonds, and then remove all the subsequent bridges. In each resulting component, there will be no degree $2$ atom (which would correspond to one of (1)--(3)) nor degree $1$ atom (which would correspond to bridges), so each (nontrivial) component will be $\rightarrow[3,4]$, and there is no bridge nor $\beta$ cut possibilities. We then remove another degree $3$ atom and repeat the above procedure, until one of (1)--(3) happens or the molecule becomes trivial (i.e. with no bonds). But the latter case is impossible, since removing one degree $3$ atom has $\Delta\chi=-2$, removing one bridge has $\Delta\chi=0$, and for any molecule $\mathbb M\rightarrow [3,4]$ with only single bonds we have $\chi\geq V/2+1\geq 3$. \end{enumerate}
By the above argument, we have shown that we will be able to perform at least one good operation in (1)--(3) such that either $\mathfrak D\lesssim L^{-\gamma_0}(\log L)^C$ and $\Delta\chi=-3$, or $\mathfrak D\lesssim L^{-\gamma_0-\eta}(\log L)^C$ and $\Delta\chi=-4$. In the latter case we are already done since all other operations in the sequence trivially have $\mathfrak D\lesssim 1$; in the former case, we further remove all bridges after the good operation. If the resulting molecule is nontrivial, then either repeating the above argument or exploiting a degree $2$ atom using Lemma \ref{basiccount} (1) and LG assumption will gain another power $L^{-\eta}$ which allows us to close. Finally, if the resulting molecule is trivial, then we have $\chi=0$ after the good operation which has $\Delta \chi=-3$. Since the molecule $\mathbb M'$ before the good operation has only single bonds, and $\mathbb M'\rightarrow[3,4]$ and $\chi=3$, the only possibility for $\mathbb M'$ is $K_4$ (i.e. a complete graph of $4$ atoms with only single bonds). But in this last case, we can work directly with $\mathbb M'$ and apply Lemma \ref{basiccount} (5) to get $\mathfrak A'\lesssim L^{-\gamma_0-\eta}$ for the molecule $\mathbb M'$. This completes the proof of Proposition \ref{moleprop3}. \subsection{Proof of Proposition \ref{moleprop4}}\label{lgmole4} In this subsection we prove Proposition \ref{moleprop4}. For this purpose we need to introduce one more operation (and operation sequence), which we denote by (Y), as follows. Suppose $\mathbb M$ contains a triple bond between two atoms $(v_1,v_2)$. 
If these two atoms have at most one extra bond, or if they have two extra single bonds connecting to the same third atom, then we remove them and all the bonds, and call this (Y1); otherwise there are two extra single bonds connecting $v_1$ to $v_3$ and $v_2$ to $v_4$ (with $v_3\neq v_4$), then we remove $(v_1,v_2)$ and all the bonds, and add one new bond between $v_3$ and $v_4$ matching the directions of the removed single bonds, and call this (Y2). If after (Y2) a new triple bond forms between $v_3$ and $v_4$, we then apply another operation (Y) to them, and so on, until there are no more triple bonds, and call this the (Y) sequence. Note that a (Y) sequence may involve a ladder, as illustrated in Figure \ref{fig:yoper}.
\begin{figure}
\caption{The structure involved in (Y) sequence, which may contain a ladder. In the sequence we remove all atoms up to $v_{2n}$ and all bonds; if the last operation is (Y2), we also add a new bond (the blue one) between $v_{2n+1}$ and $v_{2n+2}$.}
\label{fig:yoper}
\end{figure}
We need two lemmas concerning (Y) sequences before proving Proposition \ref{moleprop4}. \begin{lem}\label{ylem1} Suppose $\mathbb M$ is $4$-regular with \emph{at most two triple bonds} and $\mathbb M'$ is formed from $\mathbb M$ by (Y) sequences. If $\mathbb M'$ is \emph{either a quadruple bond or a triangle formed by $3$ double bonds}, then $\mathbb M$ is formed by removing the two joints of a vine and adding one new bond between the pair of atoms connected to each joint. \end{lem} \begin{proof} The proof is an enumeration of all possibilities. Clearly going from $\mathbb M$ to $\mathbb M'$ involves at most two (Y) sequences with last operation (Y2). To invert one such sequence, one simply selects a non-triple bond from $\mathbb M'$ (see e.g. the blue one in Figure \ref{fig:yoper}), remove it, then insert the new structure shown in case (Y2) of Figure \ref{fig:yoper}; we call this (Z). If $\mathbb M'$ is a quadruple bond, then by applying (Z) once, we get a pseudomolecule formed from vine (II) (by removing the two joints of a vine and adding one new bond between the pair of atoms connected to each joint; same below). This already has two triple bonds, so one cannot further apply (Z) at any other bond (or one would produce a third triple bond), and hence $\mathbb M$ is formed by vine (II).
Now suppose $\mathbb M'$ is a triangle formed by $3$ double bonds. We may choose any bond $\ell\in \mathbb M$ and apply (Z) to get an intermediate pseudomolecule $\widetilde{\mathbb M}$, which can be formed from vines (III), (IV), (V), (VII) or (VIII), and contains only one triple bond. Then we may choose one non-triple bond $\ell'$ in $\widetilde{\mathbb M}$ and apply (Z) again to get $\mathbb M$. The structure of $\mathbb M$ depends on which bond we choose: \begin{itemize} \item If $\ell'$ is the other bond in the double bond containing $\ell$, then $\mathbb M$ is formed from vine (IV); \item If $\ell'$ is from another double bond in the triangle $\mathbb M'$, then $\mathbb M$ is formed from vine (III); \item If $\ell'$ is a double bond inserted in the first (Z) operation, then $\mathbb M$ is formed from vine (VII); \item If $\ell'$ is a single bond inserted in the first (Z) operation, then $\mathbb M$ is formed from (VI). \end{itemize} In any case, this proves Lemma \ref{ylem1}. \end{proof} \begin{lem}\label{ylem2} Suppose $\mathbb M'$ is formed from $\mathbb M$ by one (Y) sequence. Fix a bond $\ell\in\mathbb M$, and if the last operation is (Y2), let the newly added bond be $\ell'$. Then we have $\mathfrak A(\mathbb M)\lesssim (\log L)^C\cdot\mathfrak A(\mathbb M')$. Moreover, if the last operation is (Y2), and $\ell$ is removed in the (Y) sequence, then $\mathfrak A(\mathbb M\backslash\{\ell\})\lesssim (\log L)^C\cdot\mathfrak A(\mathbb M'\backslash\{\ell'\})$. \end{lem} \begin{proof} We use the notation of the different cases in Figure \ref{fig:yoper}. First note that, in any case, the (Y) sequence removes a ladder of length $n-2$ as in Figure \ref{fig:yoper}. 
If the last operation is (Y1) in case 1 or case 2, then we have $\Delta\chi=-2n$; once a decoration of $\mathbb M'$ is fixed, we may examine the remaining part of decoration, going from bottom to top, using Lemma \ref{basiccount} (3) for each step and Lemma \ref{basiccount} (2) for the last step, to bound $\mathfrak D\lesssim (\log L)^C$ for this (Y) sequence. Note that for all but $O(1)$ operations in this (Y) sequence we have the sharp bound $\mathfrak D\lesssim 1$, in the same way as (\ref{laddersharp}), in view of the $\mathfrak X^{-1}$ factor in Lemma \ref{basiccount} (3) and the definition of the $\mathfrak P$ product. Next, if the last operation is (Y1) in case 3, then we have $\Delta\chi=-(2n+1)$; once a decoration of $\mathbb M'$ is fixed, we again go from bottom to top in exactly the same way as above, the only difference being that we consider the two bonds at $v_{2n+1}$ in the first step, using Lemma \ref{basiccount} (1) and the LG assumption at $v_{2n+1}$, to get $\mathfrak D\lesssim L^{-\eta/2}$.
Now suppose the last operation is (Y2) with $\Delta\chi=-2n$. Given a decoration of $\mathbb M$, let the two bonds connecting $v_{2n-1}$ and $v_{2n}$ to $v_{2n-3}$ and $v_{2n-2}$ (see Figure \ref{fig:yoper}) be $\ell_1$ and $\ell_2$, and let the new bond be $\ell'$, then $k_{\ell_1}-k_{\ell_2}$ is fixed and $|k_{\ell_1}|^2-|k_{\ell_2}|^2$ is fixed up to distance $\delta^{-1}L^{-2\gamma}$ due to Lemma 9.14 of \cite{DH21}. We then define a decoration of $\mathbb M'$, as in operation (d) and (e) in the proof of Proposition \ref{kqmainest2} in Section \ref{lgmole1}, by assigning $k_{\ell'}=k_{\ell_2}+g(k_{\ell_1}-k_{\ell_2})$ for some $|g|\lesssim 3$. This will keep the LG assumption for $\mathbb M'$ (which is weakened by a constant multiple, but this does not matter since we will only ever perform (Y) sequence $O(1)$ times). Once a decoration of $\mathbb M'$ is fixed, then $k_{\ell_1}$ and $k_{\ell_2}$ are also fixed, and we can go from bottom to top just as above to show that $\mathfrak D\lesssim (\log L)^C$ for this (Y) sequence.
Finally, assume the last operation is (Y2) and $\ell$ is a bond removed in this sequence, then $\mathbb M'\backslash\{\ell'\}$ is formed from $\mathbb M\backslash\{\ell\}$ by removing all atoms up to $v_{2n}$ and all bonds other than $\ell$, an operation with $\Delta\chi=-2n$. Once a decoration of $\mathbb M'\backslash\{\ell'\}$ is fixed, we simply start from the atom $v_j$ containing $\ell$ and apply Lemma \ref{basiccount} (2), then apply Lemma 9.14 of \cite{DH21} to fix the values of $k_{\ell_i}$ for bonds $\ell_i$ at the atom $v_{j\pm 1}$ connected to $v_j$ by a double or triple bond. Next, we simply go from $(v_j,v_{j\pm1})$ both upwards and downwards, using Lemma \ref{basiccount} (2) and (3) exactly as above, to show that $\mathfrak D\lesssim (\log L)^C$ for this sequence of operation. This completes the proof. \end{proof} With Lemmas \ref{ylem1} and \ref{ylem2}, we can now prove Proposition \ref{moleprop4}. \begin{proof}[Proof of Proposition \ref{moleprop4}] Start with (1). Since $\mathbb M$ has at most one triple bond, we can apply (Y) sequence once to get $\mathbb M'$ which has no triple bond. The last operation cannot be (Y1) case 3 due to the $\beta$-cut assumption for $\mathbb M$, and cannot be (Y1) case 1 because then $\mathbb M$ would be formed from vine (II) by removing the two joints of a vine (II) and adding one new bond between the two atoms connected to one of the joints. Now, if the last operation is either (Y1) case 2 or (Y2), then we have $E=2V-1$ for $\mathbb M'$, so by Proposition \ref{moleprop2} and Lemma \ref{ylem2} we get \[\mathfrak A\lesssim (\log L)^C\mathfrak A'\lesssim L^{-3\gamma_0/5}(\log L)^C\lesssim L^{-4\gamma_0/7}.\]
Now consider (2). Since $\mathbb M$ has at most two triple bonds and is not formed from a vine, using Lemma \ref{ylem1}, we can apply at most two (Y) sequences to reduce it to $\mathbb M'$, which does not contain triple or quadruple bond, and is not a triangle formed by $3$ double bonds. By Lemma \ref{ylem2} it suffices to prove the same result (with slightly better powers) for $\mathbb M'$ (say with a fixed bond $\ell'$).
If the last operation is (Y1), which must be case 3 (since $\mathbb M$ is 4-regular), then we have $E=2V-1$ for $\mathbb M'$, so the result for $\mathfrak A(\mathbb M')$ follows from Proposition \ref{moleprop2}; now we assume the last operation is (Y2), so $\mathbb M'$ is $4$-regular (hence cannot contain any bridge, because otherwise we get a component with odd total degree after removing the bridge). If either $\mathbb M'$ admits a $\beta$-cut or $\mathbb M'\backslash\{\ell'\}$ has a bridge, then by performing this cut or removing this bridge we can divide $\mathbb M'$ or $\mathbb M'\backslash\{\ell'\}$ into two molecules with $E=2V-1$ and no triple bonds, so the result for $\mathfrak A(\mathbb M')$ or $\mathfrak A(\mathbb M'\backslash\{\ell'\})$ follows from Proposition \ref{moleprop2} (note also that $(L^{-3\gamma_0/5})^2\lesssim L^{-\gamma_0-\eta_0/2}$). Finally, if $\mathbb M'$ has only single bonds, then the result for $\mathfrak A(\mathbb M'\backslash\{\ell'\})$ follows from Proposition \ref{moleprop3}.
From now on, assume that $\mathbb M'$ does not admit any $\beta$-cut, has at least one double bond but no triple bond, and $\mathbb M'\backslash\{\ell'\}$ has no bridge. Consider the following cases, where in each case we also assume that no earlier cases happen:
(I) Suppose a double bond, say between two atoms $v_1$ and $v_2$, shares a common atom with the fixed bond $\ell'$, then we choose to prove $\mathfrak A(\mathbb M'\backslash\{\ell'\})\lesssim L^{-\gamma_0-\eta/4}$. In fact since $\mathbb M'$ does not admit any bridge or $\beta$-cut, we see that removing the two atoms $(v_1,v_2)$ and all the bonds from $\mathbb M'\backslash\{\ell'\}$ has either $\Delta\chi=-3$ or $\Delta\chi=-2$. In the former case we have $\mathfrak D\lesssim L^{-\gamma_0}(\log L)^C$ by Lemma \ref{basiccount} (4), and the resulting molecule has $E=2V-2>0$ without triple bond, so by Proposition \ref{moleprop2} we have $\mathfrak A(\mathbb M'')\lesssim L^{-\eta/3}$, and hence $\mathfrak A(\mathbb M'\backslash\{\ell'\})\lesssim L^{-\gamma_0-\eta/4}$. In the latter case we have $\mathfrak D\lesssim (\log L)^C$ by Lemma \ref{basiccount} (3), and the resulting molecule $\mathbb M''$ has two components each satisfying $E=2V-1$ and having no triple bond. By Proposition \ref{moleprop2} we have $\mathfrak A(\mathbb M'')\lesssim (L^{-3\gamma_0/5})^2$, which again implies $\mathfrak A(\mathbb M'\backslash\{\ell'\})\lesssim L^{-\gamma_0-\eta/4}$.
(II) Suppose two bonds $(\ell_1,\ell_2)$ form a \emph{bi-bridge}, such that $\mathbb M'$ becomes disconnected after removing both bonds; by assumption we know $\ell'\not\in\{\ell_1,\ell_2\}$. By Lemma 9.14 of \cite{DH21} we know that $k_{\ell_1}- k_{\ell_2}$ is fixed and $|k_{\ell_1}|^2- |k_{\ell_2}|^2$ is fixed up to distance $O(n\delta^{-1}L^{-2\gamma})$, where $n\lesssim(\log L)^C$ is the size of $\mathbb M'$. If $k_{\ell_1}\neq k_{\ell_2}$, then we choose to prove $\mathfrak A(\mathbb M')\lesssim L^{-\eta/4}$, and remove the bonds $(\ell_1,\ell_2)$ to get a new molecule $\mathbb M''$. By Lemma \ref{basiccount} (1) we have $\mathfrak D\lesssim L^{\gamma_0}(\log L)^C$ for this operation, and $\mathbb M''$ has two components with $E=2V-1$ and no triple bonds, so Proposition \ref{moleprop2} implies that \[\mathfrak A(\mathbb M')\lesssim L^{\gamma_0}(\log L)^C\mathfrak A(\mathbb M'')\lesssim L^{\gamma_0}(\log L)^C(L^{-3\gamma_0/5})^2\lesssim L^{-\eta/4}.\]
Now if $k_{\ell_1}=k_{\ell_2}$, then we choose to prove $\mathfrak A(\mathbb M'\backslash\{\ell'\})\lesssim L^{-\gamma_0-\eta/4}$. We remove the bonds $(\ell_1,\ell_2)$ from $\mathbb M'\backslash\{\ell'\}$, but add one new bond $\ell_3$ between the two endpoints of $\ell_1$ and $\ell_2$ that belong to the component containing $\ell'$, matching the directions of $(\ell_1,\ell_2)$. This generates a new molecule $\mathbb M''$, and any decoration of $\mathbb M'\backslash\{\ell'\}$ leads to a decoration of $\mathbb M''$ by defining $k_{\ell_3}=k_{\ell_1}=k_{\ell_2}$, so the operation going from $\mathbb M'\backslash\{\ell'\}$ to $\mathbb M''$ has $\mathfrak D\lesssim(\log L)^C$. By our choice, $\mathbb M''$ has two components with $E=2V-1$ and at most one triple bond. Moreover the component with (possibly) one triple bond cannot be the exceptional case described in part (1) above, because then it would have a double bond between the only two degree $3$ atoms, which is impossible because $\mathbb M'$ has no triple bond and the two endpoints of $\ell'$ cannot be connected by a double bond in $\mathbb M'\backslash\{\ell'\}$. Therefore, by using Propositions \ref{moleprop2} and part (1) just proved, first considering the component of $\mathbb M''$ containing $\ell_3$ and then the one not containing $\ell_3$, we get \[\mathfrak A(\mathbb M'\backslash\{\ell'\})\lesssim(\log L)^C\mathfrak A(\mathbb M'')\lesssim (\log L)^C(L^{-3\gamma_0/5})^2\lesssim L^{-\gamma_0-\eta/4}.\]
(III) Suppose $\mathbb M'$ has a double bond $(\ell_1,\ell_2)$ between two atoms $v_1$ and $v_2$, and each $v_j$ has two extra bonds $(\ell_{2j+1},\ell_{2j+2})$, such that $|k_{\ell_i}\pm k_{\ell_j}|\leq L^{-\gamma+\eta}$ for some $i\in\{3,4\}$ and $j\in\{5,6\}$. We then choose to prove $\mathfrak A(\mathbb M')\lesssim L^{-\eta/4}$, and remove $(v_1,v_2)$ and all the bonds. This does not disconnect $\mathbb M'$ (otherwise $\mathbb M'$ would admit a $\beta$-cut or bi-bridge), so $\Delta\chi=-4$. The number of choices for $(k_{\ell_1},\cdots,k_{\ell_6})$ is bounded by first fixing $(k_{\ell_i},k_{\ell_j})$ and applying Lemma \ref{basiccount} (3), which results in \[L^d(L^{1-\gamma+\eta})^dL^{2(d-\gamma)}\lesssim L^{4(d-\gamma)-\gamma_0+d\eta},\] so $\mathfrak D\lesssim L^{-\gamma_0/2}$ for this operation, and the new molecule $\mathbb M''$ satisfies $\mathfrak A(\mathbb M'')\lesssim 1$ by Proposition \ref{moleprop1}, which then implies $\mathfrak A(\mathbb M')\lesssim L^{-\eta/4}$.
(IV) Finally, suppose $\mathbb M'$ has a double bond as in (III), but no inequality $|k_{\ell_i}\pm k_{\ell_j}|\leq L^{-\gamma+\eta}$ holds (and $\ell'\not\in\{\ell_1,\cdots,\ell_6\}$). Then we merge the two atoms $(v_1,v_2)$ into one atom which has bonds $(\ell_3,\cdots,\ell_6)$, to get a new pseudomolecule $\mathbb M''$. A decoration of $\mathbb M'$ naturally leads to a decoration of $\mathbb M''$, which is also LG by our assumptions; the operation going from $\mathbb M'$ to $\mathbb M''$ has $\Delta\chi=-1$ and $\mathfrak D\lesssim L^{-\eta/2}$ by Lemma \ref{basiccount} (1) and the LG assumption at $v_1$. Therefore the bound for $\mathfrak A(\mathbb M')$ (or $\mathfrak A(\mathbb M'\backslash\{\ell'\})$) follows from the same bound for $\mathfrak A(\mathbb M'')$ (or $\mathfrak A(\mathbb M''\backslash\{\ell'\})$). Note that $\mathbb M''$ has no quadruple bond (otherwise $\mathbb M'$ would be a triangle of double bonds) and no triple bond (otherwise $\mathbb M'$ would contain a triangle with one single bond and two double bonds, and the two outgoing bonds of this triangle would form a bi-bridge), so if $\mathbb M''$ is not a triangle of double bonds, we can repeat the same arguments above for $\mathbb M''$ until it either becomes a triangle of double bonds or runs out of double bonds (in this latter case we prove the bound for $\mathfrak A(\mathbb M''\backslash\{\ell'\})$ using Proposition \ref{moleprop3}). If $\mathbb M''$ is a triangle of double bonds, then $\mathbb M'$ must be a quadrilateral of double bonds, in which case we can prove the bounds for $\mathfrak A(\mathbb M'\backslash\{\ell'\})$ by first using Lemma \ref{basiccount} (4) and then using Lemma \ref{basiccount} (1) plus the LG assumption. This completes the proof of Proposition \ref{moleprop4}. 
\end{proof} \subsection{Addressing degenerate cases}\label{extradegen} In this subsection we discuss the possibility of degenerate cases, defined by $k_2\in\{k_1,k_3\}$ (and hence $k_1=k_2=k_3$) in (\ref{defset}), see Remark \ref{nonresrem}. Such degeneracies may occur at various stages in the main proof above, but due to the strong restriction $k_1=k_2=k_3$, they enjoy much better summation and counting estimates etc. than non-degenerate cases $k_2\not\in\{k_1,k_3\}$, and are easily addressed. We briefly demonstrate this below.
(1) Regular couples, trees and vines: Consider the regular couples $\mathcal Q^{(\mathfrak l,\mathfrak l')}$ and regular trees $\mathcal T^{(\mathfrak m)}$ during the reduction process from $\mathcal Q$ to $\mathcal Q_{\mathrm{sk}}$ in Section \ref{primered}, or in the vine-like objects $\mathbb U_j$ during the stage 1 reduction from $\mathcal Q_{\mathrm{sk}}$ to $\mathcal Q_{\mathrm{sub}}$ in Section \ref{stage1red}. As is clear from the proofs of Proposition \ref{regcpltreeasymp} and Lemmas \ref{sumintest1}--\ref{sumintest2}, any degeneracies occurring in these expressions only produce lower order remainder terms (for example, they correspond to $x_j=y_j=0$ for some $j$ in Lemma \ref{sumintest1}), so they do not affect the proof.
(2) Molecule structure: As shown in Remark \ref{moleremark}, the molecule $\mathcal Q_{\mathrm{sub}}$ may have a degree $2$ atom instead of two degree $3$ atoms if degeneracy is allowed. However in this case the degree $2$ atom $v$ must be degenerate, and the values $k_{\ell_1}=k_{\ell_2}$ for its two bonds must be fixed, so we simply remove this atom (and more atoms connected to it if needed) to reduce to the case of two degree $3$ atoms. This operation will have a huge gain $\mathfrak D\lesssim L^{-d+\gamma+\eta}$, which is enough to cover all possible losses that may occur later, so we just proceed normally thereafter. As for self connecting bonds, they are left for now and will be treated in later steps.
(3) The cutting process: Consider the cutting operations during stage 2 reduction in Section \ref{reductcut0}. Note that in selecting the collection $\mathscr V_0$ of SG vines in Section \ref{stage1red} we shall exclude those vines of \emph{zero gap} (i.e. with degenerate joints), so all the hinge atoms (see Proposition \ref{subpro}) produced in stage 1 reduction will not be degenerate once they are cut according to the rules in Section \ref{reductcut0}. Moreover, we shall not make any cut at non-hinge degenerate atoms, so there will be no degenerate atoms involved in the cutting process, and any $\alpha$- or $\beta$- atom will not be degenerate; however, we also allow degenerate $\varepsilon$-atoms in the resulting molecule $\mathbb M_{\mathrm{fin}}$.
(4) The final molecule: Suppose there is a degenerate $\varepsilon$-atom $v$ in a component $\mathbb M_0$ of the final molecule $\mathbb M_{\mathrm{fin}}$. Assume $v$ has $4$ bonds $\ell_j\,(1\leq j\leq 4)$, as other cases are similar and easier, then $k_{\ell_1}=\cdots =k_{\ell_4}:=k_0$. We shall remove this atom $v$; if $\Delta\chi\leq -2$ then this operation has a big gain $\mathfrak D\lesssim L^{-d+2\gamma+\eta}$ which is enough to cover all possible losses and we just proceed normally (using Proposition \ref{moleprop1}) thereafter. If $\Delta\chi\geq -1$, then one of the bonds $\ell_j$ must be a bridge, so in the specific counting problem for $\mathbb M_0$, the value of $k_0$ will be uniquely fixed due to Lemma 9.14 of \cite{DH21}; in this case the exact value of $k_{\ell_j}$ is not important, so we may replace them by some arbitrary non-degenerate configurations and proceed normally as in Sections \ref{lgmole1}--\ref{lgmole4} above.
Combining cases (1)--(4) above, this finishes the discussion of degenerate cases and concludes the proof of Propositions \ref{mainprop1}--\ref{mainprop4}. \section{Linearization and the end of the proof}\label{linoper} \subsection{Proof of Proposition \ref{mainprop2}}\label{linoper1} In this subsection we prove Proposition \ref{mainprop2}. The proof is a slight modification of the proofs of Propositions \ref{mainprop1} and \ref{mainprop4}, following the same arguments in Section 11 of \cite{DH21}, but with a few differences specific to this paper. \subsubsection{Construction of $\mathscr X$} Recall the notion of \emph{flower trees} and \emph{flower couples} defined in Definition 11.1 of \cite{DH21}: a flower tree is a ternary tree with one leaf specified (called \emph{flower}), and a flower couple is formed by two flower trees with their leaves paired, such that the two flowers are paired to each other. The \emph{stem} of a flower tree is the unique path from its root to its flower.
For any flower tree $\mathcal T$ and flower couple $\mathcal Q$, define the quantities \begin{equation}\label{jtflower}\widetilde{\Jc}_\mathcal T(t,s,k,k')=\bigg(\frac{\delta}{2L^{d-1}}\bigg)^m\zeta(\mathcal T)\sum_\mathscr D\epsilon_\mathscr D\int_{\mathcal D}\prod_{\mathfrak n\in\mathcal N}e^{\zeta_\mathfrak n\pi i \delta L^2\Omega_\mathfrak n t_\mathfrak n}\mathrm{d}t_\mathfrak n\cdot\boldsymbol{\delta}(t_{\mathfrak f^p}-s)\prod_{\mathfrak f\neq\mathfrak l\in\mathcal L}\sqrt{n_{\mathrm{in}}(k_\mathfrak l)}\eta_{k_{\mathfrak l}}^{\zeta_{\mathfrak l}}(\omega)\mathbf{1}_{k_\mathfrak f=k'},\end{equation} \begin{equation}\label{kqflower}\widetilde{\mathcal K}_\mathcal Q(t,s,k,k')=\bigg(\frac{\delta}{2L^{d-1}}\bigg)^{2m}\zeta(\mathcal Q)\sum_\mathscr E\epsilon_\mathscr E\int_{\mathcal E}\prod_{\mathfrak n\in\mathcal N}e^{\zeta_\mathfrak n\pi i \delta L^2\Omega_\mathfrak n t_\mathfrak n}\mathrm{d}t_\mathfrak n\prod_{\mathfrak f}\boldsymbol{\delta}(t_{\mathfrak f^p}-s){\prod_{\mathfrak f\neq\mathfrak l\in\mathcal L^*}^{(+)}n_{\mathrm{in}}(k_\mathfrak l)}\mathbf{1}_{k_\mathfrak f=k'},\end{equation} which are slight modifications of (\ref{defjt}) and (\ref{defkq}) in the same way as (11.2) and (11.3) of \cite{DH21}. Here in (\ref{jtflower}), $\mathscr D$ is a $k$-decoration of $\mathcal T$, $\mathcal D$ is defined as in (\ref{defdomaind}), and the other objects are associated with the tree $\mathcal T$. In (\ref{kqflower}), $\mathscr E$ is a $k$-decoration of $\mathcal Q$, the other objects are associated with the couple $\mathcal Q$, and the set $\mathcal E$ is defined as in (\ref{defdomaine}) but with $s$ replaced by $t$; the second product is taken over the two flower leafs $\mathfrak f$ and in the last product we assume $\mathfrak l$ has sign $+$ and is not one of the two flowers $\mathfrak f$ of the flower couple $\mathcal Q$.
We now define the $\mathbb R$-linear operators $\mathscr X$ and $\mathscr X_m$ in Proposition \ref{mainprop2} such that the kernel of $\mathscr X_m$ satisfies \begin{equation}\label{kernelx}(\mathscr X_m)_{kk'}^\zeta(t,s)=\sum_{\mathcal T}\widetilde{\Jc}_\mathcal T(t,s,k,k'), \end{equation} where the sum is taken over all flower trees $\mathcal T$ such that $\mathcal T$ has order $m$, and the root $\mathfrak r$ and flower $\mathfrak f$ of $\mathcal T$ have signs $\zeta_\mathfrak r=+$ and $\zeta_\mathfrak f=\zeta$. Then, by multiplying out the definition (\ref{jtflower}), similar to the proof of Proposition 11.2 of \cite{DH21}, it is easy to see that the operators $\mathscr Y_m$ and $\mathscr W_m$ defined in Proposition \ref{mainprop2} have the same expression as in (\ref{kernelx}), but with the sum taken over different sets of $\mathcal T$. Namely, in both cases we still require that $\mathcal T$ has order $m$ and $\zeta_\mathfrak r=+$ and $\zeta_\mathfrak f=\zeta$, but in $\mathscr Y_m$ we additionally require that (Y-1) the value $m>N$, (Y-2) the subtree rooted at each child node of $\mathfrak r$ has order $\leq N$. In $\mathscr W_m$ we additionally require that (W-1) the value $m>N$, (W-2) the subtree rooted at each of the two sibling nodes of $\mathfrak f$ has order $\leq N$, and (W-3) the flower tree obtained by replacing the parent $\mathfrak f^p$ of $\mathfrak f$ with a new flower also has order $\leq N$, see Figure \ref{fig:flowertree} for illustration. Note that the above requirement imposes that $N+1\leq m\leq 3N+1$ for both $\mathscr Y_m$ and $\mathscr W_m$.
\begin{figure}
\caption{A flower tree $\mathcal T$ with root $\mathfrak r$ and flower $\mathfrak f$. Let the order of subtrees $\mathcal T_j$ be $n_j$ etc., and the order of $\mathcal T$ be $m$. Then the condition for $\mathscr Y_m$ is that $N<m\leq N+n_1'+n_2'+1$ and $n_j'\leq N$, the condition for $\mathscr W_m$ is that $N<m\leq N+n_1+n_2+1$ and $n_j\leq N$.}
\label{fig:flowertree}
\end{figure}
By definitions (\ref{jtflower}) and (\ref{kqflower}), similar to the proof of Proposition 11.2 of \cite{DH21}, it is easy to show
\begin{equation}\label{flowerexp2}\mathbb E|(\mathscr X_m)_{kk'}^\zeta(t,s)|^2=\sum_{\mathcal Q}\widetilde{\mathcal K}_\mathcal Q(t,s,k,k'),\end{equation} where the sum is taken over all flower couples $\mathcal Q=(\mathcal T^+,\mathcal T^-)$ such that both trees have order $m$, and the flower of tree $\mathcal T^{\pm}$ has sign $\pm\zeta$. The expressions for $\mathscr Y_m$ and $\mathscr W_m$ are the same, except that both trees in $\mathcal Q$ also have to satisfy the assumptions (Y-1)--(Y-2), or (W-1)--(W-3) above.
We now need to prove that the right hand side of (\ref{flowerexp2}) satisfies (\ref{mainest2}). Since the definition of $\widetilde{\mathcal K}_\mathcal Q$ is almost the same as $\mathcal K_\mathcal Q$ in (\ref{defkq}), the proof of (\ref{mainest2}) can also be done in almost the same way as in the proof of (\ref{mainest1}) in Proposition \ref{mainprop1}, with slight modifications due to the few differences between $\widetilde{\mathcal K}_\mathcal Q$ and $\mathcal K_\mathcal Q$ that lead to a loss of at most $L^{40d}$. In fact, this modification follows the same arguments in Section 11 of \cite{DH21}, with only two differences which we will discuss below.
The first difference concerns the vine cancellation (Proposition \ref{estbadvine}), which requires to group together couples that are full twists of each other (Definition \ref{twistgen}). Let [X], [Y] and [W] be the set of conditions posed on the flower tree $\mathcal T$ by $\mathscr X_m$, $\mathscr Y_m$ and $\mathscr W_m$ respectively, as described above (see Figure \ref{fig:flowertree}). We only need to prove that, if $\widetilde{\mathcal Q}$ is a full twist of $\mathcal Q$, then both trees of $\widetilde{\mathcal Q}$ satisfy [X] (or [Y] or [W]) if and only if both trees of $\mathcal Q$ satisfy the same set of conditions. Since each set of conditions only depends on the values of $(n_1,n_2,n_1',n_2')$ and $m$ as in Figure \ref{fig:flowertree}, we just need to show that each full twist (in fact each full unit twist at vine $\mathbb V$) does not change any of these values $n_j$ or $n_j'$. But this is obviously true, provided that the vine $\mathbb V$ in $\mathcal Q_{\mathrm{sk}}$ does not contain any of the following ``special'' nodes: the root $\mathfrak r$, any child of $\mathfrak r$, the flower $\mathfrak f$ (or a leaf in $\mathcal Q_{\mathrm{sk}}$ such that the regular couple in $\mathcal Q$ attached at it contains $\mathfrak f$), or the parent of $\mathfrak f$. In fact, in this case, suppose (for example) the vine $\mathbb V$ is like in Figure \ref{fig:vinescancel}, then the flower $\mathfrak f$ must belong to one of parts (A)--(D) in Figure \ref{fig:vinescancel}, so changing $\mathcal Q$ to its full unit twist obviously does not change the values of $n_1$ and $n_2$; similarly it does not change the values of $n_1'$ and $n_2'$. Finally, as for the vines containing any of the special nodes, clearly the number of such vines does not exceed the number of special nodes which is less than $10$. 
Since each vine without exploiting cancellation only leads to loss of at most $L^{1/2}$, all these exceptional vines will lead to at most $L^5$ loss which is acceptable in view of (\ref{mainest2}).
\subsubsection{An extra argument with ladders} The second difference between the current proof and Section 11 of \cite{DH21} is as follows. Note that the factor $n_{\mathrm{in}}(k_\mathfrak f)$ is absent from (\ref{kqflower}) and replaced by $\mathbf{1}_{k_\mathfrak f=k'}$, so one does not have a decay factor in $k_\mathfrak f$, instead this $k_\mathfrak f$ is equal to a fixed vector $k'$. In fact, for any node $\mathfrak n$ on the stem, we do not have a decay factor in $k_\mathfrak n$, but instead a decay factor in $k_\mathfrak n-k'$ (or equivalently $k_\mathfrak n-k$). Note that the shift $k'$ is \emph{fixed but may be arbitrarily large}; fortunately most of the proof in the previous sections is translation invariant. In fact, the only part that is not translation invariant is the proof of the $L^1$ bound (\ref{1stsumbound}) in Section \ref{stage1red}. Here, in that proof we are using the fact that $\Omega_{\mathfrak n_1}\pm\Omega_{\mathfrak n_2}=r_j\cdot (k_\mathfrak m\pm k_{\mathfrak m'})$ to control the number of possibilities for $\sigma_{\mathfrak n_1}\pm\sigma_{\mathfrak n_2}$, where $|r_j|\sim P_j$ is the gap of a given ladder, $(\mathfrak n_1,\mathfrak n_2)$ is a pair of branching nodes corresponding to two atoms in the ladder connected by a double bond, and $\sigma_{\mathfrak n_j}=\lfloor\delta L^{2\gamma}\Omega_{\mathfrak n_j}\rfloor$. 
Now if $|r_j|\sim P_j$ and each $k_\mathfrak m$ belongs to a unit ball \emph{centered at $0$}, then we always have $|\sigma_{\mathfrak n_1}\pm\sigma_{\mathfrak n_2}|\leq \delta L^{2\gamma}P_j$; however, if $k_\mathfrak m$ belongs to a unit ball \emph{centered at $k'$}, then with $r_j$ also allowed to vary, we can no longer restrict $\sigma_{\mathfrak n_1}\pm\sigma_{\mathfrak n_2}$ to a fixed interval of length $\delta L^{2\gamma}P_j$, but only have $\sigma_{\mathfrak n_1}\pm\sigma_{\mathfrak n_2}=\kappa\delta L^{2\gamma}(r_j\cdot k')+O(\delta L^{2\gamma}P_j)$ with $\kappa\in\{-2,-1,0,1,2\}$, which may cause problems in counting the number of possibilities for $\sigma_{\mathfrak n_1}\pm\sigma_{\mathfrak n_2}$.
This issue is resolved by examining the proof of (\ref{1stsumbound}) in Section \ref{stage1red}, which follows the same arguments as in Sections 10.1--10.2 of \cite{DH21}. By going through the combinatorial arguments in Section 10.1 of \cite{DH21}, we can show that, apart from at most $O(\rho)$ counterexamples which are negligible (where $\rho=\rho_{\mathrm{sub}}$ is defined in Proposition \ref{kqmainest2}), for each double bond connecting two atoms in a given ladder, either (1) both bonds are LP bonds (see Definition \ref{defmole}), or (2) one bond is LP, the other bond is PC, and each of the two pairs of parallel single bonds at these two atoms is also one LP and one PC. Moreover, we may assume the gap $|r_j|\sim P_j\lesssim 1$, otherwise the power gain from (\ref{countc1-1}) easily covers the log loss. Then, in case 2, for each of the four single bonds $\ell$, the decoration $k_\ell$ is \emph{not} shifted by $k'$; indeed, this is true for the LP bond $\ell$ since $k_\ell=k_\mathfrak l$ for some leaf $\mathfrak l$ which may not belong to the stem apart from at most one counterexample, and is also true for the parallel PC bond due to the assumption $|r_j|\lesssim 1$. Then, we would have $|\sigma_{\mathfrak n_1}\pm\sigma_{\mathfrak n_2}|\lesssim \delta L^{2\gamma}P_j$ again, so the proof in Section \ref{stage1red} still goes through.
It remains to consider case 1 of an LP-LP double bond. In this case we will assume $\sigma_{\mathfrak n_1}\pm\sigma_{\mathfrak n_2}=A+O(\delta L^{2\gamma}P_j)$, where $A$ is a quantity that depends only on the gap $r_j$ of the ladder. By repeating the arguments in the proof of (\ref{1stsumbound}) before, we may reduce to the case $\sigma_{\mathfrak n_1}\pm\sigma_{\mathfrak n_2}=A+\mu$ with at most $C^n$ loss, where $\mu$ is now a fixed integer. We next classify the possibilities of $A$; recall that in the setting of Section 10 of \cite{DH21}, each $\Omega_\mathfrak n$ (and hence $\sigma_\mathfrak n$) variable belongs to a fixed set which is the union of at most $L^{10d}$ unit intervals, so $A$ has $\lesssim L^{20d}$ choices. For each fixed $A$, the arguments in Section 10.2, case 1 of \cite{DH21} implies that the left hand side of (\ref{1stsumbound}) is bounded by the same expression but without the $(\sigma_{\mathfrak n_1},\sigma_{\mathfrak n_2})$ variables, multiplied by a factor of
\begin{equation}\label{smallloss}\int_\mathbb R\frac{1}{\langle \alpha\rangle}\cdot\frac{1}{\langle \alpha+A-\mu\rangle}\,\mathrm{d}\alpha\lesssim\frac{\log (2+|A-\mu|)}{\langle A-\mu\rangle}.\end{equation} Once $A$ is fixed, the whole ladder can be treated in the same way as in \cite{DH21} without further loss; finally we sum in different choices of $A$, and summing up the factor in (\ref{smallloss}) yields a factor $\lesssim (\log L)^2$, which is acceptable. This completes the proof of Proposition \ref{mainprop2}.
\subsection{Proof of Theorem \ref{main}}\label{linoper2} In this subsection we prove Theorem \ref{main}. Note that, once Proposition \ref{mainprop1} is proved, by (\ref{mainest1}) and (\ref{correlation}) we have that \[\mathbb E|(\Jc_n)_k(t)|^2\lesssim\langle k\rangle^{-20d}(C^+\sqrt{\delta})^n\] for any $0\leq n\leq N^3$. This plays the role of Proposition 2.5 in \cite{DH21}, while Propositions \ref{mainprop2} and \ref{mainprop4} play the roles of Propositions 2.6 and 2.7 in \cite{DH21}.
Therefore, we may repeat the arguments in Section 12 of \cite{DH21} to control the remainder term $\textit{\textbf{b}}$. Note that, strictly speaking, we are actually applying the version of this argument in Section 4 of \cite{DH21-2}, because here we have $N=\lfloor(\log L)^4\rfloor$ as in \cite{DH21-2}, but the proof can be easily adapted. Another difference here concerns the invertibility of $1-\mathscr L$, which follows from Proposition \ref{mainprop2}. In fact, the same proof as in Section 12 of \cite{DH21} (but with $N=\lfloor(\log L)^4\rfloor$ and with a corollary in the form of Corollary 11.3 of \cite{DH21}, which follows from the same proof) yields that
\[\|\mathscr X_m\|_{Z\to Z}+\|\mathscr Y_m\|_{Z\to Z}+\|\mathscr W_m\|_{Z\to Z}\lesssim (C^+\sqrt{\delta})^{n/2}L^{60d}\] with probability $\geq 1-e^{-(\log L)^2}$, which plays the role of Proposition 12.2 of \cite{DH21}. This then implies that
\[\|\mathscr X\|_{Z\to Z}\leq L^{61d},\quad \|\mathscr Y-1\|_{Z\to Z}+\|\mathscr W-1\|_{Z\to Z}\leq 1/2.\] But $\mathscr Y=(1-\mathscr L)\mathscr X$ and $\mathscr W=\mathscr X(1-\mathscr L)$, so the invertibility of \emph{both} $\mathscr Y$ and $\mathscr W$ by Von Neumann series, implies that $1-\mathscr L$ has both left and right inverse, hence it is invertible. In particular
\[\|(1-\mathscr L)^{-1}\|_{Z\to Z}\leq \|\mathscr X\|_{Z\to Z}\cdot\|\mathscr Y^{-1}\|_{Z\to Z}\leq L^{62d}.\] The rest of the proof in Section 12 of \cite{DH21} then carries along, which allows one to control the remainder term $\textit{\textbf{b}}$, and complete the proof of Theorem \ref{main}. \appendix \section{Auxiliary results}\label{aux} \subsection{Counting estimates}\label{basiccounting} We collect the vector counting estimates used in Sections \ref{reduct2} and \ref{lgmole}. \begin{lem}\label{basiccount0pre} (1) Let $A\subset\mathbb R^2$ be the intersection of an annulus with radius $Q$ and width $Q^{-1}\ll\rho\ll Q$, and a disc of radius $L$. Then we have \begin{equation}\label{disclattice}\#(A\cap\mathbb Z^2)\lesssim\min\big(Q\rho+Q^{7/11},(Q\rho)^{1/2}+L(Q\rho)^{3/2}Q^{-1}+L(Q\rho)^{1/2}Q^{-1/3}\big). \end{equation}
(2) Fix two dyadic numbers $1\ll\rho\ll L$ and $1\lesssim\theta\ll \rho$, let $A,B\subset \mathbb R^d$ be two balls of radius $L$ and $\alpha\in\mathbb R$, then we have \begin{equation}\label{estcross}
\#\bigg\{x\in A\cap \mathbb Z^d:\sup_{B,\alpha}\# \{y\in B\cap\mathbb Z^d:|x\cdot y-\alpha|\lesssim\rho\}\gtrsim L^{d-1}\theta^{-1}\bigg\}\lesssim \rho^d+L^{d-2}(\log L)^C\rho^2\theta^2. \end{equation} \end{lem} \begin{proof} (1) The first upper bound $Q\rho+Q^{7/11}$ follows from ignoring the disc of radius $L$ and using the error term bound of Huxley \cite{Hux93} for counting the number of lattice points inside a disc (or ellipse). Now let us consider the second upper bound.
Clearly $A$ is contained in an annulus section of radius $\sim Q$, width $\rho$ and angle $\alpha\lesssim LQ^{-1}$. We may divide it into at most $1+\alpha/\theta$ annulus sections of angle $\theta$ where $\theta\ll\min(Q^{-2/3},(Q\rho)^{-1})$. Note that it is easily calculated by elementary geometry that the area of the convex hull of each smaller annulus section is $\lesssim Q\rho\theta+Q^2\theta^3\ll 1$, so it cannot contain any three lattice points that are not collinear. On the other hand, the length of any line segment contained in the whole annulus is clearly $\lesssim(Q\rho)^{1/2}$, thus \[\#(A\cap \mathbb Z^2)\lesssim (1+LQ^{-1}\max(Q^{2/3},Q\rho))\cdot(Q\rho)^{1/2},\] which implies (\ref{disclattice}).
(2) Assume $|x^1|=\max |x^j|$ where $x^j$ are the coordinates of $x$. If $|x^1|\lesssim \rho$ then we get the trivial upper bound $\rho^d$; suppose now $|x^1|\gg \rho$. In the set of $y$ defined in (\ref{estcross}), we may fix the last $d-2$ coordinates; by the pigeonhole principle and translation, up to a constant factor, we will have
\[\#\{y\in B(0,L)\cap \mathbb Z^2:|x^1y^1+x^2y^2|\lesssim\rho\}\gtrsim L\theta^{-1}.\] Note that for any $y^2$ there exists at most one $y^1$ satisfying the above inequality, by pigeonhole principle again we see that there exist $(a^1,a^2)$ such that $0<|a^2|\lesssim \theta$ and $|a^1x^1+a^2x^2|\lesssim\rho$. In the same way, if we first fix the coordinates $(y^2,y^4,\cdots y^d)$ of $y$, we also get that $|b^1x^1+b^3x^3|\lesssim\rho$ for some $(b^1,b^3)$ with $0<|b^3|\lesssim\theta$.
Now let $|x^j|\sim X_j$ etc., then the number of choices for $(x^1,a^2,b^3)$ is at most $LA_2B_3$. When they are fixed, we must have $|a^1x^1|\lesssim |a^2x^2|+\rho$ etc., so the number of choices for $(a^1,b^1)$ is at most $(1+A_2X_2/X_1)(1+B_3X_3/X_1)$. Finally when $x^1$ and $(a^j,b^j)$ are fixed, the number of choices for $(x^2,\cdots,x^d)$ is at most $\rho^2A_2^{-1}B_3^{-1}L^{d-3}$, so the number of choices for $x$ is at most \[\sum_{A_2,B_3,X_1,X_2,X_3}LA_2B_3\cdot\bigg(1+\frac{A_2X_2}{X_1}\bigg)\bigg(1+\frac{B_3X_3}{X_1}\bigg)\cdot \rho^2A_2^{-1}B_3^{-1}L^{d-3}\lesssim L^{d-2}(\log L)^C\rho^2\theta^2,\] noticing that each dyadic variable has at most $\log L$ choices in the summation. This completes the proof. \end{proof}
\begin{lem}\label{basiccount0} Fix $\alpha,\beta\in\mathbb R$ and $r,v\in\mathbb Z_L^d$, such that $|r|\sim P$ with $P\in[L^{-1},1]\cup\{0\}$ (with $|r|\gtrsim 1$ if $P=1$). Let each of $(x,y,z)$ belong to $\mathbb Z_L^d$ intersecting a fixed unit ball, assume $\xi\in\{x,y,x+y\}$ and $\zeta\in\{x-v,x+y-v,x+y-v-z\}$, and define $\mathfrak X:=\min((\log L)^2,1+\delta L^{2\gamma}P)$ as in Proposition \ref{kqmainest2}. Consider the counting problems
\begin{equation}\label{basiccount01}\big\{(x,y):|x\cdot y-\alpha|\leq \delta^{-1}L^{-2\gamma},\,\, |r\cdot\xi-\beta|\leq \delta^{-1}L^{-2\gamma}\big\}:=\mathfrak C_1,\end{equation} \begin{equation}\label{basiccount02}
\big\{(x,y,z):|x\cdot y-\alpha|\leq \delta^{-1}L^{-2\gamma},\,\, |\zeta\cdot z-\beta|\leq \delta^{-1}L^{-2\gamma}\big\}:=\mathfrak C_2. \end{equation}
(1) We have \begin{equation}\label{countc1-1}\mathfrak C_1\lesssim\left\{ \begin{aligned}&\delta^{-1}L^{2(d-\gamma)}(1+\delta L^{2\gamma}P)^{-1},&&\mathrm{if}\,\,\gamma\leq1/2\,\,\mathrm{or}\,\,P=0\\ & \delta^{-2}L^{2(d-\gamma)}\min((LP)^{-1}+L^{-(2-2\gamma)},L^{-10\eta}),&&\mathrm{if}\,\,\gamma>1/2\,\,\mathrm{and}\,\,P\neq 0 \end{aligned} \right\}\lesssim\delta^{-1}L^{2(d-\gamma)}\mathfrak X^{-1}. \end{equation}
(2) If $\gamma>(4/5)-10\eta$ and $P\neq 0$, then we have $\mathfrak C_1\lesssim\delta^{-2} L^{2(d-\gamma)-(1-\gamma)-20\eta}$.
(3) We have \begin{equation}\label{countc2-1}\mathfrak C_2\lesssim\left\{ \begin{aligned}&\delta^{-2}L^{3d-4\gamma},&&\mathrm{if}\,\, \gamma\leq 1/2\\ &\delta^{-2}L^{3(d-\gamma)-\gamma_0-10\eta},&&\mathrm{if}\,\,\gamma>1/2 \end{aligned} \right\}\lesssim\delta^{-2}L^{3(d-\gamma)-\gamma_0}. \end{equation} \end{lem}
\begin{proof} (1) First we have $\mathfrak C_1\lesssim \delta^{-1}L^{2(d-\gamma)}$ using only the first inequality $|x\cdot y-\alpha|\leq \delta^{-1}L^{-2\gamma}$, due to the classical counting estimates for $\gamma=1$ proved in \cite{DH21} (see Proposition 6.1 and Lemma A.9 (2) in \cite{DH21}, where the upper bound also holds for the square torus). This already implies (\ref{basiccount01}) if $P=0$, so below we will assume $P\neq 0$; moreover by subdividing the conditions in (\ref{basiccount01}) and (\ref{basiccount02}) we can also assume $\delta=1$ (same for (2) and (3) below).
Next we prove that $\mathfrak C_1\lesssim L^{2d-1-2\gamma}P^{-1}+L^{2d-2}$ when $\gamma>1/2$, and $\mathfrak C_1\lesssim L^{2d-4\gamma}P^{-1}$ when $\gamma\leq 1/2$. In fact, assume either $\xi=x$ and $|x|\sim R$ (with $|x|\gtrsim 1$ if $R=1$, same below) or $\xi=x+y$ and $|x-y|\sim R$, then the number of choices for $\xi$ is $\lesssim (LR)^{d-1}(1+L^{1-2\gamma}P^{-1})$ but with $(LR)^{d-1}$ replaced by $L^{d-1}$ if $\xi=x+y$, and the number of choices for $y$ with $\xi$ fixed is $\lesssim L^{d-1}(1+L^{1-2\gamma}R^{-1})$ but with $L^{d-1}$ replaced by $(LR)^{d-1}$ if $\xi=x+y$. In either case \begin{equation}\label{4vecnew}\mathfrak C_1\lesssim (L^2R)^{d-1}(1+L^{1-2\gamma}P^{-1})(1+L^{1-2\gamma}R^{-1});\end{equation} by summing over $R$, we get the desired bound both when $\gamma>1/2$ and when $\gamma\leq 1/2$.
We now need to prove that $\mathfrak C_1\lesssim L^{2(d-\gamma)}L^{-10\eta}$ when $\gamma>1/2$. Recall from the choice of $\eta$ that $0<\eta\ll \gamma-1/2$. If $R\leq L^{-30\eta}$ or $LP\geq L^{30\eta}$ then the desired bound already follows from (\ref{4vecnew}), so we will assume $R>L^{-30\eta}$ and $LP<L^{30\eta}$. If $\xi=x+y$, then the number of choices for $\xi$ is $\lesssim L^{d+1-2\gamma+O(\eta)}$; when $\xi$ is fixed, the number of choices for $x$, using a rescaled version of (\ref{disclattice}), is bounded by \[L^{d-2}\cdot\min\big(L^{2-2\gamma}+Q^{7/11},L^{1-\gamma}+L^{4-3\gamma}Q^{-1}+L^{2-\gamma}Q^{-1/3}\big)\] for some value $Q$ (with $\rho\sim L^{2-2\gamma}$); an easy calculation implies that the above quantity is always $\lesssim L^{d-1-\min(10^{-3},2\gamma-1)}$ for any $Q$ and any $\gamma>1/2$, which gives the desired result.
Assume now that $\xi=x$. In the arguments leading to (\ref{4vecnew}), we can choose a dyadic variable $\theta\geq 1$ and assume that $x$ is such that the number of choices for $y$ is $\sim L^{d-1}\theta^{-1}$. We may assume $\theta\leq L^{300\eta}$, or the desired bound again follows from the improved version of (\ref{4vecnew}); then, by a rescaled version of (\ref{estcross}) with $\rho\sim L^{2-2\gamma}$, we can bound the number of choices for $x$ by $L^{d-2+2(2-2\gamma)+O(\eta)}$. This, combined with the number of choices for $y$ above, again yields the desired bound.
(2) The proof is similar to the last parts of (1). If $\xi=x+y$, then the number of choices for $\xi$ is $\lesssim L^{d-2\gamma+1+O(\eta)}$. For fixed $\xi$, the number of choices for $x$ is bounded, using (\ref{disclattice}), by \[L^{d-2+O(\eta)}\cdot\min\big(L^{2/5}+Q^{7/11},L^{1/5}+L^{8/5}Q^{-1},L^{6/5}Q^{-1/3}\big),\] where the $\min(\cdots)$ factor is bounded by $L^{(4/5)-10^{-4}}$ by a simple calculation, hence the desired bound (recall $\gamma>(4/5)-10\eta$).
Now if $\xi=x$, then we may choose a dyadic variable $\theta$ and assume that $x$ is such that the number of choices for $y$ is $\sim L^{d-1}\theta^{-1}$. If $1\lesssim\theta\ll L^{2-2\gamma}$, by the second equation in (\ref{basiccount01}) and (\ref{estcross}), we can bound the number of choices for $x$ by $\min\big(L^{d-2\gamma+1+O(\eta)},L^{d-4\gamma+2+O(\eta)}\theta^2\big)$, and hence
\[\mathfrak C_1\lesssim\sum_{\theta\geq 1}L^{d-1+O(\eta)}\theta^{-1}\cdot \min\big(L^{d-(2\gamma-1)},L^{d-(4\gamma-2)}\theta^2\big)\lesssim L^{2d-3\gamma+(1/2)+O(\eta)},\] which suffices when $\gamma>(4/5)-10\eta$. If $\theta\gtrsim L^{2-2\gamma}$ we use only the first bound above which suffices, and if $\theta\ll 1$ then necessarily $|x|\lesssim L^{2-2\gamma}\leq L^{(2/5)+O(\eta)}$ and the estimate is easily proved.
(3) If $\zeta\in\{x+y-v,x+y-v-z\}$, then we first fix $(x+y,z)$ which has $\lesssim L^{2(d-\gamma)}$ choices, and then count $(x,y)$. If $\gamma\leq 1/2$ the number of choices is easily seen to be $L^{d-2\gamma}$ which suffices. If $\gamma>1/2$, then the same argument as in the proof of (1), using (\ref{disclattice}), shows that the number of choices for $(x,y)$ is at most $L^{d-1-\min(10^{-3},2\gamma-1)}$, which suffices due to the choice of $\eta$ so that $0<\eta\ll \gamma-1/2$.
Now suppose $\xi=x-v$, and let $|\xi|\sim R$, then with $(x,y)$ fixed, the number of choices for $z$ is $\lesssim L^{d-1}(1+L^{1-2\gamma}R^{-1})$; moreover a simple variation of Proposition 6.1 and Lemma A.9 (2) of \cite{DH21} allows to control the number of choices for $(x,y)$ by $L^{2-2\gamma}L^{d-1}(LR)^{d-1}$, hence we get
\begin{equation}\label{bdc2new}\mathfrak C_2\lesssim (L^2R)^{d-1}L^{2-2\gamma}L^{d-1}(1+L^{1-2\gamma}R^{-1})\lesssim L^{3(d-\gamma)-\gamma_0}.\end{equation} This is already enough if $\gamma\leq 1/2$. If $\gamma>1/2$ we need to gain extra $L^{-10\eta}$, which is provided by (\ref{bdc2new}), unless $R\geq L^{-300\eta}$ (and similarly also $|x|\geq L^{-300\eta}$). In the latter case, we choose two dyadic variables $1\leq\theta_1,\theta_2\leq L^{300\eta}$ and assume for $x$ that with this fixed $x$, the number of choices for $y$ is $\sim L^{d-1}\theta_1^{-1}$ and the number of choices for $z$ is $\sim L^{d-1}\theta_2^{-1}$. By (\ref{estcross}), the number of choices for $x$ is at most $L^{d-2+O(\eta)}L^{2(2-2\gamma)}\min(\theta_1,\theta_2)^2$, and putting together we get $\mathfrak C_2\lesssim L^{3d-4\gamma+O(\eta)}$. \end{proof}
Now let $k_j\,(j=1,2,\cdots)$ be vector variables such that $k_j\in\mathbb Z_L^d$ and $|k_j-k_j^0|\leq 1$ for some fixed values $k_j^0$. For any tuple $(j_1^{\epsilon_1},\cdots,j_r^{\epsilon_r})$ with $r\leq 4$ and $\epsilon_i\in\{\pm\}$, we associate with it a system \begin{equation}\label{counteqn}
\sum_{i=1}^r\epsilon_ik_{j_i}=k,\quad \bigg|\sum_{i=1}^r\epsilon_i|k_{j_i}|^2-\beta\bigg|\leq\delta^{-1}L^{-2\gamma}, \end{equation} where $k\in\mathbb Z_L^d$ and $\beta\in\mathbb R$ are fixed (we label different tuples differently, and do not require $k=0$ when $r=4$). If some $j_i$ in the tuple does not come with an $\epsilon_i$, we understand that the sign of $k_{j_i}$ in (\ref{counteqn}) can be arbitrary, but subject to the restrictions stated below. Each counting problem is represented by a set of tuples (such as $\{(1^+,2^-)\}$ or $\{(1,2,3),(1,4,5)\}$); we require that no tuple contains $3$ elements of same sign, each $j$ appears in at most two tuples, and the signs of $k_j$ in the two tuples must be opposite. Denote the corresponding number of solutions by $\mathfrak C$. \begin{lem}\label{basiccount} We have the following bounds for the number of solutions to counting problems. \begin{enumerate} \item For $\{(1^+,2^+)\}$ we have $\mathfrak C\lesssim \delta^{-1}L^{d-\gamma-\gamma_0}$.
\noindent For $\{(1^+,2^-)\}$, let $|k_1-k_2|\sim R\in[L^{-1},1]$ (with $|k_1-k_2|\gtrsim R$ when $R=1$), then we have $\mathfrak C\lesssim L^{d-1}+\delta^{-1}\min(R^{-1}L^{d-2\gamma},L^d)$. \item For $\{(1,2,3)\}$ we have $\mathfrak C\lesssim \delta^{-1}L^{2(d-\gamma)}$. \item For $\{(1,2,3^+),(1,2,4^+)\}$ or $\{(1,2,3),(3^+,4^+)\}$ we have $\mathfrak C\lesssim \delta^{-2}L^{2(d-\gamma)-\gamma_0}$.
\noindent For $\{(1,2,3^+),(1,2,4^-)\}$ or $\{(1,2,3),(3^+,4^-)\}$, let $|k_3-k_4|\sim P\in[L^{-1},1]\cup\{0\}$ (with $|k_3-k_4|\gtrsim P$ when $P=1$), then $\mathfrak C$ satisfies the same bounds as in (\ref{countc1-1}) and Lemma \ref{basiccount0} (2), where $\mathfrak X:=\min((\log L)^2,1+\delta L^{2\gamma}P)$. \item For $\{(1,2,3),(1,4,5)\}$ or $\{(1,2,3),(1,2,4,5)\}$, $\mathfrak C$ satisfies the same bounds as in (\ref{countc2-1}). \item For $\{(1,2,4),(2,3,5),(3,4,6)\}$ we have $\mathfrak C\lesssim \delta^{-3}L^{3(d-\gamma)-1.01\gamma_0}$. \end{enumerate} \end{lem}
\begin{proof} In all proofs we may assume $\delta=1$. The first half of (1) follows by assuming $k_1+k_2$ is fixed and $|k_1-k_2|\sim R$, then the number of choices for $(k_1,k_2)$ is at most $\delta^{-1}(LR)^{d-1}(1+L^{1-2\gamma}R^{-1})\lesssim \delta^{-1}L^{d-\gamma-\gamma_0}$. The second half of (1) follow similarly, by first choosing the first $d-1$ coordinates of $k_1$ (assuming the $d$-th coordinate of $k_1-k_2$ is $\sim R$) and then considering the last coordinate. Next, (2) follows from Proposition 6.1 and Lemma A.9 (2) in \cite{DH21}. For the first part of (3), if the signs associated with $k_1$ and $k_2$ are the same then it follows from applying (1) twice (first for $(k_3,k_4)$ and then for $(k_1,k_2)$); otherwise, we can assume $|k_1-k_2|\sim R$, then once $(k_3,k_4)$ is fixed, the number of choices for $(k_1,k_2)$ is $\lesssim L^{d-1}(1+L^{1-2\gamma}R^{-1})$, while the number of choices for $(k_3,k_4)$ is now $\lesssim (LR)^{d-1}(1+L^{1-2\gamma})$, which gives $\mathfrak C\lesssim L^{2d-2}(1+L^{1-2\gamma})^2\lesssim L^{2(d-\gamma-\gamma_0)}$ which proves the first part of (3). Moreover, the second part of (3) follows from Lemma \ref{basiccount0} (1) and (2), and (4) follows from Lemma \ref{basiccount0} (3), after a suitable reparametrization using the factorization $|k_1|^2-|k_2|^2+|k_3|^2-|k|^2=2(k_1-k)\cdot (k_3-k)$ when $k_1-k_2+k_3=k$ (and the case $\{(1,2,3),(1,2,4,5)\}$ follows as a consequence).
Now let us prove (5). We may assume (say) the signs corresponding to $k_3$ and $k_5$ in the triple $(2,3,5)$ are the same, and the signs corresponding to $k_3$ and $k_6$ in the triple $(3,4,6)$ are the opposite. We will first fix $(k_1,k_2,k_4)$, which has $\lesssim L^{2-2\gamma}(L^2R)^{d-1}$ choices assuming $|k_3-k_6|\sim R$, due to a variation of Proposition 6.1 and Lemma A.9 (2) of \cite{DH21}; then $k_3+k_5$ and $k_3-k_6:=r$ are fixed, and $|k_3|^2+|k_5|^2$ and $k_3\cdot r$ are fixed up to distance $L^{-2\gamma}$. We may assume $r\neq 0$ (otherwise the bound is trivial), and let $q:=k_3-k_5$, then $q\in\mathbb Z_L^d$ belongs to a fixed unit ball, and both $|q|^2$ and $r\cdot q$ are fixed up to distance $L^{-2\gamma}$. We then fix the first $d-2$ coordinates of $q$ (which has $\lesssim L^{d-2}$ choices) and reduce to counting the number of $(x,y)\in \mathbb Z_L$ such that
\[|x-x_0|\leq 1,\,\,|y-y_0|\leq 1;\qquad x^2+y^2=\alpha+O(L^{-2\gamma}),\,\,ax+by=\beta+O(L^{-2\gamma})\] where $x_0,y_0,a,b,\alpha,\beta\in\mathbb R$ are fixed and $|a|+|b|\sim R$. Assuming also $|x|+|y|\sim M$, we can reduce this lattice point counting estimate to a area counting estimate by considering the $O(L^{-1})$ neighborhood of these lattice points, so that $x^2+y^2=\alpha+O(L^{-2\gamma}+ML^{-1})$ and $ax+by=\beta+O(L^{-2\gamma}+RL^{-1})$. By doing a rotation to calculate the area, we can easily prove that\[\#\mathrm{\ of\ choices\ for\ }(x,y)\lesssim R^{-1}L^2(L^{-1}+L^{-2\gamma})(L^{-\gamma}+M^{1/2}L^{-1/2}),\] which is enough provided $M\leq L^{1-\gamma_0/10}$. Finally, if $M\geq L^{1-\gamma_0/10}$ then the single condition $x^2+y^2=\alpha+O(L^{-2\gamma})$ suffices, using (\ref{disclattice}), to show that the number of choices for $(x,y)$ is $\lesssim L^{\max(1,2-2\gamma)-\gamma_0/10}$, which implies the desired result. \end{proof} \subsection{Miscellaneous}\label{misc} We collect some miscellaneous results.
\begin{lem}\label{explem} Given a tree $\mathcal T$ of order $n$, consider the collection of all decorations $(k_\mathfrak n)$, such that $|k_\mathfrak l-k_\mathfrak l^0|\leq 1$ for each leaf node $\mathfrak l$, where $k_\mathfrak l^0\in\mathbb Z_L^d$ are fixed vectors for each leaf $\mathfrak l$. Then: \begin{enumerate} \item This collection of decorations can be divided into at most $C^n$ sub-collections, such that for any decoration $(k_\mathfrak n)$ in a given sub-collection, each $k_\mathfrak n$ belongs to a fixed unit ball that depends only on $(k_\mathfrak n^0)$, $\mathfrak n$ and the sub-collection but not on $(k_\mathfrak n)$ itself.
\item This collection of decorations can be divided into at most \[C^n\prod_{\mathfrak l\in\mathcal L}\langle k_{\mathfrak l}^0\rangle^{4d}\] sub-collections, such that for any decoration $(k_\mathfrak n)$ in a given sub-collection, and any vector $r\in\mathbb R^d$ with $|r|\leq 1$, the value $r\cdot k_\mathfrak n\in\mathbb R$ belongs to a fixed unit interval that depends only on $(k_\mathfrak n^0)$, $\mathfrak n$ and the sub-collection but not on $(k_\mathfrak n)$ itself and not on $r$. \end{enumerate} \end{lem} \begin{proof} (1) The key tool is Lemma 6.6 of \cite{DH21}. It implies that: for any branching node $\mathfrak n\in\mathcal T$, there exists $\mathfrak n'$ which is a child of $\mathfrak n$, and a ball $B_\mathfrak n$ which only depends on $(k_\mathfrak n^0)$ and $\mathfrak n$, such that
\[\prod_{\mathfrak n}\rho(B_\mathfrak n)\leq \frac{3^n}{2n+1}\leq C^n\] with $\rho(B_\mathfrak n)\geq 1$ being the radius of $B_\mathfrak n$, and for any decoration satisfying $|k_\mathfrak l-k_\mathfrak l^0|\leq 1$ for each leaf $\mathfrak l$, we must have that $k_{\mathfrak n}\pm k_{\mathfrak n'}\in B_\mathfrak n$. This implies that $\lfloor k_\mathfrak n\rfloor\pm\lfloor k_{\mathfrak n'}\rfloor$ is an integer vector in a fixed ball of radius $\rho(B_\mathfrak n)+1\leq 2\rho(B_\mathfrak n)$, where $\lfloor k_\mathfrak n\rfloor$ denotes the point whose coordinates are the integer parts of coordinates of $k_\mathfrak n$. Since the values of $\lfloor k_\mathfrak n\rfloor\pm\lfloor k_{\mathfrak n'}\rfloor$ for all branching nodes $\mathfrak n$ and the values $\lfloor k_\mathfrak l\rfloor$ for all leaves $\mathfrak l$ uniquely determine $\lfloor k_\mathfrak n\rfloor$ for all nodes $\mathfrak n$, we know that the collection $(\lfloor k_\mathfrak n \rfloor)_{\mathfrak n\in\mathcal T}$ has at most $C^n$ possible choices, hence the result.
(2) By running a similar (inductive) proof as in Lemma 6.6 of \cite{DH21}, we can choose a child $\mathfrak n'$ of each branching node $\mathfrak n$ as in (1), and the corresponding ball $B_\mathfrak n$, but we require that $B_\mathfrak n$ be centered at the origin, and the radii satisfy that $\rho(B_\mathfrak n)\geq 1$ and
\[\prod_{\mathfrak n}\rho(B_\mathfrak n)\leq \frac{3^n\prod_{\mathfrak l\in\mathcal L}(|k_\mathfrak l^0|+1)}{\sum_{\mathfrak l\in\mathcal L}(|k_\mathfrak l^0|+1)}\leq C^n\prod_{\mathfrak l\in\mathcal L}\langle k_\mathfrak l^0\rangle.\] This implies that for any $|r|\leq 1$, we have $|r\cdot k_\mathfrak n\pm r\cdot k_{\mathfrak n'}|\leq \rho(B_\mathfrak n)$, hence $\lfloor r\cdot k_\mathfrak n\rfloor\pm \lfloor r\cdot k_{\mathfrak n'}\rfloor$ is an integer of absolute value $\leq 2\rho(B_\mathfrak n)$. Also for each leaf $\mathfrak l$ we have that $\lfloor r\cdot k_\mathfrak l\rfloor$ is an integer of absolute value $\leq 2\langle k_\mathfrak l^0\rangle$, so in the same way as in (1), the collection $(\lfloor r\cdot k_\mathfrak n \rfloor)_{\mathfrak n\in\mathcal T}$ has at most $C^n\prod_{\mathfrak l\in\mathcal L}\langle k_{\mathfrak l}^0\rangle^{2}$ choices as $r$ and the decoration $(k_\mathfrak n)$ vary. This completes the proof. \end{proof} \begin{prop}\label{subsetvc} Suppose a vine-like object $\mathbb U_1$, is contained in another vine-like object $\mathbb U_2$, and they do not have any vine as a common ingredient. Then $\mathbb U_1$ is either one double bond, or the adjoint of vine (V) (which is an HV), or the adjoint of two double bonds (which is an HVC). In any case, $\mathbb U_2$ still remains connected after removing $\mathbb U_1$ or the VC which $\mathbb U_1$ is the adjoint of. \end{prop} \begin{proof} If $\mathbb U_1$ and $\mathbb U_2$ do not have any vine as a common ingredient, then $\mathbb U_1$ has to be contained in a single ingredient of $\mathbb U_2$, which is a single vine (I)--(VIII) as shown in Figure \ref{fig:vines}. The result is then self-evident by examining the structures of these vines, and we omit the proof. \end{proof} \begin{lem}\label{timeFourierlem} (1) Let $\mathcal T$ be a ternary tree, and denote by $\mathcal N$ the set of branching nodes. Consider
\begin{equation}\label{modifiedtimeint2}
\mathcal U_{\mathcal T}(t,\alpha[\mathcal N])=\chi_0(t)\int_{\widetilde{\mathcal D}}\prod_{\mathfrak n\in\mathcal N}e^{\pi i\alpha_\mathfrak n t_\mathfrak n}\,\mathrm{d}t_\mathfrak n,
\end{equation} where the domain $\widetilde{\mathcal D}=\big\{t[\mathcal N]:0<t_{\mathfrak n'}<t_\mathfrak n<t\mathrm{\ whenever\ }\mathfrak n'\mathrm{\ is\ a\ child\ node\ of\ }\mathfrak n\big\}$.
For every choice of $d_{\mathfrak n}\in \{0, 1\}\,(\mathfrak n\in \mathcal N)$, we define $q_{\mathfrak n}$ for $\mathfrak n \in \mathcal N$ inductively as follows: Set $q_{\mathfrak n}=0$ if $\mathfrak n$ is a leaf, and otherwise define $q_{\mathfrak n}=\alpha_\mathfrak n+d_{\mathfrak n_1}q_{\mathfrak n_1}+d_{\mathfrak n_2}q_{\mathfrak n_2}+d_{\mathfrak n_3}q_{\mathfrak n_3}$ where $\mathfrak n_1, \mathfrak n_2, \mathfrak n_3$ are the three children of $\mathfrak n$. The following estimate holds:
\begin{equation}\label{timeintlemma1}
|\widehat{\mathcal U}_{\mathcal T} (\tau,\alpha[\mathcal N])| \leq C^n\sum_{d_\mathfrak n\in \{0, 1\}} \langle \tau-d_\mathfrak r q_\mathfrak r\rangle^{-10}\prod_{\mathfrak n \in \mathcal N}\frac{1}{\langle q_\mathfrak n\rangle }.
\end{equation}
(2) Suppose $F:\mathbb R\to \mathbb R$ is in $X^\alpha(\mathbb R)$ for some $\alpha<1$, then for any $\epsilon>0$, there holds that $\chi_0(t_1)\chi_0(t_2)F(\max(t_1, t_2)) \in X^{\alpha-\epsilon}(\mathbb R^2)$. \end{lem}
\begin{proof} The proof of (1) is contained in Proposition 2.3 of \cite{DH19}. Note that the result can be extended to the case of multiple trees, which will be used in the proof of Propositions \ref{estbadvine} and \ref{estnormalvine}. For (2), we expand the Fourier transform of $G(t_1, t_2)=\chi_0(t_1)\chi_0(t_2)F(\max(t_1, t_2))$ as \[ \int_{\mathbb R}\widehat F(\mu) \int_{\mathbb R\times \mathbb R} e^{-2\pi i \lambda_1 t_1}e^{-2\pi i \lambda_2 t_2}\chi(t_1) \chi(t_2) e^{2\pi i \mu \max(t_1, t_2)} \mathrm{d}t_1\mathrm{d}t_2\mathrm{d}\mu. \] Splitting the integral into $t_1\leq t_2$ and $t_1\geq t_2$ and integrating by parts once in $t_1$, we get $$
\widehat G(\lambda_1, \lambda_2)=-\int_{|\mu-\lambda_1|\geq 1}\widehat F(\mu) \widehat{(\chi^2)}(\lambda_2+\lambda_1-\mu)\left(\frac{1}{\lambda_1-\mu}-\frac{1}{\lambda_1}\right) \mathrm{d}\mu + \textrm{Rem} $$ where the remainder term Rem can be easily seen (possibly by one more additional integration by parts) to be in $X^\alpha$. Using the Schwartz decay of $\widehat{(\chi^2)}(\lambda_2+\lambda_1-\mu)$, and the fact that $$
\int_{|\lambda_1|, |\lambda_1 -\mu|\geq 1} |\lambda_1|^\beta \frac{|\mu|}{|(\lambda_1-\mu) \lambda_1|} d\lambda_1 \lesssim |\mu|^\beta \log |\mu|, \qquad 0\leq \beta<1, $$ gives the result. \end{proof}
\section{The algorithm for large gap molecules}\label{appalg} \subsection{The setup} In this appendix we define and analyze the operations and algorithm that occur in the proof of Proposition \ref{moleprop2}, which are almost the same as those in Sections 9.3--9.4 of \cite{DH21}, except for some minor differences (for example we are omitting the degenerate atoms, etc.).
For each operation we will consider the corresponding $\mathfrak D$ value. There will be three cases, which we denote by \emph{normal} (N), \emph{fine} (F) and \emph{good} (G) operations, where we have $\mathfrak D\lesssim 1$ (or $\mathfrak D\lesssim(\log L)^C$), $\mathfrak D\lesssim L^{-\eta/2}$ and $\mathfrak D\lesssim L^{-5\gamma_0/8}$ respectively. Recall that for any decoration under consideration, each atom is assumed to be LG, apart from at most $w$ atoms; all the arguments below are based on this LG assumption, and the exceptional atoms will be discussed separately in the proof of Proposition \ref{moleprop2} (see Section \ref{mole2proof}). Note also that we do not require $c_v=0$ for degree $4$ atoms $v$ as in Definition \ref{decmole}, but this plays no role in the proof. In consistency with Sections 9.3--9.4 of \cite{DH21}, at the end of the operations we will reduce $\mathbb M$ to a molecule of isolated atoms only (and no bonds).
For each operation and each decoration of the molecule $\mathbb M_{\mathrm{pre}}$ before the operation, we will define a corresponding decoration of the molecule $\mathbb M_{\mathrm{pos}}$ after the operation (either obviously, or in a precise way we will describe below). In some cases the exact operation we perform will depend on some specific assumptions for the decoration (of form $|k_{\ell}-k_{\ell'}|\geq L^{-\gamma+2\gamma_0/3}$ or $|k_{\ell}-k_{\ell'}|<L^{-\gamma+2\gamma_0/3}$, corresponding to the extra conditions $\mathtt{Ext}$ described in Section 9.3 of \cite{DH21}). Finally, define as in \cite{DH21} that $\nu:= V_3+2V_2+3V_1+4V_0-4F=4V-2E-4F$, where $V_j$ is the number of degree $j$ atoms. \subsection{The operations}\label{oper} We now define all the different operations. \subsubsection{Triple bonds} Recall that the molecule $\mathbb M_{\mathrm{pre}}$ does not contain any degenerate atom (i.e. $k_{\ell}\neq k_{\ell'}$ in the decoration for any two bonds $(\ell,\ell')$ of opposite direction at one atom $v$, see Remark \ref{nonresrem}), and does not contain any self-connecting bonds.
In this operation, assume there is a triple bond between two atoms $v_1$ and $v_2$ in $\mathbb M_{\mathrm{pre}}$, such that $d(v_1)$ and $d(v_2)$ are not both $4$. In (TB-1N) we assume $d(v_1)=d(v_2)=3$, so the triple bond is separated from the rest of the molecule; in (TB-2N) we assume $d(v_1)=3$ and $d(v_2)=4$, so $v_2$ has an extra single bond. \begin{itemize} \item Operations (TB-1N)--(TB-2N): we remove atoms $v_1$, $v_2$ and all the bonds. \end{itemize} \begin{prop}\label{tbprop} We have $\mathfrak D\lesssim (\log L)^C$ for operations (TB-1N)--(TB-2N). \end{prop} \begin{proof} This follows from Lemma \ref{basiccount} (2), noticing that $\Delta\chi=-2$; the factor $(\log L)^C$ is due to the possible change it may cause to the $\mathfrak P$ factor in $\mathfrak A$ (same below). \end{proof} \subsubsection{Bridge removal}\label{secbr} In all subsequent operations, we assume $\mathbb M_{\mathrm{pre}}$ has no triple bonds. In this operation, we assume $\mathbb M_{\mathrm{pre}}$ contains a \emph{bridge} $\ell$, which is a single bond connecting atoms $v_1$ and $v_2$, such that removing this bond will create a new component. \begin{itemize} \item Operation (BR-N): we remove the bond $\ell$. \end{itemize} \begin{prop}\label{brprop} We have $\mathfrak D\lesssim (\log L)^C$ for operation (BR-N). We also have $\Delta\nu=-2$ and $\Delta V_3\geq -2$, with equality holding only when $d(v_1)=d(v_2)=3$. \end{prop} \begin{proof} The bound $\mathfrak D\lesssim (\log L)^C$ follows from Lemma 9.14 of \cite{DH21}. The effect of (BR-N) reduces the degrees of two atoms each by $1$, and adds one new component. By definition of $\nu$ we have $\Delta\nu=2-4=-2$, because the contribution to $V_3 +2V_2 +3V_1 +4V_0$ of each of the two atoms connected by $\ell$ increased by $1$ after the removal of $\ell$. The other statements are obvious. 
\end{proof} \subsubsection{Degree 3 atoms connected by a single bond}\label{3s3} In all subsequent operations, we assume there is no bridge in $\mathbb M_{\mathrm{pre}}$. In this operation, we assume that there are two degree 3 atoms $v_1$ and $v_2$, connected by a single bond $\ell_1$. Then $\mathbb M_{\mathrm{pre}}$ must contain one of the atomic groups shown in Figures \ref{fig:3s3}--\ref{fig:3s3new}.
In operations (3S3-1N)--(3S3-4G) we assume that $v_1$ and $v_2$ each has two more single bonds $\ell_2,\ell_3$ and $\ell_4,\ell_5$, connecting to four different atoms $v_3,v_4$ and $v_5,v_6$, see Figure \ref{fig:3s3}; in operation (3S3-5G) we assume this does not hold, see Figure \ref{fig:3s3new}. In (3S3-1N)--(3S3-3G) we assume that (i) after removing $(v_1,v_2)$ and all the bonds, $(v_3,v_5)$ is in one new component, and $(v_4,v_6)$ is in the other new component, and that (ii) the bonds $\ell_2$ and $\ell_4$ have opposite directions (viewing from $(v_1,v_2)$), and the bonds $\ell_3$ and $\ell_5$ also have opposite directions. In (3S3-4G) we assume either (i) or (ii) is false. Moreover, in (3S3-1N) we assume that $d(v_3)=\cdots=d(v_6)=4$, and in (3S3-3G) we assume that $d(v_3)$ and $d(v_5)$ are not both 4.
\begin{figure}
\caption{The atomic group involved in operations (3S3-1N)--(3S3-4G). In the first two pictures $(v_3,v_5)$ and $(v_4,v_6)$ are not in the same component after removing $v_1$ and $v_2$, while in the third picture they are.}
\label{fig:3s3}
\end{figure}
\begin{figure}
\caption{The atomic groups involved in operation (3S3-5G). In total there are 6 scenarios.}
\label{fig:3s3new}
\end{figure} \begin{itemize}
\item Operation (3S3-1N): assuming that $|k_{\ell_2}-k_{\ell_4}|+|k_{\ell_3}-k_{\ell_5}|\leq L^{-\gamma}$, and $|k_{\ell_1}-k_{\ell_3}|\geq L^{-\gamma+2\gamma_0/3}$ if $(\ell_1,\ell_3)$ have opposite directions viewing from $v_1$, we remove the atoms $(v_1,v_2)$ and all the bonds. \item Operation (3S3-2G): assuming the negation of the conditions in (3S3-1N), we remove $(v_1,v_2)$ and all the bonds. \item Operation (3S3-3G): assuming the conditions in (3S3-1N), we remove $(v_1,v_2)$ and all the bonds, but add a new bond $\ell_6$ between $v_3$ and $v_5$ (not drawn in Figure \ref{fig:3s3}), which goes from $v_3$ to $v_5$ if $\ell_2$ goes from $v_3$ to $v_1$ and vice versa. \item Operations (3S3-4G)--(3S3-5G): we remove $(v_1,v_2)$ and all the bonds. \end{itemize} \begin{prop}\label{3s3prop} We have $\mathfrak D\lesssim (\log L)^C$ for operation (3S3-1N), and $\mathfrak D\lesssim L^{-5\gamma_0/8}$ for operations (3S3-2G)--(3S3-5G). For (3S3-1N) we also have $\Delta\nu=-2$ and $\Delta V_3=2$. \end{prop}
\begin{proof} First consider the four operations other than (3S3-3G). In each scenario in Figures \ref{fig:3s3}--\ref{fig:3s3new}, if $\Delta\chi=-3$ then we have $\mathfrak D\lesssim L^{-5\gamma_0/8}$ using Lemma \ref{basiccount} (4) (the possible $(\log L)^C$ due to the $\mathfrak P$ factor is also absorbed; same below). If $\Delta\chi=-2$, then Lemma 9.14 of \cite{DH21} implies that the values of $k_{\ell_2}-k_{\ell_4}$ and $k_{\ell_3}-k_{\ell_5}$ must be fixed. In this case Lemma \ref{basiccount} (3) implies $\mathfrak D\lesssim (\log L)^C$. Moreover, this bound can be improved to $\mathfrak D\lesssim L^{-5\gamma_0/8}$ if the bonds $(\ell_2,\ell_4)$ and $(\ell_3,\ell_5)$ do not both have opposite directions viewing from $(v_1,v_2)$, or if $|k_{\ell_2}-k_{\ell_4}|+|k_{\ell_3}-k_{\ell_5}|\geq L^{-\gamma}$ (if the latter happens then we have $|P|\gtrsim L^{-\gamma}$ in Lemma \ref{basiccount} (3)). The same improvement works if $|k_{\ell_1}-k_{\ell_3}|\leq L^{-\gamma+2\gamma_0/3}$ and $(\ell_1,\ell_3)$ have opposite directions, because in this case $k_{\ell_2}$ belongs to a fixed ball of radius $L^{-\gamma+2\gamma_0/3}$, so a simple variation of Lemma \ref{basiccount} (2) gives that the number of choices for $(k_{\ell_1},k_{\ell_2},k_{\ell_3})$ is at most \[\delta^{-1}L^{2-2\gamma}L^{d-1}(L^{1-\gamma+2\gamma_0/3})^{d-1}\lesssim \delta^{-1}L^{2(d-\gamma)}L^{-2\gamma_0/3}.\] These observations are sufficient to prove the bounds for the four operations other than (3S3-3G) (the fact about $\Delta\nu$ and $\Delta V_3$ for (3S3-1N) is also clear), noticing also the LG assumption for $v_3$ in Scenarios 1--2 in Figure \ref{fig:3s3new}, and that we must have $\Delta\chi=-3$ in Scenarios 3--6 in Figure \ref{fig:3s3new}.
Now consider (3S3-3G), where $\Delta\chi=-1$. To get a decoration of $\mathbb M_{\mathrm{pos}}$ from that of $\mathbb M_{\mathrm{pre}}$, we simply define $k_{\ell_6}=k_{\ell_2}$ for the newly added bond $\ell_6$. Note that if the decoration for $\mathbb M_{\mathrm{pre}}$ is LG at each atom, then the gap at each atom for the resulting decoration for $\mathbb M_{\mathrm{pos}}$ is still at least $L^{-\gamma+\eta}-L^{-\gamma}$ since $|k_{\ell_2}-k_{\ell_4}|\leq L^{-\gamma}$. Therefore, even after at most $n\leq (\log L)^C$ iterations (where $n$ is the size of the molecule $\mathbb M$) this gap is still $\geq L^{-\gamma+\eta}/2$ which does not affect any estimate later. Moreover, we have $\mathfrak C_{\mathrm{pre}}\lesssim \delta^{-1}L^{d-\gamma-2\gamma_0/3}\mathfrak C_{\mathrm{pos}}$ by first looking at the decoration $(k_\ell)$ for $\ell$ in the component of $\mathbb M_{\mathrm{pos}}$ containing $\ell_6$, then looking at the two vectors $(k_{\ell_1},k_{\ell_3})$, and then looking at the decoration $(k_\ell)$ for $\ell$ in the component of $\mathbb M_{\mathrm{pos}}$ not containing $\ell_6$. Here note that, once $k_{\ell_6}=k_{\ell_2}$ is fixed, the number of choices for $(k_{\ell_1},k_{\ell_3})$ is at most \[\delta^{-1}L^{d-\gamma-\gamma_0}+\delta^{-1}L^{d-2\gamma}(L^{-\gamma+2\gamma_0/3})^{-1}\lesssim \delta^{-1}L^{d-\gamma}L^{-2\gamma_0/3}\] due to Lemma \ref{basiccount} (1), where we have $R\gtrsim L^{-\gamma+2\gamma_0/3}$ in Lemma \ref{basiccount} (1) when applicable. Taking into account the possible changes to the $\mathfrak P$ factor, we get that $\mathfrak D\lesssim L^{-5\gamma_0/8}$, as desired. \end{proof} \subsubsection{Degree 3 atoms connected by a double bond} In this operation, we assume there are two degree $3$ atoms $v_1$ and $v_2$, connected by a double bond $(\ell_1,\ell_2)$, which are also connected to two other atoms $v_3$ and $v_4$ by two single bonds $\ell_3$ and $\ell_4$, see Figures \ref{fig:3d3} and \ref{fig:3d3new}. 
In (3D3-1N)--(3D3-3G) and (3D3-6G) we assume $v_3\neq v_4$ and $\ell_3$ and $\ell_4$ are in opposite directions (viewing from $(v_1,v_2)$); in (3D3-1N) we assume $d(v_3)=d(v_4)=4$, and in (3D3-3G) we assume that \emph{not} all atoms in the current component other than $(v_1,v_2)$ have degree 4. In (3D3-4G) we assume $v_3\neq v_4$ and $\ell_3$ and $\ell_4$ are in the same direction, and in (3D3-5G) we assume $v_3=v_4$. Finally, in (3D3-6G) we assume that $v_3$ is connected to $v_4$ via a single bond $\ell_5$, and $v_3$ and $v_4$ are each connected to different atoms $v_5$ and $v_6$ via double bonds $(\ell_6,\ell_7)$ and $(\ell_8,\ell_9)$, see Figure \ref{fig:3d3new}.
\begin{figure}
\caption{The atomic groups involved in operations (3D3-1N)--(3D3-5G).}
\label{fig:3d3}
\end{figure}
\begin{figure}
\caption{The atomic group involved in operation (3D3-6G). In the left picture $\ell_5$ becomes a bridge after removing $\{v_1,v_2\}$, while in the right picture it does not.}
\label{fig:3d3new}
\end{figure} \begin{itemize}
\item Operation (3D3-1N): assuming that $|k_{\ell_3}-k_{\ell_4}|\leq L^{-\gamma}$, and $|k_{\ell_1}-k_{\ell_2}|\geq L^{-\gamma+2\gamma_0/3}$ if $(\ell_1,\ell_2)$ have opposite directions, we remove the atoms $(v_1,v_2)$ and all bonds. \item Step (3D3-2G): assuming the negation of the conditions in (3D3-1N), we remove $(v_1,v_2)$ and all the bonds. \item Step (3D3-3G): assuming the conditions in (3D3-1N), we remove $(v_1,v_2)$ and all the bonds, but add a new bond $\ell_5$ between $v_3$ and $v_4$ (not drawn in Figure \ref{fig:3d3}), which goes from $v_4$ to $v_3$ if $\ell_3$ goes from $v_1$ to $v_3$ and vice versa. \item Steps (3D3-4G)--(3D3-5G): we remove the atoms $(v_1,v_2)$ and all the bonds. \item Step (3D3-6G): we remove the atoms $(v_1,\cdots,v_4)$ and all the bonds. \end{itemize} \begin{prop}\label{3d3prop} For operation (3D3-1N) we have $\mathfrak D\lesssim 1$ and $\Delta\nu=\Delta V_3=0$. For operations (3D3-2G)--(3D3-6G) we have $\mathfrak D\lesssim L^{-5\gamma_0/8}$. Note the absence of $(\log L)^C$ loss for (3D3-1N). \end{prop}
\begin{proof} First consider the four operations other than (3D3-1N) and (3D3-3G). In each scenario in Figures \ref{fig:3d3}--\ref{fig:3d3new}, except for (3D3-6G), we always have $\Delta\chi=-2$, so Lemma \ref{basiccount} (3) implies $\mathfrak D\lesssim L^{-5\gamma_0/8}$ if the bonds $(\ell_3,\ell_4)$ have the same direction viewing from $(v_1,v_2)$, or if $|k_{\ell_3}-k_{\ell_4}|\geq L^{-\gamma}$, or if $|k_{\ell_1}-k_{\ell_2}|\leq L^{-\gamma+2\gamma_0/3}$ and $(\ell_1,\ell_2)$ have opposite directions, in the same way as in the proof of Proposition \ref{3s3prop} above. These observations are sufficient to prove $\mathfrak D\lesssim L^{-5\gamma_0/8}$ for (3D3-2G), (3D3-4G) and (3D3-5G), using also the LG assumption for $v_3$ in (3D3-5G). As for (3D3-6G) we have $\Delta\chi\in\{-4,-5\}$; if $\Delta\chi=-5$ the result follows from applying Lemma \ref{basiccount} (3) for $(k_{\ell_1},\cdots,k_{\ell_4})$ and then applying Lemma \ref{basiccount} (4) for $(k_{\ell_5},\cdots,k_{\ell_9})$. If $\Delta\chi=-4$, then by Lemma 9.14 of \cite{DH21} we know that $k_{\ell_3}\pm k_{\ell_5}$ is fixed and $|k_{\ell_3}|^2\pm|k_{\ell_5}|^2$ is fixed up to distance $O(n\delta^{-1}L^{-2\gamma})$, with a suitable choice of $\pm$ (note also that $n\lesssim (\log L)^C$). We then apply Lemma \ref{basiccount} (3) for $(k_{\ell_1},k_{\ell_2},k_{\ell_3},k_{\ell_5})$ using also the LG assumption for $v_3$, and then apply Lemma \ref{basiccount} (1) for $(k_{\ell_6},k_{\ell_7})$ and $(k_{\ell_8},k_{\ell_9})$ to get $\mathfrak D\lesssim L^{-5\gamma_0/8}$.
Next consider (3D3-1N). The fact $\Delta\nu=\Delta V_3=0$ is clear. Moreover this operation may not affect any ladder of length $\geq1$, or it may reduce the length of one such ladder by one; in the latter case it removes a factor $\min((\log L)^2,1+\delta L^{2\gamma}P)$ from the product $\mathfrak P$, where $P\sim |k_{\ell_3}-k_{\ell_4}|$. In either case, since $\Delta\chi=-2$, by Lemma \ref{basiccount} (3) we have $\mathfrak D\lesssim 1$ in the same way as (\ref{laddersharp}).
Finally consider (3D3-3G), where $\Delta\chi=-1$. To get a decoration of $\mathbb M_{\mathrm{pos}}$ from that of $\mathbb M_{\mathrm{pre}}$, we simply define $k_{\ell_5}=k_{\ell_3}$ for the newly added bond $\ell_5$. This does not affect the LG assumptions, in the same way as in the proof of Proposition \ref{3s3prop} above. Moreover, we have $\mathfrak C_{\mathrm{pre}}\lesssim\delta^{-1}L^{d-\gamma-2\gamma_0/3}\mathfrak C_{\mathrm{pos}}$ and consequently $\mathfrak D\lesssim L^{-5\gamma_0/8}$, also in the same way as in the proof of Proposition \ref{3s3prop}, using the assumption $|k_{\ell_1}-k_{\ell_2}|\geq L^{-\gamma+2\gamma_0/3}$ if $(\ell_1,\ell_2)$ have opposite directions. \end{proof} \subsubsection{Degree 3 and 4 atoms connected by a double bond} In this operation, we assume there is an atom $v_1$ of degree 3, and another atom $v_2$ of degree 4, that are connected by a double bond $(\ell_1,\ell_2)$. Then $\mathbb M_{\mathrm{pre}}$ must contain one of the atomic groups shown in Figure \ref{fig:3d4}.
\begin{figure}
\caption{The atomic groups involved in operation (3D4-G). In total there are 4 scenarios.}
\label{fig:3d4}
\end{figure} \begin{itemize} \item Operation (3D4-G): we remove the atoms $(v_1,v_2)$ and all the bonds. \end{itemize} \begin{prop}\label{3d4prop} We have $\mathfrak D\lesssim L^{-5\gamma_0/8}$ for operation (3D4-G). \end{prop} \begin{proof} In each case in Figure \ref{fig:3d4} we have $\Delta\chi=-3$, so the result follows from Lemma \ref{basiccount} (4). \end{proof} \subsubsection{Degree 3 and 2 atoms connected} In this operation, we assume there is an atom $v_1$ of degree 3, and another atom $v_2$ of degree 2, that are connected. Note that they must be connected by a single bond $\ell_1$, otherwise there would be a bridge. Then, $\mathbb M_{\mathrm{pre}}$ must contain one of the atomic groups shown in Figure \ref{fig:3s2}.
\begin{figure}
\caption{The atomic groups involved in step (3S2-G). In total there are 4 scenarios.}
\label{fig:3s2}
\end{figure} \begin{itemize} \item Step (3S2-G): we remove the atoms $(v_1,v_2)$ and all the bonds. \end{itemize} \begin{prop}\label{3s2prop} We have $\mathfrak D\lesssim L^{-5\gamma_0/8}$ for operation (3S2-G). \end{prop}
\begin{proof} In each case we have $\Delta\chi=-2$, so the result follows from Lemma \ref{basiccount} (3), noting also that $|P|\gtrsim L^{-\gamma+\eta}$ if $\ell_1$ and $\ell_2$ have opposite directions, due to the LG assumption. \end{proof} \subsubsection{Degree 3 atom removal}\label{3r} In this operation, we assume there is an atom $v$ of degree 3, which is connected to three atoms $v_j\,(1\leq j\leq 3)$ of degree 4, by three single bonds $\ell_j\,(1\leq j\leq 3)$. In step (3R-2G) we further assume that, there is a \emph{special} bond $\ell_1'$ (i.e. a single bond connecting two degree $3$ atoms, such that they have two double bonds connecting to two different atoms) in the molecule (or component) after removing the atom $v$ and the bonds $\ell_j$. In this case, suppose $\ell_1'$ connects atoms $v_1'$ and $v_2'$, $v_1'$ is connected to $v_3'$ by a double bond $(\ell_2',\ell_3')$, and $v_2'$ is connected to $v_4'$ by a double bond $(\ell_4',\ell_5')$, see Figure \ref{fig:3rproof}.
\begin{figure}
\caption{The functional group involved in operation (3R-2G). Here $v_1,v_2,v_3$ are not drawn; some of them may coincide with some $v_j'$. Also we only draw the scenario where $\ell_1'$ becomes a bridge after removing $v$, but the other scenario is also possible.}
\label{fig:3rproof}
\end{figure} \begin{itemize} \item Operation (3R-1N): we remove the atom $v$ and all the bonds. \item Operation (3R-2G): we remove the atoms $(v,v_1',v_2')$ and all the bonds. \end{itemize} \begin{prop}\label{3rprop} We have $\mathfrak D\lesssim(\log L)^C$ for operation (3R-1N), and also $\Delta\nu=\Delta V_3=2$. For operation (3R-2G) we have $\mathfrak D\lesssim L^{-5\gamma_0/8}$. \end{prop} \begin{proof} For (3R-1N) we have $\Delta\chi=-2$, so the result follows from Lemma \ref{basiccount} (2) and simple calculations. As for (3R-2G), we have $\Delta\chi\in\{-4,-5\}$. If $\Delta\chi=-5$, then the result follows from applying Lemma \ref{basiccount} (2) for $(k_{\ell_1},k_{\ell_2},k_{\ell_3})$ and then applying Lemma \ref{basiccount} (4) for $(k_{\ell_1'},\cdots,k_{\ell_5'})$.
If $\Delta\chi=-4$, this means that $\ell_1'$ becomes a bridge after removing $v$, see Figure \ref{fig:3rproof}. Since $\ell_1'$ is not a bridge in $\mathbb M_{\mathrm{pre}}$, we know $v$ must have at least one bond connecting to each of the two components after removing $v$ and $\ell_1'$. Without loss of generality, assume $v$ has only one bond, say $\ell_1$, connecting to an atom $v_1$ in $X$ (the component containing $(v_1',v_3')$), then by Lemma 9.14 of \cite{DH21} we know that $k_{\ell_1}\pm k_{\ell_1'}$ is fixed and $|k_{\ell_1}|^2\pm|k_{\ell_1'}|^2$ is fixed up to distance $O(n\delta^{-1}L^{-2\gamma})$, with a suitable choice of $\pm$ (note also that $n\lesssim (\log L)^C$). Therefore, we can apply Lemma \ref{basiccount} (4) for $(k_{\ell_1},k_{\ell_2},k_{\ell_3},k_{\ell_2'},k_{\ell_3'})$ and then apply Lemma \ref{basiccount} (1) for $(k_{\ell_4'},k_{\ell_5'})$ (using also the LG assumption) to get $\mathfrak D\lesssim L^{-5\gamma_0/8}$. \end{proof} \subsubsection{Degree 2 atom removal} In this operation, we assume there is an atom $v$ of degree 2, connected to one or two atom(s) of degree 2 or 4. \begin{itemize} \item Operation (2R-1F): assuming $v$ is connected to a degree 4 atom by a double bond, we remove the atom $v$ and all the bonds. \item Operation (2R-2F): assuming $v$ is connected to a degree 4 atom by a single bond, and also connected to another atom of degree 2 or 4 by a single bond, we remove the atom $v$ and all the bonds. \item Operation (2R-3F): assuming $v$ is connected to two degree 2 atoms $v_1$ and $v_2$ by two single bonds, such that neither $v_1$ nor $v_2$ is connected to a degree 3 atom, we remove the atoms $(v, v_1,v_2)$ and all the bonds. \item Operation (2R-4F): assuming $v$ is connected to a degree 2 atom $v'$ by a double bond, we remove the atoms $(v,v')$ and all the bonds. \end{itemize} \begin{prop}\label{2rprop} We have $\mathfrak D\lesssim L^{-\eta/2}$ for operations (2R-1F)--(2R-4F). 
For (2R-1F) and (2R-4F) we have $\Delta V_3=\Delta \nu=0$, for (2R-2F) we have $\Delta\nu=0$ and $\Delta V_3\geq 1$, and for (2R-3F) we have $\Delta V_3\geq 0$ and $\Delta\nu\leq -2$. \end{prop} \begin{proof} In each case we have $\Delta\chi=-1$, so the bound for $\mathfrak D$ follows from Lemma \ref{basiccount} (1) using also the LG assumption. The other statements follow from simple calculations. \end{proof} \subsection{The algorithm}\label{alg} The algorithm is described as a big loop. Once we enter the loop, we shall follow a set of rules to choose the next operation depending on the current molecule $\mathbb M$ and possible assumptions made on the decoration. In some cases we may also choose a sequence of successive operations, again following a specific set of rules, until we are done with this execution of the loop and return to the start of the loop. The loop ends when $\mathbb M$ contains only isolated atoms. \subsubsection{Description of the loop}\label{loop} The loop is described as follows. Start with a molecule $\mathbb M$. \begin{enumerate} \item If $\mathbb M$ contains a bridge, then remove it using (BR-N). Go to (1). \item Otherwise, if $\mathbb M$ contains two degree 3 atoms $v_1$ and $v_2$ connected by a single bond $\ell_1$, then: \begin{enumerate} \item If $\mathbb M$ contains one atomic group in Figure \ref{fig:3s3new}, then perform (3S3-5G). Go to (1). \item Otherwise, $\mathbb M$ contains the atomic group in Figure \ref{fig:3s3}. If it satisfies (i) and (ii) in Section \ref{3s3}, and $d(v_3)=\cdots =d(v_6)=4$, then we perform (3S3-1N) or (3S3-2G), depending on whether the conditions in (3S3-1N) are met. Go to (1). \item If it satisfies (i) and (ii) in Section \ref{3s3}, but (say) $d(v_3)$ and $d(v_5)$ are not both 4, then we perform (3S3-3G) or (3S3-2G), depending on whether the conditions in (3S3-1N) are met. If after (3S3-3G) a triple bond forms between $v_3$ and $v_5$, immediately remove it by (TB-1N)--(TB-2N). Go to (1). 
\item If either (i) or (ii) in Section \ref{3s3} is violated, then we perform (3S3-4G). Go to (1). \end{enumerate} \item Otherwise, if $\mathbb M$ contains two degree 3 atoms $v_1$ and $v_2$ connected by a double bond $(\ell_1,\ell_2)$, then: \begin{enumerate} \item If $\mathbb M$ contains the atomic group in Figure \ref{fig:3d3} corresponding to (3D3-4G) or (3D3-5G), then we perform the corresponding step. Go to (1). \item Otherwise, $\mathbb M$ contains the atomic group in Figure \ref{fig:3d3} corresponding to (3D3-1N)--(3D3-3G). This can be seen as the start of a ladder. Now, if and while this ladder \emph{continues} (i.e. $v_3$ and $v_4$ are connected by a double bond, and they are connected to two different atoms $v_5$ and $v_6$ by two single bonds of opposite directions viewing from $(v_3,v_4)$), we perform (3D3-1N) or (3D3-2G), depending on whether the conditions in (3D3-1N) are met. Proceed with (c) below. \item Now assume the ladder does not continue. Then: \begin{enumerate} \item If $v_3$ and $v_4$ are like in Figure \ref{fig:3d3new}, then perform (3D3-6G); if $v_3$ and $v_4$ are like (3D3-5G) in Figure \ref{fig:3d3} after removing $(v_1,v_2)$, then perform (3D3-1N) followed by (3D3-5G). Go to (1). \item Otherwise, if not all atoms in the current component other than $(v_1,v_2)$ have degree 4, we perform (3D3-3G) or (3D3-2G), depending on whether the conditions in (3D3-1N) are met. If after (3D3-3G) a triple bond forms between $v_3$ and $v_4$, immediately remove it by (TB-1N)--(TB-2N). Go to (1). \item Otherwise, we perform (3D3-1N). Go to (1) but scan within this component. \end{enumerate} \end{enumerate} \item Otherwise, if $\mathbb M$ contains a degree 3 atom $v_1$ connected to a degree 4 atom $v_2$ by a double bond $(\ell_1,\ell_2)$, then we have one atomic group in Figure \ref{fig:3d4}. We perform (3D4-G). Go to (1). 
\item Otherwise, if $\mathbb M$ contains a degree 3 atom $v_1$ connected to a degree 2 atom $v_2$, then we have one atomic group in Figure \ref{fig:3s2}. We perform (3S2-G). Go to (1). \item Otherwise, if $\mathbb M$ contains a degree 3 atom $v$, then $v$ must be connected to three degree 4 atoms $v_j\,(1\leq j\leq 3)$ by three single bonds $\ell_j\,(1\leq j\leq 3)$. Then: \begin{enumerate} \item If the component after removing $v$ and $\ell_j$ contains a special bond (see Section \ref{3r}), then we perform (3R-2G). Go to (1). \item Otherwise, we perform (3R-1N). Go to (1). \end{enumerate} \item Otherwise, $\mathbb M$ only contains atoms of degree (0 and) 2 and 4. Then, we find an atom of degree $2$, which must be in one of the cases corresponding to operations (2R-1F)--(2R-4F); perform the corresponding operation. Go to (1). \end{enumerate}
We make a few remarks about the algorithm, which are useful in the proof of Proposition \ref{moleprop2}.
\begin{enumerate}[{1.}] \item There is no triple bond when we perform any operation other than (TB-1N)--(TB-2N). This is because only operations (3S3-3G) and (3D3-3G) may create triple bonds, but they are immediately removed using (TB-1N)--(TB-2N), as in (2-c) and (3-c-i). Similarly there is no bridge when we perform any operation other than (TB-1N)--(TB-2N) or (BR-N), because operation (BR-N) has the top priority; moreover, if we are in (3-b), i.e. the ladder continues, then the operations (3D3-1N) and (3D3-2G) cause the same change on $\mathbb M$, and this change does not create any bridge. \item In (3-c-ii), after (3D3-3G), it cannot happen that $v_3$ and $v_4$ are connected by a triple bond, and $d(v_3)=d(v_4)=4$. In fact, in this case the two extra single bonds $\ell_1'$ and $\ell_2'$ from $v_3$ and $v_4$ must have opposite directions, so either the ladder continues, or the bonds $\ell_1'$ and $\ell_2'$ shares a common atom. This means we are in either (3-b) or (3-c-i) before performing (3D3-3G), which is impossible. \item When executing a ``Go to" sentence, we may proceed to scan the whole molecule for the relevant structures, except in (3-c-iii), where we only scan \emph{the current component}. Note that after performing (3D3-1N) in (3-c-iii), $v_3$ and $v_4$ will have degree 3, and all other atoms in the current component will have degree 4. Therefore the next operation(s) we perform in this component, following our algorithm, may be (BR-N), (3S3-1N)--(3S3-5G), (3D3-4G)--(3D3-5G), (3D4G), (3R-1N)--(3R-2G), possibly accompanied by (TB-1N)--(TB-2N), but \emph{cannot} be (3D3-1N)--(3D3-3G) because the ladder does not continue. 
\item In the whole process we never have a saturated component (which can be verified for (3S3-3G) and (3D3-3G) by our algorithm, and is obvious for the other operations), thus in (7) there must be at least one degree 2 atom (unless there are only isolated atoms, in which case the loop ends; note that we are also not considering degree 1 atoms, as those imply the existence of bridges). \end{enumerate}
\end{document} |
\begin{document}
\title{The Child is Father of the Man: Foresee the Success \\ at the Early Stage}
\numberofauthors{2} \author{ \alignauthor Liangyue Li\\ \affaddr{Arizona State University}\\ \email{liangyue@asu.edu}
\alignauthor Hanghang Tong\\ \affaddr{Arizona State University}\\ \email{hanghang.tong@asu.edu} }
\hide{ \numberofauthors{8}
\author{
\alignauthor Ben Trovato\titlenote{Dr.~Trovato insisted his name be first.}\\
\affaddr{Institute for Clarity in Documentation}\\
\affaddr{1932 Wallamaloo Lane}\\
\affaddr{Wallamaloo, New Zealand}\\
\email{trovato@corporation.com}
\alignauthor G.K.M. Tobin\titlenote{The secretary disavows any knowledge of this author's actions.}\\
\affaddr{Institute for Clarity in Documentation}\\
\affaddr{P.O. Box 1212}\\
\affaddr{Dublin, Ohio 43017-6221}\\
\email{webmaster@marysville-ohio.com}
\alignauthor Lars Th{\o}rv{\"a}ld\titlenote{This author is the one who did all the really hard work.}\\
\affaddr{The Th{\o}rv{\"a}ld Group}\\
\affaddr{1 Th{\o}rv{\"a}ld Circle}\\
\affaddr{Hekla, Iceland}\\
\email{larst@affiliation.org} \and
\alignauthor Lawrence P. Leipuner\\
\affaddr{Brookhaven Laboratories}\\
\affaddr{Brookhaven National Lab}\\
\affaddr{P.O. Box 5000}\\
\email{lleipuner@researchlabs.org}
\alignauthor Sean Fogarty\\
\affaddr{NASA Ames Research Center}\\
\affaddr{Moffett Field}\\
\affaddr{California 94035}\\
\email{fogartys@amesres.org}
\alignauthor Charles Palmer\\
\affaddr{Palmer Research Laboratories}\\
\affaddr{8600 Datapoint Drive}\\
\affaddr{San Antonio, Texas 78229}\\
\email{cpalmer@prl.com} }
\additionalauthors{Additional authors: John Smith (The Th{\o}rv{\"a}ld Group, email: {\texttt{jsmith@affiliation.org}}) and Julius P.~Kumquat (The Kumquat Consortium, email: {\texttt{jpkumquat@consortium.net}}).} \date{30 July 1999}
}
\maketitle
\begin{abstract}
Understanding the dynamic mechanisms that drive the high-impact scientific work (e.g., research papers, patents) is a long-debated research topic and has many important implications, ranging from personal career development and recruitment search, to the jurisdiction of research resources. Recent advances in characterizing and modeling scientific success have made it possible to forecast the long-term impact of scientific work, where data mining techniques, supervised learning in particular, play an essential role. Despite much progress, several key algorithmic challenges in relation to predicting long-term scientific impact have largely remained open. In this paper, we propose a joint predictive model to forecast the long-term scientific impact at the early stage, which simultaneously addresses a number of these open challenges, including the scholarly feature design, the non-linearity, the domain-heterogeneity and dynamics. In particular, we formulate it as a regularized optimization problem and propose effective and scalable algorithms to solve it. We perform extensive empirical evaluations on large, real scholarly data sets to validate the effectiveness and the efficiency of our method.
\hide{
Understanding the underlying pattern of items' impact can help with people's attention economy. In this paper, we focus on predicting individual paper's future citation count by looking only at its first three years' citation history. Prior studies assume that all the papers' citations are governed by the same underlying process. Instead of learning a single regression model for the whole paper corpus, we partition the papers based on their citation behaviors and learn a regression model for each domain. To leverage the similarity of closely related domains, we propose to jointly learn these regression models. The framework can be generalized to linear and non-linear regression and enjoys closed-form solution. To tackle the computation challenges of the non-linear learning algorithm, we design a fast approximation algorithm using low-rank matrix approximation techniques. We also give the correctness proof and error bound analysis of our algorithms. The experimental results on real world citation dataset clearly show the effectiveness and efficiency of our algorithms. } \end{abstract}
\category{H.2.8}{Database Management}{Database applications}[Data mining] \keywords{long term impact prediction; joint predictive model}
\section{Introduction} \label{sec:intro} Understanding the dynamic mechanisms that drive the high-impact scientific work (e.g., research papers, patents) is a long-debated research topic and has many important implications, ranging from personal career development and recruitment search, to the jurisdiction of research resources. Scholars, especially junior scholars, who could master the key to producing high-impact work would attract more attention as well as research resources, and thus put themselves in a better position in their career development. High-impact work remains one of the most important criteria for various organizations (e.g., companies, universities and governments) to identify the best talents, especially at their early stages. It is highly desirable for researchers to judiciously search the right literature that can best benefit their research.
\hide{ The impact/popularity of an item reflects how much influence it has on the general public. For example, Ellen DeGeneres' selfie with other celebrities at Oscars 2014 set the record of most retweeted tweet of all time. In December 2014, Psy's Gangnam Style music video has been viewed so many times that Youtube had to upgrade to a 64-bit integer. The question ``Lifestyle: What can I learn/know right now in 10 minutes that will be useful for the rest of my life?" has more than 10 million views on Quora. In these examples, the number of retweets and views reflect the impact of the tweets, videos and questions. The items with high impact often have more information, importance, and insights and can spur more discussion, comments and inspiration. Understanding the pattern of impact can not only prevent us from drowning in information explosion, but also is of importance for policy maker, marketing practitioner, etc. }
Recent advances in characterizing and modeling scientific success have made it possible to forecast the long-term impact of scientific work. Wuchty et al.~\cite{Wuchty18052007} observe that papers with multiple authors receive more citations than solo-authored ones. Uzzi et al.~\cite{Uzzi25102013} find that the highest-impact science work is primarily grounded in atypical combinations of prior ideas while embedding them in conventional knowledge frames. Recently, Wang et al.~\cite{wang2013quantifying} develop a mechanistic model for the citation dynamics of individual papers. In the data mining community, efforts have also been made to predict the long-term success. Castillo et al.~\cite{castillo2007estimating} estimate the number of citations of a paper based on the information of past articles written by the same author(s). Yan et al.~\cite{DBLP:conf/cikm/YanTLSL11} design effective content (e.g., topic diversity) and contextual (e.g., author's {\it h}-index)\hh{liangyue: what kind of features, e.g., content-based, contextual features? mention that} features for the prediction of future citation counts. Despite much progress, the following four key algorithmic challenges in relation to predicting long-term scientific impact have largely remained open.
\begin{itemize} \setlength\itemsep{1pt} \item [C1] {\it Scholarly feature design:} many factors could affect scientific work's long-term impact, e.g., research topics, author reputations, venue ranks, citation networks' topological features, etc. Among them, which bears the most predictive power? \item [C2] {\it Non-linearity:} the effect of the above scholarly features on the long-term scientific impact might be way beyond a linear relationship. \item [C3] {\it Domain heterogeneity:} the impact of scientific work in different fields or domains might behave differently; yet some closely related fields could still share certain commonalities. Thus, a one-size-fits-all or one-size-fits-one solution might be sub-optimal. \item [C4] {\it Dynamics:} with the rapid development of science and engineering, a significant number of new research papers are published each year, even on a daily basis with the advent of arXiv\footnote{arxiv.org}. The predictive model needs to handle such stream-like data efficiently, to reflect the recency of the scientific work. \end{itemize}
In this paper, we propose a joint predictive model--\underline{I}mpact Crystal \underline{Ball} (\textsf{iBall}~in short) -- to forecast the long term scientific impact at an early stage by collectively addressing the above four challenges. First (for C1), we found that the citation history of a scholarly entity (e.g., paper, researcher, venue) in the first three years (e.g., since its publication date) is a strong indicator of its long-term impact (e.g., the accumulated citation count in ten years); and adding additional contextual or content features brings little marginal benefits in terms of prediction performance. This not only largely simplifies the feature design, but also enables us to forecast the long-term scientific impact at its early stage. Second (for C2), our joint predictive model is flexible, being able to characterize both the linear and non-linear relationship between the features and the impact score. Third (for C3), we propose to jointly learn a predictive model to differentiate distinctive domains, while taking into consideration the commonalities among these similar domains (see an illustration in Figure~\ref{fig:domain_relation}).
Fourth (for C4), we further propose a fast on-line update algorithm to adapt our joint predictive model efficiently over time to accommodate newly arrived training examples (e.g., newly published papers).
\hide{
The challenges in predicting a paper's future citation count come in two folds: first, the distribution of citation counts exhibits heavy tail. That is, majority of the papers have no or very few citations. For example, in AMiner citation network dataset~\cite{DBLP:conf/kdd/TangZYLZS08}, 87.45\% of the papers have zero citation 10 years after publication. Second, the dynamic process that governs each individual paper is too noisy to be amenable to quantification as noted in~\cite{DBLP:conf/aaai/ShenWSB14,wang2013quantifying}. Besides, the papers' yearly citations exhibits drastically complex patterns. For instance, some papers have no or few citations throughout their life cycle, some papers receive intense attentions in certain years after they publish. Such two challenges make a single regression model incompetent for predicting a paper's citation count as in the pilot study~\cite{DBLP:conf/cikm/YanTLSL11}.
In this paper, we propose to stratify the papers according to their citation behavior observed within the first three years of publication and then learn a regression model in each strata. To further improve the prediction within each strata, we design a joint learning framework to learn a regression model for each strata together. The reasoning is that strata with similar citation behavior would share similar learning parameters and such knowledge should be leveraged across strata. The benefit is especially pronounced for strata with very few training samples. For example, in Figure~\ref{fig:domain_relation}, we use nodes to represent different domains and each domain has some papers represented by their citation history. The strength of links between domains indicate the similarity between the two domains. In this figure, Domain 1, 2 and 3 have triangular relationship, intuitively the processes that govern the citation behavior in each of these three domains might be similar; while Domain 4 is only connected Domain 2, its citation pattern is only similar to that of Domain 2. Our joint learning model will enforce such domain relations.
We show in the paper that the joint learning model can be generalized to both linear and non-linear models. The experiment results shows that the non-linear models exhibit exceptionally good prediction accuracy, however, its power is limited by the large space consumption and low efficiency. To speed up the joint non-linear method, we carefully design an approximation algorithm taking advantage of low-rank approximation in matrix computation. The experiment shows ... }
\begin{figure}
\caption{An illustrative example of the proposed joint predictive model. Papers from the same domain (e.g., AI, Databases, Data Mining and Bio) share similar patterns in terms of attracting citations over time. Certain domains (e.g., AI and Data Mining) are more related with each other than other domains (e.g., AI and Bio). We want to jointly learn four predictive models (one for each domain), with the goal of encouraging the predictive models from more related domains (e.g., AI and Data Mining) to be `similar' with each other.}
\label{fig:domain_relation}
\end{figure}
Our main contributions can be summarized as follows: \begin{itemize} \item {\bf Algorithms:} we propose a joint predictive model --\textsf{iBall} -- for the long-term scientific impact prediction problem, together with its efficient solvers.
\item {\bf Proofs and analysis:} we analyze the correctness, the approximation quality and the complexity of our proposed algorithms.
\item {\bf Empirical evaluations:} we conduct extensive experiments to demonstrate the effectiveness and efficiency of our proposed algorithms.
\end{itemize}
The rest of the paper is organized as follows. Section~\ref{sec:prob} gives the problem definition. Section~\ref{sec:empirical_obs} provides empirical observation of the AMiner citation network dataset. Section~\ref{sec:algs} proposes our joint model and the fast algorithm. Section~\ref{sec:exp} shows the experimental results. Section~\ref{sec:rel} reviews related work and the paper concludes in Section~\ref{sec:con}.
\section{Problem Statement} \label{sec:prob} \begin{table}[!t] \caption{Symbols} \centering
\begin{tabular}{| c | p{7cm}|} \hline \bf{Symbols} & \bf{Definition}\\ \hline \hline $n_d$ & number of domains\\ $n_i$ & number of training samples in the $i$-th domain\\ $m_i$ & number of new training samples in the $i$-th domain\\ $d$ & feature dimensionality\\ $\matsup{X_t}{i}$ & feature matrix of training samples from the $i$-th domain at time $t$\\ $\matsup{x_{t+1}}{i}$ & feature matrix of new training samples from the $i$-th domain at time $t+1$\\ $\matsup{Y_t}{i}$ & impact vector of training samples from the $i$-th domain at time $t$\\ $\matsup{y_{t+1}}{i}$ & impact vector of new training samples from the $i$-th domain at time $t+1$\\ $\mat{A}$ & adjacency matrix of domain relation graph\\ $\matsup{w}{i}$ & model parameter for the $i$-th domain\\ $\matsup{K}{i}$ & kernel matrix of training samples in the $i$-th domain\\ $\matsup{K}{ij}$ & cross domain kernel matrix of training samples in the $i$-th and $j$-th domains\\ \hline \end{tabular}
\label{tab:symbol} \end{table}
In this section, we first present the notations used throughout the paper and then formally define the long-term scientific impact prediction for scholarly entities (e.g., research papers, researchers, conferences).
Table~\ref{tab:symbol} lists the main symbols used throughout the paper. We use bold capital letters (e.g., $\mat{A}$) for matrices, bold lowercase letters (e.g., $\mat{w}$) for vectors, and lowercase letters (e.g., $\lambda$) for scalars. For matrix indexing, we use a convention similar to Matlab as follows, e.g., we use $\mat{A}(i,j)$ to denote the entry at the $i$-th row and $j$-th column of a matrix $\mat{A}$,
$\mat{A}(i, :)$ to denote the $i$-th row of $\mat{A}$ and $\mat{A}(:, j)$ to denote the $j$-th column of $\mat{A}$. Besides, we use prime for matrix transpose, e.g., $\mat{A'}$ is the transpose of $\mat{A}$.
To differentiate samples from different domains at different time steps, we use superscript to index the domain and subscript to indicate timestamp.
For instance, $\matsup{X_t}{i}$ denotes the feature matrix of all the scholarly entities in the $i$-th domain at time $t$ and $\matsup{x_{t+1}}{i}$ denotes the feature matrix of new scholarly entities in the $i$-th domain at time $t+1$. Hence, $\matsup{X_{t+1}}{i} = [\matsup{X_t}{i};\matsup{x_{t+1}}{i}]$.
Similarly, $\matsup{Y_t}{i}$ denotes the impact vector of scholarly entities in the $i$-th domain at time $t$ and $\matsup{y_{t+1}}{i}$ denotes the impact vector of new scholarly entities in the $i$-th domain at time $t+1$. Hence, $\matsup{Y_{t+1}}{i} = [\matsup{Y_t}{i};\matsup{y_{t+1}}{i}]$. We will omit the superscript and/or subscript when the meaning of the matrix is clear from the context.
With the above notations, we are ready to define the long-term impact prediction problem in both static and dynamic settings as follows:
\begin{problem}{Static Long-term Scientific Impact Prediction}
\begin{description} \item[Given:] feature matrix $\mat{X}$ and impact $\mat Y$ of scholarly entities
\item[Predict:] the long-term impact of new scholarly entities \end{description} \end{problem}
We further define the dynamic impact prediction problem as: \begin{problem}{Dynamic Long-term Scientific Impact Prediction}
\begin{description} \item[Given:] feature matrix $\mat{X_t}$ and new training feature matrix $\mat{x_{t+1}}$ of scholarly entities, the impact vector $\mat{Y_t}$, and the impact vector of new training samples $\mat{y_{t+1}}$
\item[Predict:] the long-term impact of new scholarly entities \end{description}
\end{problem}
\section{Empirical Observations} \label{sec:empirical_obs} \begin{figure*}
\caption{Prediction error comparison with different features.}
\label{fig:feature_design}
\caption{RMSE comparisons using different methods. The citation count is normalized in this figure. See Section~\ref{sec:exp} for normalization details.}
\label{fig:non_linearity}
\caption{Visualization of papers' citation behavior. Different colors encode different citation behaviors.}
\label{fig:citation_pattern}
\end{figure*}
In this section, we perform an empirical analysis to highlight some of the key challenges (summarized in introduction section), on {\em AMiner} citation network~\cite{DBLP:conf/kdd/TangZYLZS08}. This is a rich real dataset for bibliography network analysis and mining. The dataset contains 2,243,976 papers, 1,274,360 authors, and 8,882 computer science venues. For each paper, the dataset provides its titles, authors, references, publication venue and publication year. The papers date from year 1936 to 2013. In total, the dataset has 1,912,780 citation relationships extracted from ACM library.
\subsection{Power-law distribution}
The distribution of the citation counts of all the papers and the distribution of the number of citations received within 10 years after publication are presented in Figures~\ref{subfig:paper_citation} and \ref{subfig:paper_10yr_citation}. We also show the distribution of citation counts of all the authors and all the venues respectively in Figures~\ref{subfig:author_citation} and \ref{subfig:venue_citation}. It is clear that all these citation counts follow a power-law distribution. Nearly 87.45\% of the papers have zero citations within 10 years.
\begin{figure}
\caption{Distribution of the number of all citations of papers.}
\label{subfig:paper_citation}
\caption{Distribution of the number of citations of papers received within 10 years after publication.}
\label{subfig:paper_10yr_citation}
\caption{Distribution of the number of all citations of authors.}
\label{subfig:author_citation}
\caption{Distribution of the number of all citations of venues.}
\label{subfig:venue_citation}
\caption{Citation distributions of AMiner citation dataset.}
\label{fig:citation_dist}
\end{figure}
\subsection{Feature design} Prior work~\cite{castillo2007estimating,DBLP:conf/cikm/YanTLSL11} has proposed some effective features for citation count prediction, e.g., topic features (topic rank, diversity), author features ({\it h}-index, productivity), venue features (venue rank, venue centrality). Other work~\cite{wang2013quantifying} makes predictions only on the basis of the early years' citation data and finds that the future impact of the majority of papers falls within the predicted citation range. We conduct experiments to compare the performance of different features. Figure~\ref{fig:feature_design} shows the root mean squared error using different features with a regression model for the prediction of 10 years' citation count. For example, `3 years' means using the first 3 years' citation as feature, and `3 years + content' means using the first 3 years' citation along with content features (e.g., topic, author features). The result shows that adding content features (the right three bars in the figure) brings little improvement for citation prediction.
\subsection{Non-linearity} To see if the feature has linear relationship with the citation, we compare the performance of different methods using only the first 3 years' citation history. In Figure~\ref{fig:non_linearity}, the non-linear models (\textsf{iBall}-fast, \textsf{iBall}-kernel, Kernel-combine) all outperform the linear models (\textsf{iBall}-linear, Linear-separate, Linear-combine). See Section~\ref{sec:algs} and \ref{sec:exp} for details of these models. It is clear that complex relationship between the features and the impact cannot be well characterized by a simple linear model - the prediction performance for all the linear models is even worse than the baseline method (using the summation of the first 3 years' citation counts).
\subsection{Domain heterogeneity} To get a sense of the dynamic patterns of the citation count, we construct a paper-age citation matrix $\mat{M}$, where $\mat{M_{ij}}$ indicates the number of citations the $i$-th paper receives in the $j$-th year after it gets published. The matrix $\mat{M}$ is then factorized as $\mat{M} \approx \mat{WH}$ using Non-negative Matrix Factorization (NMF)~\cite{lee1999learning}. We visualize the first six rows of $\mat{H}$ in Figure~\ref{fig:citation_pattern}, which reveal different clusters of citation dynamic patterns. As can be seen from the figure, the cyan line has a very small peak in the first 3 years and then fades out very quickly; the blue line picks up very fast in the early years and then fades out; the yellow line indicates a delayed pattern where the scientific work only receives some amount of attention decades after it gets published. This highlights that the impact of scientific work from different domains behaves differently.
\section{Proposed algorithms} \label{sec:algs}
In this section, we present our joint predictive model to forecast the long-term scientific impact at an early stage. We first formulate it as a regularized optimization problem; then propose effective, scalable and adaptive algorithms; followed up by theoretical analysis in terms of the optimality, the approximation quality as well as the computational complexity.
\subsection{iBall -- Formulations} Our predictive model applies to different types of scholarly entities (e.g., papers, researchers and venues). For the sake of clarity, we will use paper citation prediction as an example. As mentioned earlier, research papers are in general from different domains. We want to jointly learn a predictive model for each of the domains, with the design objective to leverage the commonalities between related domains. Here, the commonalities among different domains are described by a non-negative matrix $\mat{A}$, i.e., if the $i$-th and $j$-th domains are closely related, the corresponding $\mat{A}_{ij}$ entry will have a higher numerical value. Denoting the feature matrix for papers in the $i$-th domain by $\matsup{X}{i}$, the citation counts of papers in the $i$-th domain by $\mat{Y^{(i)}}$ and the model parameter for the $i$-th domain by $\matsup{w}{i}$, we have the following joint predictive model\hh{liangyue: replace all the prediction model by predictive model}:
\begin{equation} \begin{array}{rl} \min\limits_{\matsup{w}{i}, i=1,\ldots, n_d} & \sum\limits_{i=1}^{n_d} \mathcal{L}[f(\matsup{X}{i},\matsup{w}{i}), \matsup{Y}{i}] \\
& + \theta\sum\limits_{i=1}^{n_d} \sum\limits_{j=1}^{n_d} \mat{A_{ij}}g(\matsup{w}{i},\matsup{w}{j})+ \lambda \sum\limits_{i=1}^{n_d} \Omega(\matsup{w}{i})\\ \end{array} \label{eq:joint_model} \end{equation} \noindent where $f(\matsup{X}{i},\matsup{w}{i})$ is the prediction function for the $i$-th domain, $\mathcal{L}(.)$ is a loss function, $g(\matsup{w}{i},\matsup{w}{j})$ characterizes the relationship between the model parameters of the $i$-th and $j$-th domains, $\Omega(\matsup{w}{i})$ is the regularization term for model parameters and $\theta$, $\lambda$ are regularization parameters to balance the relative importance of each aspect.
As can be seen, this formulation is quite flexible and general. Depending on the loss function we use, our predictive model can be formulated as a regression or a classification task. Depending on the prediction function we use, we can have either linear or non-linear models. The core of our joint model is the second term that relates parameters of different models. If $\mat{A}_{ij}$ is large, meaning the $i$-th and $j$-th domains are closely related to each other, we want the function value $g(.)$ that characterizes the relationship between the parameters to be small.
{\it \textsf{iBall} -- linear formulation:} if the feature and the output can be characterized by a linear relationship, we can use a linear function as the prediction function and the Euclidean distance for the distance between model parameters. The linear model can be formulated as follows: \begin{equation} \begin{array}{rl}
\min\limits_{\matsup{w}{i}, i=1,\ldots, n_d} & \sum\limits_{i=1}^{n_d} \| \matsup{X}{i} \matsup{w}{i} - \matsup{Y}{i} \|_2^2 \\
& + \theta\sum\limits_{i=1}^{n_d} \sum\limits_{j=1}^{n_d} \mat{A_{ij}}\|\matsup{w}{i} - \matsup{w}{j} \|_2^2+ \lambda \sum\limits_{i=1}^{n_d} \|\matsup{w}{i}\|_2^2\\ \end{array} \label{eq:joint_linear} \end{equation} \noindent where $\theta$ is a balance parameter to control the importance of domain relations, and $\lambda$ is a regularization parameter. From the above objective function we can see that, if the $i$-th domain and $j$-th domain are closely related, i.e., $\mat{A}_{ij}$ is a large positive number, it encourages a smaller Euclidean distance between $\matsup{w}{i}$ and $\matsup{w}{j}$. The intuition is that for a given feature, it would have a similar effect in predicting the papers from two similar/closely related domains.
{\it \textsf{iBall} -- non-linear formulation:} As indicated in our empirical studies (Figure~\ref{fig:non_linearity}), the relationship between the features and the output (citation counts in ten years) is far beyond linear. Thus, we further develop the kernelized counterpart of the above linear model. Let us denote the kernel matrix of papers in the $i$-th domain by $\matsup{K}{i}$, which can be computed as $\matsup{K}{i}(a,b) = k(\matsup{X}{i}(a,:), \matsup{X}{i}(b,:))$, where $k(\cdot, \cdot)$ is a kernel function that implicitly computes the inner product in a high-dimensional reproducing kernel Hilbert space (RKHS)~\cite{aronszajn1950theory} . Similarly, we define the cross-domain kernel matrix by $\matsup{K}{ij}$, which can be computed as $\matsup{K}{ij}(a,b) = k(\matsup{X}{i}(a,:) , \matsup{X}{j}(b,:))$, reflecting the similarity between papers in the $i$-th domain and $j$-th domain. Different from the linear model where the model parameters in different domains share the same dimensionality (i.e., the dimensionality of the raw feature), in the non-linear case, the dimensionality of the model parameters are the same as the number of training samples in each domain, which is very likely to be different across different domains. Thus, we cannot use the same distance function for $g(.)$. To address this issue, the key is to realize that the predicted value of a test sample using kernel methods is a linear combination of the similarities between the test sample and all the training samples. Therefore, instead of restricting the model parameters to be similar, we impose the constraint that the predicted value of a test sample using the training samples in its own domain and using training samples in a closely related domain to be similar. The resulting non-linear model can be formulated as follows:
\begin{equation} \begin{array}{rl}
\min\limits_{\matsup{w}{i}, i=1,\ldots, n_d} & \sum\limits_{i=1}^{n_d} \| \matsup{K}{i} \matsup{w}{i} - \matsup{Y}{i} \|_2^2 \\
& + \theta\sum\limits_{i=1}^{n_d} \sum\limits_{j=1}^{n_d} \mat{A_{ij}}\|\matsup{K}{i}\matsup{w}{i} - \matsup{K}{ij}\matsup{w}{j} \|_2^2\\
& + \lambda \sum\limits_{i=1}^{n_d} \matsup{w}{i}' \matsup{K}{i}\matsup{w}{i}\\ \end{array} \label{eq:joint_kernel} \end{equation} \noindent where $\theta$ is a balance parameter to control the importance of domain relations, and $\lambda$ is a regularization parameter. From the above objective function we can see that, if the $i$-th domain and $j$-th domain are closely related, i.e., $\mat{A}_{ij}$ is a large positive number, the predicted value of papers in the $i$-th domain computed using training samples from the $i$-th domain ($\matsup{K}{i}\matsup{w}{i}$) should be similar to that using training samples from the $j$-th domain ($\matsup{K}{ij}\matsup{w}{j}$).
\subsection{iBall -- Closed-form Solutions} It turns out that both \textsf{iBall} ~linear and non-linear formulations have the following closed-form solutions (see proof in subsection~\ref{subsec:proof}\hh{fill in}):
\begin{equation} \mat{w} = \mat{S}^{-1} \mat{Y} \label{eq:linear_solution} \end{equation}
\noindent {\it \textsf{iBall} ~linear formulation.} In the linear case, we have that
$\mat{w} = [\matsup{w}{1};\ldots; \matsup{w}{n_d}]$, $\mat{Y} = [\matsup{X}{1}'\matsup{Y}{1};\ldots;\matsup{X}{n_d}'\matsup{Y}{n_d}]$, and $\mat{S}$ is a block matrix composed of $n_d \times n_d$ blocks, each of size $d \times d$, where $d$ is the feature dimensionality. $\mat{S}$ can be computed as follows:
\begin{equation} \begin{blockarray}{c c c c}
& \matindex{i-th block column} & \matindex{j-th block column} & \\
\begin{block}{[c c c]c}
\ldots & \ldots & \ldots & \\
\ldots & \matsup{X}{i}'\matsup{X}{i} + (\theta\sum\limits_{j=1}^{n_d} \mat{A_{ij}} + \lambda)\mat{I} & -\theta \mat{A_{ij} I} & \parbox{1.5cm}{\matindex{i-th block}\\ \matindex{row}} \\
\ldots & \ldots & \ldots \\
\end{block}
\end{blockarray} \label{eq:linear_S} \end{equation}
\hide{
The objective function given in Eq~\ref{eq:joint_kernel} can also be solved in a closed form as follows:
\begin{equation} \mat{w} = \mat{S}^{-1}\mat{Y} \label{eq:w_joint_kernel} \end{equation} }
\noindent {\it \textsf{iBall} ~non-linear formulation.} In the non-linear case, we have that $\mat{w} = [\matsup{w}{1};\ldots; \matsup{w}{n_d}]$, $\mat{Y}=[\matsup{Y}{1};\ldots;\matsup{Y}{n_d}]$, and $\mat{S}$ is a block matrix composed of $n_d \times n_d$ blocks with the $(i,j)$-th block of size $n_i \times n_j$, where $n_i$ is the number of training samples in the $i$-th domain. $\mat{S}$ can be computed as follows:
\begin{equation} \begin{blockarray}{c c c c}
& \matindex{i-th block column} & \matindex{j-th block column} & \\
\begin{block}{[c c c]c}
\ldots & \ldots & \ldots & \\
\ldots & (1 + \theta \sum\limits_{j=1}^{n_d} \mat{A_{ij}}) \matsup{K}{i}+\lambda\mat{I} & -\theta \mat{A_{ij}}\matsup{K}{ij} & \parbox{1.5cm}{\matindex{i-th block}\\\matindex{row}} \\
\ldots & \ldots & \ldots \\
\end{block} \end{blockarray} \label{eq:kernel_joint_S} \end{equation}
\subsection{iBall -- Scale-up with Dynamic Update} The major computation cost for the closed-form solutions lies in the matrix inverse $\mat{S}^{-1}$. In the linear case, the size of $\mat{S}$ is $(dn_d) \times (dn_d)$; and so its computational cost is manageable. However, this is not the case for the non-linear closed-form solution since the matrix $\mat{S}$ in Eq.~(\ref{eq:kernel_joint_S}) is of size $n \times n$, where $n=\sum_{i=1}^{n_d} n_i$, which is the number of all the training samples. It would be very expensive to store this dense matrix ($O(n^2)$ space) and to compute its inverse ($O(n^3)$ time); especially when the number of training samples is very large, and the model receives new training examples constantly over time (dynamic update). In this subsection, we devise an efficient algorithm to scale up the non-linear closed-form solution and efficiently update the model to accommodate the new training samples over time. The key idea of the \textsf{iBall} ~algorithm is to use a low-rank approximation of the $\mat{S}$ matrix to {\em avoid} the matrix inversion; and, at each time step, to efficiently update the low-rank approximation itself.
After new papers in all the domains are seen at time step $t+1$, the new $\mat{S}_{t+1}$ computed by Eq.~(\ref{eq:kernel_joint_S}) becomes:
\begin{equation} \begin{blockarray}{c c c c}
& \matindex{i-th block column} & \matindex{j-th block column} & \\
\begin{block}{[c c c]c}
\ldots & \ldots & \ldots & \\
\ldots & (1 + \theta \sum\limits_{j=1}^{n_d} \mat{A_{ij}}) \matsup{K_{t+1}}{i}+\lambda\mat{I} & -\theta \mat{A_{ij}}\matsup{K_{t+1}}{ij} & \parbox{1.5cm}{\matindex{i-th block}\\\matindex{row}} \\
\ldots & \ldots & \ldots \\
\end{block} \end{blockarray} \label{eq:new_S} \end{equation}
\noindent where $\matsup{K_{t+1}}{i}$ is the new within-domain kernel matrix for the $i$-th domain and $\matsup{K_{t+1}}{ij}$ is the new cross-domain kernel matrix for the $i$-th and $j$-th domains. The two new kernel matrices can be computed as follows: \begin{equation} \matsup{K_{t+1}}{i} = \begin{blockarray}{[c c]}
\matsup{K_t}{i} & (\matsup{k_{t+1}}{i})' \\
\matsup{k_{t+1}}{i} & \matsup{h_{t+1}}{i}\\ \end{blockarray} \quad \matsup{K_{t+1}}{ij} = \begin{blockarray}{[c c]}
\matsup{K_t}{ij} & \matsup{k_{t+1}}{ij_*} \\
\matsup{k_{t+1}}{i_*j} & \matsup{h_{t+1}}{i_* j_*}\\ \end{blockarray} \end{equation} \noindent where $\matsup{k_{t+1}}{i}$ is the matrix characterizing the similarity between new training samples and old training samples and can be computed as: $\matsup{k_{t+1}}{i}(a,b) = k(\matsup{x_{t+1}}{i}(a,:), \matsup{X_t}{i}(b, :))$; $\matsup{h_{t+1}}{i}$ is the similarity matrix among new training samples and can be computed as: $\matsup{h_{t+1}}{i}(a,b) = k(\matsup{x_{t+1}}{i}(a,:),\matsup{x_{t+1}}{i}(b,:))$. $\matsup{k_{t+1}}{i_*j}$ is the matrix characterizing the similarity between new training samples in the $i$-th domain and old training samples in the $j$-th domain and can be computed as: $\matsup{k_{t+1}}{i_*j}(a,b) = k(\matsup{x_{t+1}}{i}(a,:), \matsup{X_t}{j}(b, :))$. Similarly, $\matsup{k_{t+1}}{ij_*}$ measures the similarity between old training samples in the $i$-th domain and new training samples in the $j$-th domain and can be computed as: $\matsup{k_{t+1}}{ij_*}(a,b) = k(\matsup{X_t}{i}(a,:), \matsup{x_{t+1}}{j}(b,:))$; $\matsup{h_{t+1}}{i_* j_*}$ is the similarity matrix between new training samples from both $i$-th and $j$-th domains and is computed as: $\matsup{h_{t+1}}{i_* j_*}(a,b) = k(\matsup{x_{t+1}}{i}(a,:), \matsup{x_{t+1}}{j}(b,:))$.
\hide{
}
Given that $\mat{S}_t$ is a symmetric matrix, we can approximate it using top-$r$ eigen-decomposition as: $\mat{S}_t \approx \mat{U}_t \mat{\Lambda}_t \mat{U}'_t$, where $\mat{U}_t$ is an $n \times r$ orthogonal matrix and $\mat{\Lambda}_t$ is an $r\times r$ diagonal matrix with the largest $r$ eigenvalues of $\mat{S}_t$ on the diagonal. If we can directly update the eigen-decomposition of $\mat{S}_{t+1}$ after seeing the new training samples from all the domains, we can efficiently compute the new model parameters as follows: \begin{equation} \begin{array}{rl} \mat{w}_{t+1} &= \mat{S}_{t+1}^{-1} \mat{Y}_{t+1}\\
&= \mat{U}_{t+1}\mat{\Lambda}_{t+1}^{-1} \mat{U}'_{t+1}\mat{Y}_{t+1}\\ \end{array} \end{equation}
\noindent where $\mat{Y}_{t+1} = [\matsup{Y_t}{1};\matsup{y_{t+1}}{1};\ldots;\matsup{Y_t}{n_d};\matsup{y_{t+1}}{n_d}]$. Here, $\mat{\Lambda}_{t+1}^{-1}$ is an $r \times r$ diagonal matrix, whose diagonal entries are the reciprocals of the corresponding eigenvalues of $\mat{\Lambda}_{t+1}$. In this way, we avoid the computationally costly matrix inverse in the closed-form solution.
Comparing $\mat{S}_{t+1}$ with $\mat{S}_t$, we find that $\mat{S}_{t+1}$ can be obtained by inserting into $\mat{S}_t$, at the right positions, some rows and columns of the kernel matrices involving new training samples, i.e., $\matsup{k_{t+1}}{i}$, $\matsup{h_{t+1}}{i}$, $\matsup{k_{t+1}}{i_*j}$, $\matsup{k_{t+1}}{ij_*}$, $\matsup{h_{t+1}}{i_*j_*}$. From this perspective, $\mat{S}_{t+1}$ can be seen as the sum of the following two matrices: \begin{equation} \begin{array}{l}
\underbrace{\begin{blockarray}{c c c c}
& \matindex{i-th block column} & \matindex{j-th block column} & \\
\begin{block}{[c c c]c}
\ldots & \ldots & \ldots & \\
\ldots & \begin{bmatrix}
\alpha_i\matsup{K_t}{i} & \mat{0}\\
\mat{0} & \mat{0}\\
\end{bmatrix} & \begin{bmatrix}
-\theta \mat{A_{ij}}\matsup{K_t}{ij} & \mat{0}\\
\mat{0} & \mat{0}\\
\end{bmatrix} & \parbox{1.5cm}{\matindex{i-th block}\\\matindex{row}} \\
\ldots & \ldots & \ldots \\
\end{block} \end{blockarray}}_{\tilde{\mat{S}}_t}\\ +{\tiny \underbrace{\begin{blockarray}{c c c c}
& \matindex{i-th block column} & \matindex{j-th block column} & \\
\begin{block}{[c c c]c}
\ldots & \ldots & \ldots & \\
\ldots & \begin{bmatrix}
\mat{0}& \alpha_i(\matsup{k_{t+1}}{i})'\\
\alpha_i\matsup{k_{t+1}}{i} & \alpha_i \matsup{h_{t+1}}{i} + \lambda\mat{I}\\
\end{bmatrix} & \begin{bmatrix}
\mat{0} & -\theta \mat{A_{ij}}\matsup{k_{t+1}}{ij_*}\\
-\theta \mat{A_{ij}}\matsup{k_{t+1}}{i_*j} & -\theta \mat{A_{ij}}\matsup{h_{t+1}}{i_*j_*}\\
\end{bmatrix} & \parbox{1.0cm}{\matindex{i-th block}\\\matindex{row}} \\
\ldots & \ldots & \ldots \\
\end{block} \end{blockarray}}_{\Delta \mat{S}}}\\ \myeq \tilde{\mat{S}}_t + \Delta \mat{S} \end{array} \end{equation}
\noindent where we denote $1+\theta\sum_{j=1}^{n_d} \mat{A}_{ij}$ by $\alpha_i$. The top-$r$ eigen-decomposition of $\tilde{\mat{S}}_t$ can be directly written out from that of $\mat{S}_t$ as: $\tilde{\mat{S}}_t \approx \tilde{\mat{U}}_t \mat{\Lambda}_t \tilde{\mat{U}}'_t$, where $\tilde{\mat{U}}_t$ can be obtained by inserting into $\mat{U}_t$ corresponding rows of 0, the same row positions as we insert into $\mat{S}_t$ the new kernel matrices. We propose Algorithm~\ref{alg:eigen_update} to update the eigen-decomposition of $\mat{S}_{t+1}$, based on the observation that $\mat{S}_{t+1}$ can be viewed as $\tilde{\mat{S}}_t$ perturbed by a low-rank matrix $\Delta \mat{S}$. In line 5 of Algorithm~\ref{alg:eigen_update}, the only difference between the partial QR decomposition and the standard one, is that since $\tilde{\mat{U}}_t$ is already orthogonal, we only need to perform the Gram-Schmidt procedure starting from the first column of $\mat{P}$.
\begin{algorithm}[!htb] \caption{Eigen update of $\mat{S}_{t+1}$}\label{alg:eigen_update}
\KwIn{(1)eigen pair of $\mat{S}_t$: $\mat{U}_t$, $\mat{\Lambda}_{t}$\;
(2)feature matrices of new papers in each domain: $\matsup{x_{t+1}}{i},i=1,\ldots, n_d$\;
(3)adjacency matrix of domain relation graph $\mat{A}$ \;
(4)balance parameters $\theta, \lambda$ } \KwOut{eigen pair of $\mat{S}_{t+1}$: $\mat{U}_{t+1}$, $\mat{\Lambda}_{t+1}$} \BlankLine Obtain $\tilde{\mat{U}}_t$ by inserting into $\mat{U}_t$ rows of 0 at the right positions \; Compute $\matsup{k_{t+1}}{i}$, $\matsup{h_{t+1}}{i}$, $\matsup{k_{t+1}}{i_*j}$, $\matsup{k_{t+1}}{ij_*}$, $\matsup{h_{t+1}}{i_*j_*}$ for $i=1,\ldots,n_d, j=1,\ldots, n_d$ \; Construct sparse matrix $\Delta \mat{S}$ \; Perform eigen decomposition of $\Delta \mat{S}$: $\Delta \mat{S} = \mat{P\Sigma P'}$\; Perform partial QR decomposition of $[\tilde{\mat{U}}_t, \mat{P}]$:$[\tilde{\mat{U}}_t, \Delta\mat{Q}]\mat{R} \leftarrow \mbox{QR}(\tilde{\mat{U}}_t,\mat{P})$\; Set $\mat{Z} = \mat{R}[\mat{\Lambda}_t ~ \mat{0}; \mat{0} ~ \mat{\Sigma}]\mat{R}'$\; Perform full eigen decomposition of $\mat{Z}$: $\mat{Z} = \mat{V} \mat{L}\mat{V}'$\; Set $\mat{U}_{t+1} = [\tilde{\mat{U}}_{t}, \Delta \mat{Q}]\mat{V}$ and $\mat{\Lambda}_{t+1} = \mat{L}$\; {\bf Return}: $\mat{U}_{t+1}$, $\mat{\Lambda}_{t+1}$. \end{algorithm}
Building upon Algorithm~\ref{alg:eigen_update}, we have the fast \textsf{iBall} ~algorithm (Algorithm~\ref{alg:approx_kernel_joint}) for scaling up the non-linear solution with dynamic model update.
\begin{algorithm}[!htb] \caption{\textsf{iBall} ~--scale-up with dynamic update}\label{alg:approx_kernel_joint}
\KwIn{(1)eigen pair of $\mat{S}_t$: $\mat{U}_t$, $\mat{\Lambda}_{t}$\;
(2)feature matrices of new papers in each domain: $\matsup{x_{t+1}}{i},i=1,\ldots, n_d$\;
(3)citation count vectors of new papers in each domain: $\matsup{y_{t+1}}{i},i=1,\ldots, n_d$\;
(4)adjacency matrix of domain relation graph $\mat{A}$ \;
(5)balance parameters $\theta, \lambda$ } \KwOut{(1) updated model parameters $\mat{w}_{t+1}$, (2) eigen pair of $\mat{S}_{t+1}$: $\mat{U}_{t+1}$, $\mat{\Lambda}_{t+1}$} \BlankLine Update the eigen-decomposition of $\mat{S}_{t+1}$ using Algorithm~\ref{alg:eigen_update} as: $\mat{S}_{t+1} \approx \mat{U}_{t+1} \mat{\Lambda}_{t+1}\mat{U}'_{t+1}$\; Compute the new model parameters: $\mat{w}_{t+1} = \mat{U}_{t+1}\mat{\Lambda}_{t+1}^{-1} \mat{U}'_{t+1}\mat{Y}_{t+1}$\; {\bf Return}: $\mat{w}_{t+1}$, $\mat{U}_{t+1}$ and $\mat{\Lambda}_{t+1}$. \end{algorithm}
\subsection{iBall -- Proofs and Analysis}\label{subsec:proof} In this subsection, we will provide some analysis regarding the optimality, the approximation quality as well as the computational complexity of our proposed algorithms.
\hh{(1) merge lemma 1 and 2; and only give the proof for linear case; (2) hide the proof for eigen-update; and (3) hide the proof for complexity} {\bf A - Correctness of the closed-form solutions of the \textsf{iBall} ~linear and non-linear formulations:} In Lemma~\ref{lm:closed_form_solution}, we prove that the closed-form solution given in Eq.~(\ref{eq:linear_solution}) with $\mat{S}$ computed by Eq.~(\ref{eq:linear_S}) is the fixed-point solution to the linear formulation in Eq.~(\ref{eq:joint_linear}) and the closed-form solution given in Eq.~(\ref{eq:linear_solution}) with $\mat{S}$ computed by Eq.~(\ref{eq:kernel_joint_S}) is the fixed-point solution to the non-linear formulation in Eq.~(\ref{eq:joint_kernel}).
\begin{lemma}\label{lm:closed_form_solution}{(Correctness of closed-form solution of the \textsf{iBall} ~linear and non-linear formulations.)} For the closed-form solution given in Eq.~(\ref{eq:linear_solution}), if $\mat{S}$ is computed by Eq.~(\ref{eq:linear_S}), it is the fixed-point solution to the objective function in Eq.~(\ref{eq:joint_linear}); and if $\mat{S}$ is computed by Eq.~(\ref{eq:kernel_joint_S}), it is the fixed-point solution to the objective function in Eq.~(\ref{eq:joint_kernel}). \end{lemma} \begin{proof} Let's take the partial derivative of the objective function (denoted by $J$) in Eq.~(\ref{eq:joint_linear}) w.r.t. $ \matsup{w}{i}$, we get \begin{equation} \begin{array}{r l} \frac{\partial J}{\partial \matsup{w}{i}} &= 2 \matsup{X}{i}'\matsup{X}{i}\matsup{w}{i} - 2 \matsup{X}{i}'\mat{Y^{(i)}} \\
&+ \sum_{j=1}^{n_d} 2\theta \mat{A}_{ij}(\matsup{w}{i} - \matsup{w}{j}) + 2\lambda \matsup{w}{i}\\ \end{array} \end{equation} Now, the derivative of $J$ w.r.t. all the parameters $\mat{w}$ can be computed as:
\begin{equation} \begin{array}{l l} \frac{\partial J}{\partial \mat{w}} &= \begin{bmatrix}
\frac{\partial J}{\partial \matsup{w}{1}}\\
\ldots\\
\frac{\partial J}{\partial \matsup{w}{n_d}}\\
\end{bmatrix}\\
&= \begin{bmatrix}
2 \matsup{X}{1}'\matsup{X}{1}\matsup{w}{1} - 2 \matsup{X}{1}'\mat{Y^{(1)}}\\
+ \sum_{j=1}^{n_d} 2\theta \mat{A}_{1j}(\matsup{w}{1} - \matsup{w}{j}) + 2\lambda \matsup{w}{1}\\
\ldots\\
2 \matsup{X}{n_d}'\matsup{X}{n_d}\matsup{w}{n_d} - 2 \matsup{X}{n_d}'\mat{Y^{(n_d)}}\\
+ \sum_{j=1}^{n_d} 2\theta \mat{A}_{n_d j}(\matsup{w}{n_d} - \matsup{w}{j}) + 2\lambda \matsup{w}{n_d}\\
\end{bmatrix}\\ \end{array} \end{equation}
Setting the above derivative to 0 and with some rearrangement, we get \begin{equation} \mat{S w} = \mat{Y} \end{equation} Therefore, $\mat{w} = \mat{S}^{-1} \mat{Y}$.
A similar procedure can be applied to obtain the closed-form solution for the non-linear formulation. We omit the derivation for brevity. \end{proof}
\hide{
{\bf Correctness of the closed-form solution of the \textsf{iBall} non-linear formulation:} In Lemma~\ref{lm:kernel_solution}, we prove that the closed-form solution given in Eq.~\ref{eq:w_joint_kernel} is the optimal solution to the non-linear formulation in Eq.~\ref{eq:joint_kernel}.
\begin{lemma}\label{lm:kernel_solution}{(Correctness of the closed-form solution of the \textsf{iBall} non-linear formulation.)} Eq.~\ref{eq:w_joint_kernel} is the optimal solution to the objective function in Eq.~\ref{eq:joint_kernel}. \end{lemma} \begin{proof} Let's take the partial derivative of the objective function (denoted by $J$) in Eq.~(\ref{eq:joint_kernel}) w.r.t. $\matsup{w}{i}$, we get \begin{equation} \begin{array}{r l} \frac{\partial J}{\partial \matsup{w}{i}} &= 2\matsup{K}{i}'\matsup{K}{i}\matsup{w}{i} - 2\matsup{K}{i}'\matsup{Y}{i}\\
&+ \sum\limits_{i=1}^{n_d} 2\theta\mat{A}_{ij}(\matsup{K}{i}'\matsup{K}{i}\matsup{w}{i} -\matsup{K}{i}'\matsup{K}{ij}\matsup{w}{j} ) + 2\lambda \matsup{K}{i}\matsup{w}{i}\\ \end{array} \end{equation}
Now, the derivative of $J$ w.r.t. all the parameters $\mat{w}$ can be computed as:
\begin{equation} \begin{array}{l } \frac{\partial J}{\partial \mat{w}} = \begin{bmatrix}
\frac{\partial J}{\partial \matsup{w}{1}}\\
\ldots\\
\frac{\partial J}{\partial \matsup{w}{n_d}}\\
\end{bmatrix}\\
= {\tiny\begin{bmatrix}
2\matsup{K}{1}'\matsup{K}{1}\matsup{w}{1} - 2\matsup{K}{1}'\matsup{Y}{1}\\
+ \sum\limits_{i=1}^{n_d} 2\theta\mat{A}_{1j}(\matsup{K}{1}'\matsup{K}{1}\matsup{w}{1} -\matsup{K}{1}'\matsup{K}{1j}\matsup{w}{j} ) + 2\lambda \matsup{K}{1}\matsup{w}{1}\\
\ldots\\
2\matsup{K}{n_d}'\matsup{K}{n_d}\matsup{w}{n_d} - 2\matsup{K}{n_d}'\matsup{Y}{n_d}\\
+ \sum\limits_{i=1}^{n_d} 2\theta\mat{A}_{n_dj}(\matsup{K}{n_d}'\matsup{K}{n_d}\matsup{w}{n_d} -\matsup{K}{n_d}'\matsup{K}{n_dj}\matsup{w}{j} ) + 2\lambda \matsup{K}{n_d}\matsup{w}{n_d}\\
\end{bmatrix}}\\ \end{array} \end{equation} Set the above derivative to 0 and with some rearrangement, we get \begin{equation} \mat{Sw} = \mat{Y} \end{equation} Therefore, $\mat{w} = \mat{S}^{-1} \mat{Y}$. \end{proof} }
{\bf B - Correctness of the eigen update of $\mat{S}_{t+1}$:} The critical part of Algorithm~\ref{alg:approx_kernel_joint} is the subroutine Algorithm~\ref{alg:eigen_update} for updating the eigen-decomposition of $\mat{S}_{t+1}$. According to Lemma~\ref{lm:eigen_update}, the only place that approximation error occurs is the initial eigen-decomposition of $\mat{S}_0$. The eigen updating procedure won't introduce additional error.
\begin{lemma}{(Correctness of Algorithm~\ref{alg:eigen_update}.)}\label{lm:eigen_update} If $\mat{S}_t = \mat{U}_t\mat{\Lambda}_t\mat{U}'_t$ holds, Algorithm~\ref{alg:eigen_update} gives the exact eigen-decomposition of $\mat{S}_{t+1}$. \end{lemma} \begin{proof} Omitted for brevity. See~\cite{conf/sdm/LiTXF15} for details. \hide{ If $\mat{S}_t = \mat{U}_t\mat{\Lambda}_t\mat{U}'_t$ holds, then $\tilde{\mat{S}}_t = \tilde{\mat{U}}_t \mat{\Lambda}_t \mat{U0}'_t$ also holds. Since $\Delta \mat{S}$ is a symmetric matrix, we can again write its eigen-decomposition as follows: \begin{equation} \Delta \mat{S} = \mat{P\Sigma P'} \end{equation} \noindent where $\mat{P}$ and $\mat{\Sigma}$ are the eigen pair of $\Delta \mat{S}$. After the update, we can write $\mat{S}_{t+1}$ as the sum of $\tilde{\mat{S}}_t$ and $\Delta \mat{S}$ as follows: \begin{equation} \begin{array}{rl} \mat{S}_{t+1} &= \tilde{\mat{S}}_t + \Delta \mat{S} \\
&= \tilde{\mat{U}}_t \mat{\Lambda}_t \mat{U0}'_t + \mat{P\Sigma P'}\\
&= \begin{bmatrix}
\tilde{\mat{U}}_t & \mat{P}\\
\end{bmatrix}
\begin{bmatrix}
\mat{\Lambda}_t & \mat{0}\\
\mat{0} & \mat{\Sigma}\\
\end{bmatrix}
\begin{bmatrix}
\tilde{\mat{U}}_t & \mat{P}\\
\end{bmatrix}'\\ \end{array} \end{equation} Denote $[\tilde{\mat{U}}_t~\mat{P}]$ by $\tilde{\mat{U}}$ and perform a decomposition on $\tilde{\mat{U}}$ similar to QR decomposition and get the following: \begin{equation} \tilde{\mat{U}} = \begin{bmatrix}
\tilde{\mat{U}}_t & \Delta \mat{Q}\\
\end{bmatrix}
\begin{bmatrix}
\mat{I} & \mat{R}_1\\
\mat{0} & \mat{R}_2\\
\end{bmatrix} \end{equation} where $[\tilde{\mat{U}}_t ~ \Delta \mat{Q}]$ is orthogonal and $[\mat{I} ~ \mat{R}_1; \mat{0}~\mat{R}_2]$ is an upper triangle matrix. The difference between this partial QR decomposition and standard QR decomposition is that since $\tilde{\mat{U}}_t$ is already orthogonal as it is the eigenvectors of $\tilde{\mat{S}}_t$, we only need to perform the Gram-Schmidt procedure starting from the first column of $\mat{P}$. Now, let's further expand $\mat{S}_{t+1}$ as follows: \begin{equation} \begin{array}{rl} \mat{S}_{t+1} =& \tilde{\mat{U}}\begin{bmatrix}
\mat{\Lambda}_t & \mat{0}\\
\mat{0} & \mat{\Sigma}\\
\end{bmatrix} \tilde{\mat{U}}'\\
=& \begin{bmatrix}
\tilde{\mat{U}}_t & \Delta \mat{Q}\\
\end{bmatrix}
\begin{bmatrix}
\mat{I} & \mat{R}_1\\
\mat{0} & \mat{R}_2\\
\end{bmatrix}
\begin{bmatrix}
\mat{\Lambda}_t & \mat{0}\\
\mat{0} & \mat{\Sigma}\\
\end{bmatrix}
\begin{bmatrix}
\mat{I} & \mat{R}_1\\
\mat{0} & \mat{R}_2\\
\end{bmatrix}'
\begin{bmatrix}
\tilde{\mat{U}}_t & \Delta \mat{Q}\\
\end{bmatrix}' \end{array} \end{equation}
Denote $\begin{bmatrix}
\mat{I} & \mat{R}_1\\
\mat{0} & \mat{R}_2\\
\end{bmatrix}
\begin{bmatrix}
\mat{\Lambda}_t & \mat{0}\\
\mat{0} & \mat{\Sigma}\\
\end{bmatrix}
\begin{bmatrix}
\mat{I} & \mat{R}_1\\
\mat{0} & \mat{R}_2\\
\end{bmatrix}'$ by $\mat{Z}$ and perform a full eigen decomposition of $\mat{Z}$ as $\mat{Z} = \mat{VLV'}$, where $\mat{V}$ and $\mat{L}$ are its eigen pairs. Finally, $\mat{S}_{t+1}$ can be written as the following: \begin{equation} \begin{array}{rl} \mat{S}_{t+1} &= \begin{bmatrix}
\tilde{\mat{U}}_t & \Delta \mat{Q}\\
\end{bmatrix} \mat{VLV'} \begin{bmatrix}
\tilde{\mat{U}}_t & \Delta \mat{Q}\\
\end{bmatrix}'\\
&\myeq \mat{U}_{t+1} \mat{\Lambda}_{t+1} \mat{U}'_{t+1}\\ \end{array} \end{equation} \noindent $\mat{U}_{t+1} = [\tilde{\mat{U}}_t ~ \Delta \mat{Q}] \mat{V}$ is the updated eigenvectors of $\mat{S}_{t+1}$ and $\mat{\Lambda}_{t+1}=\mat{L}$ is the updated eigenvalues of $\mat{S}_{t+1}$.
Summarizing the above derivations, we have the eigen update algorithm in Algorithm~\ref{alg:eigen_update}. } \end{proof}
{\bf C - Approximation Quality:} We analyze the approximation quality of Algorithm~\ref{alg:approx_kernel_joint} to see how much the learned model parameters deviate from the parameters learned using the exact \textsf{iBall} ~non-linear formulation. The result is summarized in Theorem~\ref{theorem:error_bound}.
\begin{theorem}{(Error bound of Algorithm~\ref{alg:approx_kernel_joint}.)}\label{theorem:error_bound} In Algorithm~\ref{alg:approx_kernel_joint}, if $\frac{\sum_{i\notin \mathcal{H}} \lambda_t^{(i)}}{\sum_i \lambda_{t+1} ^{(i)}} <1$, the error of the learned model parameters is bounded by: \begin{equation}
\|\mat{w}_{t+1} - \hat{\mat{w}}_{t+1}\|_2 \leq \frac{\sum_{i\notin \mathcal{H}} \lambda_t^{(i)}}{(\sum_i\lambda_{t+1}^{(i)})^2 (1-\delta)} \|\mat{Y}_{t+1}\|_2 \end{equation} \noindent where $\mat{w}_{t+1} $ is the model parameter learned by the exact \textsf{iBall} ~non-linear formulation at time $t+1$, $\hat{\mat{w}}_{t+1}$ is the updated model parameter output by Algorithm~\ref{alg:approx_kernel_joint} from time $t$ to $t+1$,
$\lambda_{t}^{(i)}$ and $\lambda_{t+1}^{(i)}$ are the $i$-th largest eigenvalues of $\mat{S}_{t}$ and $\mat{S}_{t+1}$ respectively, $\delta = \|(\tilde{\mat{U}}_t\mat{\Lambda}_t\tilde{\mat{U}}'_t + \Delta \mat{S})^{-1} (\tilde{\mat{S}}_{t} - \tilde{\mat{U}}_t\mat{\Lambda}_t\tilde{\mat{U}}'_t)\|_F$, $\mathcal{H}$ is the set of integers between 1 and $r$, i.e., $\mathcal{H}=\{a|a\in [1,r]\}$. \end{theorem} \begin{proof} Suppose we know the exact $\mat{S}_t$ at time $t$ and its top-$r$ approximation: $\hat{\mat{S}}_t = \mat{U}_t\mat{\Lambda}_t\mat{U}'_t$. After one time step, we can construct $\Delta \mat{S}$ and the exact $\mat{S}_{t+1}$ can be computed as $\mat{S}_{t+1} = \tilde{\mat{S}}_{t} + \Delta \mat{S}$. The model parameters learned by the exact non-linear model are: \begin{equation} \begin{array}{rl} \mat{w}_{t+1} &= \mat{S}_{t+1}^{-1} \mat{Y}_{t+1}\\
&= (\tilde{\mat{S}}_{t} + \Delta \mat{S})^{-1} \mat{Y}_{t+1} \end{array} \end{equation}
If we allow approximation as in Algorithm~\ref{alg:approx_kernel_joint}, the approximated model parameter is: \begin{equation} \begin{array}{rl} \hat{\mat{w}}_{t+1} &= \hat{\mat{S}}_{t+1}^{-1} \mat{Y}_{t+1}\\
&= (\tilde{\mat{U}}_t\mat{\Lambda}_t\tilde{\mat{U}}'_t + \Delta \mat{S})^{-1} \mat{Y}_{t+1} \end{array} \end{equation}
Denoting $\tilde{\mat{S}}_{t} + \Delta \mat{S}$ by $\mat{B}$ and $\tilde{\mat{U}}_t\mat{\Lambda}_t\tilde{\mat{U}}'_t + \Delta \mat{S}$ by $\mat{C}$, we have the following: \begin{equation} \begin{array}{rl}
\| \mat{B} - \mat{C} \|_F &= \|\tilde{\mat{S}}_{t} - \tilde{\mat{U}}_t\mat{\Lambda}_t\tilde{\mat{U}}'_t\|_F\\
&\leq \sum_{i\notin \mathcal{H}} \lambda_t^{(i)} \end{array} \end{equation} \noindent where the last inequality is due to the following fact: \begin{equation} \begin{array}{rl}
\| \sum_i a_i \mat{u}_i \mat{u}'_i\|_F &= \sqrt{\mbox{tr}(\sum_i a_i^2\mat{u}_i \mat{u}'_i )}\\
&= \sqrt{\sum_i a_i^2 \mbox{tr} (\mat{u}_i \mat{u}'_i)}\\
&= \sqrt{\sum_i a_i^2}\\
&\leq \sum_i |a_i| \end{array} \end{equation}
Denoting $\|\mat{C}^{-1} (\mat{B} - \mat{C})\|_F$ by $\delta$, we know that \begin{equation} \begin{array}{rl}
\delta &\leq \|\mat{C}^{-1}\|_F\|\mat{B} - \mat{C}\|_F\\
&\leq \frac{\sum_{i\notin \mathcal{H}} \lambda_t^{(i)}}{\sum_i \lambda_{t+1} ^{(i)}} <1 \end{array} \end{equation}
From matrix perturbation theory~\cite{Golub:1996:MC:248979}, we will reach the following: \begin{equation} \begin{array}{rl}
\|\mat{w}_{t+1} - \hat{\mat{w}}_{t+1}\|_2 &= \|\mat{B}^{-1} \mat{Y}_{t+1} - \mat{C}^{-1} \mat{Y}_{t+1}\|_2\\
&\leq \|\mat{B}^{-1} - \mat{C}^{-1}\|_F \|\mat{Y}_{t+1}\|_2\\
&\leq \frac{\|\mat{C}^{-1}\|_F^2 \|\mat{B} - \mat{C}\|_F}{1-\delta} \|\mat{Y}_{t+1}\|_2\\
&\leq \frac{\sum_{i\notin \mathcal{H}} \lambda_t^{(i)}}{(\sum_i\lambda_{t+1}^{(i)})^2 (1-\delta)} \|\mat{Y}_{t+1}\|_2\\ \end{array} \end{equation} \end{proof}
{\bf D - Complexities:} Finally, we analyze the complexities of Algorithm~\ref{alg:eigen_update} and Algorithm~\ref{alg:approx_kernel_joint}. In terms of time complexity, the savings are two-folds: (1) we only need to compute the kernel matrices involving new training samples; (2) we avoid the time consuming large matrix inverse operation. In terms of space complexity, we don't need to maintain the huge $\mat{S}_t$ matrix, but instead store its top-$r$ eigen pairs which is only of $O(nr)$ space.
\begin{theorem}{(Complexities of Algorithm~\ref{alg:eigen_update} and Algorithm~\ref{alg:approx_kernel_joint}.)}\label{theorem:complexities} Algorithm~\ref{alg:eigen_update} takes $O((n+m)(r^2 + r'^2))$ time and $O((n+m)(r+r'))$ space. Algorithm~\ref{alg:approx_kernel_joint} also takes $O((n+m)(r^2 + r'^2))$ time and $O((n+m)(r+r'))$ space, where $m$ is total number of new training samples. \end{theorem} \begin{proof} Omitted for brevity. \hide{ {\it Time complexity of Algorithm~\ref{alg:eigen_update}:} Step 1-3 take $O(nm)$ time, where $n$ is total number of training samples from previous step, and $m$ is the total number of new training samples. Eigen decomposition of $\Delta \mat{S}$ in step 4 takes $O(nmr')$, where $r'$ is the rank of $\Delta \mat{S}$, since $\Delta \mat{S}$ is sparse matrix with $O(nm)$ non-zero entries. QR decomposition in step 5 takes $O((n+m)r'^2)$ since we only need to start from the columns in $\mat{P}$. Step 6 and 7 both take $O((r+r')^3)$ time. The last line takes at most $O((n+m)(r+r')^2)$. The overall time complexity is $O((n+m)(r^2 + r'^2))$.
{\it Space complexity of Algorithm~\ref{alg:eigen_update}:} The storage of eigen pairs requires $O((n+m)r)$ space. Step 1-3 take $O(mn)$ space. Eigen decomposition of $\Delta \mat{S}$ in step 4 takes $O((n+m)r')$ space. QR decomposition in step 5 needs $O((n+m)(r+r'))$ space. Step 6 and 7 take $O((r+r')^2)$ space and line 8 needs $O((n+m)(r+r'))$. The overall space complexity is $O((n+m)(r+r'))$.
{\it Time complexity of Algorithm~\ref{alg:approx_kernel_joint}:} Update eigen decomposition of $\mat{S}_{t+1}$ in step 1 takes $O((n+m)(r^2 + r'^2))$ time and computing the new learning parameter in step 2 takes $O(n+m)r$ time. The overall time complexity is $O((n+m)(r^2 + r'^2))$.
{\it Space complexity of Algorithm~\ref{alg:approx_kernel_joint}:} Update eigen decomposition of $\mat{S}_{t+1}$ in step 1 takes $O((n+m)(r+r'))$ and computing the new learing parameter in step 2 takes $O((n+m)r)$ space. The overall space complexity is $O((n+m)(r+r'))$. } \end{proof}
\section{Experiments} \label{sec:exp}
In this section, we design and conduct experiments mainly to inspect the following aspects: \begin{itemize} \item {\it Effectiveness:} How accurate are the proposed algorithms for predicting scholarly entities' long-term impact?
\item {\it Efficiency:} How fast are the proposed algorithms? \end{itemize}
\subsection{Experiment Setup} We use the real-world citation network dataset AMiner\footnote{http://arnetminer.org/billboard/citation} to evaluate our proposed algorithms. The statistics and empirical observations are described in Section~\ref{sec:prob}. Our primary task is to predict a paper's citations after 10 years given its citation history in the first three years. Thus, we only keep papers published between year 1936 and 2000 to make sure they are at least 10 years old. This leaves us 508,773 papers. Given that the citation distribution is skewed (see Figure~\ref{fig:citation_dist}\hh{fill in}), the 10-year citation counts are normalized to the range of $[0,7]$. Our algorithm is also able to predict citation counts for other scholarly entities including researchers and venues. We keep authors whose research career (when they publish the first paper) begin between year 1960 and 2000 and venues that are founded before year 2002. This leaves us 315,340 authors and 3,783 venues.
For each scholarly entity, we represent it as a three dimensional feature vector, where the $i$-th dimension is the number of citations the entity receives in the $i$-th year after its life cycle begins (e.g., paper gets published, researchers publish the first paper ). We build a $k$-nn graph ($k=5$) among different scholarly entities; use METIS~\cite{Karypis:1998:FHQ:305219.305248} to partition the graph into balanced clusters; and treat each cluster as a domain. We set the domain number ($n_d$) to be 10 for both papers and researchers; and 5 for venues. The Gaussian kernel matrix of the cluster centroids is used to construct the domain-domain adjacency matrix $\mat{A}$.
To simulate the dynamic scenario where training samples come in stream, we start with a small initial training set and at each time step add new training samples to it. The training samples in each domain are sorted by starting year (e.g., publication year). In the experiment, for papers, we start with 0.1\% initial training data and at each update add another 0.1\% training samples. The last 10\% samples are reserved as test samples, i.e., we always use information from older publications for the prediction of the latest ones. For authors, we start with 0.2\% initial training data and at each update add another 0.2\% training data and use the last 10\% for testing. For venues, we start with 20\%, add 10\% at each update and use last 10\% for testing.
The root mean squared error (RMSE) between the actual citation and the predicted one is adopted for accuracy evaluation. All the experiments were performed on a Windows machine with four 3.5GHz Intel Cores and 256GB RAM.
{\it Repeatability of Experimental Results:} The AMiner citation dataset is publicly available. We will release the code of the proposed algorithms through the authors' website. For all the results reported in this section, we set $\theta = \lambda = 0.01$ in our joint predictive model. A Gaussian kernel with $\sigma = 5.1$ is used in the non-linear formulations.
\subsection{Effectiveness Results}
We perform the effectiveness comparisons of the following nine methods: \begin{itemize}
\setlength\itemsep{0.5pt} \item [1] {\it Predict 0:} directly predict 0 for test samples since majority of the papers have 0 citations. \item [2] {\it Sum of the first 3 years:} assume the total number of citations doesn't change after three years. \item [3] {\it Linear-combine:} combine training samples of all the domains for training using linear regression model. \item [4] {\it Linear-separate:} train a linear regression model for each domain separately. \item [5] {\it \textsf{iBall}-linear:} jointly learn the linear regression models as in our linear formulation. \item [6] {\it Kernel-combine:} combine training samples of all the domains for training using kernel ridge regression model~\cite{Saunders:1998:RRL:645527.657464}. \item [7] {\it Kernel-separate:} train a kernel ridge regression model for each domain separately. \item [8] {\it \textsf{iBall}-kernel:} jointly learn the kernel regression models as in our non-linear formulation. \item [9] {\it \textsf{iBall}-fast :} proposed algorithm for speeding up the joint non-linear model. \end{itemize}
\noindent {\it A - Overall paper citation prediction performance.} The RMSE result of different methods for test samples from all the domains is shown in Figure~\ref{fig:rmse_alldomain}. We have the following observations: (1) the non-linear methods (\textsf{iBall}-fast, \textsf{iBall}-kernel, Kernel-separate, Kernel-combine) outperform the linear methods (\textsf{iBall}-linear, Linear-separate, Linear-combine) and the straightforward `Sum of first 3 years' is much better than the linear methods, which reflects the complex non-linear relationship between the features and the impact. (2) The performance of \textsf{iBall}-fast is very close to \textsf{iBall}-kernel and sometimes even better, which confirms the good approximation quality of the model update and the possible de-noising effect offered by the low-rank approximation. (3) The \textsf{iBall}~ family of joint models is better than their separate versions (Kernel-separate, Linear-separate). To evaluate the statistical significance, we perform a t-test using 1.4\% of the training samples and show the $p$-values in Table~{\ref{tab:ttest}}. From the result, we see that the improvement of our method is significant. To investigate parameter sensitivity, we perform parametric studies with three parameters in \mbox{\textsf{iBall}-fast}, namely, $\theta$, $\lambda$ and $r$. Figure~{\ref{fig:sen_study}} shows that the proposed method is stable in a large range of the parameter space.
\begin{table*}[!t] \caption{$p$-value of statistical significance} \centering
\begin{tabular}{|p{45pt}|p{35pt}|p{35pt}|p{35pt}|p{35pt}|p{40pt}|p{35pt}|p{35pt}|p{35pt}|} \hline
& Predict 0 & Linear-combine & Linear-separate & \textsf{iBall}-linear & Sum of first 3 years & Kernel-combine & Kernel-separate & \textsf{iBall}-fast \\
\hline \textsf{iBall}-kernel & 0 & 5.53e-16 & 6.12e-17 & 1.16e-13 & 1.56e-219 & 1.60e-72 & 8.22e-30 & 3.39e-14\\
\hline \end{tabular}
\label{tab:ttest} \end{table*}
\begin{figure*}
\caption{Overall paper citation prediction performance comparisons. Lower is better. }
\label{fig:rmse_alldomain}
\caption{Author citation prediction performance comparison. Lower is better. }
\label{fig:author_rmse}
\caption{Venue citation prediction performance comparison. Lower is better. }
\label{fig:venue_rmse}
\end{figure*}
\begin{figure*}
\caption{RMSE vs. $\theta$}
\caption{RMSE vs. $\lambda$}
\caption{RMSE vs. $r$}
\caption{Sensitivity study on \textsf{iBall}-fast: study the effect of the parameters $\theta$, $\lambda$ and $r$ in terms of RMSE.}
\label{fig:sen_study}
\end{figure*}
\noindent {\it B - Domain-by-domain paper citation prediction performance.} In Figure~\ref{fig:domain_rmse} we show the RMSE comparison results for four domains with different total training sizes. \textsf{iBall}-kernel and its fast version \textsf{iBall}-fast consistently outperform other methods in all the domains. In the third domain, some linear methods (Linear-separate and Linear-combine) perform even worse than the baseline (`Predict 0').
\begin{figure*}
\caption{Prediction performance comparison in the first domain. }
\caption{Prediction performance comparison in the second domain. }
\caption{Prediction performance comparison in the third domain. }
\caption{Prediction performance comparison in the fourth domain. }
\caption{Paper citation prediction performance comparison in four domains.}
\label{fig:domain_rmse}
\end{figure*}
\noindent {\it C - Prediction error analysis.} We visualize the actual citation vs. the predicted citation using \textsf{iBall}~as a heat map in Figure~\ref{fig:heatmap}. The $(x,y)$ square shows, among all the test samples with actual citation $y$, the percentage that have predicted citation $x$. We observe a very bright region near the $x=y$ diagonal. The prediction error mainly occurs in a bright strip at $x=1, y\geq 1$. This is probably due to the delayed high-impact of some scientific work, as suggested by the blue and green lines in Figure~\ref{fig:citation_pattern}, i.e., some papers only pick up attention many years after they were published.
\begin{figure}
\caption{Prediction error analysis: actual citation vs. predicted citation. Best viewed in color.}
\label{fig:heatmap}
\end{figure}
\noindent{\it D - Author and venue citation prediction performance.} We also show the RMSE comparison results for the impact prediction of authors and venues in Figures~\ref{fig:author_rmse} and \ref{fig:venue_rmse}, respectively. Similar observations to those for paper impact prediction can be made, except that for the venue citation prediction, \textsf{iBall}-linear can achieve similar performance to \textsf{iBall}-fast and \textsf{iBall}-kernel. This is probably due to the effect that venue citation (which involves the aggregation of the citations of all of its authors and papers) prediction is at a much coarser granularity, and thus a relatively simple linear model is sufficient to characterize the correlation between features and outputs (citation counts).
\subsection{Efficiency Results}
\noindent {\it A - Running time comparison:} We compare the running time of different methods with different training sizes and show the result in Figure~\ref{fig:time_comparison} with time in log scale. All the linear methods are very fast ($<0.01s$) as the feature dimensionality is only 3. Our \textsf{iBall}-fast outperforms all other non-linear methods and scales linearly.
\begin{figure}
\caption{Comparison of running time of different methods. The time axis is of log scale. }
\label{fig:time_comparison}
\caption{Quality vs. speed with 88,905 training samples. }
\label{fig:quality_plot}
\end{figure}
\noindent {\it B - Quality vs. speed:} Finally, we evaluate how the proposed methods balance between the prediction quality and speed. In Figure~\ref{fig:quality_plot}, we show the RMSE vs. running time of different methods with 88,905 total training samples. For \textsf{iBall}-fast, we show its results using different rank $r$ for the low-rank approximation. Clearly, \textsf{iBall}-fast achieves the best trade-off between quality and speed as its results all lie in the bottom left corner.
\section{Related Work} \label{sec:rel} In this section, we review the related work.
{\bf Impact/popularity prediction:} As a pilot study, Yan et al.~\cite{DBLP:conf/cikm/YanTLSL11, DBLP:conf/jcdl/YanHTZL12} identify effective features to address citation count prediction problem. Davletov et al.~\cite{DBLP:conf/cikm/DavletovAC14} address the same problem by first clustering papers according to their temporal change in citation counts over time and assigning a polynomial to each cluster for regression. In light of the difficulty posed by power law distribution of citations, Dong et al.~\cite{conf/wsdm/dong15} instead consider whether a paper can increase the primary author's {\it h}-index. Yu et al.~\cite{DBLP:conf/sdm/YuGZH12} address predicting citation relations in heterogeneous bibliographical networks.
A close line of work is to predict the popularity of other online contents, e.g., posts, videos, TV series. Yao et al.~\cite{DBLP:conf/kdd/YaoTXL14} predict the long-term impact of questions/answers. Notice that in terms of methodology, the method in~\cite{DBLP:conf/kdd/YaoTXL14} can be conceptually viewed as a special case of our \textsf{iBall}\ model when there are only two domains and the instance-level correspondence across different domains (e.g., question-answers association) is known. Li et al.~\cite{DBLP:conf/cikm/LiMWLX13} conduct a study on popularity forecast of videos shared in social networks. They consider both the intrinsic attractiveness of a video and the influence from the underlying diffusion structure. Chang et al.~\cite{DBLP:conf/cikm/ChangZGCXT14} are the first to comprehensively study the prediction of the popularity of online serials with autoregressive models. As online serials have strong sequence dependence and release date dependence, they develop an autoregressive model to capture the dynamic behaviors of audiences. Though the focus of this paper is to propose a tailored method to predict the long-term citation counts, our method could be naturally applied to other related applications, e.g., popularity prediction.
{\bf Multi-task learning:} Our joint model is also related to multi-task learning as we jointly learn the models for each domain (task). Multi-task learning aims to improve the generalization performance of a learning task with the help of other related tasks. A key challenge in multi-task learning is how to exploit the relationship among different tasks to allow information to be shared across tasks. One way is by sharing parameters. In neural networks, hidden units are shared across tasks~\cite{caruana1997multitask}. It can also be induced by assuming that the parameters used by all tasks are close to each other by minimizing the Frobenius norms of their differences in methods based on convex optimization formulations~\cite{evgeniou2004regularized}. In Bayesian hierarchical models, parameter sharing can be imposed by assuming a common prior they share~\cite{yu2005learning}. A second way is assuming a common basis of the parameter space. A low-rank and sparse structure of the underlying predictive hypothesis has been applied to capture the task relatedness as well as outlier tasks~\cite{DBLP:conf/kdd/ChenLY10,Chen:2011:ILG:2020408.2020423,jalali2010dirty}. Our method is directly applicable when the correlation/similarity among different tasks is known and enjoys a closed-form solution. In terms of computation, we also provide an efficient way to track the joint predictive model in the dynamic setting.
{\bf Scholarly data mining:} Scholarly data can be viewed as a heterogeneous information network of papers, authors, venues and terms~\cite{sun2009ranking}. Mining of such scholarly data is often done from the following perspectives: (1) similarity search to find similar scholarly entities given a query entity or a set of query entities~\cite{DBLP:journals/pvldb/SunHYYW11,DBLP:conf/icdm/TongFP06,DBLP:conf/kdd/TongF06}; (2) literature recommendation to recommend related research papers on a topic~\cite{Liu:2014:MRP:2661829.2661965,chandrasekaran2008concept}; and (3) co-author collaboration prediction, to predict if two researchers will collaborate in the future~\cite{sun2011co,sun2012will}.
\section{Conclusions} \label{sec:con} In this paper, we propose \textsf{iBall} ~-- a family of algorithms for the prediction of long-term impact of scientific work given its citation history in the first few years. The proposed algorithms collectively address a number of key algorithmic challenges in impact prediction (i.e., feature design, non-linearity, domain heterogeneity and dynamics). The family is flexible and general in the sense that it can be generalized to both regression and classification models, in both linear and non-linear formulations; it is also scalable and adaptive to new training data.
\hide{ Our main contributions include: \begin{itemize} \item {\bf Algorithm:} A family of algorithms are proposed to address long-term impact prediction for scientific work. \item {\bf Proofs and analysis:} Proofs of correctness of closed-form solutions and eigen update are given; update quality and complexity analysis are provided. \item {\bf Empirical evaluations:} Extensive experimental evaluations validate the effectiveness and efficiency of the proposed algorithms. \end{itemize} }
\section{Acknowledgments} We would like to thank Dr. Jie Tang for providing the dataset. This material is supported by the National Science Foundation under Grant No. IIS1017415, by the Army Research Laboratory under Cooperative Agreement Number W911NF-09-2-0053, by National Institutes of Health under the grant number R01LM011986, Region II University Transportation Center under the project number 49997-33 25.
The content of the information in this document does not necessarily reflect the position or the policy of the Government, and no official endorsement should be inferred. The U.S. Government is authorized to reproduce and distribute reprints for Government purposes notwithstanding any copyright notation here on.
\balance \small
\end{document} |
\begin{document}
\title{A Fractional Analogue of Brooks' Theorem} \begin{abstract}
Let $\Delta(G)$ be the maximum degree of a graph $G$.
Brooks' theorem states that the only connected graphs with chromatic
number $\chi(G)=\Delta(G)+1$ are complete graphs and odd cycles. We
prove a fractional analogue of Brooks' theorem in this
paper. Namely, we classify all connected graphs $G$ such that the
fractional chromatic number $\chi_f(G)$ is at least $\Delta(G)$.
These graphs are complete graphs, odd cycles, $C^2_8$, $C_5\boxtimes K_2$,
and graphs whose clique number $\omega(G)$ equals the maximum degree
$\Delta(G)$. Among the two sporadic graphs, the graph $C^2_8$ is
the square graph of cycle $C_8$ while the other graph $C_5\boxtimes K_2$
is the strong product of $C_5$ and $K_2$. In fact, we prove a
stronger result; if a connected graph $G$ with $\Delta(G)\geq 4$ is
not one of the graphs listed above, then we have $\chi_f(G)\leq
\Delta(G)- \frac{2}{67}$. \end{abstract} \section{Introduction} The chromatic number of graphs with bounded degrees has been studied for many years. Brooks' theorem perhaps is one of the most fundamental results; it is included by many textbooks on graph theory. Given a simple connected graph $G$, let $\Delta(G)$ be the maximum degree, $\omega(G)$ be the clique number,
and $\chi(G)$ be the chromatic number. Brooks' theorem states that $\chi(G)\leq \Delta(G)$ unless $G$ is a complete graph or an odd cycle.
Reed \cite{reed1} proved that $\chi(G)\leq \Delta(G)-1$ if $\omega(G)\leq \Delta(G)-1$ and $\Delta(G)\geq \Delta_0$ for some large constant $\Delta_0$. This excellent result was proved by probabilistic methods, and $\Delta_0$ is at least hundreds. Before this result, Borodin and Kostochka \cite{bk} made the following conjecture.
{\bf Conjecture \cite{bk}:} Suppose that $G$ is a connected graph. If $\omega(G)\leq \Delta(G)-1$ and $\Delta(G)\geq 9$, then we have $$\chi(G)\leq \Delta(G)-1.$$
If the conjecture is true, then it is best possible since there is a $K_8$-free graph $G=C_5\boxtimes K_3$ (actually $K_7$-free, see Figure \ref{fig:1}) with $\Delta(G)=8$ and $\chi(G)=8$.
\begin{figure}
\caption{The graph $C_5 \boxtimes K_3$.}
\label{fig:1}
\end{figure}
Here we use the following notation of the strong product. Given two graphs $G$ and $H$, the {\it strong product} $G\boxtimes H$ is the graph with vertex set $V(G)\times V(H)$, and $(a,x)$ is connected to $(b,y)$ if one of the following holds \begin{itemize} \item $a=b$ and $xy\in E(H)$, \item $ab\in E(G)$ and $x=y$, \item $ab\in E(G)$ and $xy\in E(H)$. \end{itemize}
Reed's result \cite{reed1} settled Borodin and Kostochka's conjecture for sufficiently large $\Delta(G)$, but the cases with small $\Delta(G)$ are hard to cover using the probabilistic method.
In this paper we consider a fractional analogue of this problem. The fractional chromatic number $\chi_f(G)$ can be defined as follows. A $b$-{\it fold coloring} of $G$ assigns a set of $b$ colors to each vertex such that any two adjacent vertices receive disjoint sets of colors. We say a graph $G$ is $a$:$b$-{\it colorable} if there is a $b$-fold coloring of $G$ in which each color is drawn from a palette of $a$ colors. We refer to such a coloring as an $a$:$b$-coloring. The $b$-{\it fold coloring number}, denoted by $\chi_b(G)$, is the smallest integer $a$ such that $G$ has an $a$:$b$-coloring. Note that $\chi_1(G)=\chi(G)$. It was shown that $\chi_{a+b}(G) \leq \chi_a(G) + \chi_b(G) $. The {\it
fractional chromatic number} $\chi_f(G)$ is
$ \underset{b \rightarrow \infty} \lim \frac{\chi_b(G)}{b}.$
By the definition, we have $\chi_f(G)\leq \chi(G)$.
The fractional chromatic number can be viewed as a relaxation of the
chromatic number. Many problems involving the chromatic number can be asked again using the fractional chromatic number. The fractional analogue often has a simpler solution than the original problem. For example, the famous $\omega-\Delta-\chi$ conjecture of
Reed \cite{reed} states that for any simple graph $G$, we have $$\chi(G)\leq \left\lceil \frac{\omega(G)+\Delta(G)+1}{2}\right \rceil.$$
The fractional analogue of $\omega-\Delta-\chi$ conjecture was proved by Molloy and Reed \cite{mr}; they actually proved a stronger result with ceiling removed, i.e., \begin{equation} \label{eq:1} \chi_f(G)\leq \frac{\omega(G)+\Delta(G)+1}{2}. \end{equation}
In this paper, we classify all connected graphs $G$ with $\chi_f(G)\geq \Delta(G)$.
\begin{theorem}\label{main}
A connected graph $G$ satisfies $\chi_f(G)\geq \Delta(G)$ if and only if $G$ is one of the following \begin{enumerate} \item a complete graph, \item an odd cycle, \item a graph with $\omega(G)=\Delta(G)$, \item $C^2_8$, \item $C_5\boxtimes K_2$. \end{enumerate} \end{theorem}
For the complete graph $K_n$, we have $\chi_f(K_n)=n$ and $\Delta(K_n)=n-1$. For the odd cycle $C_{2k+1}$, we have $\chi_f(C_{2k+1})=2+\frac{1}{k}$ and $\Delta(C_{2k+1})=2$. If $G$ is neither a complete graph nor an odd cycle but contains a clique of size $\Delta(G)$, then we have \begin{equation} \label{less} \Delta(G)\leq \omega(G)\leq \chi_f(G)\leq \chi(G)\leq \Delta(G). \end{equation} The last inequality is from Brooks' theorem. The sequence of inequalities above implies $\chi_f(G)=\Delta(G)$.
If $G$ is a vertex-transitive graph, then we have \cite{su}
$$\chi_f(G)=\frac{|V(G)|}{\alpha(G)},$$ where $\alpha(G)$ is the independence number of $G$. Note that both graphs $C^2_8$ and $C_5\boxtimes K_2$ are vertex-transitive and have the independence number $2$. Thus we have $$\chi_f(C^2_8)=4=\Delta(C^2_8)\quad \mbox { and }\quad \chi_f(C_5\boxtimes K_2)=5= \Delta(C_5\boxtimes K_2).$$
\begin{figure}
\caption{The graphs $C_8^2$ and $C_5 \boxtimes K_2$.}
\end{figure}
Actually, Theorem \ref{main} is a corollary of the following stronger result.
\begin{theorem}\label{submain} Assume that a connected graph $G$ is neither $C^2_8$ nor $C_5\boxtimes K_2$. If $\Delta(G)\geq 4$ and $\omega(G)\leq \Delta(G)-1$, then we have $$\chi_f(G)\leq \Delta(G) -\frac{2}{67}.$$ \end{theorem}
\noindent {\bf Remark:} In the case $\Delta(G)=3$, Heckman and Thomas \cite{ht} conjectured that $\chi_f(G) \leq 14/5$ if $G$ is triangle-free. Hatami and Zhu \cite{hz} proved $\chi_f(G) \leq 3- \frac{3}{64}$ for any triangle-free graph $G$ with $\Delta(G) \leq 3$. The second and third authors showed an improved result $\chi_f(G) \leq 3 - \frac{3}{43}$ in the previous paper \cite{lup}. Thus we need only consider the cases $\Delta(G) \geq 4$.
For any connected graph $G$ with sufficiently large $\Delta(G)$ and $\omega(G) \leq \Delta(G)-1$, Reed's result \cite{reed1} $\chi(G)\leq \Delta(G)-1$ implies $\chi_f(G) \leq \Delta(G)-1$.
The method introduced in \cite{hz} and strengthened in \cite{lup}, has a strong influence on this paper. The readers are encouraged to read these two papers \cite{hz, lup}.
Let $f(k)=\inf_G\{\Delta(G)-\chi_f(G)\}$, where the infimum is taken over all connected graphs $G$ with $\Delta(G)=k$ and not one of the graphs listed in Theorem \ref{main}. Since $\chi_f(G)\geq \omega(G)$, by taking a graph with $\omega(G)=\Delta(G)-1$, we have $f(k)\leq 1$. Theorem \ref{submain} says $f(k)\geq \frac{2}{67}$ for any $k\geq 4$.
Reed's result \cite{reed1} implies $f(k)= 1$ for sufficiently large $k$. Heckman and Thomas \cite{ht} conjectured $f(3)=1/5$. It is an interesting problem to determine the value of $f(k)$ for small $k$. Here we conjecture $f(4)=f(5)=\frac{1}{3}$. If Borodin and Kostochka's conjecture is true, then $f(k)= 1$ for $k\geq 9$.
Theorem 2 is proved by induction on $k$. Because the proof is quite long, we split the proof into the following two lemmas. \begin{lemma} \label{f4} We have $f(4) \geq \frac{2}{67}$. \end{lemma} \begin{lemma}\label{increase} For each $k \geq 6$, we have $f(k)\geq \min \left\{f(k-1), \frac{1}{2}\right\}.$ We also have $f(5)\geq \min\left\{f(4), \frac{1}{3}\right\}$. \end{lemma} It is easy to see the combination of Lemma \ref{f4} and Lemma \ref{increase} implies Theorem \ref{submain}. The idea of reduction comes from the first author, who pointed out $f(k)\geq \min \left\{f(k-1), \frac{1}{2}\right\}$ for $k\geq 7$ based on his recent results
\cite{king}. The second and third authors originally proved $f(k)\geq \frac{C}{k^5}$ (for some $C>0$) using a different method in the first version; they also proved the reductions at $k=5,6$, which are much harder than the case $k\geq 7$. We do not know whether a similar reduction exists for $k=4$.
The rest of this paper is organized as follows. In section 2, we will introduce some notation and prove Lemma \ref{increase}. In section 3 and section 4, we will prove $f(4) \geq \frac{2}{67}$.
\section{Proof of Lemma \ref{increase}}
In this paper, we use the following notation. Let $G$ be a simple graph with vertex set $V(G)$ and edge set $E(G)$. The {\it neighborhood} of a vertex $v$ in $G$, denoted by $\Gamma_G(v)$, is the set $\{u \colon uv \in E(G)\}$. The {\it degree} $d_G(v)$ of $v$
is the value of $|\Gamma_G(v)|$. The {\it independent
set} (or {\it stable set}) is a set $S$ such that no edge has both ends in $S$. The {\it independence number} $\alpha(G)$ is the largest size of $S$ among all the independent sets $S$ in $G$. When $T \subset V(G)$, we use $\alpha_G(T)$ to denote the independence number of the induced subgraph of $G$ on $T$. Let $\Delta(G)$ be the maximum degree of $G$. For any two vertex-sets $S$ and $T$, we define $E_G(S,T)$ as $\{uv \in E(G): u \in S \ \textrm{and} \ v \in T\}$. Whenever $G$ is clear from context, we will drop the subscript $G$ for simplicity.
If $S$ is a subset of vertices in $G$, then {\it contracting} $S$ means replacing vertices in $S$ by a single fat vertex, denoted by $\underline{S}$,
whose incident edges are all edges that were incident to at least one vertex in $S$, except edges with both ends in $S$. The new graph obtained by contracting $S$ is denoted by $G/S$. This operation is also known as {\it identifying vertices of $S$} in the literature. For completeness, we allow $S$ to be a single vertex or even the empty set. If $S$ only consists of a single vertex, then $G/S=G$; if $S=\emptyset$, then $G/S$ is the union of $G$ and an isolated vertex. When $S$ consists of $2$ or $3$ vertices, for convenience, we write $G/uv$ for $G/\{u,v\}$ and $G/uvw$ for $G/\{u,v,w\}$; the fat vertex will be denoted by $\underline{uv}$ and $\underline{uvw}$, respectively. Given two disjoint subsets $S_1$ and $S_2$, we can contract $S_1$ and $S_2$ sequentially. The order of contractions does not matter; let $G/S_1/S_2$ be the resulting graph. We use $G-S$ to denote the subgraph of $G$ induced by $V(G)-S$.
In order to prove Lemma \ref{increase}, we need to use the following theorems due to King \cite{king}.
\begin{theorem}[King \cite{king}] \label{king0} If a graph $G$ satisfies $\omega(G) > \frac{2} {3} (\Delta(G) + 1)$, then $G$ contains a stable set $S$ meeting every maximum clique. \end{theorem} \begin{theorem}[King \cite{king}] \label{king}
For a positive integer $k$, let $G$ be a graph with vertices partitioned into cliques $V_1,\ldots,V_r$. If for every $i$ and every $v \in V_i$, $v$ has at most $\min\{k, |V_i|-k\}$ neighbors outside $V_i$, then $G$ contains a stable set of size $r$. \end{theorem}
\begin{lemma} \label{6to5} Suppose that $G$ is a connected graph with $\Delta(G)\leq 6$ and $\omega(G)\leq 5$. Then there exists an independent set meeting all induced copies of $K_5$ and $C_5\boxtimes K_2$. \end{lemma}
\noindent {\bf Proof:} We first show that there exists an independent set meeting all copies of $K_5$. If $G$ contains no $K_5$, then this is trivial. Otherwise, we can apply Theorem \ref{king0} to get the desired independent set since $\omega(G)>\frac{2}{3}(\Delta(G)+1)$ is satisfied.
Now we prove the Lemma by contradiction. Suppose the Lemma is false. Let $G$ be a minimum counterexample (with the smallest number of vertices). For any independent set $I$, let $C(I)$ be the number of induced copies of $C_5\boxtimes K_2$ in $G-I$. Among all independent sets which meet all copies of $K_5$, there exists one such independent set $I$ such that $C(I)$ is minimized.
Since $C(I)>0$, there is an induced copy of $C_5\boxtimes K_2$ in $G-I$; we use $H$ to denote it. In $C_5\boxtimes K_2$, there is a unique perfect matching such that identifying
the two ends of each edge in this matching results in a $C_5$. An edge in this unique matching is called a {\em canonical} edge. We define a new graph $G'$ as follows: First we contract all canonical edges in $H$ to get a $C_5$, where its vertices are called {\em fat} vertices. Second we add five edges turning the $C_5$ into a $K_5$. Observe that each vertex in this $C_5$ can have at most two neighbors in $G-H$ and $\Delta(G') \leq 6$. We will consider the following four cases.
\noindent{\bf Case 1:} There is a $K_6$ in the new graph $G'$. Since the original graph $G$ is $K_6$-free, the $K_6$ is formed by the following two possible ways.
{\bf Subcase 1a:} This $K_6$ contains $5$ fat vertices. By the symmetry of $H$, there is an induced $C_5$ in $H$
such that the vertices of this $C_5$ have a common neighbor $v$ in $G \setminus V(H)$, see Figure \ref{fig:1a}.
Since $H$ is $K_5$-free, we can find $x, y$ in this $C_5$ such that $x,y$ is a non-edge. Let $I':=(I\setminus\{v\})\cup\{x, y\}$; $I'$ is also an independent set. Observe that $v$ is not in any $K_5$ in $G-I'$. Thus the set $I'$ is also an independent set and meets every $K_5$ in $G$. Since $C_5\boxtimes K_2$ is a $5$-regular graph, any copy of $C_5\boxtimes K_2$ containing $v$ must contain at least one of $x$ and $y$. Thus, $C(I')<C(I)$. Contradiction!
\begin{figure}
\caption{Subcase 1a.}
\caption{Subcase 1b.}
\label{fig:1a}
\label{fig:1b}
\end{figure}
{\bf Subcase 1b:} This $K_6$ contains $4$ fat vertices. Let $u,v$ be the other two vertices. By the symmetry of $H$, there is a unique way to connect $u$ and $v$ to $H$ as shown by Figure \ref{fig:1b}. Since $uv$ is an edge, one of $u$ and $v$ is not in $I$. We assume $u \not \in I$. Let $\{x,y\} \subset \Gamma_G(v) \cap V(H)$ as shown in Figure \ref{fig:1b} and $I'=I \setminus \{v\} \cup \{x,y\} $. Observe that $I'$ is an independent set and $v$ is not in a $K_5$ in $G-I'$. Thus $I'$ is an independent set meeting each $K_5$ in $G$. Each $C_5 \boxtimes K_2$ containing $v$ must contain one of $x$ and $y$; thus $C(I')<C(I)$. Contradiction!
\noindent {\bf Case 2:}
There is a $K_5$ intersecting $H$ with $4$ vertices. Let $v$ be the vertex of this $K_5$ but not in $H$, see Figure \ref{fig:2}. We have two subcases.
{\bf Subcase 2a:} The vertex $v$ has another neighbor $y$ in $H$ but not in this $K_5$. Since $H$ is $K_5$-free, we can select a vertex $x$ in this $K_5$ such that $xy$ is not an edge of $G$. Let $I':=I\setminus \{v\}\cup \{x,y\}$. Note that $v$ is not in a $K_5$ in $G-I'$, and $I'$ is an independent set. Thus $I'$ is an independent set meeting each $K_5$ in $ G$. Since any $C_5 \boxtimes K_2$ containing $v$ must contain one of $x$ and $y$, we have $C(I')<C(I)$. Contradiction!
{\bf Subcase 2b:} All neighbors of $v$ in $H$ are in this $K_5$. Let $x$ be any vertex in this $K_5$ other than $v$, and $I':=I\setminus \{v\}\cup \{x\}$. In this case, there is only one $K_5$ containing $v$. Thus, $I'$ is also an independent set meeting every copy of $K_5$ in $G$. Observe that $\Gamma_G(v) \setminus \{x\}$ is disconnected. If $v \in H'=C_5 \boxtimes K_2$, then $\Gamma_G(v) \cap H'$ is connected. Thus $v$ is not in a $C_5\boxtimes K_2$ in $G-I'$ and $C(I')< C(I)$. Contradiction! \begin{figure}
\caption{Case 2.}
\caption{Case 3.}
\label{fig:2}
\label{fig:3}
\end{figure}
\noindent {\bf Case 3:} There is an induced subgraph $H'$ isomorphic to $C_5\boxtimes K_2$ such that $H'$ and $H$ are intersecting, see Figure \ref{fig:3}.
Since $V(H)\cap V(H')\not =\emptyset$ and $H\not=H'$, we can find a canonical edge $uv$ of $H$ and a canonical edge $uv'$ of $H'$ such that $v\not\in V(H')$ and $v'\not\in V(H)$.
If $vv'$ is a non-edge, then let $I':=I\setminus \{v'\} \cup \{u\}$. It is easy to check $I'$ is still an independent set. We also observe that any possible $K_5$ containing $v'$ must also contain $u$. Thus, $I'$ meets every copy of $K_5$ in $G$. We have $v'$ in no $C_5 \boxtimes K_2$ in $G-I'$ since $vv'$ is not an edge. We therefore get $C(I')<C(I)$. Contradiction! If $vv'$ is an edge, then locally there are two $K_5$ intersecting at $u$, $v$, and $v'$; say the other four vertices are $x_1, x_2, y_1, y_2$, where two cliques are $\{x_1,x_2,u,v,v'\}$ and $\{y_1,y_2,u,v,v'\}$, see Figure \ref{fig:3}. Let $I'=I\cup\{x_1,y_1\}\setminus \{v'\}$. Note that $I'$ is an independent set and $v'$ is not in a $K_5$ in $G-I'$. Thus $I'$ is an independent set meeting each $K_5$ in $G$. Observe that
any copies of $C_5\boxtimes K_2$ containing $v'$ must contain one
of $x_1$ and $y_1$; we have $C(I')<C(I)$. Contradiction!
\noindent
{\bf Case 4:} This is the remaining case, $G'$ is $K_6$-free. We have $\omega(G')\leq 5$ and $|V(G')| < |V(G)|$. By the minimality of $G$, there is an independent set $I'$ of $G'$ meeting every copy of $K_5$ and $C_5\boxtimes K_2$. In $I'$, there is a unique vertex $x$ of the $K_5$ obtained from contracting canonical edges of $H$. Let $uv$ be the canonical edge corresponding to $x$. Let $I''=I' \setminus \{x\} \cup \{u\}$, we get an independent set $I''$ of $G$. Note that any $v \in H \setminus \{u\}$ is not in any $K_5$ of $G-I''$ by Case 2 as well as not in any $C_5 \boxtimes K_2$ of $G-I''$ by Case 3. Thus $I''$ hits each $K_5$ in $G$ and $C(I'')=0$. Contradiction!
$\square$
The following lemma extends Theorem \ref{king0} when $\omega(G)=4$; a similar result was proved independently in \cite{cek}.
\begin{lemma} \label{5to41} Let $G$ be a connected graph with $\Delta(G)\leq 5$ and $\omega(G)\leq 4$. If $G \not = C_{2l+1} \boxtimes K_2$ for some $l \geq 2$, then there is an independent set $I$ hitting all copies of $K_4$ in $G$. \end{lemma} {\bf Proof:} We will prove it by contradiction. If the lemma is false, then let $G$ be a minimum counterexample. If $G$ is $K_4$-free, then there is nothing to prove. Otherwise, we consider the clique graph ${\cal C}(G)$, whose edge set is the set of all edges appearing in some copy of $K_4$. Since $\Delta(G)\leq 5$, the following are all possible connected components of ${\cal C}(G)$.
\begin{enumerate} \item $C_{t}\boxtimes K_2$ for $t\geq 4$. If this type occurs, then every vertex in $C_{t}\boxtimes K_2$ has degree $5$; thus, this is the entire graph $G$. If $t$ is even, then we can find an independent set $I$ meeting every $K_4$. If $t$ is odd, then it is impossible to find such an independent set. However, this graph is excluded from the assumption of the Lemma.
\item $P_t\boxtimes K_2$ for $t\geq 3$. In this case, all internal vertices have degree $5$ while the four end vertices have degree $4$. Consider a new graph $G'$ which is obtained by deleting all internal vertices and adding four edges to make the four end vertices as a
$K_4$. It is easy to check $\Delta(G')\leq 5$ and $\omega(G')\leq 4$. Since $|G'|<|G|$, there is an independent set $I$ of $G'$ meeting every copy of $K_4$ in $G'$. Note that there is exactly one end vertex in $I$. Observe that any one end vertex can be extended into a maximal independent set meeting every copy of $K_4$ in $P_t\boxtimes K_2$. Thus, we can extend $I$ to an independent set $I'$ of $G$ such that $I'$ meets every copy of $K_4$ in $G$. Hence, this type of component does not occur in ${\cal C}(G)$.
\item There are four other types listed in Figure \ref{k4}. \begin{figure}
\caption{All types of components in the clique graph $C(G)$.}
\label{k4}
\end{figure}
For each component $C_i$ in ${\cal C}(G)$, let $V_i$ be the set of common vertices in all $K_4$'s of $C_i$; for the leftmost figure in Figure \ref{k4}, $V_i$ is the set of all 4 vertices; for the middle two figures, $V_i$ is the set of bottom three vertices; for the rightmost figure, $V_i$ consists of the left-bottom vertex and the middle-bottom vertex. Note that all $V_i$'s are pairwise disjoint. Let $G'$ be the induced subgraph of $G$ on $\cup_i V_i$. Note that $G'$ does not contain any vertex in $C_i\setminus V_i$. By checking each type, we find out that
for each $i$ and each $v\in V_i$, $v$ has at most $\min\{2,|V_i|-2\}$ neighbors outside $V_i$ in $G'$ (not in $G$!). Applying Theorem \ref{king} to $G'$, we conclude that there exists an independent set $I$ of $G'$ meeting every $V_i$; thus $I$ meets every $K_4$ in $G$. Contradiction! \end{enumerate}
$\square$
\begin{lemma} \label{5to42} Let $G$ be a connected graph with $\Delta(G)\leq 5$ and $\omega(G)\leq 4$. If $G \not = C_{2l+1} \boxtimes K_2$ for some $l \geq 2$, then there exists an independent set meeting all induced copies of $K_4$ and $C_8^2$. \end{lemma}
\noindent {\bf Proof:} We will use proof by contradiction. Suppose the Lemma is false. Let $G$ be a minimum counterexample (with the smallest number of vertices). For any independent set $I$, let $C(I)$ be the number of induced copies of $C_8^2$ in $G-I$. Among all independent sets which meet all copies of $K_4$, there exists an independent set $I$ such that $C(I)$ is minimized. Since $C(I)>0$, let $H$ be a copy of $C_8^2$ in $G-I$. The vertices of $H$ are listed by $u_i$ for $i\in {\mathbb Z}_8$ anticlockwise such that $u_iu_j$ is an edge of
$H$ if and only if $|i-j| \leq 2$. The vertex $u_{i+4}$ is the {\em antipode} of $u_i$ for any $i\in {\mathbb Z}_8$.
\noindent {\bf Case 1:} There exists a vertex $v\not\in V(H)$ such that $v$ has
five neighbors in $H$. By the Pigeonhole Principle, $\Gamma(v)$ contains a pair of antipodes. Without loss of generality, say $u_0, u_4\in \Gamma(v)$. If the other three neighbors of $v$ do not form a triangle, then we let $I':=I\setminus \{v\} \cup \{u_0,u_4\}$; note that $v$ is not in any $K_4$ of $G-I'$. Thus $I'$ is an independent set meeting every copy of $K_4$. Since every copy of $C_8^2$ containing $v$ must contain one of $u_0$ and $u_4$, we have $C(I')<C(I)$. Contradiction! Hence, the other three neighbors of $v$ must form a triangle. Without loss of generality, we can assume that the three neighbors are $u_1$, $u_2$, and $u_3$. Now we let $I'':=I\setminus \{v\} \cup \{u_0,u_3\}$; note that $v$ is not in any $K_4$ of $G-I''$. Thus $I''$ is also an independent set meeting every copy of $K_4$ of $G$. Since every copy of $C_8^2$ containing $v$ must contain one of $u_0$ and $u_3$, we have $C(I'')<C(I)$. Contradiction!
\noindent {\bf Case 2:} There exists a vertex $v\not\in V(H)$ such that $v$ has exactly four neighbors in $H$. Since $H$ is $K_4$-free, we can find $u_i, u_j\in \Gamma(v)\cap V(H)$ such that $u_iu_j$ is a non-edge. Let $I':=I\setminus \{v\} \cup \{u_i,u_j\}$; $I'$ is also an independent set. Note that $\Gamma(v)\setminus \{u_i, u_j\}$ can not be a triangle, $v$ is not in any $K_4 \subset G-I'$. Thus $I'$ meets every copy of $K_4$. Since every copy of $C_8^2$ containing $v$ must contain one of $u_i$ and $u_j$, we have $C(I')<C(I)$. Contradiction!
\noindent {\bf Case 3:} There exists a vertex $v\not\in V(H)$ such that $v$ has exactly three neighbors in $H$. If the $3$ neighbors do not form a triangle, then choose $u_i, u_j\in \Gamma(v)\cap V(H)$ such that $u_iu_j$ is a non-edge. Note that $\Gamma(v)\setminus \{u_i, u_j\}$ can not be a triangle; $v$ is not in any $K_4 \subset G-I'$. Let $I':=I\setminus \{v\} \cup \{u_i,u_j\}$; $I'$ is also an independent set meeting every copy of $K_4$. Since every copy of $C_8^2$ containing $v$ must contain one of $u_i$ and $u_j$, we have $C(I')<C(I)$. Contradiction! Else, the three neighbors form a triangle; let $u_i$ be one of them and $I':=I\setminus \{v\} \cup \{u_i\}$; $v$ is not in any $K_4 \subset G-I'$. Thus $I'$ is an independent set meeting every copy of $K_4$. Note that $\Gamma(v)\setminus \{u_i\}$ has only two vertices in $H$. The induced graph on $\Gamma(v)\setminus \{u_i\}$ is disconnected. However, for any vertex $v$ in $H'=C_8^2$, the subgraph induced by $\Gamma_G(v) \cap H'$ is a $P_4$. There is no $C_8^2$ in $G-I'$ containing $v$. Thus, $C(I')<C(I)$. Contradiction!
\noindent {\bf Case 4:} Every vertex outside $H$ can have at most $2$ neighbors in $H$. We identify each pair of antipodes of $H$ to get a new graph $G'$ from $G$. After identifying, $H$ is turned into a $K_4$; where the vertices of this $K_4$ are referred as fat vertices.
{\bf Subcase 4a:} $G'\not=C_{2l+1}\boxtimes K_2$.
Observe $\Delta(G')\leq 5$. We claim $G'$ is $K_5$-free. Suppose not. Since every vertex in $H$ has at most one neighbor outside $H$, then each fat vertex can have at most two neighbors outside $H$. Recall that the original graph $G$ is $K_5$-free. If $G'$ has some $K_5$, then this $K_5$ contains either $3$ or $4$ fat vertices. Let $w$ be one of the other vertices in this $K_5$. We get $w$ has at least three neighbors in $H$. However, this is covered by Case 1, Case 2, or Case 3. Thus, $G'$ is $K_5$-free.
Since $|G'|<|G|$, by the minimality of $G$, $G'$ has an independent set $I'$ meeting every copy of $K_4$ and $C_8^2$ in $G'$. There is exactly one fat vertex in $I'$. Now replacing this fat vertex by its corresponding pair of antipodal vertices, we get an independent set $I''$; we assume the pair of antipodal vertices are $u_2$ and $u_6$. It is easy to check that $I''$ is an independent set of $G$. Next we claim any $v \in V(H) \setminus \{u_2,u_6\}$ is neither in a $K_4 \subset V(G)-I''$ nor in a $C_8^2 \subset V(G)-I''$. Suppose there is some $v$ such that $v \in K_4 \subset G-I''$. Recall each $v \in V(H)$ has at most one neighbor outside $H$ and $H$ is $K_4$-free; there is some $w \not \in V(H)$ such that $w$ has at least three neighbors in $H$. This is already considered by Case 1, Case 2, or Case 3. We are left to show that $v \not \in C_8^2 \subset G-I''$ for each $v \in V(H) \setminus \{u_2,u_6\}$. If not, there exists a copy $H'$ of $C_8^2$ in $G-I''$ containing $v$. Note $H'$ is $4$-regular, any vertex in $H'$ can have at most one neighbor in $I''$; in particular, $v\not=u_0, u_4$. Without loss of generality, we assume $v=u_3$.
Then there is a vertex $w \not \in V(H)$ such that $u_3w$ is an edge, see Figure \ref{subcase4a}. Observe that the neighborhood of each vertex of an induced $C_8^2$ is a $P_4$. Since $u_1u_4$ and $u_1u_5$ are two non-edges, we have $wu_1$ being an edge. Observe $\Gamma_G(u_1)=\{u_7,u_0,u_2,u_3,w\}$. Since $u_2 \not \in H'$, we have $u_0 \in H'$; $u_0$ has two neighbors ($u_2$ and $u_6$) outside $H'$, contradiction!
Therefore, $I''$ meets every copy of $K_4$ and $C_8^2$ in $G$.
Contradiction!
\begin{figure}
\caption{Subcase 4a.}
\label{subcase4a}
\end{figure}
{\bf Subcase 4b:} $G'=C_{2l+1}\boxtimes K_2$. The graph $G$ can be recovered from $G'$. It consists of an induced subgraph $H=C_8^2$ and an induced subgraph $P_{2l-1}\boxtimes K_2$. For each vertex $u$ in $H$, there is exactly one edge connecting it to one of the four end vertices of $P_{2l-1}\boxtimes K_2$; for each end vertex $v$ of $P_{2l-1}\boxtimes K_2$, there are exactly two edges connecting $v$ to the vertices in $H$. First, we take any maximum independent set $I'$ of $P_{2l-1}\boxtimes K_2$. Observe that $I'$ has exactly two end points of $P_{2l-1} \boxtimes K_2$; so $I'$ has exactly four neighbors in $H$. In the remaining four vertices of $H$, there exists a non-edge $u_iu_j$ since $H$ is $K_4$-free. Let $I:=I'\cup\{u_i,u_j\}$. Clearly $I$ is an independent set of $G$ meeting every copy of $K_4$ and $C_8^2$. Contradiction!
$\square$
We are ready to prove Lemma \ref{increase}.
\noindent {\bf Proof of Lemma \ref{increase}:} We need to prove that for $k\geq 5$, any connected graph $G$ with $\Delta(G)=k$ and $\omega(G)\leq k-1$ satisfies \begin{equation}
\label{eq:rec} \chi_f(G)\leq k- \min\left\{f(k-1),\frac{1}{2}\right\}. \end{equation}
If $\omega(G)\leq k-2$, then by inequality (\ref{eq:1}), we have $$\chi_f(G)\leq \frac{\Delta(G)+\omega(G)+1}{2}\leq k-\frac{1}{2}.$$ Thus, inequality (\ref{eq:rec}) is satisfied. From now on, we assume $\omega(G)=\Delta(G)-1$.
For $\Delta(G)=k\geq 6$ and $\omega(G)=k-1$, the condition $\omega(G)>\frac{2}{3}(\Delta(G)+1)$ is satisfied. By Theorem \ref{king0}, $G$ contains an independent set meeting every maximum clique. Extend this independent set to a maximal independent set and denote it by $I$.
Note that $\Delta(G-I) \leq k-1$ and $\omega(G-I)\leq k-2$.
\noindent {\bf Case 1:} $k\geq 7$. From the definition of $f(k-1)$, we have $\chi_f(G-I) \leq \Delta(G-I)-f(k-1)$. Thus, $$ \chi_f(G) \leq \chi_f(G-I)+1 \leq k-1-f(k-1)+1=k-f(k-1). $$ Thus, we have $f(k) \geq \min\{f(k-1),1/2\}$.
\noindent {\bf Case 2:} $k=6$. By Lemma \ref{6to5}, we can find an independent set meeting every copy of $K_5$ and $C_5\boxtimes K_2$; we extend this independent set to a maximal independent set $I$. Note that
$G-I$ contains no induced subgraph isomorphic to $C_5\boxtimes K_2$. We have $\chi_f(G-I) \leq 5-f(5)$; it implies $\chi_f(G)\leq 6-f(5)$.
Thus, $f(6) \geq \min\{f(5),1/2\}$ and we are done.
\noindent {\bf Case 3:} $k=5$. If $G=C_{2l+1}\boxtimes K_2$ for some $l\geq 3$; then $G$ is vertex-transitive and $\alpha(G)=l$. It implies that
$$\chi_f(G)=\frac{|V(G)|}{\alpha(G)}=4+\frac{2}{l}\leq 5-\frac{1}{3}.$$
If $G\not =C_{2l+1}\boxtimes K_2$, then by Lemma \ref{5to42}, we can find an independent set meeting every copy of $K_4$ and $C_8^2$; we extend it to a maximal independent set $I$. Note that
$G-I$ contains no induced subgraph isomorphic to $C_8^2$. We have $\chi_f(G-I) \leq 4-f(4)$; it implies $\chi_f(G)\leq 5-f(4)$.
Thus, $f(5) \geq \min\{f(4),1/3\}$ and we are finished.
$\square$
\section{The case $\Delta(G)=4$}
To prove $f(4)\geq \frac{2}{67}$, we will use an approach which is similar to those in \cite{hz, lup}. We will construct 133 4-colorable auxiliary graphs, and from these colorings we will construct a 134-fold coloring of $G$ using 532 colors.
It suffices to prove that the minimum counterexample does not exist.
Let $G$ be a graph with the smallest number of vertices and satisfying \begin{enumerate} \item $\Delta(G)=4$ and $\omega(G)\leq 3$; \item $\chi_f(G)> 4-\frac{2}{67}$; \item $G\not=C_8^2$. \end{enumerate}
By the minimality of $G$, each vertex in $G$ has degree either $4$ or $3$.
To prove Lemma \ref{f4},
we will show $\chi_f (G) \leq 4 - \frac{2}{67}$, which gives us the desired contradiction.
For a given vertex $x$ in $V(G)$, it is easy to color its neighborhood $\Gamma_G(x)$ using $2$ colors. If $d_G(x)=3$, then we pick a non-edge $S$ from $\Gamma_G(x)$ and color the two vertices in $S$ using color 1. If $d_G(x)=4$ and $\alpha(\Gamma_G(x))\geq 3$, then we pick an independent set $S$ in $\Gamma_G(x)$ of size 3 and assign the color 1 to each vertex in $S$. If $d_G(x)=4$ and $\alpha(\Gamma_G(x))=2$, then we pick two
disjoint non-edges $S_1$ and $S_2$ from $\Gamma_G(x)$ ; we assign color 1 to each vertex in $S_1$ and color 2 to each vertex in $S_2$.
The following Lemma shows that $G$ has a key property, which eventually implies that this local coloring scheme works simultaneously for $x$ in a large subset of $V(G)$.
\begin{lemma}\label{D41}
For each $x \in V(G)$ with $d_G(x)=4$ and $\alpha(\Gamma_G(x))=2$, there exist two vertex-disjoint non-edges $S_1(x), S_2(x) \subset \Gamma_G(x)$ satisfying the following property. If we contract $S_1(x)$ and $S_2(x)$, then the resulting graph $G/S_1(x) /S_2(x)$ contains neither $K^-_{5}$ nor $G_0$. Here $K_5^-$ is the graph obtained from $K_5$ by removing one edge and $G_0$ is the graph shown in Figure \ref{fig:H13}. \begin{figure}
\caption{The graph $G_0$.}
\label{fig:H13}
\end{figure} \end{lemma} The proof of this lemma is quite long and we will present its proof in section 4.
For each vertex $x$ in $G$, we associate
a small set of vertices $S(x)$ selected from $\Gamma_G(x)$ as follows. If $d_G(x)=3$, then let $S(x)$ be the endpoints of a non-edge in $\Gamma_G(x)$ and label the vertices in $S(x)$ as 1; if $d_G(x)=4$ and $\alpha(\Gamma_G(x))\geq 3$, then let $S(x)$ be any independent set of size $3$ in $\Gamma_G(x)$ and label all vertices in $S(x)$ as 1; if $d_G(x)=4$ and $\alpha(\Gamma_G(x))=2$, then let $S(x)=S_1(x)\cup S_2(x)$, where $S_1(x)$ and $S_2(x)$ are guaranteed by Lemma \ref{D41}; we label the vertices in $S_1(x)$ as 1 and the vertices in $S_2(x)$ as 2. For any $x\in V(G)$, we have
$|S(x)|=2$, $3$, or $4$.
The following definitions depend on the choice of $S(\ast)$, which is assumed to be fixed through this section. For $v \in G$ and $j \in \{1,2,3\}$, we define $$
N_G^j(v) = \{u| \ \mbox{there is a path} \ vv_0 \ldots v_{j-2}u \ \mbox{in}\ G \ \mbox{of length} \ j \ \mbox{such that} \ v_0 \in S(v) \ \mbox{and} \ v_{j-2} \in S(u)\}. $$
We now define $N_G^j(u)$ for $j\in \{4,5,7\}$; each $N_G^j(u)$ is a subset of the $j$th neighborhood of $u$. For $j=4$, $v \in N_G^4(u)$ if $d_G(u)=4$, $\alpha(\Gamma_G(u))=2$, $u$ and $v$ are connected as shown in Figure \ref{fig:4n}; otherwise $N_G^4(u)=\emptyset$. In Figure \ref{fig:4n}, $w$ is connected to one of the two vertices in $S_2(u)$. Similarly, in Figures \ref{fig:5n} and \ref{fig:7n}, an edge drawn from a vertex to a group of vertices means that the vertex is connected to some vertex in this group.
For $j=5$, $v \in N_G^5(u)$ if $d_G(w)=4$, $\alpha(\Gamma_G(w))=2$ for $w \in \{u,v\}$ and $u$ and $v$ are connected as shown in Figure \ref{fig:5n}; otherwise $N_G^5(u)=\emptyset$.
For $j=7$, $v \in N_G^7(u)$ if $d_G(w)=4$, $\alpha(\Gamma_G(w))=2$ for $w \in \{u,v\}$ and $u$ and $v$ are connected as shown in Figure \ref{fig:7n}; otherwise $N_G^7(u)=\emptyset$.
\begin{figure}
\caption{\! 4-th neighborhood.\!\!\!}
\caption{\! 5-th neighborhood.\!\!\!}
\caption{\! 7-th neighborhood.\!\!\!}
\label{fig:4n}
\label{fig:5n}
\label{fig:7n}
\end{figure}
Note that for $j\in\{1,2,3,5,7\}$, $v\in N_G^j(u)$ if and only if $u\in N_G^j(v)$; but this does not hold for $j=4$. We have the following lemma.
\begin{lemma} \label{N5}
For $u \in V(G)$ such that $d_G(u)=4$ and $\alpha(\Gamma_G(u))=2$, we have $|N_G^1(u) \cup N_G^2(u) \cup N_G^3(u) \cup N_G^4(u) \cup N_G^5(u) \cup N_G^7(u)| \leq 96$. \end{lemma}
{\bf Proof:} It is clear that $|N_G^1(u) \cup N_G^2(u) \cup N_G^3(u)| \leq 4+8+8 \times 3=36$. We next estimate $|N_G^4(u)|$. In Figure \ref{fig:4n}, observe that $w$ is connected to one vertex of $S_2(u)$ and $w \not \in \Gamma_G(u)$.
For a fixed $u$, there are at most four choices for $w$, at most three choices for $z$, and at most three choices for $v$. Therefore, we have $|N_G^4(u)| \leq 4 \times 3 \times 3=36$.
Let us estimate $|N_G^5(u)|$. In Figure \ref{fig:5n}, for a fixed $u$, we have four choices for $w$ and two choices for $z$. Fix a $z$. Assume $\Gamma_G(z) \setminus \{w\}=\{a,b,c\}$. Let $T_1=\{a,b\}$, $T_2=\{b,c\}$, and $T_3=\{a,c\}$. We have the following claim.
\noindent {\bf Claim} There are at most three $v \in N_G^5(u)$ such that for each $v$ we have $ \Gamma_G(z) \cap \Gamma_G(v)=T_i$ for some $1 \leq i\leq 3$ as shown in Figure \ref{fig:5n}.
\noindent {\bf Proof of the claim:} For each $1 \leq i \leq 3$, there are at most three $v \in N_G^5(u)$ such that $ \Gamma_G(z) \cap \Gamma_G(v)=T_i$ as shown in Figure \ref{fig:5n} since each vertex in $T_i$ has at most three neighbors other than $z$. If the claim is false, then there is $1 \leq i \not =j \leq 3$ such that $\Gamma_G(z) \cap \Gamma_G(v_{i})=T_i$ and $\Gamma_G(z) \cap \Gamma_G(v_{i}')=T_i$ for some $v_{i}, v_{i}' \in N_G^5(u)$, and $\Gamma_G(z) \cap \Gamma_G(v_j)=T_j$ for some $v_j \in N_G^5(u)$, where $v_i,v_i', v_j$ are distinct. Without loss of generality, we assume $\Gamma_G(z) \cap \Gamma_G(v_1)=\Gamma_G(z) \cap \Gamma_G(v_1')=T_1$ for $v_1, v_1' \in N_G^5(u)$, and $\Gamma_G(z) \cap \Gamma_G(v_2)=T_2$ for some $v_2 \in N_G^5(u)$, see Figure \ref{N51}. Observe that $\Gamma_G(b)=\{v_1,v_1',v_2,z\}$. Since $\Gamma_G(z) \cap \Gamma_G(v_1)=T_1$ as shown in Figure \ref{fig:5n}, $a$ and one of $b$'s neighbors form $S_i(v_1)$ for some $i \in \{1,2\}$; we assume it is $S_1(v_1)$. Note $\{z,v_1,v_1'\} \subset \Gamma_G(a)$. Thus $S_1(v_1)=\{a,v_2\}$ and $v_2 \in \Gamma_G(v_1)$. Similarly, we can show $S_1(v_1')=\{a,v_2\}$ and $v_2 \in \Gamma_G(v_1')$. Now, observe that $\Gamma_G(v_2)=\{v_1,v_1',b,c\}$. Since $\Gamma_G(z) \cap \Gamma_G(v_2)=T_2$ as shown in Figure \ref{fig:5n}, $b$ and one of neighbors of $v_2$ form $S_i(v_2)$ for some $i \in \{1,2\}$; we assume $i=1$. Because $\{v_1,v_1'\} \subset \Gamma_G(b)$, then $S_1(v_2)=\{b,c\}$. However, $b$ and $c$ are not is in the same independent set in the definition of $N_G^5(u)$, see Figure \ref{fig:5n}. This is a contradiction and this case can not happen. The claim follows. \begin{figure}
\caption{The picture for the claim.}
\label{N51}
\end{figure}
Therefore, $|N_G^5(u)| \leq 4 \times 2 \times 3=24.$
In Figure \ref{fig:7n}, for a fixed $u$, we have two choices for the edge $e$, one choice for $w$, two choices for $z$, and three choices for the edge $f$. Fix a $z$. By considering the degrees of the endpoints of $f$, there is at most one $f$ and at most one $v \in N_G^7(u)$ such that $|\Gamma_G(f)
\cap \Gamma_G(v)|=4 $ as shown in Figure \ref{fig:7n}. Therefore, we have $|N_G^7(u)| \leq 2 \times 2 \times 1=4$.
Last, we estimate $|N_G^5(u) \cup N_G^7(u)|$. If there is some $v \in N_G^7(u)$, then we observe that there are at most five $z$'s (see Figure \ref{fig:5n}). We get the number of $v \in N_G^5(u)$ is at most $5 \times 3=15$. In this case, we have $$
|N_G^5(u) \cup N_G^7(u)| \leq 4+15 < 24. $$ If $N_G^7(u)=\emptyset$, then also we have $$
|N_G^5(u) \cup N_G^7(u)| \leq 24.$$
Therefore $$|N_G^1(u) \cup N_G^2(u) \cup N_G^3(u) \cup N_G^4(u)
\cup N_G^5(u) \cup N_G^7(u)| \leq 36+36+24=96.$$
$\square$
Based on the graph $G$, we define an auxiliary graph $G^{\ast}$ on vertex set $V(G)$. The edge set is defined as follows: $uv \in E(G^{\ast})$ precisely if
either $u \in N_G^1(v) \cup N_G^2(v) \cup N_G^3(v) \cup N_G^4(v) \cup N_G^5(v) \cup N_G^7(v) $, or $v \in N_G^4(u)$.
We have the following lemma.
\begin{lemma} \label{121color} The graph $G^{\ast}$ is 133-colorable. \end{lemma}
\noindent {\bf Proof:} Let $\sigma$ be an increasing order of $V(G^{\ast})$ satisfying the following conditions. \begin{description} \item [1:] For $u$ and $v$ such that $d_G(u)=3$ and $d_G(v)=4$, we have $\sigma(u) < \sigma(v)$. \item [2:] For $u$ and $v$ such that $d_G(u)=d_G(v)=4$, $\alpha(\Gamma_G(u)) \geq 3$, and $\alpha(\Gamma_G(v))=2$, we have $\sigma(u) < \sigma(v)$. \end{description} We will color $V(G^{\ast})$ according to the order $\sigma$. For each $v$, we have the following estimate on the number of colors forbidden to use for $v$. \begin{description}
\item [1:] For $v$ such that $d_G(v)=3$, the number of colors forbidden to use for $v$ is at most $|N_G^1(v) \cup N_G^2(v) \cup N_G^3(v)| \leq 3+9+27=39$. \item [2:] For $v$ such that $d_G(v)=4$ and $\alpha(\Gamma_G(v)) \geq 3$, the number of colors forbidden to use for $v$ is at most
$|N_G^1(v) \cup N_G^2(v) \cup N_G^3(v)| \leq 3+9+27=39$. \item [3:] For $v$ such that $d_G(v)=4$ and $\alpha(\Gamma_G(v))=2$, the number of colors forbidden to use for
$v$ is at most $|N_G^1(v) \cup N_G^2(v) \cup N_G^3(v) \cup N_G^4(v)
\cup N_G^5(v) \cup N_G^7(v)|+|N_G^4(v)| \leq 96+36=132$ by Lemma \ref{N5}. \end{description} Therefore, the greedy algorithm shows $G^{\ast}$ is 133-colorable.
$\square$
Let $X$ be a color class of $G^{\ast}$. We define a new graph $G(X)$ by the following process.
\begin{enumerate}
\item For each $x \in X$, if $|S(x)|=2$ or $|S(x)|=3$, then we contract $S(x)$ as a single vertex, delete the vertices in $\Gamma_G(x) \setminus S(x)$, and keep label 1 on the new vertex; if $|S(x)|=4$, i.e., $S(x)=S_1(x) \cup S_2(x)$, then we contract $S_1(x)$ and $S_2(x)$ as single vertices and keep their labels. After that, we delete $X$. Let $H$ be the resulting graph. \item Note that $\Gamma_H(x) \cap \Gamma_H(y) = \emptyset$ and there is no edge from
$\Gamma_H(x)$ to $\Gamma_H(y)$ for any $x,y \in X$ as $X$ is a color class. \item We identify all vertices with label $i$ as a single vertex $w_i$ for $i \in \{1,2\}$. Let $G(X)$ be the resulted graph. \end{enumerate} We have the following lemma on the chromatic number of $G(X)$. \begin{lemma} \label{4colorable} The graph $G(X)$ is $4$-colorable for each color class. \end{lemma} We postpone the proof of this lemma until the end of this section and prove Lemma \ref{f4} first.
\noindent {\bf Proof of Lemma \ref{f4}:} By Lemma \ref{121color}, there is a proper 133-coloring of $G^{\ast}$. We assume $V(G^{\ast})=V(G)=\cup_{i=1}^{133} X_i $, where $X_i$ is the $i$-th color class.
For each $i \in \{1,\ldots, 133\}$, Lemma \ref{4colorable} shows $G(X_i)$ is $4$-colorable; let $c_i\colon V(G(X_i)) \to T_i$ be a proper $4$-coloring of the graph $G(X_i)$. Here $T_1,T_2,\ldots, T_{133}$ are pairwise disjoint; each of them consists of $4$ colors. For $i \in \{1,\ldots, 133\}$, the $4$-coloring $c_i$ can be viewed as a $4$-coloring of $G \setminus X_i$ since each vertex with label $j$ receives the color $c_i(w_j)$ for $j=1,2$ and each removed vertex has at most three neighbors in $G \setminus X_i$.
Now we reuse the notation $c_i$ to denote this $4$-coloring of $G
\setminus X_i$. For each $v \in X_i$, we have $|\cup_{u \in
\Gamma_G(v)}c_i(u)| \leq 2$. We can assign two unused colors, denoted by the set $Y(v)$, to $v$. We define $f_i: V(G) \rightarrow
\mathcal P(T_i)$ (the power set of $T_i$) satisfying
$$
f_i(v)= \left\{
\begin{array}{ll}
\{c_i(v)\} \ & \textrm{if} \ v \in V(G) \setminus X_i, \\
Y(v) & \textrm{if} \ v \in X_i.\\
\end{array}
\right.
$$
Observe that each vertex in $X_i$ receives two colors from $f_i$ and
every other vertex receives one color. Let $\sigma: V(G)
\rightarrow \mathcal P(\cup_{i=1}^{133} T_i)$ be a mapping such that $\sigma(v)=\cup_{i=1}^{133} f_i(v)$. It is easy to verify
$\sigma$ is a $134$-fold coloring of $G$ such that each color is drawn
from a palette of $532$ colors; namely we have
$$\chi_f(G) \leq
\frac{532}{134} =4-\frac{2}{67}.$$ The proof of Lemma \ref{f4} is finished.
$\square$
Before we prove Lemma \ref{4colorable}, we need the following definitions.
A {\it block} of a graph is a maximal $2$-connected induced subgraph. A {\it Gallai tree} is a connected graph in which all blocks are either complete graphs or odd cycles. A {\it Gallai forest} is a graph all of whose components are Gallai trees. A $k$-{\it Gallai tree (forest)} is a Gallai tree (forest) such that the degree of all vertices are at most $k-1$. A $k$-critical graph is a graph $G$ whose chromatic number is $k$ and deleting any vertex can decrease the chromatic number. Gallai showed the following Lemma.
\begin{lemma}\label{gallai} \cite{gallai} If $G$ is a $k$-critical graph, then the subgraph of $G$ induced on the vertices of degree $k-1$ is a k-Gallai forest. \end{lemma}
\noindent
{\bf Proof of Lemma \ref{4colorable}:} We use proof by
contradiction. Suppose that $G(X)$ is not $4$-colorable.
The only possible vertices in $G(X)$ with degree greater than $4$ are the
vertices $w_1$ and $w_2$, which are
obtained by contracting the vertices with label 1 and 2 in the intermediate graph $H$. The simple greedy algorithm shows that $G(X)$ is always $5$-colorable. Let
$G'(X)$ be a $5$-critical subgraph of $G(X)$. Applying
Lemma \ref{gallai} to $G'(X)$, the subgraph of $G'(X)$ induced on the
vertices of degree $4$ is a $5$-Gallai forest $F$. The vertex set of $F$ may contain $w_1$ or $w_2$. Delete $w_1$ and $w_2$ from $F$ if $F$ contains one of them. Let $F'$ be the resulting Gallai forest. (Any induced subgraph of a Gallai forest is still a Gallai forest.) The Gallai forest $F'$ is not empty. Let $T$ be a connected component of $F'$ and $B$ be a leaf block of $T$. The block $B$ is either a clique or an odd cycle from the definition of a Gallai tree.
Let $v$ be a vertex in $B$. As $v$ has at most two neighbors ($w_1$ and $w_2$) outside $F'$ in $G(X)$, we have $d_{F'}(v)\geq 2$. If
$v$ is not in other blocks of $F'$, then we have $d_B(v)\geq 2$. It follows that $|B|\geq 3$. Since $B$ is a subgraph of $G$ and $G$ is $K_4$-free, the block $B$ is an odd cycle.
Let $v_1v_2$ be an edge in $B$ such that $v_1$ and $v_2$ are not in other blocks. The degree requirement implies $v_iw_j$ are edges in $G(X)$ for all $i, j\in\{1,2\}$. For $i=1,2$, there are vertices $x_i, y_i\in X$ satisfying $S(x_i)\cap \Gamma_G(v_i)\not =\emptyset$ and $S(y_i)\cap \Gamma_G(v_i)\not =\emptyset$; moreover either
$|S(x_i)| =4$ or $|S(y_i)|=4$ since one of the neighbors of $v_i$ has label 2. Without loss of generality, we assume $|S(x_i)|=4$ for $i \in \{1,2\}$. If $x_i\not= y_i$, then $y_i \in N_G^4(x_i)$, i.e., $y_i \in \Gamma_{G^{\ast}}(x_i)$; this contradicts $X$ being a color class. Thus we have $x_i=y_i$
and $|S(x_i)|=4$ for $i \in \{1,2\}$. For $\{i,j\}=\{1,2\}$, if $x_i\not= y_j$, then $y_j \in N_G^5(x_i)$,
i.e., $y_j \in \Gamma_{G^{\ast}}(x_i)$; this is a contradiction of $X$ being a color class.
Thus we have $$x_1=x_2 = y_1=y_2.$$ Let $x$ denote this common vertex above. Then $d_G(x)=4$ and $\alpha(\Gamma_G(x))=2$.
Let $v_0$ be the only vertex in $B$ shared by other blocks. Since $B-v_0$ is connected, the argument above shows there is a common $x$ for all edges in $B-v_0$. If $\Gamma_{G(X)}(v_0)\cap \{w_1,w_2\}\not=\emptyset$, then there is some vertex $x_0 \in X$ such that $S(x_0) \cap \Gamma_G(v_0) \not =\emptyset$. By a similar argument, we also have $x_0=x$.
Therefore, $x$ depends only on $B$. In the sense that for any $y\in X$ and any $v\in B$, if $S(y)\cap \Gamma_G(v)\not=\emptyset$, then $y=x$.
The block $B$ is an odd cycle as we mentioned above. Suppose $|B|=2r+1$. Let $v_0,v_1,\ldots,v_{2r}$ be the vertices of $B$ in cyclic order and $v_0$ be the only vertex which may be shared by other block.
Let $x\in X$ be the vertex determined by $B$. Recall $d_G(x)=4$ and $\alpha(\Gamma_G(x))=2$. Each vertex in $\Gamma(x)$ can have at most $2$ edges to $B$. We get \begin{equation}
\label{eq:12}
4r\leq |E(B,\Gamma(x))|\leq 8. \end{equation} We have $r\leq 2$. The block $B$ is either a $C_5$ or a $K_3$. We claim both $v_0w_1$ and $v_0w_2$ are non-edges of $G(X)$.
If $B=C_5$, then inequality (\ref{eq:12}) implies
that $v_0$ has no neighbor in $\Gamma(x)$ and the claim holds.
If $B=K_3$, then the claim also holds; otherwise $B\cup \{\underline{S_1(x)}, \underline{S_2(x)}\}$ forms a $K_5^-$ in $G/S_1(x)/S_2(x)$, which is a contradiction to Lemma \ref{D41}.
Let $u_1$ and $u_2$ be the two neighbors of $v_0$ in other blocks of $F'$. If $u_1$ and $u_2$ are in the same block, then this block is an odd cycle; otherwise, $v_0u_1$ and $v_0u_2$ are in two different blocks.
The union of non-leaf blocks of $T$ is a Gallai-tree, denoted by
$T'$. The argument above shows every leaf block of $T'$ must be an odd cycle. Let $C$ be such a leaf block of $T'$. Now $C$ is an odd cycle, and $C$ is connected to $|C|-1$ leaf blocks of $T$. Let $B$ and $B'$ be two leaf blocks of $T$ such that $B\cap C$ is adjacent to $B'\cap C$. Without loss of generality, we may assume $B$ is the one we considered before. By the same argument, $B'$ is an odd cycle of size $2r'+1$ with $r'\in \{1,2\}$. Let $v'_0,v'_1,\ldots, v'_{2r'}$ be the vertices of $B'$ and $v'_0$ be the only vertex in $B'\cap C$. For $i$ in $\{1,2,\ldots, 2r'\}$ and $j$ in $\{1,2\}$, $v'_iw_j$ are edges in $G'(X)$. Similarly, there exists a vertex $x'\in X$ with $d_G(x')=4$ and $\alpha(\Gamma_G(x'))=2$ such that
$|E(v_i, S_1(x'))|\geq 1$ and $|E(v_i, S_2(x'))|\geq 1$. We must have $x=x'$; otherwise $x'\in N_G^7(x)$, i.e., $x' \in
\Gamma_{G^{\ast}}(x)$, and this contradicts the fact that $X$ is a color class in $G^{\ast}$. Now we have $|E(\Gamma(x), B)|\geq 4r$ and
$|E(\Gamma(x), B')|\geq 4r'$. By counting the degrees of vertices in $\Gamma(x)$ in $G$, we have $$4r+4r'+4+4\leq 16.$$ We get $r=r'=1$. Both $B$ and $B'$ are $K_3$'s. In this case, $G/S_1(x)/S_2(x)$ contains the graph $G_0$, see Figure \ref{fig:H13}. This contradicts Lemma \ref{D41}.
We can find the desired contradiction, so the lemma follows.
$\square$
\section{Proof of Lemma \ref{D41}}
In this section, we will prove Lemma \ref{D41}. We first review a Lemma from \cite{lup}.
\begin{lemma}\label{cut2} Let $G$ be a graph. Suppose that $G_1$ and $G_2$ are two subgraphs such that $G_1 \cup G_2=G$ and $V(G_1) \cap V(G_2) =\{u,v\}$. \begin{enumerate} \item If $uv$ is an edge of $G$, then we have $$\chi_f(G) = \max\{\chi_f(G_1), \chi_f(G_2)\}.$$ \item If $uv$ is not an edge of $G$, then we have $$\chi_f(G) \leq \max\{\chi_f(G_1), \chi_f(G_2+uv), \chi_f(G_2/uv)\},$$ \end{enumerate} where $G_2+uv$ is the graph obtained from $G_2$ by adding edge $uv$ and $G_2/uv$ is the graph obtained from $G_2$ by contracting $\{u, v\}$. \end{lemma}
\noindent {\bf Proof of Lemma \ref{D41}:} Recall that $G$ is a connected $K_4$-free graph with minimum number of vertices such that $G \not = C_8^2$ and $ \chi_f(G) > 4-\frac{2}{67} $. Note that $G$ is 2-connected. We will prove it by contradiction.
Suppose Lemma \ref{D41} fails for some vertex $x$ in $G$. Observe $\Gamma_G(x)$ is one of the graphs in Figure \ref{Delta=4}. Here we assume $\Gamma_G(x)=\{a,b,c,d\}$. Throughout the proof of the lemma, let $S_1$ and $S_2$ be two vertex-disjoint independent sets in $\Gamma_G(x)$, $H$ be a triangle in $V(G) \setminus ( \{x\} \cup \Gamma_G(x))$; then we say $(S_1,S_2,H)$ is a bad triple if $\{\underline {S_1}, \underline {S_2}, H\}$ contains a $K_5^-$ in $G/S_1/S_2$.
\begin{figure}
\caption{Three possible cases of $\Gamma_G(x)$. }
\label{Delta=4}
\end{figure}
If $\Gamma_G(x)=P_4$, then $\{a,d\}$ and $\{b,c\}$ is the only pair of disjoint non-edges. There is a triangle $H$ with
$V(H)=\{y,z,w\}$ such that $(\{a,d\}, \{b,c\}, H)$ is a bad triple. Note that $|E(\{a,b,c,d\},\{y,z,w\})|=5$ or $6$.
By an exhaustive search, the induced subgraph of $G$ on $\{x, a,b,c,d,y,z,w\}$ is one of the following six graphs (see Figure \ref{fig:P4}). \begin{figure}
\caption{If $\Gamma_G(x)=P_4$, then there are six possible induced subgraphs.}
\label{fig:P4}
\end{figure}
If $\Gamma_G(x)=2~e$, then $(\{a,c\}, \{b,d\})$ and $(\{a,d\}, \{b,c\})$ are two pairs of disjoint non-edges. By considering the degrees of vertices in $\Gamma_G(x)$, there is only one triangle $H$ with $V(H)=\{y,z,w\}$ such that $(\{a,c\}, \{b,d\}, H)$ and $(\{a,d\}, \{b,c\}, H)$ are two bad triples. By an exhaustive search, the induced subgraph of $G$ on $\{x, a,b,c,d,y,z,w\}$ is one of the following three graphs (see Figure \ref{fig:C4}).
\begin{figure}
\caption{If $\Gamma_G(x)=2\ e$, then there are three possible induced subgraphs.}
\label{fig:C4}
\end{figure}
It suffices to show that $G$ cannot contain $H_i$ for $1\leq i\leq 9$. Since all vertices in $H_1$ (and $H_2$) have degree 4, $H_1$ (and $H_2$) is the entire graph $G$. Observe that $H_1$ is isomorphic to $C_8^2$ and $H_2$ is 11:3-colorable (see Figure \ref{fig:H2H7}). Contradiction! \begin{figure}
\caption{The graph $H_2$ and $H_7$ are 11:3-colorable.}
\label{fig:H2H7}
\end{figure}
In $H_7$, the vertex $d$ is the only vertex with degree less than $4$. If $H_7$ is not the entire graph $G$, then $d$ is a cut vertex of $G$. This contradicts the fact that $G$ is $2$-connected. Thus $G=H_7$. The graph $H_7$ is 11:3-colorable as shown by Figure \ref{fig:H2H7}. Contradiction!
Now we consider the case $H_3$. Note $H_3+bz$ is the graph $H_2$. We have $\chi_f(H_3)\leq \chi_f(H_2)\leq 11/3$. The graph $H_3$ must be a proper induced subgraph of $G$, and the pair $\{b,z\}$ is a vertex cut of $G$. Let $G'$ be the induced subgraph of $G$ by deleting all vertices in $H_3$ but $b,z$. We apply Lemma \ref{cut2} to $G+bz$ with $G_1=H_3+bz=H_2$ and $G_2=G'+bz$. We have $$\chi_f(G+bz)\leq \max\{\chi_f(H_2), \chi_f(G'+bz)\}.$$ Note $\chi_f(H_2)\leq 11/3$ and $11/3<\chi_f(G)\leq \chi_f(G+bz)$. We have $\chi_f(G)\leq \chi_f(G'+bz)$. Both $b$ and $z$ have at most $2$ neighbors in $G'+bz$. Thus $G'+bz$ is $K_4$-free; $G'+bz\not=C_8^2$ and has fewer vertices than $G$. This contradicts to the minimality of $G$.
Note $H_5+cy=H_2$. The case $H_5$ is similar to the case $H_3$.
Note that $H_4$, $H_6$, and $H_8$ are isomorphic to each other. It suffices to show $G$ does not contain $H_4$. Suppose that $H_4$ is a proper induced subgraph of $G$. Let $G_1$ be the induced subgraph of $G$ by deleting all vertices in $H_4$. Note $C_8^2$ is not a proper subgraph of any graph in ${\cal G}_4$. We have $G_1\not=C_8^2$. Note that $c$ and $z$ have degree $3$ while other vertices in $H_4$ have degree $4$.
Since $G$ is 2-connected, $c$ has a unique neighbor, denoted by $u$, in $V(G_1)$. Similarly, $z$ has a unique neighbor, denoted by $v$, in $V(G_1)$. Observe that the pair $\{u,v\}$ forms a vertex cut of $G$. Let $G_2$ be the induced graph of $G$ on $V(H_4)\cup \{u,v\}$. Applying Lemma \ref{cut2} to $G$ with $G_1$ and $G_2$, we have $$\chi_f(G) \leq \max\{\chi_f(G_1), \chi_f(G_2+uv), \chi_f(G_2/uv)\}.$$ Figure \ref{fig:H4} shows $\chi_f(G_2+uv)$ and $\chi_f(G_2/uv)$ are at most $11/3$.
\begin{figure}
\caption{Case $H_4$: both graph $G_2+uv$ and $G_2/uv$ are 11:3-colorable.}
\label{fig:H4}
\end{figure}
Since $\chi_f(G)> 11/3$, we have $\chi_f(G)\leq \chi_f(G_1)$. Now $G_1$ is $K_4$-free and has maximum degree at most $4$; $G_1$ has fewer vertices than $G$. This contradicts the minimality of $G$.
If $G=H_4$, then $\chi_f(H_4)\leq 11/3$, since $H_4$ is a subgraph of $G_2+uv$
in Figure \ref{fig:H4}.
Now we consider the last case $H_9$. First, we contract $b,c,z$ into a fat vertex denoted by $\underline{bcz}$. We write $G/bcz$ for the graph after this contraction. Observe that $\{\underline{bcz},d\}$ is a vertex-cut of $G/bcz$. Let $G_4$ and $G_4'$ be two connected subgraphs of $G/bcz$ such that $G_4 \cup G_4'=G/bcz$, $G_4 \cap G_4'=\{\underline{bcz},d\}$, and $\{u,v\} \subset G_4'$. Note that $G_4$ is $11$:$3$ colorable, see Figure \ref{fig:H9}. Now by Lemma \ref{cut2}, we have $$ \chi_f(G/bcz) \leq \max \{\chi_f(G_4), \chi_f(G_4')\}. $$ As $\{b,c,z\}$ is an independent set, each $a$:$b$-coloring of $G/bcz$ gives an $a$:$b$-coloring of $G$, that is $\chi_f(G/bcz)\geq \chi_f(G)>11/3$. The graph $G_4$ is 11:3-colorable; see Figure \ref{fig:H9}. Thus we have $\chi_f(G_4')\geq \chi_f(G/bcz)\geq \chi_f(G)$. It is easy to check that $G_4'$ has maximum degree $4$, $K_4$-free, and it is not $C_8^2$. Hence $G_4'$ must contain a $K_4$. Otherwise, it contradicts the minimality of $G$.
Second, we contract $\{b,d,z\}$ into a fat vertex $\underline{bdz}$ and denote the graph by $G/bdz$. Let $G_5$ and $G_5'$ be two connected subgraphs of $G/bdz$ such that $G_5 \cup G_5'=G/bdz$, $G_5 \cap G_5'=\{\underline{bdz},c\}$, and $\{u,v\} \subset G_5'$. Note that $G_5$ is 11:3-colorable; see Figure \ref{fig:H9}. By a similar argument, $G_5'$ must contain a $K_4$.
\begin{figure}
\caption{Case $H_9$: the graphs $G_4$, $G_5$, and $G_6$ are 11:3-colorable.}
\label{fig:H9}
\end{figure}
The remaining case is that both $G_4'$ and $G_5'$ have a $K_4$ when we contract $b$ and $z$. Since the original graph $G$ is $K_4$-free, the $K_4$ in $G_4'$ (and in $G_5'$) must contain the fat vertex $\underline{bcz}$ (or $\underline{bdz}$), respectively. Note that each of the four vertices $b,c,d,z$ has at most one edge leaving $H_9$. There must be a triangle $uvp$ in $G$ and these four outward edges are connected to some element of $\{u,v,p\}$. The graph $G/{bz}$ must contain the subgraph $G_6$ as drawn in Figure \ref{fig:H9}.
Note that $\{u,v\}$ is a vertex-cut in $G/bz$. Let $G_6$ and $G_6'$ be two connected subgraphs of $G/bz$, which satisfy $G_6 \cup G_6'=G/bz$, $G_6 \cap G_6'=\{u,v\}$, and $\underline{bz} \in G_6$. By Lemma \ref{cut2}, we have $$ \chi_f(G/bz) \leq \max\{\chi_f(G_6), \chi_f(G_6')\}. $$ Note that $G_6$ is 11:3-colorable; see Figure \ref{fig:H9}. We also have $\chi_f(G/bz) \geq \chi_f(G) >\frac{11}{3}$. We obtain $\chi_f(G_6') \geq \chi_f(G/bz) \geq \chi_f(G)$. Observe that $G_6'$ is a subgraph of $G$. We arrive at a contradiction to the minimality of $G$.
If $\Gamma_G(x)=C_4$, then the only possible choice for the two independent sets is $\{a,c\}$ and $\{b,d\}$. If there is some triangle $H$ such that $(\{a,c\},\{b,d\},H)$ is a bad triple, then we have $$
|E(\Gamma_G(x),H)| \geq 5. $$
However, $|E(\Gamma_G(x),H)| \leq 4$. This is a contradiction. Thus the lemma follows in this case.
We can select two vertex disjoint non-edges $S_1$ and $S_2$ such that the graph $G/S_1/S_2$ contains no $K_5^-$. For these particular $S_1$ and $S_2$, if $G/S_1/S_2$ contains no $G_0$, then Lemma \ref{D41} holds.
Without loss of generality, we assume that $G/S_1/S_2$ does contain $G_0$. Let $s_i=\underline{S_i}$ for $i=1,2$.
Observe that both $s_1$ and $s_2$ have four neighbors $u,v,p,q$ other than $x$ in $G_0$. It follows that $$|E(S_1\cup S_2, \{u,v,p,q\})|\geq 8.$$ On the one hand, we have \begin{eqnarray*}
|E(G\mid_{S_1\cup S_2})| &=&\frac{1}{2}\left(\sum_{v\in S_1\cup S_2}d(v)
-|E(S_1\cup S_2, \{u,v,p,q\})| - 4\right)\\ &\leq& \frac{1}{2}(16- 8 - 4)\\ &=&2. \end{eqnarray*} On the other hand, $\alpha(\Gamma_G(x))=2$ implies $G\mid_{S_1\cup S_2}$ contains at least two edges. Thus, we have $\Gamma_G(x)=2e$, that is, $\Gamma_G(x)$ consists of two disjoint edges. Label the vertices in $\Gamma_G(x)$ by $a,b,c,d$ as in Figure \ref{Delta=4}. We assume $ab$ and $cd$ are edges while $ac, bd, ad, bc$ are non-edges. Observe that each vertex in $\{u,v,p,q\}$ has exactly two neighbors in $\{a,b,c,d\}$.
If one vertex, say $u$, has two neighbors forming a non-edge, say $ac$, then we can choose $S'_1=\{a,c\}$ and $S_2'=\{b,d\}$. It is easy to check that
$G/S_1'/S_2'$ contains neither $G_0$ nor $K_5^-$. We are done in this case.
In the remaining case, we can assume that for each vertex $y$ in $\{u,v,p,q\}$, the neighbors of $y$ in $\{a,b,c,d\}$ always form an edge. Up to relabeling vertices, there is only one arrangement for edges between $\{u,v,p,q\}$ and $\{a,b,c,d\}$; see the graph $H_{10}$ defined in Figure \ref{fig:H14}. \begin{figure}
\caption{$H_{10}$ and an 11:3-coloring of $H_{10}$.}
\label{fig:H14}
\end{figure} The graph $H_{10}$ is 11:3-colorable as shown in Figure \ref{fig:H14}. Since $\chi_f(G)>11/3$, $H_{10}$ is a proper subgraph of $G$.
Note that in $H_{10}$, every vertex except $w$ and $r$ has degree $4$; both $w$ and $r$ have degree $3$. Thus, $\{w,r\}$ is a vertex cut of $G$. Let $G_1=H_{10}$ and let $G_2$ be the subgraph of $G$ obtained by deleting the vertices in $\{x,a,b,c,d,p,q,u,v\}$. Applying Lemma \ref{cut2} with $G_1$ and $G_2$ defined above, we have $$ \chi_f(G) \leq \max \{\chi_f(G_1),\chi_f(G_2)\}. $$ Since $\chi_f(G)> 11/3$ and $\chi_f(G_1) \leq 11/3 $ (see Figure \ref{fig:H14}), we must have $\chi_f(G_2) \geq \chi_f(G)$. Note that $G_2$ has fewer vertices than $G$. This contradicts the minimality of $G$. Therefore, the lemma follows.
$\square$
\end{document} |
\begin{document}
\title{ An eigenvalue problem for fully nonlinear elliptic equations with gradient constraints}
\author{Ryan Hynd\footnote{Department of Mathematics, University of Pennsylvania. Partially supported by NSF grant DMS-1301628.}}
\maketitle
\begin{abstract} We consider the problem of finding $\lambda\in \mathbb{R}$ and a function $u:\mathbb{R}^n\rightarrow\mathbb{R}$ that satisfy the PDE $$ \max\left\{\lambda + F(D^2u) -f(x),H(Du)\right\}=0, \quad x\in \mathbb{R}^n. $$ Here $F$ is elliptic, positively homogeneous and superadditive, $f$ is convex and superlinear, and $H$ is typically assumed to be convex. Examples of this type of PDE arise in the theory of singular ergodic control. We show that there is a unique $\lambda^*$ for which the above equation has a solution $u$ with appropriate growth as
$|x|\rightarrow \infty$. Moreover, associated to $\lambda^*$ is a convex solution $u^*$ that has bounded second derivatives, provided $F$ is uniformly elliptic and $H$ is uniformly convex. It is unknown whether or not $u^*$ is unique up to an additive constant; however, we verify this is the case when $n=1$ or when $F, f,H$ are ``rotational." \end{abstract}
\section{Introduction}
The eigenvalue problem of singular ergodic control is to find a real number $\lambda$ and function $u:\mathbb{R}^n\rightarrow\mathbb{R}$ that satisfy the PDE
\begin{equation}\label{LinEigProb}
\max\left\{\lambda -\Delta u -f(x),|Du|-1\right\}=0, \quad x\in \mathbb{R}^n. \end{equation} Here $Du=(u_{x_i})$ is the gradient of $u$, $\Delta u=\sum^{n}_{i=1}u_{x_ix_i}$ is the usual Laplacian, and $f$ is assumed to be convex and superlinear $$
\lim_{|x|\rightarrow \infty}\frac{f(x)}{|x|}=\infty. $$ We call any such $\lambda$ an {\it eigenvalue}. In previous work \cite{Hynd}, we showed there is a unique eigenvalue $\lambda^*\in \mathbb{R}$ such that the PDE \eqref{LinEigProb} admits a viscosity solution $u$ satisfying the growth condition $$
\lim_{|x|\rightarrow \infty}\frac{u(x)}{|x|}=1. $$ Moreover, associated to $\lambda^*$, there is always one solution $u^*$ that is convex with $D^2u^*\in L^\infty(\mathbb{R}^n;S_n(\R))$. Here $S_n(\R)$ denotes the collection of real, symmetric $n\times n$ matrices.
\par The eigenvalue $\lambda^*$ is also known to have the ergodic control theoretic interpretation $$
\lambda^*:=\inf_{\nu}\limsup_{t\rightarrow \infty}\frac{1}{t}\left\{\mathbb{E}\int^{t}_{0}f\left(\sqrt{2}W(s) + \nu(s)\right)ds + |\nu|(t)\right\} $$ as shown in \cite{Menaldi}. Here $(W(t),t\ge 0)$ is an $n$-dimensional Brownian motion on a probability space $(\Omega, {\mathcal F}, \mathbb{P})$ and $\nu$ is an $\mathbb{R}^n$ valued control process. Each $\nu$ is required to be adapted to the filtration generated by $W$ and satisfy $$ \begin{cases} \nu(0)=0\;\\ t\mapsto \nu(t) \; \text{is left continuous}\\
|\nu|(t)<\infty , \;\text{for all}\; t> 0\; \end{cases} $$
$\mathbb{P}$ almost surely; the notation $|\nu|(t)$ denotes the total variation of $\nu$ restricted to the interval $[0,t)$. We say $\nu$ is a {\it singular control} as it may have sample paths that are not absolutely continuous with respect to the standard Lebesgue measure on $[0,\infty)$. We refer the reader to \cite{Borkar, Fleming, Oksendal} for more information on how PDE arise in singular stochastic control.
\par We also showed in \cite{Hynd} that $\lambda^*$ is given by the following ``minmax" formula \begin{equation}\label{minmaxLap}
\lambda^*=\inf\left\{\sup_{|D\psi(x)|<1}\left\{\Delta\psi(x) + f(x)\right\} : \psi\in C^2(\mathbb{R}^n), \; \liminf_{|x|\rightarrow \infty}\frac{\psi(x)}{|x|}\ge 1 \right\} \end{equation} and the ``maxmin" formula \begin{equation}\label{maxminLap}
\lambda^*=\sup\left\{\inf_{x\in\mathbb{R}^n}\left\{\Delta\phi(x) + f(x)\right\} : \phi\in C^2(\mathbb{R}^n), \, |D\phi|\le 1 \right\}. \end{equation} The purpose of this paper is to verify generalizations of these results.
\par In particular, we consider the following eigenvalue problem: find $\lambda\in \mathbb{R}$ and $u:\mathbb{R}^n\rightarrow \mathbb{R}$ satisfying the PDE \begin{equation}\label{EigProb} \max\left\{\lambda + F(D^2u) -f(x),H(Du)\right\}=0, \quad x\in \mathbb{R}^n. \end{equation} Here $D^2u=(u_{x_ix_j})$ is the Hessian of $u$. A standing assumption in this paper is that the nonlinearity $F:S_n(\R)\rightarrow \mathbb{R}$ is elliptic, positively homogeneous, and superadditive: \begin{equation}\label{Fassump} \begin{cases} -\Theta\tr N\le F(M+N)-F(M)\le -\theta\tr N, \quad (N\ge 0) \\ F(tM)=tF(M)\ \\ F(M)+F(N)\le F(M+N)\ \end{cases} \end{equation} for each $M,N\in S_n(\R)$, $t\ge 0$ and some $\theta,\Theta\ge 0$. If $\theta>0$, we say $F$ is uniformly elliptic. For instance, in \eqref{LinEigProb} $F$ is the linear function $F(M)=-\tr M.$ And a more typical nonlinear example we have in mind is $$ F(M)=\min_{1\le k\le N}\{-\tr(A_kM)\}, $$ where each $A_k\in S_n(\R)$, $k=1,\dots,N$, satisfies $$
\theta|\xi|^2\le A_k\xi\cdot\xi\le \Theta|\xi|^2, \quad \xi\in \mathbb{R}^n. $$
\par We will assume throughout that the gradient constraint function $H\in C(\mathbb{R}^n)$ satisfies \begin{equation}\label{Hassump} \begin{cases} H(0)<0\\ \{p\in \mathbb{R}^n: H(p)\le 0\}\; \text{is compact and strictly convex.} \end{cases} \end{equation}
In the motivating equation \eqref{LinEigProb}, $H(p)=|p|-1$. And in view of the results of \cite{Hynd}, it is natural to study solutions of \eqref{EigProb} subject to a suitable growth condition. To this end, we define the function $$ \ell(v):=\max\{p\cdot v: H(p)\le 0\}, \quad v\in \mathbb{R}^n $$ which is also known as the support function of the convex set $\{p\in \mathbb{R}^n: H(p)\le 0\}$.
\par Note that we can replace $H$ in \eqref{EigProb} with the explicit convex gradient constraint $$
H_0(p):=\max_{|v|=1}\left\{p\cdot v -\ell(v)\right\} $$ since $H(p)\le 0$ if and only if $H_0(p)\le 0$ (Theorem 8.24 in \cite{Rock}). This is something we will do repeatedly in the work that follows. We also note that by the assumptions \eqref{Hassump}, there are positive constants $c_0, c_1$ such that \begin{equation}\label{UpperLowerell}
c_0|v|\le \ell(v)\le c_1|v|, \quad v\in \mathbb{R}^n \end{equation} and consequently \begin{equation}\label{UpperLowerH}
|p|-c_1\le H_0(p)\le |p|-c_0, \quad p\in \mathbb{R}^n. \end{equation}
\par The main result of this paper is as follows. \begin{thm}\label{Thm1} Assume \eqref{Fassump}, \eqref{Hassump}, and that $f$ is convex and superlinear. \\ (i) There is a unique $\lambda^*\in\mathbb{R}$ such that \eqref{EigProb} has a viscosity solution $u\in C(\mathbb{R}^n)$ satisfying the growth condition \begin{equation}\label{ellgrowth}
\lim_{|x|\rightarrow \infty}\frac{u(x)}{\ell(x)}=1. \end{equation} Associated to $\lambda^*$ is a convex viscosity solution $u^*$ that satisfies \eqref{ellgrowth}. \\ (ii) Suppose that $F$ is uniformly elliptic, $H$ is convex and that there are $\sigma,\Sigma>0$ such that \begin{equation}\label{Hassump2}
\sigma|\xi|^2\le D^2H(p)\xi\cdot \xi\le \Sigma|\xi|^2, \quad \xi\in\mathbb{R}^n \end{equation} for Lebesgue almost every $p\in \mathbb{R}^n$. Then we may choose $u^*$ to satisfy $D^2u^*\in L^\infty(\mathbb{R}^n;S_n(\R))$. \end{thm} \par When $\lambda=\lambda^*$ in \eqref{EigProb}, we will call solutions that satisfy the growth condition \eqref{ellgrowth} {\it eigenfunctions}. It is unknown if eigenfunctions are unique up to an additive constant. However, we establish below that when $n=1$ any two convex eigenfunctions differ by a constant; see Proposition \ref{UniqunessN1}. We also show that if $F$, $f$ and $H$ are ``rotational," then $u^*$ can be chosen radial and twice continuously differentiable. This generalizes Theorem 2.3 of \cite{Kruk} and Theorem 1.3 of our previous work \cite{Hynd}.
\begin{thm}\label{SymmRegThm} Suppose \begin{equation}\label{SymmetryCond} \begin{cases} f(Ox)=f(x)\\ H(O^tp)=H(p)\\ F(OMO^t)=F(M) \end{cases} \end{equation} for each $x,p\in \mathbb{R}^n$, $M\in S_n(\R)$ and orthogonal $n\times n$ matrix $O$. If $F$ is uniformly elliptic and $H$ satisfies \eqref{Hassump2}, then there is a radial eigenfunction $u^*\in C^2(\mathbb{R}^n)$. \end{thm}
\par In Proposition \ref{ConvUniqueness} below, we assume \eqref{SymmetryCond} and show any two convex, radial eigenfunctions differ by an additive constant. Unfortunately, we do not know if this symmetry assumption ensures that every eigenfunction is radial. Finally, we verify a minmax formula for $\lambda^*$ which is the fully nonlinear analog of the formula \eqref{minmaxLap}. However, for nonlinear $F$, we only establish an inequality corresponding to the formula \eqref{maxminLap}. \begin{thm}\label{minmaxThm} Define $$
\lambda_+:=\inf\left\{\sup_{H(D\psi(x))<0}\left\{-F(D^2\psi(x)) + f(x)\right\} : \psi\in C^2(\mathbb{R}^n), \; \liminf_{|x|\rightarrow \infty}\frac{\psi(x)}{\ell(x)}\ge 1 \right\}. $$ and $$ \lambda_-:=\sup\left\{\inf_{x\in\mathbb{R}^n}\left\{-F(D^2\phi(x)) + f(x)\right\} : \phi\in C^2(\mathbb{R}^n), \, H(D\phi)\le 0 \right\} $$ Then $$ \lambda_-\le \lambda^*\le\lambda_+. $$ If there is an eigenfunction $u^*$ that satisfies $D^2u^*\in L^\infty(\mathbb{R}^n;S_n(\R))$, then $\lambda^*=\lambda_+.$ \end{thm} The organization of this paper is as follows. In section \ref{CompSect}, we verify the uniqueness of eigenvalues as detailed in Theorem \ref{Thm1}. Then we consider the existence of an eigenvalue $\lambda^*$ in section \ref{ExistSec}. Next, we verify Theorem \ref{SymmRegThm} in section \ref{RegSect} and prove Theorem \ref{SymmRegThm} in section \ref{1DandRotSymmSect}. Section \ref{MinMaxSect} of this paper is dedicated to the proof of Theorem \ref{minmaxThm}. Finally, we would like to acknowledge hospitality of the University of Pennsylvania's Center of Race $\&$ Equity in Education where part of this paper was written.
\section{Comparison principle}\label{CompSect} In this section, we show there can be at most one eigenvalue as detailed in Theorem \ref{Thm1}. As equation \eqref{EigProb} is a fully nonlinear elliptic equation for a scalar function $u$, we will employ the theory of viscosity solutions \cite{Bardi, Crandall, CIL, Fleming}. In particular, we will use results and notation from the ``user guide" \cite{CIL}. Moreover, going forward we typically will omit the modifier ``viscosity" when we refer to sub- and supersolutions. We begin our discussion with a basic proposition about subsolutions of the first order PDE $H(Du)=0$. \begin{lem}\label{HLipLem} A function $u\in C(\mathbb{R}^n)$ satisfies \begin{equation}\label{HSubsoln} H(Du(x))\le 0, \quad x\in \mathbb{R}^n \end{equation} if and only if \begin{equation}\label{ellLip} u(x)-u(y)\le \ell(x-y), \quad x,y\in \mathbb{R}^n. \end{equation} \end{lem} \begin{proof} Assume \eqref{HSubsoln}. Then $u$ is Lipschitz by \eqref{UpperLowerH}, and $H(Du(x))\le 0$ for almost every $x\in \mathbb{R}^n$. Let $u^\epsilon:=\eta^\epsilon*u$ be a standard mollification of $u$. That is, $\eta\in C^\infty_c(\mathbb{R}^n)$ is a nonnegative, radial function supported in $B_1(0)$ that satisfies $\int_{\mathbb{R}^n}\eta(z)dz=1$ and $\eta^\epsilon:=\epsilon^{-n}\eta(\cdot/\epsilon)$. It is readily verified that $u^\epsilon\in C^\infty(\mathbb{R}^n)$ and $u^\epsilon$ converges to $u$ uniformly as $\epsilon$ tends to 0; see Appendix C.5 of \cite{Evans2} for more on mollification. As $H_0$ is convex, we have by Jensen's inequality $$ H_0(Du^\epsilon)=H_0\left(D(\eta^\epsilon*u)\right)=H_0\left(\eta^\epsilon*Du\right)\le \eta^\epsilon*H_0(Du)\le 0. $$ It follows that for any $x,y\in \mathbb{R}^n$ $$ u^\epsilon(x)-u^\epsilon(y)=\int^1_0Du^\epsilon(y+t(x-y))\cdot (x-y)dt\le \ell(x-y). $$ Sending $\epsilon\rightarrow 0^+$ gives \eqref{ellLip}.
\par For the converse, suppose there is $p\in \mathbb{R}^n$ such that $$
u(x)\le u(x_0)+p\cdot(x-x_0) + o(|x-x_0|) $$
as $x\rightarrow x_0$. Substituting $x=x_0 - tv$ for $t>0$ and $|v|=1$ above gives $$ u(x_0)- t\ell(v)\le u(x_0-tv)\le u(x_0) -t p\cdot v+o(t). $$ As a result $p\cdot v\le \ell(v)$. As $v$ was arbitrary, $H(p)\le 0$. \end{proof} \begin{cor} The function $\ell$ satisfies \eqref{HSubsoln}. Moreover, at any $x\in \mathbb{R}^n$ for which $\ell$ is differentiable $$ \ell(x)=D\ell(x)\cdot x\quad \text{and} \quad H(D\ell(x))=0. $$ \end{cor} \begin{proof} As $\ell$ is convex and positively homogeneous, it is sublinear. Therefore, $\ell(x)\le \ell(y)+\ell(x-y)$ for each $x,y\in \mathbb{R}^n$. By the previous lemma, $\ell$ satisfies \eqref{HSubsoln}. Now suppose that $\ell$ is differentiable at $x$, and choose $\xi$ such that $H(\xi)\le 0$ and $\ell(x)=\xi\cdot x$. Then, as $y\rightarrow x$ \begin{align*} \xi\cdot y& \le\ell(y) \\
& =\ell(x)+ D\ell(x)\cdot (y-x)+o(|y-x|)\\
& = \xi\cdot x+D\ell(x)\cdot (y-x)+o(|y-x|). \end{align*} Choosing $y=x+tv$, for $t>0$ and $v\in \mathbb{R}^n$ gives $\xi\cdot v\le D\ell(x)\cdot v + o(1)$ as $t\rightarrow 0^+$. Thus, $\xi=D\ell(x)$ and $H(D\ell(x))\le 0$. If $x\neq 0$, $$
H_0(D\ell(x))\ge D\ell(x)\cdot \frac{x}{|x|} -\ell\left(\frac{x}{|x|}\right)=\frac{D\ell(x)\cdot x-\ell(x)}{|x|}=0 $$ and so $H(D\ell(x))=0$. Conversely, if $x=0$, then $\ell$ is linear since it is positively homogeneous. However, this would contradict \eqref{UpperLowerell}. \end{proof} The following assertion is a comparison principle for eigenvalues that makes use of the growth condition \eqref{ellgrowth}.
\begin{prop}\label{lamCompProp} Assume $u\in USC(\mathbb{R}^n)$ is a subsolution of \eqref{EigProb} with eigenvalue $\lambda$ and $v\in LSC(\mathbb{R}^n)$ is a supersolution of \eqref{EigProb} with eigenvalue $\mu$. If \begin{equation}\label{growthComp}
\limsup_{|x|\rightarrow\infty}\frac{u(x)}{\ell(x)}\le 1\le \liminf_{|x|\rightarrow\infty}\frac{v(x)}{\ell(x)}, \end{equation} then $\lambda\le \mu$. \end{prop} \begin{rem} Any subsolution $u$ of \eqref{EigProb} satisfies $H(Du)\le 0$. By Lemma \ref{HLipLem}, $u$ then satisfies \eqref{ellLip} and therefore the first inequality in \eqref{growthComp} automatically holds. We have included both inequalities in \eqref{growthComp} simply for aesthetic purposes, and we continue this practice throughout this paper. \end{rem}
\begin{proof} For $\tau\in (0,1)$ and $\eta>0$, set $$
w^{\tau}(x,y):=\tau u(x) - v(y), \quad \varphi^\eta(x,y):=\frac{1}{2\eta}|x-y|^2 $$ $x,y\in \mathbb{R}^n$. Observe \begin{align}\label{wminusphi}
(w^{\tau}-\varphi^\eta)(x,y)&=\tau(u(x)-u(y))+\tau u(y)-v(y)-\frac{1}{2\eta}|x-y|^2 \nonumber \\
&\le \tau\ell(x-y)+\tau u(y)-v(y)-\frac{1}{2\eta}|x-y|^2 \nonumber \\
&\le \tau c_1|x-y|+\tau u(y)-v(y)-\frac{1}{2\eta}|x-y|^2 \nonumber \\
&\le \eta \tau^2 c_1^2+\tau u(y)-v(y)-\frac{1}{4\eta}|x-y|^2. \end{align}
In view of \eqref{growthComp}, $\lim_{|y|\rightarrow \infty}(\tau u(y)-v(y))=-\infty$ and so $$
\lim_{|x|+|y|\rightarrow \infty}(w^{\tau}-\varphi^\eta)(x,y)=-\infty. $$ As a result, there is $(x_\eta,y_\eta)$ maximizing $w^{\tau}-\varphi^\eta$.
\par By Theorem 3.2 in \cite{CIL}, for each $\rho>0$ there are $X,Y\in S_n(\R)$ with $X\le Y$ such that $$ \left(\frac{x_\eta-y_\eta}{\eta},X\right)\in \overline{J}^{2,+}(\tau u)(x_\eta) $$ and $$ \left(\frac{x_\eta-y_\eta}{\eta},Y\right)\in \overline{J}^{2,-}v(y_\eta). $$ Note that \begin{align*} H_0\left(\frac{x_\eta-y_\eta}{\eta}\right)&=H_0\left(\tau\frac{x_\eta-y_\eta}{\tau\eta}+(1-\tau)0\right)\\ &\le \tau H_0\left(\frac{x_\eta-y_\eta}{\tau\eta}\right)+(1-\tau)H_0(0)\\ &\le (1-\tau)H_0(0)\\ &<0. \end{align*} As $v$ is a supersolution of \eqref{EigProb}, $$ \mu + F(Y)-f(y_\eta)\ge 0. $$ Since $F$ is elliptic and positively homogeneous, \begin{align}\label{ComparisonIneq} \tau \lambda -\mu&\le -\tau F\left(\frac{X}{\tau}\right)+F(Y)+\tau f(x_\eta)-f(y_\eta) \nonumber\\ &= - F\left(X\right)+F(Y)+\tau f(x_\eta)-f(y_\eta)\nonumber \\ &\le \tau f(x_\eta)-f(y_\eta)\nonumber \\ &= f(x_\eta)-f(y_\eta) + (\tau-1)f(x_\eta)\nonumber \\ &\le f(x_\eta)-f(y_\eta) +(\tau-1)\inf_{\mathbb{R}^n}f. \end{align} \par We now claim that $(y_\eta)_{\eta>0}\subset\mathbb{R}^n$ is bounded. To see this, recall inequality \eqref{wminusphi}. If
there is a sequence $\eta_k\rightarrow 0$ as $k\rightarrow \infty$ for which $|y_{\eta_k}|$ is unbounded, then $(w^\tau-\varphi^{\eta_k})(x_{\eta_k},y_{\eta_k})$ tends to $-\infty$ as $k\rightarrow\infty$. However, \begin{align*} (w^\tau-\varphi^{\eta_k})(x_{\eta_k},y_{\eta_k})&=\sup_{\mathbb{R}^n\times\mathbb{R}^n}(w^\tau-\varphi^{\eta_k})\\ &\ge (w^\tau-\varphi^\eta)(0,0)\\ &=\tau u(0)-v(0). \end{align*} Thus, $(y_\eta)_{\eta>0}$ and similarly $(x_\eta)_{\eta>0}$ is bounded. It then follows from Lemma 3.1 in \cite{CIL} that $$
\lim_{\eta\rightarrow 0^+}\frac{|x_\eta-y_\eta|^2}{2\eta}=0 $$ and $(x_\eta,y_\eta)_{\eta>0}\subset\mathbb{R}^n\times\mathbb{R}^n$ has a cluster point $(x_\tau,x_\tau)$. Passing to the limit along an appropriate sequence $\eta$ tending to $0$ in \eqref{ComparisonIneq} then gives \begin{equation}\label{ComparisonIneq2} \tau \lambda-\mu\le (\tau-1)\inf_{\mathbb{R}^n}f. \end{equation} We conclude after sending $\tau\rightarrow 1^-$. \end{proof} \begin{cor} There can be at most one $\lambda\in \mathbb{R}$ for which \eqref{EigProb} has a solution $u$ satisfying \eqref{ellgrowth}. \end{cor}
\par We are uncertain whether or not eigenfunctions $u$ are uniquely defined up to an additive constant. However, we do know that if $F$ is not uniformly elliptic and $f$ is not strictly convex, eigenfunctions are not necessarily unique. For instance when $F\equiv 0$ and $H(p)=|p|-1$, equation \eqref{EigProb} reduces to \begin{equation}\label{FzeroEqn}
\max\{\lambda-f,|Du|-1\}=0, \quad \mathbb{R}^n. \end{equation}
It is easily verified that $\lambda^*=\inf_{\mathbb{R}^n}f$ and $u(x)=|x-x_0|$ is a solution of \eqref{FzeroEqn} for each $x_0$ such that $\inf_{\mathbb{R}^n}f=f(x_0)$. Notice that if there is another point $y_0\neq x_0$ where
$f$ attains its minimum, then $u(x)=|x-y_0|$ is another solution.
\par We will give some conditions in Proposition \ref{UniqunessN1} below that guarantee uniqueness when $n=1$. However, we postpone this discussion until after we have considered the regularity of solutions of \eqref{EigProb}. We conclude this section by giving a few examples with explicit solutions. \begin{ex} Assume $n=1$, and consider the eigenvalue problem $$ \begin{cases}
\max\{\lambda - u'' -x^2, |u'|-1\}=0, \quad x\in \mathbb{R}\\
\lim_{|x|\rightarrow \infty}\frac{u(x)}{|x|}=1 \end{cases}. $$ Direct computation gives the explicit eigenvalue $$ \lambda^*=(2/3)^{2/3} $$ with a corresponding eigenfunction \begin{align*}
u^*(x)&=\inf_{|y|<(\lambda^*)^{1/2}}\left\{\frac{\lambda^*}{2}y^2-\frac{1}{12}y^4 +|x-y|\right\}\\
&=
\begin{cases}
\frac{\lambda^*}{2}x^2-\frac{1}{12}x^4, \quad |x|<(\lambda^*)^{1/2}\\
\frac{\lambda^*}{2}[(\lambda^*)^{1/2}]^2-\frac{1}{12}[(\lambda^*)^{1/2}]^4 +(x-(\lambda^*)^{1/2}),\quad x\ge (\lambda^*)^{1/2} \\
\frac{\lambda^*}{2}[(\lambda^*)^{1/2}]^2-\frac{1}{12}[(\lambda^*)^{1/2}]^4 -(x+(\lambda^*)^{1/2}),\quad x\le -(\lambda^*)^{1/2} \\
\end{cases}. \end{align*} One checks additionally that $u^*\in C^2(\mathbb{R})$. In fact, searching for a solution that is twice continuously differentiable led us to the particular value of $\lambda^*$. \end{ex} \begin{ex} The problem in the previous example can be generalized to any dimension $n\in \mathbb{N}$ \begin{equation}\label{SepVarProb} \begin{cases}
\max\left\{\lambda - \Delta u -|x|^2, \max_{1\le i\le n}|u_{x_i}|-1\right\}=0, \quad x\in \mathbb{R}^n\\
\lim_{|x|\rightarrow \infty}u(x)/\sum^n_{i=1}|x_i|=1 \end{cases}. \end{equation}
Note that this problem corresponds to \eqref{EigProb} when $F(M)=-\tr M$, $f(x)=|x|^2$ and $H(p)=\max_{1\le i\le n}|p_i|-1$. In this case, $\ell(v)=\sum^{n}_{i=1}|v_i|$. Now assume $(\lambda_1, u_1)$ is a solution of the eigenvalue problem in the previous example. Then $\lambda^*=n\lambda_1$ and $$ u^*(x)=\sum^{n}_{i=1}u_1(x_i) $$ is a solution of the eigenvalue problem \eqref{SepVarProb} with $\lambda=\lambda^*$. Moreover, $u^*\in C^2(\mathbb{R}^n)$. \end{ex}
\section{Existence of an eigenvalue}\label{ExistSec} In order to prove the existence of an eigenvalue, we will study solutions of the following PDE for $\delta>0$. \begin{equation}\label{deltaProb} \max\left\{\delta u + F(D^2u) -f(x),H(Du)\right\}=0, \quad x\in \mathbb{R}^n. \end{equation} In particular, we will follow section 3 of our previous work \cite{Hynd}, which was inspired by the approach of J. Menaldi, M. Robin and M. Taksar \cite{Menaldi}. Employing the same techniques used to verify Proposition \ref{lamCompProp} above, we can establish the following assertion. \begin{prop}\label{DeltaCompProp} Assume $\delta>0$, $u\in USC(\mathbb{R}^n)$ is a subsolution of \eqref{deltaProb} and $v\in LSC(\mathbb{R}^n)$ is a supersolution of \eqref{deltaProb}. If $u$ and $v$ satisfy \eqref{growthComp}, then $u\le v$. \end{prop} It is now immediate that there can be at most one solution of \eqref{deltaProb} that satisfies the growth condition \eqref{ellgrowth}. We will call this solution $u_\delta$. To verify that $u_\delta$ exists, we can appeal to Perron's method once we have appropriate sub- and supersolutions. To this end, we first characterize the largest function $v$ that is less than a given function $g$ and satisfies $H(Dv)\le 0$. \begin{lem}\label{infConvLem} Assume $g\in C(\mathbb{R}^n)$ is superlinear. The unique solution of the PDE \begin{equation}\label{DeterministicEq} \max\{v-g,H(Dv)\}=0, \quad x\in \mathbb{R}^n \end{equation} that satisfies the growth condition \eqref{ellgrowth} is given by the inf-convolution of $g$ and $\ell$ \begin{equation}\label{vInfConv} v(x):=\inf_{y\in\mathbb{R}^n}\left\{g(y)+\ell(x-y)\right\}. \end{equation} \end{lem} \begin{proof} The uniqueness follows from Proposition \ref{DeltaCompProp}. In particular, this equation corresponds to \eqref{deltaProb} with $F\equiv 0$ and $\delta=1$. Therefore,
we only verify that $v$ given in \eqref{vInfConv} is a solution that satisfies the growth condition \eqref{ellgrowth}. Choosing $y=x$ gives, $v(x)\le g(x)$. Also note $x\mapsto g(y) +\ell(x-y)$ satisfies \eqref{ellLip}, which implies that $v$ does as well. Hence, $v$ is a subsolution of \eqref{DeterministicEq}. In particular, $\limsup_{|x|\rightarrow\infty}v(x)/\ell(x)\le 1$. Using $\ell(x-y)\ge \ell(x)-\ell(y)$, $$ v(x)\ge \inf_{y\in\mathbb{R}^n}\left\{g(y)-\ell(y)\right\}+\ell(x). $$
As $g$ is assumed superlinear, $\inf_{\mathbb{R}^n}\left\{g(y)-\ell(y)\right\}$ is finite. Thus, $\liminf_{|x|\rightarrow\infty}v(x)/\ell(x)\ge 1$.
\par Finally, if $\psi$ is another subsolution of \eqref{DeterministicEq} \begin{align*} v(x)&=\inf_{y\in\mathbb{R}^n}\left\{g(y)+\ell(x-y)\right\}\\ &\ge\inf_{y\in\mathbb{R}^n}\left\{\psi(y)+\ell(x-y)\right\}\\ &\ge \psi(x). \end{align*} By Lemma 4.4 of \cite{CIL}, $v$ must be a supersolution of \eqref{DeterministicEq}. \end{proof}
The solution of \eqref{DeterministicEq} when $g(x)=\frac{1}{2}|x|^2$ will be of particular interest to us and will help us construct a useful supersolution of PDE \eqref{deltaProb}.
\begin{lem}\label{xsquaredLemma}
Let $g(x):=\frac{1}{2}|x|^2$ and $v$ the solution of \eqref{DeterministicEq} subject to the growth condition \eqref{ellgrowth}. Then $$
v(x)=\frac{1}{2}|x|^2 $$ when $H(x)\le 0$, and $$
H(Dv)=0 $$ in $\{x\in \mathbb{R}^n: H(x)>0\}$ \end{lem} \begin{proof} Recall that $H(x)\le 0$ implies $\ell(v)\ge x\cdot v$ for all $v\in \mathbb{R}^n$. Thus \begin{align*}
v(x)&=\inf_{y\in\mathbb{R}^n}\left\{\frac{1}{2}|y|^2+\ell(x-y)\right\}\\
&\ge\inf_{y\in\mathbb{R}^n}\left\{\frac{1}{2}|y|^2+x\cdot (x-y)\right\}\\
&=\inf_{y\in\mathbb{R}^n}\left\{\frac{1}{2}|y-x|^2+\frac{1}{2}|x|^2\right\}\\
&=\frac{1}{2}|x|^2. \end{align*}
As $v(x)\le \frac{1}{2}|x|^2$ for all $x$, the first claim follows.
\par Now suppose that $H(x)>0$. Then there is a $v_0\in \mathbb{R}^n$ with $|v_0|=1$ such that $\ell(v_0)< x \cdot v_0$. Fix $\epsilon>0$ so small that $\ell(v_0)< x\cdot v_0-\epsilon$. Then \begin{align*}
v(x)&=\inf_{y\in\mathbb{R}^n}\left\{\frac{1}{2}|y-x|^2+\ell(y)\right\}\\
&\le \frac{1}{2}\left|(\epsilon v_0)-x\right|^2+\ell(\epsilon v_0)\\
&=\frac{1}{2}|x|^2 +\frac{\epsilon^2}{2}|v_0|^2-(\epsilon v_0)\cdot x+\ell(\epsilon v_0)\\
&=\frac{1}{2}|x|^2 +\frac{\epsilon^2}{2}+\epsilon[- v_0\cdot x+\ell(v_0)] \\
&\le \frac{1}{2}|x|^2 +\frac{\epsilon^2}{2}-\epsilon^2\\
&<\frac{1}{2}|x|^2. \end{align*} Since $v$ satisfies \eqref{DeterministicEq}, the PDE $H(Du)=0$ holds on the open set $\{x\in \mathbb{R}^n: H(x)>0\}$. \end{proof} We are now ready to exhibit sub and supersolutions of \eqref{deltaProb} that are
comparable to $\ell(x)$ for large values of $|x|$. \begin{lem} Let $\delta\in (0,1)$. There are constants $K_1, K_2\ge 0$ such that \begin{equation}\label{uUpper}
\overline{u}(x)= \frac{K_1}{\delta}+\inf_{y\in\mathbb{R}^n}\left\{\frac{1}{2}|y|^2+\ell(x-y)\right\} \end{equation} is a supersolution of \eqref{deltaProb} satisfying \eqref{ellgrowth} and \begin{equation}\label{uLower} \underline{u}(x)=(\ell(x)-K_2)^++\inf_{\mathbb{R}^n}f \end{equation} is a subsolution of \eqref{deltaProb} satisfying \eqref{ellgrowth}. \end{lem}
\begin{proof} 1. Choose $$ K_1:= - F(I_n) + \sup_{H(x)\le 0}f(x). $$
Lemma \ref{xsquaredLemma} implies $\overline{u}(x)=\frac{K_1}{\delta}+\frac{1}{2}|x|^2$ when $H(x)\le 0$. Thus, $$ \delta \overline{u}+F(D^2\overline{u})-f\ge K_1+F(I_n)-f\ge 0 $$ on $\{x\in\mathbb{R}^n: H(x)< 0\}$.
\par We also have by Lemma \ref{xsquaredLemma} that $H(D\overline{u})=0$ on $\{x\in\mathbb{R}^n: H(x)> 0\}$. We will now verify that $H(D\overline{u}(x_0))=0$ when $H(x_0)=0$. To this end, suppose that $$
\overline{u}(x_0)+p\cdot (x-x_0)+o(|x-x_0|) \le \overline{u}(x) $$
as $x\rightarrow x_0$. Using $\overline{u}(x_0)=\frac{K_1}{\delta}+\frac{1}{2}|x_0|^2$ and $\overline{u}(x)\le \frac{K_1}{\delta}+\frac{1}{2}|x|^2$ with the above inequality gives $$
\frac{1}{2}|x_0|^2+p\cdot (x-x_0)+o(|x-x_0|) \le \frac{1}{2}|x|^2, $$ as $x\rightarrow x_0$. It follows that $p=x_0$, and so $H(p)=H(x_0)=0$.
\par 2. Choose $K_2\ge 0$ so large that $$ (\ell(x)-K_2)^+\le f(x)-\inf_{\mathbb{R}^n}f, \quad x\in \mathbb{R}^n. $$ Such a $K_2$ exists by the assumption that $f$ is superlinear and \eqref{UpperLowerell}. Observe $\underline{u}$ defined in \eqref{uLower} satisfies \eqref{ellLip}; thus $H(D\underline{u})\le 0$. And as $\ell$ is convex, $\underline{u}$ is convex. Therefore, $F(D^2\underline{u})\le 0$ and $$ \delta\underline{u}+F(D^2\underline{u})-f\le \delta\underline{u}- f\le (\ell-K_2)^++\inf_{\mathbb{R}^n}f-f\le 0, \quad x\in \mathbb{R}^n $$ for $\delta\le 1$. \end{proof}
A key property of $u_\delta$ is that it is a convex function. This is critical to the arguments to follow. We also remark that our proof of this fact below was inspired by Korevaar's work \cite{KO} and is an adaptation of Lemma 3.7 in \cite{Hynd}. The new feature we verify here is that the assumption that $F$ is superadditive still produces a convex solution.
\begin{prop}\label{UdelConvex} The function $u_\delta$ is convex. \end{prop}
\begin{proof} For $\tau\in (0,1)$ and $\eta>0$, we define $$ w^\tau(x,y,z):=\tau u(z) -\frac{u(x)+u(y)}{2} $$ and $$
\varphi_\eta(x,y,z)=\frac{1}{2\eta}\left|\frac{x+y}{2}-z\right|^2 $$ for $x,y,z\in \mathbb{R}^n.$ Notice that \begin{eqnarray}\label{SimpleEstW}
(w^\tau -\varphi_\eta)(x,y,z)&=& \tau \left\{u\left(z\right) - u\left(\frac{x+y}{2}\right)\right\} - \frac{1}{2\eta}\left|\frac{x+y}{2}-z\right|^2\nonumber \\
& & + \tau u\left(\frac{x+y}{2}\right) - \frac{u(x) + u(y)}{2}\nonumber \\
&\le &\left( \tau\ell\left(\frac{x+y}{2}-z\right) - \frac{1}{2\eta}\left|\frac{x+y}{2}-z\right|^2\right) \nonumber \\
&& + \tau u\left(\frac{x+y}{2}\right)-\frac{u(x) + u(y)}{2}. \end{eqnarray} By the growth condition \eqref{ellgrowth}, it follows that $$
\lim_{|x|+|y|\rightarrow \infty}\left\{\tau u\left(\frac{x+y}{2}\right)-\frac{u(x) + u(y)}{2}\right\}=-\infty $$ and therefore $$
\lim_{|x|+|y|+|z|\rightarrow \infty}(w^\tau-\varphi_\eta)(x,y,z)=-\infty. $$ In particular, there is $(x_\eta,y_\eta,z_\eta)\in \mathbb{R}^n\times \mathbb{R}^n\times \mathbb{R}^n$ maximizing $w^\tau-\varphi_\eta.$ By Theorem 3.2 in \cite{CIL}, there are $X,Y,Z\in {\mathcal S}(n)$ such that \begin{equation}\label{3JetInc} \begin{cases} \left( -2D_x\varphi_\eta(x_\eta,y_\eta,z_\eta), X\right)\in \overline{J}^{2,-}u(x_\eta)\\ \left( -2D_{y}\varphi_\eta(x_\eta,y_\eta,z_\eta), Y\right)\in \overline{J}^{2,-}u(y_\eta)\\ \left( \frac{1}{\tau}D_{z}\varphi_\eta(x_\eta,y_\eta,z_\eta), Z\right)\in \overline{J}^{2,+}u(z_\eta)\\ \end{cases} \end{equation} and \begin{equation}\label{XYZineq} \tau Z\le \frac{1}{2}(X+Y). \end{equation}
\par Now set $$ p_\eta := -2D_x\varphi_\eta(x_\eta,y_\eta,z_\eta)=-2D_y\varphi_\eta(x_\eta,y_\eta,z_\eta)=D_z\varphi_\eta(x_\eta,y_\eta,z_\eta)=\frac{1}{\eta}\left(z_\eta -\frac{x_\eta+y_\eta}{2}\right). $$ By the bottom inclusion in \eqref{3JetInc}, $$ \max\{\delta u(z_\eta) +F(Z) - f(z_\eta), H(p_\eta/\tau)\}\le 0. $$ It follows that $$ H(p_\eta)=H\left(\tau\frac{p_\eta}{\tau}+(1-\tau)0\right)<0 $$ and by the top two inclusions in \eqref{3JetInc}, $$ \begin{cases} \delta u(x_\eta) + F(X) - f(x_\eta)\ge 0\\ \delta u(y_\eta) + F(Y) - f(y_\eta)\ge 0 \end{cases}. $$ Combining these inequalities with \eqref{XYZineq} gives
\begin{align}\label{usingfconvex} \delta w^\tau(x,y,z)&\le \delta w^\tau(x_\eta,y_\eta,z_\eta) \nonumber \\ & = \tau\delta u(z_\eta) -\frac{\delta u(x_\eta)+\delta u(y_\eta)}{2} \nonumber \\ &\le \tau(-F(Z) + f(z_\eta)) -\frac{(-F(X)+f(x_\eta) ) +(-F(Y)+ f(y_\eta))}{2} \nonumber \\ &= \left[-F(\tau Z) +\frac{F(X)+F(Y)}{2}\right] +\tau f(z_\eta)-\frac{f(x_\eta) + f(y_\eta)}{2}\nonumber \\ &\le \left[-F\left(\frac{X+Y}{2}\right) +\frac{F(X)+F(Y)}{2}\right] +\tau f(z_\eta)-\frac{f(x_\eta) + f(y_\eta)}{2}\nonumber \\ &\le f(z_\eta)-\frac{f(x_\eta) + f(y_\eta)}{2}+(\tau -1)f(z_\eta)\nonumber\\ &\le f(z_\eta)-\frac{f(x_\eta) + f(y_\eta)}{2}+(\tau -1)\inf_{\mathbb{R}^n}f \end{align} for each $(x,y,z)\in \mathbb{R}^n\times \mathbb{R}^n\times \mathbb{R}^n.$
\par Another basic estimate for $w^\tau -\varphi_\eta$ that stems from \eqref{SimpleEstW} and \eqref{UpperLowerell} is $$ (w^\tau-\varphi_\eta)(x,y,z)\le \tau u\left(\frac{x+y}{2}\right) - \frac{u(x)+u(y)}{2} + \tau^2c_1^2\eta. $$ This inequality gives that $(x_\eta, y_\eta)_{\eta>0}\subset\mathbb{R}^n\times\mathbb{R}^n$ is bounded. For were this not the case, $(w^\tau-\varphi_\eta)(x_\eta,y_\eta,z_\eta)$ would tend to $-\infty$, yet \begin{eqnarray} (w^\tau-\varphi_\eta)(x_\eta,y_\eta,z_\eta)&=&\max_{x,y,z}(w^\tau-\varphi_\eta)(x,y,z) \nonumber \\ &\ge &(w^\tau-\varphi_\eta)(0,0, 0) \nonumber \\ & =& (\tau -1) u(0) \nonumber \\ &>& -\infty, \nonumber \end{eqnarray} for each $\eta>0.$ Similarly, $(z_\eta)_{\eta>0}\subset \mathbb{R}^n$ is bounded.
\par Again we appeal to Lemma 3.1 in \cite{CIL}, which asserts the existence of a cluster point $(x_\tau,y_\tau, (x_\tau+ y_\tau)/2)$ of $((x_\eta, y_\eta,z_\eta))_{\eta>0}$ that maximizes $$ (x,y)\mapsto \tau u\left(\frac{x+y}{2}\right) - \frac{u(x)+u(y)}{2}. $$ Thus, we may pass to the limit through an appropriate sequence of $\eta$ tending to $0$ in \eqref{usingfconvex} to find for any $x,y\in\mathbb{R}^n$ $$ \tau u\left(\frac{x+y}{2}\right) - \frac{u(x)+u(y)}{2} \le f\left(\frac{x_\tau+y_\tau}{2}\right) - \frac{f(x_\tau)+f(y_\tau)}{2}+(\tau -1)\inf_{\mathbb{R}^n}f\le (\tau -1)\inf_{\mathbb{R}^n}f. $$ Here we have used the convexity of $f$. Finally, we conclude upon sending $\tau\rightarrow 1^-$. \end{proof}
By Aleksandrov's theorem (section 6.4 of \cite{Gariepy}), $u_\delta$ is twice differentiable at Lebesgue almost every $x\in\mathbb{R}^n$. At any such $x$, if $H(Du_\delta(x))<0$, then $x$ must be uniformly bounded, for $$ f(x)=\delta u_\delta(x) +F(D^2u_\delta(x))\le \delta u_\delta(x). $$ Recall that $f$ is superlinear and $u_\delta$ grows at most linearly. A precise statement is as follows. \begin{cor}\label{boundedDerOmega}
There is a constant $R$, independent of $\delta\in (0,1)$, such that if $p\in J^{1,-}u_\delta(x)$ and $H(p)<0$, then $|x|\le R$. \end{cor} \begin{proof} As $u_\delta$ is convex, $J^{1,-}u_\delta(x)=\partial u_\delta(x)$; see proposition 4.7 in \cite{Bardi}. It then follows that $(p,O_n)\in J^{2,-}u_\delta(x)$. Thus, $$ \max\{\delta u_\delta(x)-f(x),H(p)\}\ge 0. $$ As $H(p)<0$, it must be that $\delta u_\delta(x)-f(x)\ge 0$. As a result, $$
f(x)\le \delta u_\delta(x)\le K_1+\ell(x)\le K_1 +c_1|x|. $$
Thus, $|x|\le R$ for some $R$ that is independent of $\delta\in (0,1)$. \end{proof}
Another important corollary is the following ``extension formula'' for solutions. We interpret this formula informally as: once the values of $u_\delta(x)$ are known for each $x$ satisfying $H(Du_\delta(x))<0$, $u_\delta$ is determined on all of $\mathbb{R}^n$. \begin{cor}\label{ExtCor} Let \begin{equation}\label{OmegaDel} \Omega_\delta:=\mathbb{R}^n\setminus\{x\in \mathbb{R}^n: H(Du_\delta(x))\ge 0\;\text{in the viscosity sense}\;\}. \end{equation} Then \begin{equation}\label{ExtensionForm} u_\delta(x)=\inf\left\{u_\delta(y)+\ell(x-y): y\in \Omega_\delta\right\}, \quad x\in \mathbb{R}^n. \end{equation} Moreover, the infimum in \eqref{ExtensionForm} can be taken over $\partial\Omega_\delta$ when $x\notin\Omega_\delta$. \end{cor} \begin{proof} Set $u=u_\delta$ and define $v$ to be the right hand side of \eqref{ExtensionForm}. Since $u(x)\le u(y)+\ell(x-y)$ for each $x,y\in\mathbb{R}^n$, $u\le v$. If $x\in \overline{\Omega}_\delta$, there is a sequence $(x_k)_{k\in \mathbb{N}}\subset \Omega_\delta$ converging to $x$ as $k\rightarrow\infty$. Clearly, $v(x)\le u(x_k)+\ell(x-x_k)$ and sending $k\rightarrow\infty$ gives $v(x)\le u(x).$ Thus, $u(x)=v(x)$ for $x\in \overline{\Omega}_\delta$.
\par Observe that $v(x)-v(y)\le \ell(x-y)$ for all $x,y\in \mathbb{R}^n$. Therefore, $v$ satisfies the PDE $H(Dv)\le 0$ on $\mathbb{R}^n.$ In particular, $$ \begin{cases} H(Dv)\le 0\le H(Du), \quad & x\in \mathbb{R}^n\setminus\overline{\Omega}_\delta\\ v=u, \quad & x\in\partial\Omega_\delta \end{cases} $$ while $$
\limsup_{|x|\rightarrow \infty}\frac{v(x)}{\ell(x)}\le 1\le \limsup_{|x|\rightarrow \infty}\frac{u(x)}{\ell(x)}. $$ It follows from an argument similar to one given in Proposition \ref{lamCompProp} used to derive \eqref{ComparisonIneq2}, that $$ \tau v-u\le (\tau- 1)\inf_{\mathbb{R}^n}f $$ for each $\tau\in (0,1)$. In particular, $v\le u$ on $\mathbb{R}^n\setminus\overline{\Omega}_\delta$. So we are able to conclude \eqref{ExtensionForm}.
\par Now suppose $x\notin\Omega_\delta$ and choose $y\in\Omega_\delta$ such that $u(x)=u(y)+\ell(x-y)$. There is a $t\in [0,1]$ such that $$ z=t y +(1-t)x \in \partial \Omega_\delta. $$ Observe that since $u$ is convex and $\ell$ is positively homogeneous \begin{align*} u(z)+\ell(x-z)&=u(t y +(1-t)x)+\ell(t(x-y))\\ &\le t(u(y) + \ell(x-y)) +(1-t)u(x)\\ &=tu(x)+(1-t)u(x)\\ &\le u(x). \end{align*} Thus, the infimum in \eqref{ExtensionForm} occurs on the boundary $\partial \Omega_\delta$ when $x\notin \Omega_\delta$. \end{proof} We will now verify the existence of an eigenvalue. Let $\delta\in (0,1)$ and $x_\delta$ denote a global minimizer of $u_\delta$ $$ \min_{x\in\mathbb{R}^n}u_\delta(x)=u_\delta(x_\delta). $$
Clearly, $0\in J^{1,-}u_\delta(x_\delta)$ and by assumption $H(0)<0$; thus $x_\delta\in \Omega_\delta$. And by Corollary \ref{boundedDerOmega}, $|x_\delta|\le R$. Set $$ \begin{cases} \lambda_\delta:=\delta u_\delta(x_\delta)\\ v_\delta(x):=u_\delta(x)-u_\delta(x_\delta),\quad x\in \mathbb{R}^n \end{cases}. $$ In view of \eqref{uUpper}, \eqref{uLower}, \begin{equation}\label{LamDelBounds} -\left(\inf_{\mathbb{R}^n}f\right)^{-}\le \lambda_\delta \le K_1 +\frac{1}{2}R^2; \end{equation} and by \eqref{UpperLowerell} \begin{equation}\label{veeDelBounds} \begin{cases}
0\le v_\delta(x)\le c_1(|x|+R)\\
|v_\delta(x)-v_\delta(y)|\le c_1|x-y| \end{cases} \end{equation} for $x,y\in \mathbb{R}^n$ and $0<\delta<1$. \begin{proof} (part $(i)$ of Theorem \ref{Thm1}) By \eqref{LamDelBounds} and \eqref{veeDelBounds}, there is a sequence of positive numbers $(\delta_k)_{k\in \mathbb{N}}$ tending to $0$, $\lambda^*\in \mathbb{R}$ and $u^*\in C(\mathbb{R}^n)$ such that $\lambda_{\delta_k}\rightarrow \lambda^*$ and $v_{\delta_k}\rightarrow u^*$ locally uniformly on $\mathbb{R}^n$. By the stability of viscosity solutions under locally uniform convergence (Lemma 6.1 in \cite{CIL}), $u^*$ satisfies \eqref{EigProb} with $\lambda=\lambda^*$.
\par In view of the extension formula \eqref{ExtensionForm}, \begin{align*} v_{\delta_k}(x)&=u_{\delta_k}(x)-u_{\delta_k}(x_{\delta_k})\\ &=\inf_{y\in \Omega_{\delta_k}}\{u_{\delta_k}(y)-u_{\delta_k}(x_{\delta_k})+\ell(x-y)\}\\ &\ge \inf_{y\in \Omega_{\delta_k}}\{\ell(x-y)\}\\ &\ge \inf_{y\in \Omega_{\delta_k}}\{\ell(x)-\ell(y)\}\\ &=\ell(x)-\sup_{y\in\Omega_{\delta_k}}\ell(y)\\
&\ge \ell(x)-\sup_{|y|\le R}\ell(y).
\end{align*}
Thus, $u^*(x)\ge \ell(x)-\sup_{|y|\le R}\ell(y)$ and in particular, $u^*$ satisfies the growth condition \eqref{ellgrowth}. It now follows that $\lambda^*$ is the desired eigenvalue. \end{proof} We now have the following characterization of the eigenvalue $\lambda^*$. See also \cite{Armstrong} for a similar characterization of eigenvalues of operators that are uniformly elliptic, fully nonlinear, and positively homogeneous. \begin{cor} Let $\lambda^*$ be as described in part $(i)$ of Theorem \ref{Thm1}. Then \begin{align}\label{LamChar1} \lambda^*&=\sup\{\lambda\in \mathbb{R}: \text{there is a subsolution $u$ of \eqref{EigProb} with eigenvalue $\lambda$} \nonumber \\
&\left.\hspace{1in} \text{satisfying}\; \limsup_{|x|\rightarrow \infty}\frac{u(x)}{\ell(x)}\le 1\right\}. \end{align} and \begin{align}\label{LamChar2} \lambda^*&=\inf\{\mu\in \mathbb{R}: \text{there is a supersolution $v$ of \eqref{EigProb} with eigenvalue $\mu$} \nonumber \\
&\left.\hspace{1in} \text{satisfying}\; \liminf_{|x|\rightarrow \infty}\frac{v(x)}{\ell(x)}\ge 1\right\}. \end{align} \end{cor} In particular, choosing $\lambda=\inf_{\mathbb{R}^n}f$ and $u\equiv 0$ in \eqref{LamChar1} gives $\lambda^*\ge \inf_{\mathbb{R}^n}f$. And selecting
$\mu=-F(I_n)+\sup_{H(x)\le 0}f(x)$ and $v(x)=\inf_{\mathbb{R}^n}\{|y|^2/2+\ell(x-y)\}$ in \eqref{LamChar2} gives $\lambda^*\le -F(I_n)+\sup_{H(x)\le 0}f(x)$. In summary, we have the bounds on $\lambda^*$ $$ \inf_{x\in\mathbb{R}^n}f(x)\le \lambda^*\le-F(I_n)+\sup_{H(x)\le 0}f(x). $$
\section{Regularity of solutions}\label{RegSect}
Our goal in this section is to prove part $(ii)$ of Theorem \ref{Thm1}. To this end, we will assume that $F$ is uniformly elliptic, assume $H$ satisfies \eqref{Hassump2}
and derive a uniform upper bound on $D^2u_\delta$. Recall $u_\delta$ is the unique solution of \eqref{deltaProb} that satisfies \eqref{ellgrowth}. We will first use an easy semiconcavity argument to bound $D^2u_\delta(x)$ for all large values of $|x|$. Then we will
pursue second derivative bounds on $u_\delta$ for smaller values of $|x|$. To this end, we will employ the so-called ``penalty method'' introduced by L. C. Evans \cite{Evans}. For other related work, consult also \cite{HyndMawi,Ishii, Soner, Wiegner}.
\subsection{Preliminaries}
An important identity for us will be \begin{equation}\label{ellFormula} \ell(v)=\inf_{\lambda>0}\lambda H^*\left(\frac{v}{\lambda}\right), \quad v\in \mathbb{R}^n\setminus\{0\} \end{equation} where $H^*(w)=\sup_{p\in \mathbb{R}^n}\{p\cdot w - H(p)\}$ is the Legendre transform of $H$; see exercise 11.6 of \cite{Rock}. This formula
is crucial to our method for deriving second derivative estimates on $u_\delta$ for large values of $|x|$.
\begin{lem}\label{W2infFarOutBound} Define $\Omega_\delta$ as in \eqref{OmegaDel}. There is a constant $C$ such that $$ D^2u_\delta(x)\le \frac{C}{\text{dist}(x,\Omega_\delta)}I_n $$ for Lebesgue almost every $x\in \mathbb{R}^n\setminus\overline{\Omega}_\delta$. \end{lem} \begin{proof} We will employ formula \eqref{ellFormula}. We will also use that \begin{equation}\label{Hstar} H^*(0)>0 \end{equation} and \begin{equation}\label{Hstar2}
\frac{1}{\Sigma}|\xi|^2\le D^2H^*(w)\xi\cdot \xi\le \frac{1}{\sigma}|\xi|^2,\quad \xi\in\mathbb{R}^n \end{equation} for almost every $w\in \mathbb{R}^n$. Let $v\in \mathbb{R}^n\setminus\{0\}$ and $\lambda>0$. Note \eqref{Hstar2} implies \begin{equation}\label{lowerHstar}
\lambda H^*(0)+DH^*(0)\cdot v +\frac{1}{2\Sigma \lambda}|v|^2\le \lambda H^*\left(\frac{v}{\lambda}\right)\le \lambda H^*(0)+DH^*(0)\cdot v +\frac{1}{2\sigma \lambda}|v|^2. \end{equation} Thus, $\lim_{\lambda\rightarrow 0^+}\lambda H^*\left(v/\lambda\right)=+\infty$. And with \eqref{Hstar}, we also conclude that $\lim_{\lambda\rightarrow \infty}\lambda H^*\left(v/\lambda\right)=+\infty.$ As $\lambda\mapsto \lambda H^*\left(v/\lambda\right)$ is strictly convex, there is a unique $\lambda=\lambda(v)>0$ for which $\ell(v)=\lambda(v) H^*(v/\lambda(v))$.
\par Using the positive homogeneity of $\ell$, for $t>0$ \begin{align*} \lambda(tv) H^*\left(\frac{tv}{\lambda(tv)}\right)&=\ell(tv)\\ &=t\ell(v)\\ &=t\lambda(v) H^*\left(\frac{v}{\lambda(v)}\right)\\ &=t\lambda(v) H^*\left(\frac{tv}{t\lambda(v)}\right). \end{align*} Thus, $\lambda(tv)=t\lambda(v)$. It also follows from \eqref{lowerHstar} that $$
\gamma:=\inf_{|v|=1}\lambda(v)>0. $$
In particular, $\lambda(v)\ge \gamma |v|$, for each $v\neq 0.$
\par Again let $v\neq 0$, and choose $h\in \mathbb{R}^n$ so small that $v\pm h\neq 0$. Then for $\lambda=\lambda(v)$ \begin{align*} \ell(v+h)-2\ell(v)+\ell(v-h)&\le \lambda H^*\left(\frac{v+h}{\lambda}\right) - 2\lambda H^*\left(\frac{v}{\lambda}\right)+\lambda H^*\left(\frac{v-h}{\lambda}\right)\\ &=\lambda\left[ H^*\left(\frac{v}{\lambda}+\frac{h}{\lambda}\right)-2H^*\left(\frac{v}{\lambda}\right)+H^*\left(\frac{v}{\lambda}-\frac{h}{\lambda}\right) \right]\\
&\le \lambda \frac{1}{\sigma}\left|\frac{h}{\lambda}\right|^2\\
&=\frac{1}{\sigma \lambda}|h|^2\\
&\le \frac{1}{\gamma \sigma|v|}|h|^2. \end{align*}
\par Now we can employ the extension formula \eqref{ExtensionForm}. Let $x\in \mathbb{R}^n\setminus\overline{\Omega}_\delta$ and choose $h$ so small that $x\pm h\in\mathbb{R}^n\setminus\overline{\Omega}_\delta$. Selecting $y\in \partial\Omega_\delta$ so that $u_\delta(x)=u_\delta(y)+\ell(x-y)$ gives \begin{align*} u_\delta(x+h)-2u_\delta(x)+u_\delta(x-h)&\le \ell(x-y+h)-2\ell(x-y)+\ell(x-y-h)\\
&\le \frac{1}{\gamma \sigma |x-y|}|h|^2\\
&\le \frac{C }{\text{dist}(x,\partial \Omega_\delta)}|h|^2. \end{align*} The claim follows as $u_\delta$ is twice differentiable Lebesgue almost everywhere. \end{proof} In order to complete the proof of part $(ii)$ of Theorem \ref{Thm1}, we must bound the second derivatives of $u_\delta$ on some subset of $\mathbb{R}^n$ that includes $\overline{\Omega}_\delta$. Before we detail our approach, it will be necessary for us to differentiate a smoothing of $F$. To this end, we extend $F$ to the space $M_n(\R)$ of all $n\times n$ real matrices as follows $$ \overline{F}(M):=F\left(\frac{1}{2}(M+M^t)\right), \quad M\in M_n(\R). $$ We can then treat $\overline{F}(M)$ as a function of the $n^2$ real entries of the matrix $M\in M_n(\R)$. It is readily checked that $\overline{F}$ is uniformly elliptic, positively homogeneous and superadditive on $M_n(\R)$. In particular, $\overline{F}$ satisfies \eqref{Fassump} for each $M,N\in M_n(\R)$ and $t\ge 0$. This allows us to identify $F$ with $\overline{F}$ and we shall do this for the remainder of this section.
\par We now define $F^\varrho$ as the standard mollification of $F$ $$ F^\varrho(M):=\int_{M_n(\R)}\eta^\varrho(N)F(M-N)dN, \quad M\in M_n(\R). $$
The integral above is over the $n^2$ real variables $N=(N_{ij})\in M_n(\R)$, and as in Lemma \ref{HLipLem}, $\eta\in C^\infty_c(M_n(\R))$ is a nonnegative function that is supported in $\{M\in M_n(\R): |M|\le 1\}$ and
$\eta(M)$ only depends on $|M|$. Moreover, $\eta$ satisfies $\int_{M_n(\R)}\eta(Z)dZ=1$ and we have defined $\eta^\varrho:=\varrho^{-n^2}\eta(\cdot/\varrho)$. See also section 4 of \cite{HyndMawi} or Proposition 9.8 in \cite{CC} for more details on mollifying functions of matrices.
\par It is readily verified that $F^\varrho\in C^\infty(M_n(\R))$ and, with the help of \eqref{Fassump}, $F^\varrho$ is uniformly elliptic, concave and satisfies \begin{equation}\label{FFvarrhoEst} F^\varrho(M)\le F(M)\le F^\varrho(M)+\sqrt{n}\Theta \varrho, \quad M\in M_n(\R). \end{equation} However, $F^\varrho$ is not in general positively homogeneous. Nevertheless, $F^\varrho$ inherits a certain almost homogeneity property. \begin{lem}\label{HomogeneityLEM} For every $M\in M_n(\R)$, $$ F^\varrho(M)=F^\varrho_{M_{ij}}(M)M_{ij} - \int_{M_n(\R)}\eta^\varrho(N)F_{M_{ij}}(M-N)N_{ij}dN. $$ In particular, \begin{equation}\label{FrhoAlmostHomo}
|F^\varrho(M)-F^\varrho_{M_{ij}}(M)M_{ij}|\le \sqrt{n}\Theta\varrho, \quad M\in M_n(\R). \end{equation} \end{lem} \begin{proof} By the ellipticity assumption \eqref{Fassump}, $F$ is Lipschitz continuous. Rademacher's Theorem then implies that $F$ is differentiable for Lebesgue almost every $M\in M_n(\R)$, which we identify with $\mathbb{R}^{n^2}$. Therefore, $$ F^\varrho_{M_{ij}}(M)=\int_{M_n(\R)}\eta^\varrho(N)F_{M_{ij}}(M-N)dN. $$ See Theorem 1 of section 5.3 in \cite{Evans2} for an easy verification of this equality. Since $F$ is positively homogeneous of degree one, $$ F(M)=F_{M_{ij}}(M)M_{ij} $$ for Lebesgue almost every $M\in M_n(\R)$. And therefore, \begin{align*} F^\varrho_{M_{ij}}(M)M_{ij}&=\int_{M_n(\R)}\eta^\varrho(N)F_{M_{ij}}(M-N)M_{ij}dN\\ &=\int_{M_n(\R)}\eta^\varrho(N)F_{M_{ij}}(M-N)(M_{ij}-N_{ij})dN\\ &\quad +\int_{M_n(\R)}\eta^\varrho(N)F_{M_{ij}}(M-N)N_{ij}dN\\ &=\int_{M_n(\R)}\eta^\varrho(N)F(M-N)dN+\int_{M_n(\R)}\eta^\varrho(N)F_{M_{ij}}(M-N)N_{ij}dN\\ &=F^\varrho(M)+\int_{M_n(\R)}\eta^\varrho(N)F_{M_{ij}}(M-N)N_{ij}dN. \end{align*} \par The ellipticity assumption \eqref{Fassump} also implies $$
-\Theta|\xi|^2\le F_{M_{ij}}(M)\xi_i\xi_j\le-\theta|\xi|^2, \quad \xi\in \mathbb{R}^n $$ for almost every $M\in M_n(\R)$. Therefore, \begin{align*}
\left|\int_{M_n(\R)}\eta^\varrho(N)F_{M_{ij}}(M-N)N_{ij}dN\right|&\le\int_{M_n(\R)}\eta^\varrho(N)\left|F_{M_{ij}}(M-N)N_{ij}\right|dN \\
&\le \int_{M_n(\R)}\eta^\varrho(N)\sqrt{\sum^n_{i,j=1}\left(F_{M_{ij}}(M-N)\right)^2}\; |N|dN\\
&\le \sqrt{n}\Theta \int_{M_n(\R)}\eta^\varrho(N)|N|dN\\
& = \sqrt{n}\Theta\varrho \int_{|Z|\le 1}\eta(Z)|Z|dZ \quad (Z=N/\varrho)\\
& \le \sqrt{n}\Theta\varrho \int_{|Z|\le 1}\eta(Z)dZ \\ & = \sqrt{n}\Theta\varrho. \end{align*} \end{proof}
\par We will additionally need to smooth out $H$ and $f$, and we will do so by using the standard mollifications
$H^\varrho=\eta^\varrho*H$ and $f^\varrho=\eta^\varrho*f$. Here $\eta$ is a standard mollifier on $\mathbb{R}^n$. We also select $\varrho_1$ so small that
\begin{equation}\label{Hsmallvarrho1}
H^\varrho(0)<0, \quad \varrho\in (0,\varrho_1).
\end{equation}
The following lemma asserts that the solution of the PDE \eqref{deltaProb} is well approximated by a solution of the same equation with $H^\varrho$ and $f^\varrho$ replacing $H$ and $f$.
\begin{lem}\label{firstApproxLem} Assume $\delta\in (0,1)$ and $\varrho\in (0,\varrho_1)$. Let $u_{\delta,\varrho}$ be the solution of \eqref{deltaProb} with $F, H^\varrho$, and $f^\varrho$ subject to the growth condition \eqref{ellgrowth} with $\ell^\varrho(v)=\sup\{p\cdot v: H^\varrho(p)\le 0\}$ replacing $\ell$. Then $\lim_{\varrho \rightarrow 0^+} u_{\delta,\varrho}=u_\delta$ locally uniformly on $\mathbb{R}^n$. \end{lem} \begin{proof} Using test functions as in \eqref{uUpper} and \eqref{uLower} that correspond to \eqref{deltaProb} with $F, H^\varrho$, and $f^\varrho$ we find $$ \inf_{\mathbb{R}^n}f^\varrho\le u_{\delta,\varrho}\le \frac{1}{\delta}\left(-F(I_n) + \sup_{H^\varrho\le 0}f^\varrho\right)+\ell^\varrho. $$ By the convexity of $H$ and $f$, Jensen's inequality implies $H\le H^\varrho$ and $f\le f^\varrho$. It then follows that $\ell^\varrho\le \ell$. By the ellipticity of $F$, $-F(I_n)\le n\Theta$ and so \begin{equation}\label{udeltarhoBounds} \inf_{\mathbb{R}^n}f\le u_{\delta,\varrho}\le \frac{1}{\delta}\left(n\Theta + \sup_{H\le 0}f^\varrho\right)+\ell. \end{equation} Since $f^\varrho\rightarrow f$ locally uniformly on $\mathbb{R}^n$, $u_{\delta,\varrho}$ is locally bounded on $\mathbb{R}^n$ independently of $\varrho\in (0,\varrho_1)$.
\par Also notice that $H(Du_{\delta,\varrho})\le H^\varrho(Du_{\delta,\varrho})\le 0$ which implies that $u_{\delta,\varrho}$ is uniformly equicontinuous on $\mathbb{R}^n$. It follows that for each sequence of positive numbers $(\varrho_k)_{k\in \mathbb{N}}$ tending to 0, there is a subsequence of $(u_{\delta,\varrho_k})_{k\in \mathbb{N}}$ converging locally uniformly to some $u\in C(\mathbb{R}^n)$. By the stability of viscosity solutions under local uniform convergence, $u$ is a solution of \eqref{deltaProb}. In order to conclude, it suffices to verify that $u$ satisfies \eqref{ellgrowth}. Then by uniqueness we would have $u=u_\delta$ and the full sequence $(u_{\delta,\varrho_k})_{k\in \mathbb{N}}$ must converge to $u_\delta$.
\par We now employ the extension formula \eqref{ExtensionForm} with $$ \Omega_{\delta,\varrho}:=\mathbb{R}^n\setminus\{x\in \mathbb{R}^n: H^\varrho(Du_{\delta,\varrho}(x))\ge 0\;\text{in the viscosity sense}\;\} $$ to get \begin{align}\label{Lowerudeltarhobound} u_{\delta,\varrho}(x)&=\inf_{y\in \Omega_{\delta,\varrho}}\{u_{\delta,\varrho}(y)+\ell^\varrho(x-y) \}\nonumber \\ &\ge\inf_{y\in \Omega_{\delta,\varrho}}\left\{ \inf_{\mathbb{R}^n}f+\ell^\varrho(x)-\ell^\varrho(y)\right\} \nonumber \\ &\ge \inf_{\mathbb{R}^n}f+\ell^\varrho(x)-\sup_{y\in \Omega_{\delta,\varrho}}\ell(y). \end{align} It is immediate from the proof of Corollary \ref{boundedDerOmega} that there is an $R>0$ such that $\Omega_{\delta,\varrho}\subset B_R(0)$ for each $\delta\in (0,1)$ and $\varrho\in (0,\varrho_1)$. We also leave it to the reader to verify that $\ell(v)=\lim_{\varrho\rightarrow 0^+}\ell^\varrho(v)$ for each $v\in \mathbb{R}^n$. Passing to the limit along an appropriate sequence of $\varrho$ tending to 0 in \eqref{Lowerudeltarhobound} gives $$
u(x)\ge \inf_{\mathbb{R}^n}f+\ell(x)-\sup_{|y|\le R}\ell(y). $$ Hence, $u$ satisfies \eqref{ellgrowth}. \end{proof}
\subsection{The penalty method}
Now we fix $\delta\in (0,1)$, $\varrho\in (0,\varrho_1)$ and choose a ball $B=B_R(0)\subset\mathbb{R}^n$ so large that \begin{equation}\label{BbigEnough}
H(p)\le 0\quad \Longrightarrow\quad |p|\le R. \end{equation} For $\epsilon>0$, we will now focus on solutions of the fully nonlinear PDE \begin{equation}\label{PenalizedEqn} \delta u+F^\varrho(D^2u)+\beta_\epsilon(H^\varrho(Du))=f^\varrho, \quad x\in B \end{equation} subject to the boundary condition \begin{equation}\label{PenalizedBC} u(x)=u_{\delta, \varrho}(x), \quad x\in \partial B. \end{equation} Recall that $u_{\delta,\varrho}$ is the solution of \eqref{deltaProb} with $F, H^\varrho$, and $f^\varrho$ subject to the growth condition \eqref{ellgrowth} with $\ell^\varrho(v)=\sup\{p\cdot v: H^\varrho(p)\le 0\}$ instead of $\ell$.
\par In \eqref{PenalizedEqn}, $F^\varrho$ is a standard mollification of $F$ and the family $\{\beta_\epsilon\}_{\epsilon> 0}$ of functions each satisfy
\begin{equation}\label{betaAss} \begin{cases} \beta_\epsilon\in C^\infty(\mathbb{R})\\ \beta_\epsilon(z)=0, \quad z\le 0\\ \beta_\epsilon(z)>0, \quad z>0\\ \beta_\epsilon'\ge0,\\ \beta_\epsilon''\ge0,\\ \beta_\epsilon(z)=(z-\epsilon)/\epsilon, \quad z\ge 2\epsilon\\ \end{cases}. \end{equation} Our intuition is that $\beta_\epsilon$ is a smoothing of the Lipschitz function $z\mapsto (z/\epsilon)^+$; and therefore,
solutions of \eqref{PenalizedEqn} should be close to solutions of $\max\{\delta u+F^\varrho(D^2u)-f^\varrho,H^\varrho(Du)\}=0$ that satisfy \eqref{PenalizedBC}. These solutions will in turn be very close to $u_{\delta,\varrho}|_{B}$ for $\varrho$ small (see Lemma \ref{FFvarrholocLem} below).
\par By a theorem of N. Trudinger (Theorem 8.2 in \cite{Trudinger}) there is a unique classical solution $u^\epsilon\in C^\infty(B)\cap C(\overline{B})$ solving \eqref{PenalizedEqn} and satisfying the boundary condition \eqref{PenalizedBC}. This result relies on the Evans--Krylov a priori estimates for solutions of concave, fully nonlinear elliptic equations and the continuity method \cite{EvansC2, KrylovC2}.
Along with the concavity of $F$, the main structural condition that allows us to apply this theorem is that $p\mapsto \beta_\epsilon(H^\varrho(p))$ grows at most quadratically for each $\epsilon>0$. We remark that $u^\epsilon$ naturally depends on the other parameters $\delta\in (0,1)$ and $\varrho\in (0,\varrho_1)$; we have chosen not to indicate this dependence for ease of notation.
\par Since $u_{\delta,\varrho}$ solves \eqref{deltaProb} with $F, H^\varrho$, and $f^\varrho$, we have from \eqref{betaAss} and \eqref{FFvarrhoEst} that \begin{align*} \delta u_{\delta,\varrho}+F^\varrho(D^2u_{\delta,\varrho}) + \beta_\epsilon(H^\varrho(Du_{\delta,\varrho}))&= \delta u_{\delta,\varrho}+F^\varrho(D^2u_{\delta,\varrho})\\ &\le\delta u_{\delta,\varrho}+ F(D^2u_{\delta,\varrho})\\ &\le f^\varrho. \end{align*} In view of \eqref{PenalizedEqn} and \eqref{PenalizedBC}, $u_{\delta,\varrho}\le u^\epsilon$ by a routine maximum principle argument. Also note $$ F^\varrho(D^2u^\epsilon)\le f^\varrho -\delta u^\epsilon\le f^\varrho -\delta u_{\delta,\varrho}. $$ The Aleksandrov-Bakelman-Pucci estimate (Theorem 3.6 in \cite{CC}, Theorem 17.3 in \cite{Gilbarg}) then implies $$
\sup_{B}u^\epsilon\le C\left(\sup_{\partial B}|u_{\delta,\varrho}|+\sup_{B}|f^\varrho-\delta u_{\delta,\varrho}|\right) $$ for some constant $C=C(\text{diam}(B),n,\theta,\Theta)$. Combined with \eqref{udeltarhoBounds} and \eqref{BbigEnough}, we have the following supremum norm bound $$
|u^\epsilon|_{L^\infty(B)}\le C\left\{\left(\inf_{\mathbb{R}^n}f\right)^- + \sup_B\ell + \frac{1}{\delta}\left(n\Theta+\sup_B|f^\varrho|\right)\right\}. $$ We will use this estimate to obtain bounds on the higher derivatives of $u^\epsilon$ that will be independent of all $\epsilon>0$ and sufficiently small.
We are now in a position to derive uniform estimates on the derivatives of $u^\epsilon$. We will borrow from the recent work by the author and H. Mawi on fully nonlinear elliptic equations with convex gradient constraints \cite{HyndMawi}. Note, however, that one of the main assumptions in \cite{HyndMawi} is that the nonlinearity is uniformly elliptic and {\it convex}; the class of nonlinearities we study in this paper satisfies \eqref{Fassump} and is {\it concave}. We will make use of Lemma \ref{HomogeneityLEM} instead of a convexity assumption on $F$.
\par We will also employ the uniform convexity assumption \eqref{Hassump2}, which implies \begin{align}\label{Coercive} \begin{cases}
H^\varrho(p)\ge H^\varrho(0)+DH^\varrho(0)\cdot p+\frac{\sigma}{2}|p|^2\\
DH^\varrho(p)\cdot p-H^\varrho(p)\ge -H^\varrho(0)+\frac{\sigma}{2}|p|^2\\
|DH^\varrho(p)|\le |DH^\varrho(0)| + \sqrt{n}\Sigma|p| \end{cases}(p\in \mathbb{R}^n). \end{align} And we choose $\varrho_1>0$ smaller if necessary so that \eqref{Hsmallvarrho1} holds and $$ \begin{cases}
|H^\varrho(0)|\le |H(0)|+1\\
|DH^\varrho(0)|\le |DH(0)|+1\\
|f^\varrho|_{W^{1,\infty}(B)}\le |f|_{W^{1,\infty}(B_{R+1}(0))} \quad (B=B_R(0)) \end{cases} $$ for $0<\varrho<\varrho_1$. In stating our uniform estimates below, it will be convenient for us to label the following list $$
\Pi:=\left(\sigma,\Sigma,\theta,\Theta, n,\text{diam}(B), H(0),|DH(0)|, |f|_{W^{1,\infty}(B_{R+1}(0))}, \inf_{\mathbb{R}^n}f,\sup_B\ell, \varrho_1\right). $$
\begin{lem}
Let $\delta\in(0,1)$, $\varrho\in (0,\varrho_1)$, $\epsilon\in (0,1)$ and suppose $\zeta\in C^\infty_c(B)$ is nonnegative. There is a constant $C$ depending only on the list $\Pi$ and $|\zeta|_{W^{2,\infty}(B)}$ such that $$
\zeta(x) |Du^\epsilon(x)|\le C, \quad x\in B. $$ \end{lem} \begin{proof} 1. Set $$
M_\epsilon:=\sup_{x\in B}|\zeta(x)Du^\epsilon(x)| $$ and define $$
v^\epsilon(x):=\frac{1}{2}\zeta^2(x)|Du^\epsilon(x)|^2 - \alpha_\epsilon u^\epsilon(x). $$ Here $\alpha_\epsilon$ is a positive constant that will be chosen below. We will first obtain a bound on $v^\epsilon$ from above and then
use the resulting estimate to bound $M_\epsilon$. We emphasize that each constant below will only depend on the list $\Pi$ and $|\zeta|_{W^{2,\infty}(B)}$; in particular, the constants will not depend on $\epsilon$ and $\alpha_\epsilon$.
\par 2. We first differentiate equation \eqref{PenalizedEqn} with respect to $x_k$ $(k=1,\dots, n)$ to get \begin{equation}\label{1stDerPenEqn} \delta u^\epsilon_{x_k}+F^\varrho_{M_{ij}}(D^2u^\epsilon)u^\epsilon_{x_i x_j x_k} + \beta'_\epsilon(H^\varrho(Du^\epsilon))DH^\varrho(Du^\epsilon)\cdot Du^\epsilon_{x_k}=f^\varrho_{x_k}. \end{equation} We suppress $\epsilon, \varrho$ dependence and function arguments and use \eqref{1stDerPenEqn} to compute \begin{align}\label{BernIdentity1}
F_{M_{ij}}v_{x_i x_j}+ \beta' H_{p_k}v_{x_k}&=\left(F_{M_{ij}}\zeta_{x_i}\zeta_{x_j} + \zeta F_{M_{ij}}\zeta_{x_i x_j}\right)|Du|^2 + \nonumber \\ & \quad\quad 4F_{M_{ij}}\zeta\zeta_{x_i} Du\cdot Du_{x_j} +\zeta^2 F_{M_{ij}}Du_{x_i}\cdot Du_{x_j}\nonumber \\
&\quad\quad - \beta' H_{p_k}(\alpha u_{x_k}- \zeta\zeta_{x_k}|Du|^2) \nonumber \\ & \quad\quad +\zeta^2 u_{x_k}(f_{x_k}-\delta u_{x_k}) - \alpha F_{M_{ij}}u_{x_i x_j}. \end{align} We reiterate that in \eqref{BernIdentity1}, we have written $u$ for $u^\epsilon$, $v$ for $v^\epsilon$, $F$ for $F^\varrho(D^2u^\epsilon)$, $\beta$ for $\beta_\epsilon(H^\varrho(Du^\epsilon))$, $H$ for $H^\varrho(Du^\epsilon)$ and $f$ for $f^\varrho$. We will continue this convention for the remainder of this proof.
\par 3. Now we recall Lemma \ref{HomogeneityLEM}. In particular, the inequality \eqref{FrhoAlmostHomo} along with the convexity of $\beta=\beta_\epsilon$ implies \begin{align*} -F_{M_{ij}}u_{x_i x_j} &:=-F_{M_{ij}}(D^2u)(D^2u)_{ij}\\ & \le -F(D^2u)+\sqrt{n}\Theta\varrho_1\\ &= \beta(H(Du))+\delta u -f+\sqrt{n}\Theta\varrho_1\\ &\le H(Du)\beta'(H(Du))+\delta u -f+\sqrt{n}\Theta\varrho_1. \end{align*} Combining with \eqref{BernIdentity1} gives \begin{align}\label{BernIdentity2}
F_{M_{ij}}v_{x_i x_j}+ \beta' H_{p_k}v_{x_k}&\le \left(F_{M_{ij}}\zeta_{x_i}\zeta_{x_j} + \zeta F_{M_{ij}}\zeta_{x_i x_j}\right)|Du|^2 + \nonumber \\ & \quad\quad 4F_{M_{ij}}\zeta\zeta_{x_i} Du\cdot Du_{x_j} +\zeta^2 F_{M_{ij}}Du_{x_i}\cdot Du_{x_j}\nonumber \\
&\quad\quad - \beta' (\alpha(H_{p_k}u_{x_k}-H)-\zeta H_{p_k}\zeta_{x_k}|Du|^2) \nonumber \\ & \quad\quad +\zeta^2 u_{x_k}(f_{x_k}-\delta u_{x_k}) +\alpha(\delta u -f+\sqrt{n}\Theta\varrho_1). \end{align} \par 3. Assume $x_0\in \overline{B}$ is a maximizing point for $v$. If $x_0\in \partial B$, then $v\le -\alpha u_{\delta,\varrho}(x_0)\le -\alpha \inf_{\mathbb{R}^n} f$. Therefore, \begin{equation}\label{v1Upp} v\le C (\alpha+1). \end{equation} Alternatively, suppose $x_0\in B.$ If $\beta'=\beta'(H(Du(x_0)))\le 1<1/\epsilon$, then $H(Du(x_0))\le 2\epsilon\le 2$. By \eqref{Coercive},
$|Du(x_0)|$ is bounded from above independently of $\epsilon$. Hence, \eqref{v1Upp} holds for an appropriate constant $C$. The final situation to consider is when $\beta'=\beta'(H(Du(x_0)))>1$.
\par Recall the uniform ellipticity assumption gives $$
\zeta^2 F_{M_{ij}}Du_{x_i}\cdot Du_{x_j}\le -\zeta^2\theta |D^2u|^2. $$ And employing the necessary conditions $Dv(x_0)=0$ and $D^2v(x_0)\le 0$ and the Cauchy-Schwarz inequality to the term
$4F_{M_{ij}}\zeta\zeta_{x_i} Du\cdot Du_{x_j}\le (\zeta|D^2u|) (C|D\zeta||Du|)$ allows us to evaluate \eqref{BernIdentity2} at the point $x_0$ to get \begin{align*}
0 & \le C(|Du|^2+1+\alpha) - \beta' (\alpha(H_{p_k}u_{x_k}-H)-\zeta H_{p_k}\zeta_{x_k}|Du|^2)\\
& \le C(|Du|^2+1+\alpha) - \beta' (\sigma\alpha |Du|^2- C_0 (1+\zeta|Du|)|Du|^2)\\
& \le C\beta'\left\{|Du|^2+1+\alpha - \sigma\alpha |Du|^2+C_0 (1+\zeta|Du|)|Du|^2\right\}. \end{align*}
After multiplying through by $\zeta(x_0)^2$ we have \begin{equation}\label{betaprimeINeq}
0\le C\beta'\left\{(\zeta|Du|)^2+1+\alpha - \sigma\alpha (\zeta |Du|)^2+C_0 (1+\zeta|Du|)(\zeta |Du|)^2\right\} \end{equation} which of course holds at $x_0$.
\par We now choose $$ \alpha:=\frac{2C_0}{\sigma}M_\epsilon. $$
Note $\sigma\alpha \ge 2C_0 \zeta(x_0)|Du(x_0)|$ and so \eqref{betaprimeINeq} gives $$
0\le C\beta'\left\{(\zeta|Du|)^2+1+\alpha - 2C_0 (\zeta |Du|)^3+C_0 (1+\zeta|Du|)(\zeta |Du|)^2\right\}. $$ As $\beta'>1$, the expression in the braces is necessarily nonnegative. It follows that there is a constant $C$ such that $$
\zeta(x_0) |Du(x_0)|\le C(1+\alpha)^{1/3}. $$ As a result, \eqref{v1Upp} holds for another appropriately chosen constant $C$. \par 4. Therefore, $$
M_\epsilon^2=\sup_{B}|\zeta Du^\epsilon|^2 =2\sup_{B}(v^\epsilon + \alpha_\epsilon u^\epsilon)\le C(\alpha_\epsilon+1) \le C\left(\frac{2C_0}{\sigma}M_\epsilon+1\right). $$ Consequently, $M_\epsilon$ is bounded above independently of $\epsilon\in (0,1)$. \end{proof} Next we assert that $\beta_\epsilon(H^\varrho(Du^\epsilon))$ is locally bounded, independently of all $\epsilon$ sufficiently small.
\begin{lem}\label{betaboundLem}
Let $\delta\in(0,1)$, $\varrho\in (0,\varrho_1)$, $\epsilon\in (0,1)$ and suppose $\zeta\in C^\infty_c(B)$ is nonnegative. There is a constant $C$ depending only on the list $\Pi$ and $|\zeta|_{W^{2,\infty}(B)}$ such that $$ \zeta(x)\beta_\epsilon(H^\varrho(Du^\epsilon(x)))\le C, \quad x\in B. $$ \end{lem} We omit a proof of Lemma \ref{betaboundLem} as the proof of Lemma 3.3 in our recent work \cite{HyndMawi} immediately applies here. We also note that $$ F^\varrho(D^2u^\epsilon)=-\beta_\epsilon(H^\varrho(Du^\epsilon)) +f^\varrho-\delta u^\epsilon $$ is locally bounded, independently of $\epsilon \in (0,1]$. By the $W^{2,p}_{\text{loc}}$ estimates for fully nonlinear elliptic equations due to L. Caffarelli (Theorem 1 in \cite{CaffAnn}, Theorem 7.1 in \cite{CC}), we have the following.
\begin{lem}\label{W2pBoundUeps} Let $\delta\in(0,1)$, $\varrho\in (0,\varrho_1)$, $\epsilon\in (0,1)$, $p\in (n,\infty)$, and assume $G\subset B$ is open with $\overline{G}\subset B$. There is a constant $C$ depending on $p$, the list $\Pi$, $1/\text{dist}(\partial G,B)$ and $G$ such that $$
|D^2u^\epsilon|_{L^p(G)}\le C\left\{|u^\epsilon|_{L^\infty(B)}+1\right\}. $$ \end{lem} \begin{proof} Assume $B_r(x_0)\subset B$ is nonempty, and choose $\zeta\in C^\infty_c(B_r(x_0))$ such that $0\le \zeta\le 1$, $\zeta\equiv 1$ on $B_{r/2}(x_0)$ and \begin{equation}\label{DzetaBounds}
|D\zeta|_{L^\infty(B_{r/2}(x_0))}\le \frac{C}{r}, \quad |D^2\zeta|_{L^\infty(B_{r/2}(x_0))}\le \frac{C}{r^2}. \end{equation} From Lemma \ref{betaboundLem}, $\beta_\epsilon(H^\varrho(Du^\epsilon(x)))\le C_1$ for $x\in B_{r/2}(x_0)$ for some $C_1$ depending only on the list $\Pi$ and $r$. By the assumption that $F$ is uniformly elliptic and concave, Theorem 7.1 in \cite{CC} implies there is a universal constant $c_0$ such that \begin{align*}
r^2|D^2u^\epsilon|_{L^p(B_{r/4}(x_0))}&\le c_0\left\{|u^\epsilon|_{L^\infty(B_{r/2}(x_0))} + |f^\varrho-\delta u^\epsilon - \beta_\epsilon(H^\varrho(Du^\epsilon))|_{L^\infty(B_{r/2}(x_0))}\right\}\\
&\le c_0\left\{|u^\epsilon|_{L^\infty(B_{r/2}(x_0))} + |f^\varrho-\delta u^\epsilon|_{L^\infty(B_{r/2}(x_0))}+C_1\right\}\\
&\le C_0\left\{|u^\epsilon|_{L^\infty(B)} + |f^\varrho|_{L^\infty(B)}+C_1 \right\}. \end{align*} Here $C_0$ depends only on the list $\Pi$.
\par Now select $r=\frac{1}{2}\text{dist}(\partial G, B)$ and cover $\overline{G}$ with finitely many balls $B_{r/4}(x_1), \dots, B_{r/4}(x_m)$, with each $x_1,\dots,x_m\in G$. Then \begin{align*}
\int_G|D^2u^\epsilon(x)|^pdx&\le \int_{\cup^m_{i=1}B_{r/4}(x_i)}|D^2u^\epsilon(x)|^pdx \\
&\le \sum^m_{i=1}\int_{B_{r/4}(x_i)}|D^2u^\epsilon(x)|^pdx \\
&\le mC_0^p\left(|u^\epsilon|_{L^\infty(B)} + |f^\varrho|_{L^\infty(B)}+C_1\right)^p. \end{align*} \end{proof}
In view of our uniform estimates, we are in position to send $\epsilon\rightarrow 0^+$ in the equation \eqref{PenalizedEqn}. \begin{prop} Let $\delta\in (0,1)$, $\varrho\in (0,\varrho_1)$, $p\in (n,\infty)$ and assume $G\subset B$ is open with $\overline{G}\subset B$. \\ (i) There is $v_{\delta,\varrho}\in C(\overline{B})\cap W^{2,p}_{\text{loc}}(B)$ such that $u^\epsilon\rightarrow v_{\delta,\varrho}$, as $\epsilon\rightarrow 0^+$, uniformly in $\overline{B}$ and weakly in $W^{2,p}(G)$. \\ (ii) Moreover, $v_{\delta,\varrho}$ is the unique solution of the boundary value problem \begin{equation}\label{vdeltavarrhoPDE} \begin{cases} \max\{\delta v+F^\varrho(D^2v)-f^\varrho,H^\varrho(Dv)\}=0&\quad x\in B \\ \hspace{2.38in}v=u_{\delta,\varrho}& \quad x\in \partial B \end{cases}. \end{equation} (iii) There is a constant $C$ depending on $p$, the list $\Pi$, $1/\text{dist}(\partial G,B)$ and $G$ such that \begin{equation}\label{vdeltarhoEst1}
|D^2v_{\delta,\varrho}|_{L^p(G)}\le C\left\{|v_{\delta,\varrho}|_{L^\infty(B)} +1\right\} \end{equation} and \begin{equation}\label{vdeltarhoEst2} -C\le F^\varrho(D^2v_{\delta,\varrho}(x)) \end{equation} for Lebesgue almost every $x\in G$. \end{prop} \begin{proof} $(i)-(ii)$ The convergence to $v$ satisfying \eqref{vdeltavarrhoPDE} is proved very similar to Proposition 4.1 in \cite{Hynd} and part $(ii)$ of Theorem 1.1 in \cite{HyndMawi}, so we omit the details. In both arguments, the uniqueness of solutions of a related boundary value problem of the type \eqref{vdeltavarrhoPDE} is crucial; in our case, uniqueness follows from the estimate \eqref{GenComparisonEst} below.
\par $(iii)$ The bound \eqref{vdeltarhoEst1} follows from part $(i)$ and Lemma \ref{W2pBoundUeps}. Let us now verify \eqref{vdeltarhoEst2}. Recall that $F^\varrho$ is concave. As $u^\epsilon$ converges to $v_{\delta,\varrho}$ weakly in $W^{2,p}_{\text{loc}}(B)$, for each $\zeta\in C^\infty_c(B)$ that is nonnegative, \begin{equation}\label{LimSupConcav} \limsup_{\epsilon\rightarrow 0^+}\int_B F^\varrho(D^2u^\epsilon(x))\zeta(x)dx\le \int_B F^\varrho(D^2v_{\delta,\varrho}(x))\zeta(x)dx. \end{equation}
By Lemma \ref{betaboundLem}, there is a constant $C$ depending only on the list $\Pi$ and $|\zeta|_{W^{2,\infty}(B)}$ such that $\zeta F^\varrho(D^2u^\epsilon)\ge -C$. Inequality \eqref{LimSupConcav} then gives \begin{equation}\label{vdeltarhoEst3} -C\le \zeta(x) F^\varrho(D^2v_{\delta,\varrho}(x)) \end{equation} for almost every $x\in B$. Let $x_0\in G$ and $r:=\frac{1}{2}\text{dist}(\partial G, B)$, and choose $0\le \zeta\le 1$ to be supported in $B_r(x_0)$ and satisfy $\zeta\equiv 1$ on $B_{r/2}(x_0)$ and \eqref{DzetaBounds}. Then \eqref{vdeltarhoEst3} implies that \eqref{vdeltarhoEst2} holds for almost every $x\in B_{r/2}(x_0)$ for some constant $C$ depending on $\Pi$ and $r$. The general bound follows by a routine covering argument. \end{proof}
\begin{prop}\label{FFvarrholocLem} Let $\delta\in(0,1)$, $p\in (n,\infty)$ and assume $G\subset B$ is open with $\overline{G}\subset B$.\\ $(i)$ Then $v_{\delta,\varrho}\rightarrow u_\delta$, as $\varrho\rightarrow 0^+$, uniformly on $\overline{B}$ and weakly in $W^{2,p}(G)$. \\ $(ii)$ There is a constant $C$ depending on $p$, the list $\Pi$, $1/\text{dist}(\partial G,B)$ and $G$ such that $$ -C\le F(D^2u_{\delta}(x)) $$ for almost every $x\in G$. \end{prop} \begin{proof} $(i)$ We first claim \begin{equation}\label{UdeltarhoVdeltarhoEst} u_{\delta,\varrho}(x)\le v_{\delta,\varrho}(x)\le u_{\delta,\varrho}(x) +\frac{1}{\delta}\sqrt{n}\Theta\varrho \end{equation} for $x\in B$ and $\varrho\in (0,\varrho_1)$. And in order to prove \eqref{UdeltarhoVdeltarhoEst}, we will need the estimate \begin{equation}\label{GenComparisonEst} \max_{\overline{B}}\{u-v\}\le \max_{\partial B}\{u-v\}+\frac{1}{\delta}\max_{\overline{B}}\{g-h\} \end{equation} which holds for each $u\in USC(\overline{B})$ and $v\in LSC(\overline{B})$ that satisfy \begin{equation}\label{GenComparisonPDE} \max\{\delta u+F(D^2u)-g,H^\varrho(Du)\}\le 0\le \max\{\delta v+F(D^2v)-h,H^\varrho(Dv)\}, \quad x\in B. \end{equation} Here $g,h\in C(\overline{B})$. The estimate \eqref{GenComparisonEst} can be proved with the ideas used to verify Proposition \eqref{lamCompProp}; see also Proposition 2.2 of \cite{HyndMawi}. We leave the details to the reader.
\par Using $F^\varrho\le F$, the inequality $u_{\delta,\varrho}\le v_{\delta,\varrho}$ follows from \eqref{GenComparisonEst} as $u=u_{\delta,\varrho}$, $v=v_{\delta,\varrho}$ satisfy \eqref{GenComparisonPDE} with $g=h=f^\varrho$. Likewise, we can use the bound $F\le F^\varrho+\sqrt{n}\Theta \varrho$ to show the inequality $v_{\delta,\varrho}\le u_{\delta,\varrho} + \sqrt{n}\Theta \varrho/\delta$ follows from \eqref{GenComparisonEst} as $u=v_{\delta,\varrho}$, $v=u_{\delta,\varrho}$ satisfy \eqref{GenComparisonPDE} with $g=f^\varrho+\sqrt{n}\Theta\varrho$ and $h=f^\varrho$. The assertion that $v_{\delta,\varrho}$ converges to $u_\delta$ in $W^{2,p}(G)$ weakly follows from \eqref{vdeltarhoEst1}.
\par $(ii)$ Let $U\subset G$ be measurable and recall that $F^\varrho\le F$ and $F$ is concave. By \eqref{vdeltarhoEst2}, there is a constant $C$ depending on $p$, the list $\Pi$, $1/\text{dist}(\partial G,B)$ and $G$ such that \begin{align*}
-C|U|&\le \limsup_{\varrho \rightarrow 0^+}\int_{U}F^\varrho(D^2v_{\delta,\varrho}(x))dx \\ &\le \limsup_{\varrho \rightarrow 0^+}\int_{U}F(D^2v_{\delta,\varrho}(x))dx \\ &\le \int_{U}F(D^2u_{\delta}(x))dx. \end{align*} \end{proof}
\begin{cor}\label{W2infuDel} For each $\delta\in (0,1)$, $D^2u_\delta\in L^\infty(\mathbb{R}^n;S_n(\R))$. Moreover, there is a constant $C$ depending only on the list $\Pi$ for which $$
|D^2u_\delta|_{ L^\infty(\mathbb{R}^n;S_n(\R))}\le C. $$ for each $\delta\in(0,1)$. \end{cor} \begin{proof} Choose $R_1>0$ so that $\Omega_\delta\subset B_{R_1}(0)$ for all $\delta\in (0,1)$; such an $R_1$ exists by corollary \ref{boundedDerOmega}. Lemma \ref{W2infFarOutBound} gives that there is a universal constant $C$ such that $$ D^2u_\delta(x)\le \frac{C}{R_1}I_n. $$
for almost every $|x|\ge 2R_1$.
\par Now select $R>2R_1$ so large that \eqref{BbigEnough} is satisfied. Part $(ii)$ of Proposition \ref{FFvarrholocLem}, with $G=B_{2R_1}(0)$ and $B=B_R(0)$, gives a constant $C_1$ depending on $R_1$ and the list $\Pi$ such that \begin{equation}\label{FD2uBounds} -C_1\le F(D^2u_\delta(x)) \end{equation}
for almost every $|x|\le 2R_1$. Since $u_\delta$ is convex (Proposition \ref{UdelConvex}), the uniform ellipticity assumption on $F$ implies \begin{equation}\label{FD2uBounds2} F(D^2u_\delta(x))\le -\theta\Delta u_\delta(x) \end{equation} for almost every $x\in \mathbb{R}^n$. Therefore, we can again appeal to the convexity of $u_\delta$ and employ \eqref{FD2uBounds} and \eqref{FD2uBounds2} to get $$ D^2u_\delta(x)\le \Delta u_\delta(x) I_n\le \frac{C_1}{\theta}I_n $$
for almost every $|x|\le 2R_1$. \end{proof}
\begin{proof} (part $(ii)$ of Theorem \ref{Thm1}) By the convexity of $u_\delta$ and Corollary \ref{W2infuDel}, there is a constant $C$ independent of $\delta\in (0,1)$ for which $$
0\le u_\delta(x+h)-2u_\delta(x)+u_\delta(x-h)\le C|h|^2 $$ for every $x, h\in \mathbb{R}^n$. The assertion now follows from passing to the limit along an appropriate sequence $\delta$ tending to $0$ as was done in the proof of part $(i)$ of Theorem \ref{Thm1}. \end{proof} \begin{rem}\label{remboundedOmega} By part $(ii)$ of Theorem \ref{Thm1}, $Du_\delta$ exists everywhere and is continuous. By Corollary \ref{boundedDerOmega} $$ \Omega_\delta=\{x\in \mathbb{R}^n : H(Du_\delta(x))<0\} $$ is open and bounded. \end{rem}
\section{1D and rotationally symmetric problems}\label{1DandRotSymmSect} Now we will discuss a few results for solutions of the eigenvalue problem \eqref{EigProb} when the dimension $n=1$ and when $F,f,H$ satisfy the symmetry hypothesis \eqref{SymmetryCond}: $$ \begin{cases} f(Ox)=f(x)\\ H(O^tp)=H(p)\\ F(OMO^t)=F(M) \end{cases} $$ for each $x,p\in \mathbb{R}^n$, $M\in S_n(\R)$ and orthogonal $n\times n$ matrix $O$. First, we prove Theorem \ref{SymmRegThm} which involves the regularity of symmetric eigenfunctions. Then we consider the uniqueness of eigenfunctions of \eqref{EigProb} that satisfy the growth condition \eqref{ellgrowth}.
\begin{proof} (Theorem \ref{SymmRegThm}) The assumption \eqref{SymmetryCond} implies that $u_\delta$ is radial; this follows from the uniqueness assertion \ref{DeltaCompProp}. In particular, $u^*$ constructed in the proof of part $(i)$ of Theorem \ref{Thm1} will also be radial. Consequently, there is a function $\phi: [0,\infty)\rightarrow \mathbb{R}$ such that $u^*(x)=\phi(|x|)$. As $u^*$ is convex, $\phi$ is nondecreasing and convex. Moreover, for almost every $x\in \mathbb{R}^n$ $$ \begin{cases}
Du^*(x)=\phi'(|x|)\frac{x}{|x|}\\
D^2u^*(x)=\phi''(|x|)\frac{x\otimes x}{|x|^2} +\frac{\phi'(|x|)}{|x|}\left(I_n - \frac{x\otimes x}{|x|^2}\right) \end{cases}. $$
\par Similar arguments imply $f(x)=f_0(|x|)$ for a nondecreasing, convex function $f_0$. Likewise $H(p)$ only depends on $|p|$ and so $\{p\in \mathbb{R}^n: H(p)\le 0\}$ is a ball. Thus, $\ell(v)=a|v|$ for some
$a>0$, and as a result $H_0(p)=|p|-a$. The assumption \eqref{SymmetryCond} also implies $F=F(M)$ only depends on the eigenvalues of $M$. In particular, the symmetric function $G(\mu_1,\dots,\mu_n):=F(\diag(\mu_1,\dots,\mu_n))$ completely determines $F$. And as $F$ is uniformly elliptic $$ G(\mu_1+h,\dots,\mu_n)-G(\mu_1,\dots,\mu_n)\le-\theta h. $$ for $h\ge 0$.
\par From our comments above, $\phi$ satisfies \begin{equation}\label{phiEq} \max\left\{\lambda^*+G\left(\frac{\phi'}{r},\dots,\frac{\phi'}{r}, \phi''\right) - f_0(r), \phi'-a\right\}=0, \quad r>0. \end{equation} And since $\phi'$ is nondecreasing, $$ \{r>0: \phi'(r)<a\}=(0,r_0) $$ for some $r_0>0$; this is another way of expressing $\Omega:=\{x\in \mathbb{R}^n: H(Du^*(x))<0\}=B_{r_0}(0)$. Part $(ii)$ of Theorem \ref{Thm1} then implies $u^*\in C^2(\Omega)\cap C^{1,1}_\text{loc}(\mathbb{R}^n)$. Thus, $\phi'=a$ for $r\ge r_0$ and $\phi\in C^2(\mathbb{R}\setminus\{r_0\})\cap C^{1,1}_\text{loc}(\mathbb{R})$. Furthermore, as $\phi''(r_0+)=0$, we just need to show $\phi''(r_0-)=0$.
\par Recall the left hand limit $\phi''(r_0-)$ exists and is nonnegative since $\phi$ is convex. By \eqref{phiEq}, $$ \lambda^*+G\left(\frac{\phi'}{r},\dots,\frac{\phi'}{r}, \phi''\right) - f_0(r)\le 0, \quad r>0. $$ Sending $r\rightarrow r_0^+$ gives \begin{equation}\label{Geq1} \lambda^*+G\left(\frac{a}{r_0},\dots,\frac{a}{r_0}, 0\right) - f_0(r_0)\le 0. \end{equation} Now, $$ \lambda^*+G\left(\frac{\phi'}{r},\dots,\frac{\phi'}{r}, \phi''\right) - f_0(r)= 0, \quad r\in (0,r_0) $$ and sending $r\rightarrow r_0^-$ gives \begin{equation}\label{Geq2} \lambda^*+G\left(\frac{a}{r_0},\dots,\frac{a}{r_0},\phi''(r_0-) \right) - f_0(r_0)=0. \end{equation} Combining \eqref{Geq1} and \eqref{Geq2} gives $$ G\left(\frac{a}{r_0},\dots,\frac{a}{r_0}, 0\right) \le f_0(r_0)-\lambda^*=G\left(\frac{a}{r_0},\dots,\frac{a}{r_0},\phi''(r_0-) \right). $$ By the monotonicity of $G$ in each of its arguments, $\phi''(r_0-)\le 0$. Thus $\phi''(r_0)=0$, and as a result, $u^*\in C^2(\mathbb{R}^n)$. \end{proof}
\begin{prop}\label{UniqunessN1} Assume $n=1$. Any two convex solutions of \eqref{EigProb} that satisfy \eqref{ellgrowth} differ by an additive constant. \end{prop} \begin{proof} Assume $u_1, u_2$ are convex and satisfy $$ \begin{cases} \max\{\lambda^*+F(u'')-f,H(u')\}=0, \quad x\in \mathbb{R}\\ \lim_{|x|\rightarrow \infty}\frac{u(x)}{\ell(x)}=1 \end{cases}. $$ As in the proof of Theorem \ref{SymmRegThm}, we may deduce that necessarily $u_1,u_2\in C^2(\mathbb{R})$. Also observe $$ H_0(p)=\max_{v=\pm 1}\left\{pv-\ell(v)\right\}=\max\{p-\ell(1), -p -\ell(-1)\}. $$ In particular, $$ \{p\in \mathbb{R}: H(p)\le 0\}=[-\ell(-1), \ell(1)]. $$ It then follows from the convexity of $u_1$ and $u_2$ that $$ I_1:=\{x\in \mathbb{R}: H(u_1'(x))<0\}\quad \text{and}\quad I_2:=\{x\in \mathbb{R}: H(u_2'(x))<0\} $$ are bounded, open intervals.
\par Let us first assume $I_1=I_2=(\alpha,\beta)$. Then $$ \lambda^*+F(u_1'')-f=0=\lambda^*+F(u_2'')-f, \quad x\in (\alpha,\beta) $$ As $F$ is uniformly elliptic $u_1''=u_2''=F^{-1}(f-\lambda^*)$ for $x\in (\alpha,\beta)$. Hence, $u'_1-u_2'$ is constant. The above characterization of $\{p\in \mathbb{R}: H(p)\le 0\}$ also implies $$ \begin{cases} u_1'=u_2'=-\ell(-1), \quad x\in (-\infty,\alpha]\\ u_1'=u_2'=\ell(1), \quad x\in [\beta,\infty) \end{cases} $$ It now follows that necessarily $u'_1=u'_2$ and so $u_1-u_2$ is constant.
\par Now we are left to prove that $I_1=I_2$; for definiteness, we shall assume $I_1= (\alpha_1,\beta_1)$ and $I_2= (\alpha_2,\beta_2)$. First suppose that $I_1\cap I_2=\emptyset$ and without loss of generality $\beta_1<\alpha_2$. Then on $I_1$, $\lambda^*+F(u_1'')-f=0$ and $u_2'=-\ell(-1)$. We always have $\lambda^*+F(u_2'')-f\le 0$ which implies $\lambda^*-f\le 0$ on $I_1$ since $u_2''=0$. It then follows that $F(u_1'')=f-\lambda^*\ge 0$ and thus $u_1''\le 0.$ As $u_1$ is convex, $u_1''=0$ in $I_1.$ However, $u_1'$ is constant and it would then be impossible for $u'_1(\alpha_1)=-\ell(-1)<0$ and $u'_1(\beta_1)=\ell(1)>0$. Therefore, $I_1\cap I_2\neq\emptyset$.
\par Without any loss of generality, we may assume $\alpha_1<\alpha_2<\beta_1$. Repeating our argument above, we find $u_1''=0$ on $(\alpha_1,\alpha_2)$. It must be that $u_1'$ is constant and thus equal to $-\ell(-1)$ on $[\alpha_1,\alpha_2]$. But then $H(u_1')=0$ on $[\alpha_1,\alpha_2]$, which contradicts the definition of $I_1$. Hence, $I_1=I_2$ and the assertion follows. \end{proof}
\begin{prop}\label{ConvUniqueness} Assume the symmetry condition \eqref{SymmetryCond} and that $F$ is uniformly elliptic. Then any two convex, rotationally symmetric solutions of \eqref{EigProb} that satisfy \eqref{ellgrowth} differ by an additive constant. \end{prop}
\begin{proof}
As remarked in the above proof of Theorem \ref{SymmRegThm}, the symmetry assumption on $H$ results in $H_0(p)=|p|-a$ for some $a>0$. Now assume $u_1, u_2$ are convex, rotationally symmetric solutions of \eqref{EigProb} that satisfy \eqref{ellgrowth}. Then it follows $$ \{x\in\mathbb{R}^n: H(Du_i(x))<0\}=B_{r_i}(0) $$ for $i=1,2$ and some $r_1,r_2>0$. Thus, \begin{equation}\label{u1u2const}
u_i(x)=a|x|+b_i, \quad |x|\ge r_i \end{equation} for some constants $b_i$. If $r_1=r_2=:r$, then $$ \begin{cases} F(D^2u_1)=f(x)-\lambda^*=F(D^2u_2), \quad &x\in B_{r}(0)\\ u_1=ar+b_1, \quad u_2=ar+b_2, \quad & x\in \partial B_r(0). \end{cases} $$ As $F$ is uniformly elliptic, $u_1\equiv u_2+b_1-b_2$ on $\overline{B_r(0)}$ and thus on $\mathbb{R}^n$.
\par Now suppose $r_1<r_2$. And set $v:= u_2+b_1-b_2$; from \eqref{u1u2const} $u_1\equiv v$ for $|x|\ge r_2$. Since, $$ F(D^2u_1)\le f(x)-\lambda^*=F(D^2v),\quad x\in B_{r_2} $$ the maximum principle implies $u_1\le v$ in $\overline{B}_{r_2}$. The strong maximum principle implies $u_1\equiv v$ in $\overline{B}_{r_2}$, from which we conclude the proof, or $u_1< v$ in $B_{r_2}$. However if $u_1< v$ in $B_{r_2}$, Hopf's Lemma (see the appendix of \cite{Armstrong}) implies \begin{equation}\label{HopfCond} \frac{\partial v}{\partial \nu}(x_0)<\frac{\partial u_1}{\partial \nu}(x_0) \end{equation}
for each $x_0\in\partial B_{r_2}$. Here $\nu=x_0/|x_0|$. As $u_1$ and $v$ are rotationally symmetric and convex, \eqref{HopfCond} implies $$
|Dv(x_0)|<|Du_1(x_0)|\le a. $$
However, $|Dv|=a$ on $\partial B_{r_2}$. This contradicts the hypothesis that $r_1<r_2$.
\end{proof}
\section{Minmax formulae}\label{MinMaxSect} This final section is devoted entirely to the proof of Theorem \ref{minmaxThm}. In particular, we will make use of the characterizations of $\lambda^*$ given in \eqref{LamChar1} and \eqref{LamChar2}. We will also use that the functions $H$ and $H_0$ have the same sign.
\par Let $\phi \in C^2(\mathbb{R}^n)$ and suppose that $H(D\phi)\le 0$. If $$ \lambda_\phi:=\inf_{\mathbb{R}^n}\left\{-F(D^2\phi(x))+f(x)\right\}>-\infty, $$ then $\phi$ is a subsolution of \eqref{EigProb} with eigenvalue $\lambda_\phi$. By \eqref{LamChar1}, $\lambda_\phi\le \lambda^*$. Hence, $\lambda_-=\sup_\phi\lambda_\phi\le \lambda^*.$ Now let $\psi\in C^2(\mathbb{R}^n)$ satisfy $$
\liminf_{|x|\rightarrow \infty}\frac{\psi(x)}{\ell(x)}\ge 1. $$ If $$ \mu_\psi:=\sup_{H(D\psi)<0}\left\{-F(D^2\psi(x))+f(x)\right\}<\infty, $$ then $\psi$ is a supersolution of \eqref{EigProb} with eigenvalue $\mu_\psi$. It follows from \eqref{LamChar2} that $\mu_\psi\ge \lambda^*$. As a result, $\lambda_+=\inf_\psi\mu_\psi\ge \lambda^*.$
\par Let $u^*$ be an eigenfunction associated with $\lambda^*$ that satisfies $D^2u^*\in L^\infty(\mathbb{R}^n; S_n(\mathbb{R}))$. As in Remark \ref{remboundedOmega}, $$ \Omega_0:=\{x\in \mathbb{R}^n: H(Du^*(x))<0\} $$ is open and bounded. For $\epsilon>0$ and $\tau>1$, set $$ u^{\epsilon,\tau}=\tau u^\epsilon=\tau(\eta^\epsilon*u^*). $$ Here $u^\epsilon=\eta^\epsilon*u^*$ is the standard mollification of $u^*$. Observe that $H_0$
is Lipschitz continuous with Lipschitz constant no more than one; in view of the basic estimate $|Du^*-Du^\epsilon|_{L^\infty(\mathbb{R}^n)}\le \epsilon |D^2u^*|_{L^\infty(\mathbb{R}^n)}$, \begin{equation}\label{LipHCond}
H_0(Du^*(x))\le H_0(Du^\epsilon(x))+\epsilon |D^2u^*|_{L^\infty(\mathbb{R}^n)}, \quad x\in \mathbb{R}^n. \end{equation}
\par So for any $x\in \mathbb{R}^n$ where $H(Du^{\epsilon,\tau}(x))<0$, \begin{equation}\label{LipHCond2} H_0(Du^{\epsilon}(x))=H_0\left(\frac{1}{\tau}Du^{\epsilon,\tau}(x)+\frac{\tau-1}{\tau}0\right)< \frac{\tau-1}{\tau}H_0(0)<0. \end{equation} In view of \eqref{LipHCond} and \eqref{LipHCond2}, we can choose $\epsilon_1=\epsilon_1(\tau)>0$ such that $$
\varrho:=-\left(\frac{\tau-1}{\tau}H_0(0)+\epsilon_1 |D^2u^*|_{L^\infty(\mathbb{R}^n)}\right)>0 $$ and $$ \{x\in \mathbb{R}^n: H(Du^{\epsilon,\tau}(x))<0\}\subset\{x\in \mathbb{R}^n: H_0(Du^*(x))<-\varrho\} $$ for $\epsilon\in (0,\epsilon_1)$. Since $\{x\in \mathbb{R}^n: H_0(Du^*(x))<-\varrho\}$ is a proper open subset of $\Omega_0$ we can further select $\epsilon_2=\epsilon_2(\tau)>0$ so that \begin{equation}\label{ImportantIncludsionepstau} \{x\in \mathbb{R}^n: H_0(Du^*(x))<-\varrho\}\subset \Omega^\epsilon:= \{x\in \mathbb{R}^n: \text{dist}(x,\partial\Omega_0)>\epsilon\} \end{equation} for $\epsilon\in (0,\epsilon_2)$.
\par By assumption, $u^*$ satisfies $\lambda^* +F(D^2u^*)-f=0$ for almost every $x\in \Omega_0$. Mollifying both sides of this equation gives $ \lambda^*+F(D^2u^*)^\epsilon-f^\epsilon=0$ in $\Omega^\epsilon$. Since $F$ is concave $$ F(D^2u^\epsilon(x))=F\left(\int_{\mathbb{R}^n}\eta^\epsilon(y)D^2u^*(x-y)dy\right)\ge \int_{\mathbb{R}^n}\eta^\epsilon(y)F(D^2u^*(x-y))dy=F(D^2u^*)^\epsilon(x). $$ Consequently, $\lambda^*+F(D^2u^\epsilon)-f^\epsilon\ge 0,$ in $\Omega^\epsilon$. And since
$\Omega_0$ is bounded, $|f^\epsilon-f|_{L^\infty(\Omega_0)}=o(1)$ as $\epsilon \rightarrow 0^+$. Therefore, \begin{equation}\label{lambdaMollifiedEqn} \lambda^*+F(D^2u^\epsilon)-f\ge o(1), \quad x\in \Omega^\epsilon. \end{equation} as $\epsilon\rightarrow 0^+$.
\par We can now combine the inclusion \eqref{ImportantIncludsionepstau} and the inequality \eqref{lambdaMollifiedEqn}. For $\epsilon\in (0,\min\{\epsilon_1,\epsilon_2\})$
\begin{align*} \lambda_+&\le \sup_{H(Du^{\epsilon,\tau})<0}\left\{-F(D^2u^{\epsilon,\tau}(x)) +f(x)\right\}\\ &= \sup_{H(Du^{\epsilon,\tau})<0}\left\{-\tau F(D^2u^{\epsilon}(x)) +f(x)\right\}\\ &= \sup_{H(Du^{\epsilon,\tau})<0}\left\{- F(D^2u^{\epsilon}(x)) +f(x)\right\}+O(\tau-1)\\ &\le \sup_{\Omega^\epsilon}\left\{- F(D^2u^{\epsilon}(x)) +f(x)\right\}+O(\tau-1)\\ &\le \lambda^* +o(1)+O(\tau-1). \end{align*} We conclude by first sending $\epsilon\rightarrow 0^+$ and then $\tau\rightarrow 1^+$.
\end{document} |
\begin{document}
\begin{abstract} In this paper we report on massive computer experiments aimed at finding spherical point configurations that minimize potential energy. We present experimental evidence for two new universal optima (consisting of $40$ points in $10$ dimensions and $64$ points in $14$ dimensions), as well as evidence that there are no others with at most $64$ points. We also describe several other new polytopes, and we present new geometrical descriptions of some of the known universal optima. \end{abstract}
\maketitle
\begin{quote} [T]he problem of finding the configurations of stable equilibrium for a number of equal particles acting on each other according to some law of force\dots is of great interest in connexion with the relation between the properties of an element and its atomic weight. Unfortunately the equations which determine the stability of such a collection of particles increase so rapidly in complexity with the number of particles that a general mathematical investigation is scarcely possible.
\break \phantom{}\
J.~J.~Thomson, 1897 \end{quote}
\tableofcontents
\section{Introduction} \label{section:intro}
What is the best way to distribute $N$ points over the unit sphere $S^{n-1}$ in ${\mathbb R}^n$? Of course the answer depends on the notion of ``best.'' One particularly interesting case is energy minimization. Given a continuous, decreasing function $f \colon (0,4] \to {\mathbb R}$, define the $f$-potential energy of a finite subset $\mathcal{C} \subset S^{n-1}$ to be $$ E_f(\mathcal{C}) = \frac{1}{2}\sum_{\genfrac{}{}{0pt}{}{\scriptstyle x,y \in
\mathcal{C}}{\scriptstyle x \ne y}} f\big(|x-y|^2\big). $$
(We only need $f$ to be defined on $(0,4]$ because $|x-y|^2 \le 4$
when $|x|^2=|y|^2=1$. The factor of $1/2$ is chosen for compatibility with the physics literature, while the use of squared distance is incompatible but more convenient.) How can one choose $\mathcal{C} \subset S^{n-1}$ with $|\mathcal{C}|=N$ so as to minimize $E_f(\mathcal{C})$? In this paper we report on lengthy computer searches for configurations with low energy. What distinguishes our approach from most earlier work on this topic (see for example \cite{AP-G1, AP-G2, AP-G3, AWRTSDW, AW, BBCDHNNTW, BCNT1, BCNT2, C, DM, DLT, E, ELSBB, EH, F, GE, HS1, HS2, H, KaS, KuS, K, LL, M-FMRS, MKS, MDH, P-GDMOD-S, P-GDM, P-GM, RSZ1, RSZ2, SK, S, T, Wh, Wi}) is that we attempt to treat many different potential functions on as even a footing as possible. Much of the mathematical structure of this problem becomes apparent only when one varies the potential function $f$. Specifically, we find that many optimal configurations vary in surprisingly simple, low-dimensional families as $f$ varies.
The most striking case is when such a family is a single point: in other words, when the optimum is independent of $f$. Cohn and Kumar \cite{CK1} defined a configuration to be \textit{universally optimal\/} if it minimizes $E_f$ for all completely monotonic $f$ (i.e., $f$ is infinitely differentiable and $(-1)^k f^{(k)}(x) \ge 0$ for all $k \ge 0$ and $x \in (0,4)$, as is the case for inverse power laws). They were able to prove universal optimality only for certain very special arrangements. One of our primary goals in this paper is to investigate how common universal optimality is. Was the limited list of examples in \cite{CK1} an artifact of the proof techniques or a sign that these configurations are genuinely rare?
Every universally optimal configuration is an optimal spherical code, in the sense that it maximizes the minimal distance between the points. (Consider an inverse power law $f(r) = 1/r^s$. If there were a configuration with a larger minimal distance, then its $f$-potential energy would be lower when $s$ is sufficiently large.) However, universal optimality is a far stronger condition than optimality as a spherical code. There are optimal spherical codes of each size in each dimension, but they are rarely universally optimal. In three dimensions, the only examples are a single point, two antipodal points, an equilateral triangle on the equator, or the vertices of a regular tetrahedron, octahedron, or icosahedron. Universal optimality was proved in \cite{CK1}, building on previous work by Yudin, Kolushov, and Andreev \cite{Y,KY1,KY2,A1,A2}, and the completeness of this list follows from a classification theorem due to Leech \cite{L}. See \cite{CK1} for more details.
In higher dimensions much less is known. Cohn and Kumar's main theorem provides a general criterion from which they deduced the universal optimality of a number of previously studied configurations. Specifically, they proved that every spherical $(2m-1)$-design in which only $m$ distances occur between distinct points is universally optimal. Recall that a spherical $d$-design in $S^{n-1}$ is a finite subset $\mathcal{C}$ of $S^{n-1}$ such that every polynomial on ${\mathbb R}^n$ of total degree $d$ has the same average over $\mathcal{C}$ as over the entire sphere. This criterion holds for every known universal optimum except one case, namely the regular $600$-cell in ${\mathbb R}^4$ (i.e., the $H_4$ root system), for which Cohn and Kumar proved universal optimality by a special argument.
\begin{table} \begin{center} \begin{tabular}{cccc} $n$ & $N$ & $t$ & Description\\ \hline $2$ & $N$ & $\cos (2\pi /N)$ & $N$-gon\\ $n$ & $N \le n+1$ & $-1/(N-1)$ & simplex\\ $n$ & $2n$ & $0$ & cross polytope\\ $3$ & $12$ & $1/\sqrt{5}$ & icosahedron\\ $4$ & $120$ & $(1 + \sqrt{5})/4$ & regular $600$-cell\\ $5$ & $16$ & $1/5$ & hemicube/Clebsch graph\\ $6$ & $27$ & $1/4$ & Schl\"afli graph/isotropic subspaces\\ $7$ & $56$ & $1/3$ & equiangular lines\\ $8$ & $240$ & $1/2$ & $E_8$ root system\\ $21$ & $112$ & $1/9$ & isotropic subspaces\\ $21$ & $162$ & $1/7$ & $(162, 56, 10, 24)$ strongly regular graph\\ $22$ & $100$ & $1/11$ & Higman-Sims graph\\ $22$ & $275$ & $1/6$ & McLaughlin graph\\ $22$ & $891$ & $1/4$ & isotropic subspaces\\ $23$ & $552$ & $1/5$ & equiangular lines\\ $23$ & $4600$ & $1/3$ & kissing configuration of the following\\ $24$ & $196560$ & $1/2$ & Leech lattice minimal vectors\\ $q\frac{q^3+1}{q+1}$ & $(q+1)(q^3+1)$ & $ 1/q^2$ & isotropic subspaces ($q$ is a prime power)\\ \\ \end{tabular} \end{center} \caption{The known universal optima.}\label{table:universal} \end{table}
A list of all known universal optima is given in Table~\ref{table:universal}. Here $n$ is the dimension of the Euclidean space, $N$ is the number of points, and $t$ is the greatest inner product between distinct points in the configuration (i.e., the cosine of the minimal angle). For detailed descriptions of these configurations, see Section~1 of \cite{CK1}. Each is uniquely determined by the parameters listed in Table~\ref{table:universal}, except for the configurations listed on the last line. For that case, when $q=p^\ell$ with $p$ an odd prime, there are at least $\lfloor (\ell-1)/2 \rfloor$ distinct universal optima (see \cite{CGS} and \cite{Ka}). Classifying these optima is equivalent to classifying generalized quadrangles with parameters $(q,q^2)$, which is a difficult problem in combinatorics. In the other cases from Table~\ref{table:universal}, when uniqueness holds, we use the notation $\universal{N}{n}$ for the unique $N$-point universal optimum in ${\mathbb R}^n$.
Each of the configurations in Table~\ref{table:universal} had been studied before it appeared in \cite{CK1}, and was already known to be an optimal spherical code. In fact, when $N \ge 2n+1$ and $n > 4$, the codes on this list are exactly those that have been proved optimal. Cohn and Kumar were unable to resolve the question of whether Table~\ref{table:universal} is the complete list of universally optimal codes, except when $n \le 3$. All that is known in general is that any new universal optimum must have $N \ge 2n+1$ (Proposition~1.4 in \cite{CK1}). It does not seem plausible that the current list is complete, but it is far from obvious where to find any others.
Each known universal optimum is a beautiful mathematical object, connected to various important exceptional structures (such as special lattices or groups). Our long-term hope is to develop automated tools that will help uncover more such objects. In this paper we do not discover any configurations as fundamental as those in Table~\ref{table:universal}, but perhaps our work is a first step in that direction.
Table~\ref{table:universal} shows several noteworthy features. When $n \le 4$, the codes listed are the vertices of regular polytopes, specifically those with simplicial facets. When $5 \le n \le 8$, the table also includes certain semiregular polytopes (their facets are simplices and cross polytopes, with two cross polytopes and one simplex arranged around each $(n-3)$-dimensional face). The corresponding spherical codes are all affine cross sections of the minimal vectors of the $E_8$ root lattice. Remarkably, no universal optima are known for $9 \le n \le 20$, except for the simplices and cross polytopes, which exist in every dimension. This gap is troubling---why should these dimensions be disfavored? For $21 \le n \le 24$ nontrivial universal optima are known; they are all affine cross sections of the minimal vectors of the Leech lattice (and are no longer the vertices of semiregular polytopes). Finally, in high dimensions a single infinite sequence of nontrivial universal optima is known.
It is not clear how to interpret this list. For example, is the dimension gap real, or merely an artifact of humanity's limited imagination? One of our conclusions in this paper is that Table~\ref{table:universal} is very likely incomplete but appears closer to complete than one might expect.
\subsection{Experimental results}
\begin{table} \begin{center} \begin{tabular}{cccc} $n$ & $N$ & $t$ & References\\ \hline $10$ & $40$ & $1/6$ & Conway, Sloane, and Smith \cite{S}, Hovinga \cite{H}\\ $14$ & $64$ & $1/7$ & Nordstrom and Robinson \cite{NR}, de Caen and van Dam \cite{dCvD},\\ & & & Ericson and Zinoviev \cite{EZ}\\ \\ \end{tabular} \end{center} \caption{New conjectured universal optima.}\label{table:conjectures} \end{table}
One outcome of our computer searches is two candidate universal optima, listed in Table~\ref{table:conjectures} and described in more detail in Section~\ref{section:newunivopt}. These configurations were located through massive computer searches: for each of many pairs $(n,N)$, we repeatedly picked $N$ random points on $S^{n-1}$ and performed gradient descent to minimize potential energy. We focused on the potential function $f(r) =
1/r^{n/2-1}$, because $x \mapsto 1/|x|^{n-2}$ is the unique nonconstant radial harmonic function on ${\mathbb R}^n\setminus \{0\}$, up to scalar multiplication (recall that distance is squared in the definition of $E_f$). When $n=3$, the $f$-potential energy for this function $f$ is the Coulomb potential energy from electrostatics, and this special case has been extensively studied by mathematicians and other scientists. In higher dimensions, this potential function has frequently been studied as a natural generalization of electrostatics; we call it the harmonic potential function.
Because there are typically numerous local minima for harmonic energy, we repeated this optimization procedure many times with the hope of finding the global minimum. For low numbers of points in low dimensions, the apparent global minimum occurs fairly frequently. Figure~\ref{fig:probs} shows data from three dimensions. In higher dimensions, there are usually more local minima and the true optimum can occur very infrequently.
\begin{figure}
\caption{Probabilities of local minima for harmonic energy in ${\mathbb R}^3$ (based on $1000$ trials). White circles denote the conjectured harmonic optima.}
\label{fig:probs}
\end{figure}
For each conjectured optimum for harmonic energy, we attempted to determine whether it could be universally optimal. We first determined whether it is in equilibrium under all possible force laws (i.e., ``balanced'' in the terminology of Leech \cite{L}). That holds if and only if for each point $x$ in the configuration and each distance $d$, the sum of all points in the code at distance $d$ from $x$ is a scalar multiple of $x$. If this criterion fails, then there is some inverse power law under which the code is not even in equilibrium, let alone globally minimal, so it cannot possibly be universally optimal. Most of the time, the code with the lowest harmonic potential energy is not balanced. When it is balanced, we compared several potential functions to see whether we could disprove universal optimality. By Theorem~9b in \cite[p.~154]{Wid}, it suffices to look at the potential functions $f(r) = (4-r)^k$ with $k \in \{0,1,2,\dots\}$ (on each compact subinterval of $(0,4]$, every completely monotonic function can be approximated arbitrarily closely by positive linear combinations of these potential functions). Because these functions do not blow up at $r=0$, numerical calculations with them often converge more slowly than they do for inverse power laws (nearby points can experience only a bounded force pushing them apart), so they are not a convenient choice for initial experimentation. However, they play a fundamental role in detecting universal optima.
To date, our search has led us to $58$ balanced configurations with at most $64$ points (and at least $2n+1$ in dimension $n$) that appear to minimize harmonic energy and were not already known to be universally optimal. In all but two cases, we were able to disprove universal optimality, but the remaining two cases (those listed in Table~\ref{table:conjectures}) are unresolved. We conjecture that they are in fact universally optimal.
\begin{figure}
\caption{Status of conjectured harmonic optima with up to $64$ points in at most $32$ dimensions: white circle denotes universal optimum, large gray circle denotes conjectured universal optimum, black circle denotes balanced configuration that is not universally optimal, tiny black circle denotes unbalanced configuration.}
\label{fig:balanced}
\end{figure}
Figure~\ref{fig:balanced} presents a graphical overview of our data. The triangle of white circles on the upper left represents the simplices, and the diagonal line of white circles represents the cross polytopes. Between them, one can see that the pattern is fairly regular, but as one moves right from the cross polytopes all structure rapidly vanishes. There is little hope of finding a simple method to predict where balanced harmonic optima can be found, let alone universal optima. It also does not seem likely that general universal optima can be characterized by any variant of Cohn and Kumar's criterion.
Besides the isotropic subspace universal optima from Table~\ref{table:universal} and the other universal optima with the same parameters, we can conjecture only one infinite family of balanced harmonic optima with more than $2n$ points in ${\mathbb R}^n$, namely the diplo-simplices with $2n+2$ points in ${\mathbb R}^n$ for $n \ge 6$ (see Subsection~\ref{subsec:diplo}). Certainly no others are apparent in Figure~\ref{fig:balanced}, but the isotropic subspace optima from Table~\ref{table:universal} are sufficiently large and exotic that it would be foolish to conjecture that there are no other infinite families.
\begin{table} \begin{center} \begin{tabular}{ccc} $n$ & $N$ & $t$\\ \hline $3$ & $32$ & $\sqrt{75+30\sqrt{5}}/15$\\ $4$ & $10$ & $1/6$ or $(\sqrt{5}-1)/4$\\
$4$ & $13$ & $\big(\!\cos(4\pi/13) + \cos(6\pi/13)\big)/2$\\ $4$ & $15$ & $1/\sqrt{8}$\\ $4$ & $24$ & $1/2$\\ $4$ & $48$ & $1/\sqrt{2}$\\ $5$ & $21$ & $1/\sqrt{10}$\\ $5$ & $32$ & $1/\sqrt{5}$\\ $n \ge 6$ & $2n+2$ & $1/n$\\ $6$ & $42$ & $2/5$\\ $6$ & $44$ & $1/\sqrt{6}$\\ $6$ & $126$ & $\sqrt{3/8}$\\ $7$ & $78$ & $3/7$\\ $7$ & $148$ & $\sqrt{2/7}$\\ $8$ & $72$ & $5/14$\\ $9$ & $96$ & $1/3$\\ $14$ & $42$ & $1/10$\\ $16$ & $256$ & $1/4$\\ \\ \end{tabular} \end{center} \caption{Conjectured harmonic optima that are balanced, irreducible, and not universally optimal (see Section~\ref{section:balanced} for descriptions).} \label{table:balanced} \end{table}
\begin{table} \begin{center} \begin{tabular}{ccc} $n$ & $N$ & $t$\\ \hline $7$ & $182$ & $1/\sqrt{3}$\\ $15$ & $128$ & $1/5$\\ \\ \end{tabular} \end{center} \caption{Unresolved conjectured harmonic optima.} \label{table:unresolved} \end{table}
Table~\ref{table:balanced} lists the cases in which we found a balanced harmonic optimum but were able to disprove universal optimality, with one systematic exception: we omit configurations that are reducible, in the sense of being unions of orthogonal, lower-dimensional configurations (this terminology is borrowed from the theory of root systems). Reducible configurations are in no sense less interesting or fruitful than irreducible ones. For example, cross polytopes can be reduced all the way to one-dimensional pieces. However, including reducible configurations would substantially lengthen Table~\ref{table:balanced} without adding much more geometrical content.
Table~\ref{table:unresolved} lists two more unresolved cases. They both appear to be harmonic optima and are balanced, and we have not been able to prove or disprove universal optimality. Unlike the two cases in Table~\ref{table:conjectures}, we do not conjecture that they are universally optimal, because each is closely analogous to a case in which universal optimality fails ($182$ points in ${\mathbb R}^7$ is analogous to $126$ points in ${\mathbb R}^6$, and $128$ points in ${\mathbb R}^{15}$ is analogous to $256$ points in ${\mathbb R}^{16}$). On the other hand, each is also analogous to a configuration we know or believe is universally optimal ($240$ points in ${\mathbb R}^8$ and $64$ points in ${\mathbb R}^{14}$, respectively). We have not been able to disprove universal optimality in the cases in Table~\ref{table:unresolved}, but they are sufficiently large that our failure provides little evidence in favor of universal optimality.
Note that the data presented in Tables~\ref{table:balanced} and~\ref{table:unresolved} may not specify the configurations uniquely. For example, for $48$ points in ${\mathbb R}^4$ there is a positive-dimensional family of configurations with maximal inner product $1/\sqrt{2}$ (which is not the best possible value, according to Sloane's tables \cite{S}). See Section~\ref{section:balanced} for explicit constructions of the conjectured harmonic optima.
It is worth observing that several famous configurations do not appear in Tables~\ref{table:balanced} or~\ref{table:unresolved}. Most notably, the cubes in ${\mathbb R}^n$ with $n \ge 3$, the dodecahedron, the $120$-cell, and the $D_5$, $E_6$, and $E_7$ root systems are suboptimal for harmonic energy. Many of these configurations have more than $64$ points, but we have included in the tables all configurations we have analyzed, regardless of size.
In each case listed in Tables~\ref{table:balanced} and~\ref{table:unresolved}, our computer programs returned floating point approximations to the coordinates of the points in the code, but we have been able to recognize the underlying structure exactly. That is possible largely because these codes are highly symmetric, and once one has uncovered the symmetries the remaining structure is greatly constrained. By contrast, for most numbers of points in most dimensions, we cannot even recognize the minimal harmonic energy as an exact algebraic number (although it must be algebraic, because it is definable in the first-order theory of the real numbers).
\subsection{New universal optima}
Both codes listed in Table~\ref{table:conjectures} have been studied before. The first code was discovered by Conway, Sloane, and Smith \cite{S} as a conjecture for an optimal spherical code (and discovered independently by Hovinga \cite{H}). The second can be derived from the Nordstrom--Robinson binary code \cite{NR} or as a spectral embedding of an association scheme discovered by de Caen and van Dam \cite{dCvD} (take $t=1$ in Theorem~2 and Proposition~7(i) in \cite{dCvD} and then project the standard orthonormal basis into a common eigenspace of the operators in the Bose--Mesner algebra of the association scheme). We describe both codes in greater detail in Section~\ref{section:newunivopt}.
Neither code satisfies the condition from \cite{CK1} for universal optimality: both are spherical $3$-designs (but not $4$-designs), with four distances between distinct points in the $40$-point code and three in the $64$-point code. That leaves open the possibility of an ad hoc proof, similar to the one Cohn and Kumar gave for the regular $600$-cell, but the techniques from \cite{CK1} do not apply.
To test universal optimality, we have carried out $1000$ random trials with the potential function $f(r) = (4-r)^k$ for each $k$ from $1$ to $25$. We have also carried out $1000$ trials using Hardin and Sloane's program Gosset \cite{HSl} to construct good spherical codes (to take care of the case when $k$ is large). Of course these experimental tests fall far short of a rigorous proof, but the codes certainly appear to be universally optimal.
We believe that they are the only possible new universal optima consisting of at most $64$ points, because we have searched the space of such codes fairly thoroughly. By Proposition~1.4 in \cite{CK1}, any new universal optimum in ${\mathbb R}^n$ must contain at least $2n+1$ points. There are $812$ such cases with at most $64$ points in dimension at least $4$. In each case, we have completed at least $1000$ random trials (and usually more). There is no guarantee that we have found the global optimum in any of these cases, because it could have a tiny basin of attraction. However, a simple calculation shows that it is 99.99\% likely that in every case we have found every local minimum that occurs at least 2\% of the time. We have probably not always found the true optimum, but we believe that we have found every universal optimum within the range we have searched.
We have made our tables of conjectured harmonic optima for up to $64$ points in up to $32$ dimensions available via the world wide web at \begin{center} \url{http://aimath.org/data/paper/BBCGKS2006/}.
\end{center} They list the best energies we have found and the coordinates of the configurations that achieve them. We would be grateful for any improvements, and we intend to keep the tables up to date, with credit for any contributions received from others.
In addition to carrying out our own searches for universal optima, we have examined Sloane's tables \cite{S} of the best spherical codes known with at most $130$ points in ${\mathbb R}^4$ and ${\mathbb R}^5$, and we have verified that they contain no new universal optima. We strongly suspect that there are no undiscovered universal optima of any size in ${\mathbb R}^4$ or ${\mathbb R}^5$, based on Sloane's calculations as well as our searches, but it would be difficult to give definitive experimental evidence for such an assertion (we see no convincing arguments for why huge universal optima should not exist).
In general, our searches among larger codes have been far less exhaustive than those up to $64$ points: we have at least briefly examined well over four thousand different pairs $(n,N)$, but generally not in sufficient depth to make a compelling case that we have found the global minimum. (Every time we found a balanced harmonic optimum, with the exception of $128$ points in ${\mathbb R}^{15}$ and $256$ points in ${\mathbb R}^{16}$,
we completed at least $1000$ trials to test whether it was really optimal. However, we have not completed nearly as many trials in most other cases, and in any case $1000$ trials is not enough when studying large configurations.) Nevertheless, our strong impression is that universal optima are rare, and certainly that there are few small universal optima with large basins of attraction.
\section{Methodology}
\subsection{Techniques}
As discussed in the introduction, to minimize potential energy we apply gradient descent, starting from many random initial configurations. That is an unsophisticated approach, because gradient descent is known to perform more slowly in many situations than competing methods such as the conjugate gradient algorithm. However, it has performed adequately in our computations. Furthermore, gradient descent has particularly intuitive dynamics. Imagine particles immersed in a medium with enough viscosity that they never build up momentum. When a force acts on them according to the potential function, the configuration undergoes gradient descent. By contrast, for most other optimization methods the motion of the particles is more obscure, so for example it is more difficult to interpret information such as sizes of basins of attraction.
Once one has approximate coordinates, one can use the multivariate analogue of Newton's method to compute them to high precision (by searching for a zero of the gradient vector). Usually we do not need to do this, because the results of gradient descent are accurate enough for our purposes, but it is a useful tool to have available.
Obtaining coordinates is simply the beginning of our analysis. Because the coordinates encode not only the relative positions of the points but also an arbitrary orthogonal transformation of the configuration, interpreting the data can be subtle. A first step is to compute the Gram matrix. In other words, given points $x_1,\dots,x_N$, compute the $N \times N$ matrix $G$ whose entries are given by $G_{i,j} = \langle x_i, x_j \rangle$. The Gram matrix is invariant under orthogonal transformations, so it encodes almost precisely the information we care about. Its only drawback is that it depends on the arbitrary choice of how the points are ordered. That may sound like a mild problem, but there are many permutations of the points and it is far from clear how to choose one that best exhibits the configuration's underlying structure: compare Figure~\ref{fig:120gram} with Figure~\ref{fig:120gram2}.
\begin{figure}
\caption{The Gram matrix for a regular $600$-cell (black denotes $1$, white denotes $-1$, and gray interpolates between them), with the points ordered as returned by our gradient descent software.}
\label{fig:120gram}
\end{figure}
\begin{figure}
\caption{The Gram matrix for a regular $600$-cell, with the points ordered so as to display structure.}
\label{fig:120gram2}
\end{figure}
With luck, one can recognize the entries of the Gram matrix as exact algebraic numbers: more frequently than one might expect, they are rational or quadratic irrationals. Once one specifies the entire Gram matrix, the configuration is completely determined, up to orthogonal transformations. Furthermore, one can easily prove that the configuration exists (keep in mind that it may not be obvious that there actually is such an arrangement of points, because it was arrived at via inexact calculations). To do so, one need only check that the Gram matrix is symmetric and positive semidefinite and that its rank is at most $n$. Every such matrix is the Gram matrix of a set of $N$ points in ${\mathbb R}^n$, and if the diagonal entries are all $1$ then the points lie on $S^{n-1}$.
\begin{figure}
\caption{An orthogonal projection of the conjectured harmonic optimum with $44$ points in ${\mathbb R}^3$ onto a random plane. Line segments connect points at the minimal distance.}
\label{fig:3-44}
\end{figure}
\begin{figure}
\caption{An orthogonal projection of the conjectured harmonic optimum with $48$ points in ${\mathbb R}^4$ onto a random plane. Line segments connect points at the minimal distance.}
\label{fig:4-48}
\end{figure}
Unfortunately, the exact Gram matrix entries are not always apparent from the numerical data. There is also a deeper reason why simply recognizing the Gram matrix is unsatisfying: it provides only the most ``bare bones'' description of the configuration. Many properties, such as symmetry or connections to other mathematical structures, are far from apparent given only the Gram matrix, as one can see from Figures~\ref{fig:120gram} and~\ref{fig:120gram2}.
Choosing the right method to visualize the data can make the underlying patterns clearer. For example, projections onto low-dimensional subspaces are often illuminating. Determining the most revealing projection can itself be difficult, but sometimes even a random projection sheds light on the structure. For example, Figures~\ref{fig:3-44} and~\ref{fig:4-48} are projections of the harmonic optima with $44$ points in ${\mathbb R}^3$ and $48$ points in ${\mathbb R}^4$, respectively, onto random planes. The circular outline is the boundary of the projection of the sphere, and the line segments pair up points separated by the minimal distance. Figure~\ref{fig:3-44} shows a disassembled cube (in a manner described later in this section), while Figure~\ref{fig:4-48} is made up of octagons (see Subsection~\ref{subsec:48in4} for a description).
The next step in the analysis is the computation of the automorphism group. In general that is a difficult task, but we can make use of the wonderful software Nauty written by McKay \cite{M}. Nauty can compute the automorphism group of a graph as a permutation group on the set of vertices; more generally, it can compute the automorphism group of a vertex-labeled graph. We make use of it as follows.
Define a combinatorial automorphism of a configuration to be a permutation of the points that preserves inner products (equivalently, distances). If one forms an edge-labeled graph by placing an edge between each pair of points, labeled by their inner product, then the combinatorial automorphism group is the automorphism group of this labeled graph. Nauty is not directly capable of computing such a group, but it is straightforward to reduce the problem to that of computing the automorphism group of a related vertex-labeled graph. Thus, one can use Nauty to compute the combinatorial automorphism group.
Fortunately, combinatorial automorphisms are the same as geometric symmetries, provided the configuration spans ${\mathbb R}^n$. Specifically, every combinatorial automorphism is induced by a unique orthogonal transformation of ${\mathbb R}^n$. (When the points do not span ${\mathbb R}^n$, the orthogonal transformations are not unique, because there are nontrivial orthogonal transformations that fix the subspace spanned by the configuration.) Thus, Nauty provides an efficient method for computing the symmetry group.
Unfortunately, it is difficult to be certain that one has computed the correct group. Two inner products that appear equal numerically may differ by a tiny amount, in which case the computed symmetry group may be too large. However, that is rarely a problem even with single-precision floating point arithmetic, and it is difficult to imagine a fake symmetry that appears real to one hundred decimal places.
Once the symmetry group has been obtained, many further questions naturally present themselves. Can one recognize the symmetry group as a familiar group? How does its representation on ${\mathbb R}^n$ break up into irreducibles? What are the orbits of its action on the configuration?
Analyzing the symmetries of the configuration frequently determines much of the structure, but usually not all of it. For example, consider the simplest nontrivial case, namely five points on $S^2$. There are two natural ways to arrange them: with two antipodal points and three points forming an equilateral triangle on the orthogonal plane between them, or as a pyramid with four points forming a square in the hemisphere opposite a single point (and equidistant from it). In the first case everything is determined by the symmetries, but in the second there is one free parameter, namely how far the square is from the point opposite it. As one varies the potential function, the energy-minimizing value of this parameter will vary. (We conjecture that for every completely monotonic potential function, one of the configurations described in this paragraph globally minimizes the energy, but we cannot prove it.)
We define the \textit{parameter count\/} of a configuration to be the dimension of the space of nearby configurations that can be obtained from it by applying an arbitrary radial force law between all pairs of particles. For example, balanced configurations are those with zero parameters, and the family with a square opposite a point has one parameter.
To compute the parameter count for an $N$-point configuration, start by viewing it as an element of $(S^{n-1})^N$ (by ordering the points). Within the tangent space of this manifold, for each radial force law there is a tangent vector. To form a basis for all these force vectors, look at all distances $d$ that occur in the configuration, and for each of them consider the tangent vector that pushes each pair of points at distance $d$ in opposite directions but has no other effects. All force vectors are linear combinations of these ones, and the dimension of the space they span is the parameter count for the configuration. (One must be careful to use sufficiently high-precision arithmetic, as when computing the symmetry group.)
This information is useful because in a sense it shows how much humanly understandable structure we can expect to find. For example, in the five-point configuration with a square opposite a point, the distance between them will typically be some complicated number depending on the potential function. In principle one can describe it exactly, but in practice it is most pleasant to treat it as a black box and describe all the other distances in the configuration in terms of it. The parameter count tells how many independent parameters one should expect to arrive at. When the count is zero or one, it is reasonable to search for an elegant description, whereas when the count is twenty, it is likely that the configuration is unavoidably complex.
Figure~\ref{fig:params} shows the parameter counts of the conjectured harmonic optima in ${\mathbb R}^3$ with at most $64$ points, compared with the dimension of the full space of all configurations of their size. The counts vary wildly but are often quite a bit smaller than one might expect. Two striking examples are $61$ points with $111$ parameters, for which there is likely no humanly understandable description, and $44$ points with one parameter. The $44$-point configuration consists of the vertices of a cube and centers of its edges together with the $24$-point orbit (under the cube's symmetry group) of a point on a diagonal of a face, all projected onto a common sphere. The optimal choice of the point on the diagonal appears complicated.
\begin{figure}
\caption{Parameter counts for conjectured harmonic optima in ${\mathbb R}^3$. Horizontal or vertical lines occur at multiples of ten, and white circles denote the dimension of the configuration space.}
\label{fig:params}
\end{figure}
One subtlety in searching for local minima is that any given potential function will usually not detect all possible families of local minima that could occur for other potential functions. For example, for five points in ${\mathbb R}^3$, the family with a square opposite a point does not contain a local minimum for harmonic energy. One can attain a local minimum compared to the other members of the family, but it will be a saddle point in the space of all configurations. Nevertheless, the family does contain local minima for some other completely monotonic potential functions (such as $f(r)=1/r^s$ with $s$ large).
\subsection{Example} \label{subsec:27in6}
\begin{table} \begin{tabular}{ccccc} Harmonic energy & Frequency & Parameters & Maximal cosine & Symmetries\\ \hline $111.0000000000$ & $99971504$ & $0$ & $0.2500000000$ & $51840$\\ $112.6145815185$ & $653$ & $9$ & $0.4306480635$ & $120$\\ $112.6420995468$ & $22993$ & $18$ & $0.3789599707$ & $24$\\ $112.7360209988$ & $10$ & $2$ & $0.4015602076$ & $1920$\\ $112.8896851626$ & $4840$ & $13$ & $0.4041651631$ & $48$\\ \\ \end{tabular} \caption{Local minima for $27$ points in ${\mathbb R}^6$ (with frequencies out of $10^8$ random trials).} \label{table:27} \end{table}
For a concrete example, consider Table~\ref{table:27}, which shows the results of $10^8$ random trials for $27$ points in ${\mathbb R}^6$ (all decimal numbers in tables have been rounded). These parameters were chosen because, as shown in \cite{CK1}, there is a unique $27$-point universal optimum in ${\mathbb R}^6$, with harmonic energy $111$; it is called the Schl\"afli configuration. The column labeled ``frequency'' tells how many times each local minimum occurred. As one can see, the universal optimum occurred more than 99.97\% of the time, but we found a total of four others.
Strictly speaking, we have not proved that the local minima listed in Table~\ref{table:27} (other than the Schl\"afli configuration) even exist. They surely do, because we have computed them to five hundred decimal places and checked that they are local minima by numerically diagonalizing the Hessian matrix of the energy function on the space of configurations. However, we used high-precision floating point arithmetic, so this calculation does not constitute a rigorous proof, although it leaves no reasonable doubt. It is not at all clear whether there are additional local minima. We have not found any, but the fact that one of the local minima occurs only once in every ten million trials suggests that there might be others with even smaller basins of attraction.
The local minimum with energy $112.736\dots$ stands out in two respects besides its extreme rarity: it has many symmetries and it depends on few parameters. That suggests that it should have a simple description, and in fact it does, as a modification of the universal optimum. Only two inner products occur between distinct points in the Schl\"afli configuration, namely $-1/2$ and $1/4$. In particular, it is not antipodal, so one can define a new code by replacing a single point $x$ with its antipode $-x$. The remaining $26$ points can be divided into two clusters according to their distances from $-x$. Immediately after replacing $x$ with $-x$ the code will no longer be a local minimum, but if one allows it to equilibrate a minimum is achieved. (That is not obvious: the code could equilibrate to a saddle point, because it is starting from an unusual position.) All that changes is the distances of the two clusters from $-x$, while the relative positions within the clusters remain unchanged (aside from rescaling). These two distances are the two parameters of the code. The symmetries of the new code are exactly those of the universal optimum that fix $x$, so the size of the symmetry group is reduced by a factor of $27$.
The Schl\"afli configuration in ${\mathbb R}^6$ corresponds to the $27$ lines on a smooth cubic surface: there is a natural correspondence between points in the configuration and lines on a cubic surface so that the inner products of $-1/2$ occur between points corresponding to intersecting lines. (This dates back to Schoute \cite{Sch}. See also the introduction to \cite{CK1} for a brief summary of the correspondence.) One way to view the other local minima in Table~\ref{table:27} is as competitors to this classical configuration. It would be intriguing if they also had interpretations or consequences in algebraic geometry, but we do not know of any.
\section{Experimental phenomena}
\subsection{Analysis of Gram matrices} \label{subsec:gramanalysis}
\begin{figure}
\caption{The Clebsch graph.}
\label{fig:clebsch}
\end{figure}
For an example of how one might analyze a Gram matrix, consider the case of sixteen points in ${\mathbb R}^5$. This case also has a universal optimum, in fact the smallest known one that is not a regular polytope (although it is semiregular). It is the five-dimensional hemicube, which consists of half the vertices of the cube. More precisely, it contains the points $(\pm 1, \pm 1, \pm 1, \pm 1, \pm 1)/\sqrt{5}$ with an even number of minus signs. One can recover the full cube by including the antipode of each point, so the symmetries of the five-dimensional hemicube consist of half of those of the five-dimensional cube (namely, those that preserve the hemicube, rather than exchanging it with its complementary hemicube).
It is essentially an accident of five dimensions that the hemicube is universally optimal. Universal optimality also holds in lower dimensions, but only because the hemicubes turn out to be familiar codes (two antipodal points in two dimensions, a tetrahedron in three dimensions, and a cross polytope in four dimensions). In six dimensions the hemicube appears to be an optimal spherical code, but it does not minimize harmonic energy and is therefore not universally optimal. In seven dimensions, and presumably all higher dimensions, the hemicube is not even an optimal code.
The five-dimensional hemicube has the same structure as the Clebsch graph (see Figure~\ref{fig:clebsch}). The sixteen points correspond to the vertices of the graph; two distinct points have inner product $-3/5$ if they are connected by an edge in the graph and $1/5$ otherwise. This determines the Gram matrix and hence the full configuration.
For the harmonic potential energy, the hemicube appears to be the only local minimum with sixteen points in ${\mathbb R}^5$, but we do not know how to prove that. To construct another local minimum, one can attempt constructions such as moving a point to its antipode, as in Subsection~\ref{subsec:27in6}, but they yield saddle points. However, for other potential functions one sometimes finds other local minima (we have found up to two other nontrivial local minima). To illustrate the techniques from the previous section, we will analyze one of them here. It will turn out to have a fairly simple conceptual description; our goal here is to explain how to arrive at it, starting from the Gram matrix.
\begin{table} \begin{center}
\begin{tabular}{c||ccc|ccc||ccc|ccc|ccc} $1$ & $a$ & $a$ & $a$ & $a$ & $a$ & $a$ & $b$ & $b$ & $b$ & $b$ & $b$ & $b$ & $b$ & $b$ & $b$\\ \hline \hline $a$ & $1$ & $e$ & $e$ & $a^2$ & $a^2$ & $a^2$ & $d$ & $c$ & $c$ & $c$ & $c$ & $d$ & $c$ & $d$ & $c$\\ $a$ & $e$ & $1$ & $e$ & $a^2$ & $a^2$ & $a^2$ & $c$ & $d$ & $c$ & $d$ & $c$ & $c$ & $c$ & $c$ & $d$\\ $a$ & $e$ & $e$ & $1$ & $a^2$ & $a^2$ & $a^2$ & $c$ & $c$ & $d$ & $c$ & $d$ & $c$ & $d$ & $c$ & $c$\\ \hline $a$ & $a^2$ & $a^2$ & $a^2$ & $1$ & $e$ & $e$ & $d$ & $c$ & $c$ & $c$ & $d$ & $c$ & $c$ & $c$ & $d$\\ $a$ & $a^2$ & $a^2$ & $a^2$ & $e$ & $1$ & $e$ & $c$ & $d$ & $c$ & $c$ & $c$ & $d$ & $d$ & $c$ & $c$\\ $a$ & $a^2$ & $a^2$ & $a^2$ & $e$ & $e$ & $1$ & $c$ & $c$ & $d$ & $d$ & $c$ & $c$ & $c$ & $d$ & $c$\\ \hline\hline $b$ & $d$ & $c$ & $c$ & $d$ & $c$ & $c$ & $1$ & $f$ & $f$ & $f$ & $g$ & $g$ & $f$ & $g$ & $g$\\ $b$ & $c$ & $d$ & $c$ & $c$ & $d$ & $c$ & $f$ & $1$ & $f$ & $g$ & $f$ & $g$ & $g$ & $f$ & $g$\\ $b$ & $c$ & $c$ & $d$ & $c$ & $c$ & $d$ & $f$ & $f$ & $1$ & $g$ & $g$ & $f$ & $g$ & $g$ & $f$\\ \hline$b$ & $c$ & $d$ & $c$ & $c$ & $c$ & $d$ & $f$ & $g$ & $g$ & $1$ & $f$ & $f$ & $f$ & $g$ & $g$\\ $b$ & $c$ & $c$ & $d$ & $d$ & $c$ & $c$ & $g$ & $f$ & $g$ & $f$ & $1$ & $f$ & $g$ & $f$ & $g$\\ $b$ & $d$ & $c$ & $c$ & $c$ & $d$ & $c$ & $g$ & $g$ & $f$ & $f$ & $f$ & $1$ & $g$ & $g$ & $f$\\ \hline$b$ & $c$ & $c$ & $d$ & $c$ & $d$ & $c$ & $f$ & $g$ & $g$ & $f$ & $g$ & $g$ & $1$ & $f$ & $f$\\ $b$ & $d$ & $c$ & $c$ & $c$ & $c$ & $d$ & $g$ & $f$ & $g$ & $g$ & $f$ & $g$ & $f$ & $1$ & $f$\\ $b$ & $c$ & $d$ & $c$ & $d$ & $c$ & $c$ & $g$ & $g$ & $f$ & $g$ & $g$ & $f$ & $f$ & $f$ & $1$ \end{tabular} \end{center} \caption{Gram matrix for $16$ points in ${\mathbb R}^5$; here $c = ab + (1/2)\sqrt{(1-a^2)(1-b^2)/2}$, $d = ab - \sqrt{(1-a^2)(1-b^2)/2}$,
$e = (3a^2-1)/2$, $f = (3b^2-1)/2$, and $g = (3b^2+1)/4$.} \label{table:gram16in5} \end{table}
The specific example we will analyze arises as a local minimum for the potential function $r \mapsto (4-r)^{12}$. It is specified by Table~\ref{table:gram16in5} with $$ a \approx -0.499890010934 $$ and $$ b \approx 0.201039702365 $$ (the lines in the table are just for visual clarity).
The first step is to recognize the structure in the Gram matrix. Table~\ref{table:gram16in5} highlights this structure, but of course it takes effort to bring the Gram matrix into such a simple form (by recognizing algebraic relations between the Gram matrix entries and reordering the points so as to emphasize patterns). The final form of the Gram matrix exhibits the configuration as belonging to a family specified by two parameters $a$ and $b$ with absolute value less than $1$. As described in the table's caption, all the other inner products are simple algebraic functions of $a$ and $b$. To check that this Gram matrix corresponds to an actual code in $S^4$, it suffices to verify that its eigenvalues are $0$ ($11$ times), $1+6a^2+9b^2$, and $(15-(6a^2+9b^2))/4$ ($4$ times): there are only five nonzero eigenvalues and they are clearly positive.
Table~\ref{table:gram16in5} provides a complete description of the configuration, but it is unilluminating. To describe the code using elegant coordinates, one must have a more conceptual understanding of it. A first step in that direction is the observation that the first point in Table~\ref{table:gram16in5} has inner product $a$ or $b$ with every other point. In other words, the remaining $15$ points lie on two parallel four-dimensional hyperplanes, equidistant from the first point. A natural guess is that as $a$ and $b$ vary, the structures within these hyperplanes are simply rescaled as the corresponding cross sections of the sphere change in size, and some calculation verifies that this guess is correct.
To understand these two structures and how they relate to each other, set $a=b=0$ so that they form a $15$-point configuration in ${\mathbb R}^4$. Its Gram matrix is of course obtained by removing the first row and column of Table~\ref{table:gram16in5} and setting $a=b=0$, $e=f=-1/2$, $g=1/4$, $c=\sqrt{2}/4$, and $d=-\sqrt{2}/2$. The two substructures consist of the first six points and the last nine, among the fifteen remaining points.
Understanding the $16$-point codes in ${\mathbb R}^5$ therefore simply comes down to understanding this single $15$-point code in ${\mathbb R}^4$. (It is also the $15$-point code from Table~\ref{table:balanced}. Incidentally, Sloane's tables \cite{S} show that it is not an optimal spherical code.) The key to understanding it is choosing the right coordinates. The first six points form two orthogonal triangles, and they are the simplest part of this configuration, so it is natural to start with them.
Suppose the points $v_1,v_2,v_3$ and $v_4,v_5,v_6$ form two orthogonal equilateral triangles in a four-dimensional vector space. The most natural coordinates to choose for the vector space are the inner products with these six points. Of course the sum of the three inner products with any triangle must vanish (because $v_1+v_2+v_3=v_4+v_5+v_6=0$), so there are only four independent coordinates, but we prefer not to break the symmetry by discarding two coordinates.
The other nine points in the configuration are determined by their inner products with $v_1,\dots,v_6$. Each of them will have inner product $d$ with one point in each triangle and $c$ with the remaining two points. As pointed out above we must have $d+2c=0$, and in fact $d=-\sqrt{2}/2$ and $c=\sqrt{2}/4$ because the points are all unit vectors. Note that one can read off all this information from the $c$ and $d$ entries in Table~\ref{table:gram16in5}.
There is an important conceptual point in the last part of this analysis. Instead of focusing on the internal structure among the last nine points, it is most fruitful to study how they relate to the previously understood subconfiguration of six points. However, once one has a complete description, it is important to examine the internal structure as well.
The pattern of connections among the last nine points in Table~\ref{table:gram16in5} is described by the Paley graph on nine vertices, which is the unique strongly regular graph with parameters $(9,4,1,2)$. (The Paley graph is isomorphic to its own complement, so the edges could correspond to inner product either $f$ or $g$.) Strongly regular graphs, and more generally association schemes, frequently occur as substructures of minimal-energy configurations. It is remarkable to see such highly ordered structures spontaneously occurring via energy minimization.
\subsection{Other small examples}
To illustrate some of the other phenomena that can occur, in this subsection we will analyze the case of $12$ points in ${\mathbb R}^4$. We have observed two families of local minima, both of which are slightly more subtle than the previous examples.
For $0 < a < 1/2$, set $b = a - 1$, $c = -3a + 1$, and $d = 4a - 1$, and consider the Gram matrix shown in Table~\ref{table:gram12in4a} (its nonzero eigenvalues are $12a$ and $6-12a$, each with multiplicity $2$). Unlike the example in Subsection~\ref{subsec:gramanalysis}, the symmetry group acts transitively on the points, so there are no distinguished points to play a special role in the analysis. Nevertheless, one can analyze it as follows.
\begin{table} \begin{center}
\begin{tabular}{ccc|ccc|ccc|ccc} $1$ & $c$ & $c$ & $d$ & $b$ & $b$ & $-2a$ & $a$ & $a$ & $-2a$ & $a$ & $a$\\ $c$ & $1$ & $c$ & $b$ & $d$ & $b$ & $a$ & $-2a$ & $a$ & $a$ & $-2a$ & $a$\\ $c$ & $c$ & $1$ & $b$ & $b$ & $d$ & $a$ & $a$ & $-2a$ & $a$ & $a$ & $-2a$\\ \hline$d$ & $b$ & $b$ & $1$ & $c$ & $c$ & $-2a$ & $a$ & $a$ & $-2a$ & $a$ & $a$\\ $b$ & $d$ & $b$ & $c$ & $1$ & $c$ & $a$ & $-2a$ & $a$ & $a$ & $-2a$ & $a$\\ $b$ & $b$ & $d$ & $c$ & $c$ & $1$ & $a$ & $a$ & $-2a$ & $a$ & $a$ & $-2a$\\ \hline$-2a$ & $a$ & $a$ & $-2a$ & $a$ & $a$ & $1$ & $c$ & $c$ & $d$ & $b$ & $b$\\ $a$ & $-2a$ & $a$ & $a$ & $-2a$ & $a$ & $c$ & $1$ & $c$ & $b$ & $d$ & $b$\\ $a$ & $a$ & $-2a$ & $a$ & $a$ & $-2a$ & $c$ & $c$ & $1$ & $b$ & $b$ & $d$\\ \hline$-2a$ & $a$ & $a$ & $-2a$ & $a$ & $a$ & $d$ & $b$ & $b$ & $1$ & $c$ & $c$\\ $a$ & $-2a$ & $a$ & $a$ & $-2a$ & $a$ & $b$ & $d$ & $b$ & $c$ & $1$ & $c$\\ $a$ & $a$ & $-2a$ & $a$ & $a$ & $-2a$ & $b$ & $b$ & $d$ & $c$ & $c$ & $1$ \end{tabular} \end{center} \caption{Gram matrix for $12$ points in ${\mathbb R}^4$; here $0 < a < 1/2$, $b = a - 1$, $c = -3a + 1$, and $d = 4a - 1$.} \label{table:gram12in4a} \end{table}
Let $v_1,v_2,v_3 \in S^1$ be the vertices of an equilateral triangle in ${\mathbb R}^2$, and let $v_4$ and $v_5$ be unit vectors that are orthogonal to each other and to each of $v_1$, $v_2$, and $v_3$. For $0 < \alpha < 1$, consider the twelve points $\alpha v_i \pm \sqrt{1-\alpha^2} v_4$ and $-\alpha v_i \pm \sqrt{1-\alpha^2} v_5$ with $1 \le i \le 3$. If one sets $a = \alpha^2/2$ then they have Table~\ref{table:gram12in4a} as a Gram matrix.
The Gram matrix shown in Table~\ref{table:gram12in4b} is quite different. There, $0 < a < 1/3$, $b = 1-12a^2$, $c = 6a^2-1$, and $d = 18a^2-1$. The nonzero eigenvalues are $4/3+24a^2$ (with multiplicity $3$) and $8-72a^2$, which are positive because $a < 1/3$.
\begin{table} \begin{center}
\begin{tabular}{cccc|cccc|cccc} $1$ & $-1/3$ & $-1/3$ & $-1/3$ & $-3a$ & $a$ & $a$ & $a$ & $-3a$ & $a$ & $a$ & $a$\\ $-1/3$ & $1$ & $-1/3$ & $-1/3$ & $a$ & $-3a$ & $a$ & $a$ & $a$ & $-3a$ & $a$ & $a$\\ $-1/3$ & $-1/3$ & $1$ & $-1/3$ & $a$ & $a$ & $-3a$ & $a$ & $a$ & $a$ & $-3a$ & $a$\\ $-1/3$ & $-1/3$ & $-1/3$ & $1$ & $a$ & $a$ & $a$ & $-3a$ & $a$ & $a$ & $a$ & $-3a$\\ \hline $-3a$ & $a$ & $a$ & $a$ & $1$ & $b$ & $b$ & $b$ & $d$ & $c$ & $c$ & $c$\\ $a$ & $-3a$ & $a$ & $a$ & $b$ & $1$ & $b$ & $b$ & $c$ & $d$ & $c$ & $c$\\ $a$ & $a$ & $-3a$ & $a$ & $b$ & $b$ & $1$ & $b$ & $c$ & $c$ & $d$ & $c$\\ $a$ & $a$ & $a$ & $-3a$ & $b$ & $b$ & $b$ & $1$ & $c$ & $c$ & $c$ & $d$\\ \hline $-3a$ & $a$ & $a$ & $a$ & $d$ & $c$ & $c$ & $c$ & $1$ & $b$ & $b$ & $b$\\ $a$ & $-3a$ & $a$ & $a$ & $c$ & $d$ & $c$ & $c$ & $b$ & $1$ & $b$ & $b$\\ $a$ & $a$ & $-3a$ & $a$ & $c$ & $c$ & $d$ & $c$ & $b$ & $b$ & $1$ & $b$\\ $a$ & $a$ & $a$ & $-3a$ & $c$ & $c$ & $c$ & $d$ & $b$ & $b$ & $b$ & $1$ \end{tabular} \end{center} \caption{Gram matrix for $12$ points in ${\mathbb R}^4$; here $0 < a < 1/3$, $b = 1-12a^2$, $c = 6a^2-1$, and $d = 18a^2-1$.} \label{table:gram12in4b} \end{table}
In this Gram matrix the first four points form a distinguished tetrahedron, and the remaining eight points form two identical tetrahedra. They lie in hyperplanes parallel to and equidistant from the (equatorial) hyperplane containing the distinguished tetrahedron. If one sets $a=1/3$, then all three tetrahedra lie in the same hyperplane, with $b=-1/3$, $c=-1/3$, $d=1$, and $-3a=-1$. In particular, one can see that the two parallel tetrahedra are in dual position to the distinguished tetrahedron. As the parameter $a$ varies, all that changes is the distance between the parallel hyperplanes. (As $a$ tends to zero some points coincide. One could also use $a$ between $0$ and $-1/3$, but that corresponds to using parallel tetrahedra oriented the same way, instead of dually, which generally yields higher potential energy.)
This sort of layered structure occurs surprisingly often. One striking example is $74$ points in ${\mathbb R}^5$. The best such spherical code known consists of a regular $24$-cell on the equatorial hyperplane together with two dual $24$-cells on parallel hyperplanes as well as the north and south poles. If one chooses the two parallel hyperplanes to have inner products $\pm \sqrt{\sqrt{5}-2}$ with the poles, then the cosine of the minimal angle is exactly $(\sqrt{5}-1)/2$. That agrees numerically with Sloane's tables \cite{S} of the best codes known, but of course there is no proof that it is optimal.
There is almost certainly no universally optimal $12$-point configuration in ${\mathbb R}^4$. Aside from some trivial examples for degenerate potential functions, the two cases we have analyzed in this subsection are the only two types of local minima we have observed. For $f(r) = (4-r)^k$ with $k \in \{1,2\}$ they both achieve the same minimal energy (along with a positive-dimensional family of other configurations). For $3 \le k \le 9$ the first family appears to achieve the global minimum, while for $k \ge 10$ the second appears to. As $k$ tends to infinity the energy minimization problem turns into the problem of finding the optimal spherical code. That problem appears to be solved by taking $a=1/4$ in the second family, so that the minimal angle has cosine $1/4$, which agrees with Sloane's tables \cite{S}.
We conjecture that one or the other of these two families minimizes each completely monotonic potential function. This conjecture is somewhat difficult to test, but we are not aware of any counterexamples.
The examples we have analyzed so far illustrate three basic principles: \begin{enumerate} \item Small or medium-sized local minima tend to occur in low-dimensional families as one varies the potential function. The dimension is not usually as low as in these examples, but it is typically far lower than the dimension of the space of all configurations (see Figure~\ref{fig:params}).
\item These families frequently contain surprisingly symmetrical substructures (such as regular polytopes or configurations described by strongly regular graphs or other association schemes).
\item The same substructures and construction methods occur in many different families. \end{enumerate}
\subsection{$2n+1$ points in ${\mathbb R}^n$}
Optimal spherical codes are known for up to $2n$ points in ${\mathbb R}^n$ (see Theorem~6.2.1 in \cite{B}), but not for $2n+1$ points, except in ${\mathbb R}^2$ and ${\mathbb R}^3$. Here we present a natural conjecture for all dimensions.
These codes consist of a single point we call the north pole together with two $n$-point simplices on hyperplanes parallel to the equator; the simplices are in dual position relative to each other. Each point in the simplex closer to the north pole will have inner product $\alpha$ with the north pole, and the inner product between any two points in the further simplex will be $\alpha$. The number $\alpha$ can be chosen so that each point in either one of the simplices has inner product $\alpha$ with each point in the other simplex except the point furthest from it. To achieve that, $\alpha$ must be the unique root between $0$ and $1/n$ of the cubic equation $$ (n^3-4n^2+4n)x^3 - n^2x^2 -nx + 1 = 0. $$ As $n \to \infty$, $\alpha = 1/n - \sqrt{2}/n^{3/2} + O(1/n^2)$.
Let $\mathcal{C}_n \subset S^{n-1}$ be this spherical code, with $\alpha$ chosen as above. The cosine of the minimal angle in $\mathcal{C}_n$ is $\alpha$.
\begin{conjecture} \label{conj:opt} For each $n \ge 2$, the code $\mathcal{C}_n$ is an optimal spherical code. Furthermore, every optimal $(2n+1)$-point code in $S^{n-1}$ is isometric to $\mathcal{C}_n$. \end{conjecture}
On philosophical grounds it seems reasonable to expect to be able to prove this conjecture: most of the difficulty in packing problems comes from the idiosyncrasies of particular spaces and dimensions, so when a phenomenon occurs systematically one expects a conceptual reason for it. However, we have made no serious progress towards a proof.
One can also construct $\mathcal{C}_n$ as follows. Imagine adding one point to a regular cross polytope by placing it in the center of a facet. The vertices of that facet form a simplex equidistant from the new point, as do the vertices of the opposite facet. The structure is identical to the code $\mathcal{C}_n$, except for the distances from the new point, and the proper distances can be obtained by allowing the code to equilibrate with respect to increasingly steep potential functions.
It appears that for $n>2$ these codes do not minimize harmonic energy, so they are not universally optimal. When $n=4$, something remarkable occurs with the (conjectured) minimum for harmonic energy. That configuration consists of a regular pentagon together with two pairs of antipodal points that are orthogonal to each other and to the pentagon. If one uses gradient descent to minimize harmonic energy, it seems to converge with probability $1$ to this configuration, but the convergence is very slow, much slower than for any other harmonic energy minimum we have found. The reason is that this configuration is a degenerate minimum for the harmonic energy, in the sense that the Hessian matrix has more zero eigenvalues than one would expect.
Each of the nine points has three degrees of freedom, so the Hessian matrix has twenty-seven eigenvalues. Specifically, they are $0$ (ten times), $4$, $7/4$ (twice), $9/2$ (four times), $9$ (twice), $25/8\pm\sqrt{209}/8$ (twice), and $31/8 \pm \sqrt{161}/8$ (twice). Six of the zero eigenvalues are unsurprising, because they come from the problem's invariance under the six-dimensional Lie group $O(4)$, but the remaining four are surprising indeed.
The corresponding eigenvectors are infinitesimal displacements of the nine points that produce only a fourth-order change in energy, rather than the expected second-order change. To construct them, do not move the antipodal pairs of points at all, and move the pentagon points orthogonally to the plane of the pentagon. Each must be displaced by $(1-\sqrt{5})/2$ times the sum of the displacements of its two neighbors. This yields a four-dimensional space of displacements, which are the surprising eigenvectors.
This example is noteworthy because it shows that harmonic energy is not always a Morse function on the space of all configurations. One might hope to apply Morse theory to understand the relationship between critical points for energy and the topology of the configuration space, but the existence of degenerate critical points could substantially complicate this approach.
\subsection{$2n+2$ points in ${\mathbb R}^n$} \label{subsec:diplo}
After seeing a conjecture for the optimal $(2n+1)$-point code in $S^{n-1}$, it is natural to wonder about $2n+2$ points. A first guess is the union of a simplex and its dual simplex (in other words, the antipodal simplex), which was named the diplo-simplex by Conway and Sloane \cite{CS2}. One can prove using the linear programming bounds for real projective space that this code is the unique optimal antipodal spherical code of its size and dimension (see Chapter~9 of \cite{CS}), but for $n>2$ it is not even locally optimal as a spherical code (see Appendix~\ref{appendix:diplo}) and we do not have a conjecture for the true answer.
For the problem of minimizing harmonic energy, the diplo-simplex is suboptimal for $3 \le n \le 5$ but appears optimal for all other $n$.
\begin{figure}
\caption{The Petersen graph.}
\label{fig:petersen}
\end{figure}
One particularly elegant case is when $n=4$. The midpoints of the edges of a regular simplex form a $10$-point code in $S^3$ with maximal inner product $1/6$, and Bachoc and Vallentin \cite{BV} have proved that it is the unique optimal spherical code. It is also the kissing configuration of the five-dimensional hemicube (the universally optimal $16$-point configuration in ${\mathbb R}^5$). In other words, it consists of the ten nearest neighbors of any point in that code. This code appears to minimize harmonic energy, but it is not the unique minimum: two orthogonal regular pentagons have the same harmonic energy.
As pointed out in the introduction of \cite{CK1}, this code is not universally optimal, but it nevertheless seems to be an exceedingly interesting configuration. Only the inner products $-2/3$ and $1/6$ occur (besides $1$, of course). If one forms a graph whose vertices are the points in the code and whose edges correspond to pairs of points with inner product $-2/3$, then the result is the famous Petersen graph (Figure~\ref{fig:petersen}).
Like the nontrivial universal optima in dimensions $5$ through $8$, this code consists of the vertices of a semiregular polytope that has simplices and cross polytopes as facets, with a simplex and two cross polytopes meeting at each face of codimension~$3$. Its kissing configuration is also semiregular, with square and triangular facets, but it is a suboptimal code (specifically, a triangular prism).
\subsection{$48$ points in ${\mathbb R}^4$} \label{subsec:48in4}
One of the most beautiful configurations we have found is a $48$-point code in ${\mathbb R}^4$. The points form six octagons that map to the vertices of a regular octahedron under the Hopf map from $S^3$ to $S^2$. Recall that if we identify ${\mathbb R}^4$ with ${\mathbb C}^2$ using the inner product $\langle x,y \rangle = \mathop{\textup{Re}} \bar x ^t y$ on ${\mathbb C}^2$, then the Hopf map sends $(z,w)$ to $z/w \in {\mathbb C} \cup \{\infty\}$, which we can identify with $S^2$ via stereographic projection to a unit sphere centered at the origin. The fibers of the Hopf map are the circles given by intersecting $S^3$ with the complex lines in ${\mathbb C}^2$.
Sloane, Hardin, and Cara \cite{SHC} found a spherical $7$-design of this form, consisting of two dual $24$-cells, and it has the same minimal angle as our code (which is the minimal angle in an octagon), but it is a different code. In ${\mathbb C}^2$, the Sloane-Hardin-Cara code is the union of the orbits under multiplication by eighth roots of unity of the points $(1,0)$, $(0,1)$, $(\pm 1, 1)/\sqrt{2}$, and $(\pm i, 1)/\sqrt{2}$. Our code is the union of the orbits of $(1,0)$, $(0,1)$, $(\pm\zeta,\zeta)/\sqrt{2}$, and $(\pm i\zeta^2,\zeta^2)/\sqrt{2}$, where $\zeta = e^{\pi i/12}$. Each octagon has been rotated by a multiple of $\pi/12$ radians. Because a regular octagon is invariant under rotation by $\pi/4$ radians, there are only three distinct rotations by multiples of $\pi/12$. Each such rotation occurs for the octagons lying over two antipodal vertices of the octahedron in the base space $S^2$ of the Hopf fibration.
It is already remarkable that performing these rotations yields a balanced configuration with lower harmonic energy than the union of the $24$-cell and its dual, but the structure of the code's convex hull is especially noteworthy. The facets can be computed using the program Polymake \cite{GJ}. The facets of the dual $24$-cell configuration are $288$ irregular tetrahedra, all equivalent under the action of the symmetry group (and each possessing $8$ symmetries). By contrast, our code has $128$ facets forming two orbits under the symmetry group: one orbit of $96$ irregular tetrahedra and one of $32$ irregular octahedra. The irregular octahedra are obtained from regular ones by rotating one of the facets, which are equilateral triangles, by an angle of $\pi/12$. We will use the term ``twisted facets'' to denote the rotated facet and its opposite facet (by symmetry, either one could be viewed as rotated relative to the other).
The octahedra in our configuration meet other octahedra along their twisted facets and simplices along their other facets. Grouping the octahedra according to adjacency therefore yields twisted chains of octahedra. Each chain consists of eight octahedra, and they span the $3$-sphere along great circles. The total twist amounts to $8\pi/12=2\pi/3$, from which it follows that the chains close with facets aligned correctly. The $32$ octahedra form four such chains, and the corresponding great circles are fibers in the same Hopf fibration as the vertices of the configuration. These Hopf fibers map to the vertices of a regular tetrahedron in $S^2$. It is inscribed in the cube dual to the octahedron formed by the images of the vertices of the code.
Another way to view the facets of this polytope, or any spherical polytope, is as holes in the spherical code. More precisely, the (outer) facet normals of any full-dimensional polytope inscribed in a sphere are the holes in the spherical code (i.e., the points on the sphere that are local maxima for distance from the code). The normals of the octahedral facets are the deep holes in this code (i.e., the points at which the distance is globally maximized). Notice that these points are defined using the intrinsic geometry of the sphere, rather than relying on its embedding in Euclidean space.
The octahedral facets of our code can be thought of as more important than the tetrahedral facets. The octahedra appear to us to have prettier, clearer structure, and once they have been placed, the entire code is determined (the tetrahedra simply fill the gaps). This idea is not mathematically precise, but it is a common theme in many of our calculations: when we examine the facet structure of a balanced code, we often find a small number of important facets and a large number of less meaningful ones.
\subsection{Hopf structure} \label{subsec:Hopf}
As in the previous example, many notable codes in $S^3$, $S^7$, or $S^{15}$ can be understood using the complex, quaternionic, or octonionic Hopf maps (see for example \cite{D} and \cite{AP-G3}). In this subsection, we describe this phenomenon for the regular $120$-cell and $600$-cell in $S^3$. The Hopf structure on the $600$-cell is mathematical folklore, but we have not been able to locate it in the published literature, while the case of the $120$-cell is more subtle and may not have been previously examined.
\begin{figure}
\caption{The $60$-point polytope in ${\mathbb R}^3$ over which the regular $120$-cell fibers. }
\label{fig:poly60}
\end{figure}
The $H_4$ reflection group (which is the symmetry group of both polytopes) contains elements of order $10$ that act on ${\mathbb R}^4$ with no fixed points other than the origin. If one chooses such an element, then ${\mathbb R}^4$ has the structure of a two-dimensional complex vector space such that this element acts via multiplication by a primitive $10$-th root of unity. The orbits are regular $10$-gons lying in Hopf fibers. In the case of the regular $600$-cell, this partitions the $120$ vertices into $12$ regular $10$-gons lying in Hopf fibers over the vertices of a regular icosahedron in $S^2$. For the regular $120$-cell (with $600$ vertices), the corresponding polyhedron in $S^2$ has $60$ vertices, but it is far from obvious what it is. We know of no way to determine it without calculation, but computing with coordinates reveals that it is a distorted rhombicosidodecahedron, with the square facets replaced by golden rectangles. Specifically, its facets are $12$ regular pentagons, $20$ equilateral triangles, and $30$ golden rectangles. The golden rectangles meet pentagons along their long edges and triangles along their short edges. Figure~\ref{fig:poly60} shows the orthogonal projection into the plane containing a pentagonal face (gray vertices and edges are on the far side of the polyhedron).
\subsection{Facet structure of universal optima} \label{subsec:facet}
The known low-dimensional universal optima (through dimension $8$) are all regular or semiregular polytopes, whose facets are well known to be regular simplices or cross polytopes. However, there seems to have been little investigation of the facets of the higher-dimensional universal optima from Table~\ref{table:universal}. In this subsection we will look at the smallest higher-dimensional cases: $\universal{100}{22}$, $\universal{112}{21}$, $\universal{162}{21}$, $\universal{275}{22}$, $\universal{552}{23}$, and $\universal{891}{22}$ (recall that $\universal{N}{n}$ denotes the $N$-point code in ${\mathbb R}^n$ from Table~\ref{table:universal}, when it is unique). Each of the first four is a two-distance set given by a spectral embedding of a strongly regular graph. (Recall that a spectral embedding is obtained by orthogonally projecting the standard orthonormal basis into an eigenspace of the adjacency matrix of the graph.) The last two have three distances between distinct points.
These codes have enormous numbers of facets (more than seventy-five trillion for $\universal{552}{23}$), so it is not feasible to find the facets using general-purpose methods. Instead, one must make full use of the large symmetry groups of these configurations. With Dutour Sikiri\'c's package Polyhedral \cite{DS} for the program GAP \cite{GAP}, that can be done for these configurations. We have used it to compute complete lists of orbits of facets under the action of the symmetry group. (The results are rigorous, because we use exact coordinates for the codes. In particular, when necessary we use the columns of the Gram matrices to embed scalar multiples of these codes isometrically into high-dimensional spaces using only rational coordinates.) Of course, the results of this computation then require analysis by hand to reveal their structure.
For an introductory example, it is useful to review the case of the five-dimensional hemicube (see Subsection~\ref{subsec:gramanalysis}). It has ten obvious facets contained in the ten facets of the cube. Each is a four-dimensional hemicube, i.e., a regular cross polytope. The remaining facets are regular simplices (one opposite each point in the hemicube).
One can view the five-dimensional hemicube as an antiprism formed by two four-dimensional cross polytopes in parallel hyperplanes. The cross polytopes are arranged so that each one's vertices point towards deep holes of the other. (The deep holes of a cross polytope are the vertices of the dual cube, and in four dimensions the vertices of that cube consist of two cross polytopes. The fact that the deep holes of a four-dimensional cross polytope contain another such cross polytope is crucial for this construction to make sense.) Of course, the distance between the parallel hyperplanes is chosen so as to maximize the minimal distance. What is remarkable about this antiprism is that it is far more symmetrical than one might expect: normally the two starting facets of an antiprism play a very different role from the facets formed when taking the convex hull, but in this case extra symmetries occur. The simplest case of such extra symmetries is the construction of a cross polytope as an antiprism made from two regular simplices in dual position.
The three universal optima $\universal{100}{22}$, $\universal{112}{21}$, and $\universal{162}{21}$ are each given by an unusually symmetric antiprism construction analogous to that of the hemicube. In each case, the largest facets (i.e., those containing the most vertices) contain half the vertices. These facets are themselves spectral embeddings of strongly regular graphs (the Hoffman-Singleton graph, the Gewirtz graph, and the unique $(81,20,1,6)$ strongly regular graph). Within the universal optima, the largest facets occur in pairs in parallel hyperplanes, and the vertices of each facet in a pair point towards holes in the other. These holes belong to a single orbit under the symmetry group of the facet, and that orbit is the disjoint union of several copies of the vertices of the facet: two copies for the Hoffman-Singleton and Gewirtz cases and four in the third case. These holes are the deepest holes in the Hoffman-Singleton case; in the other two cases, they are not quite the deepest holes (there are not enough deep holes for the construction to work using them).
Brouwer and Haemers \cite{BH1,BH2} discovered the underlying combinatorics of these constructions (i.e., that the strongly regular graphs corresponding to the universal optima can be naturally partitioned into two identical graphs). However, the geometric interpretation as antiprisms appears to be new.
The universal optima $\universal{100}{22}$, $\universal{112}{21}$, and $\universal{162}{21}$ are antiprisms, but that cannot possibly be true for $\universal{275}{22}$, because $275$ is odd. Instead, the McLaughlin configuration $\universal{275}{22}$ is analogous to the Schl\"afli configuration $\universal{27}{6}$. Both are two-distance sets. In the Schl\"afli configuration, the neighbors of each point form a five-dimensional hemicube and the non-neighbors form a five-dimensional cross polytope. Both the hemicube and the cross polytope are unusually symmetric antiprisms, and their vertices point towards each other's deep holes. (The deep holes of the hemicube form a cross polytope, and those of the cross polytope form a cube consisting of two hemicubes.) The McLaughlin configuration is completely analogous: the neighbors of each point form $\universal{162}{21}$ and the non-neighbors form $\universal{112}{21}$. They point towards each other's deep holes; this is possible because the deep holes of $\universal{112}{21}$ consist of four copies of $\universal{162}{21}$, and the deep holes of $\universal{162}{21}$ consist of two copies of $\universal{112}{21}$. Furthermore, the deep holes in these two universal optima are of exactly the same depth (i.e., distance to the nearest point in the code), as is also the case for the five-dimensional cross polytope and hemicube used to form the Schl\"afli configuration.
The Schl\"afli and McLaughlin configurations both have the property that their deep holes are the antipodes of their vertices. Thus, it is natural to form antiprisms from two parallel copies of them, with vertices pointed at each other's deep holes. That yields antipodal configurations of $54$ points in ${\mathbb R}^7$ and $550$ points in ${\mathbb R}^{23}$. If one also includes the two points orthogonal to the parallel hyperplanes containing the original two copies, then this construction gives the universal optima $\universal{56}{7}$ and $\universal{552}{23}$.
\begin{table} \begin{center}
\begin{tabular}{cc|cc} Vertices & Number of orbits & Vertices & Number of orbits\\ \hline $22$ & $92$ & $30$ & $1$\\ $23$ & $13$ & $31$ & $1$\\ $24$ & $6$ & $36$ & $1$\\ $25$ & $3$ & $42$ & $1$\\ $27$ & $3$ & $50$ & $1$\\ $28$ & $1$\\ \\ \end{tabular} \end{center} \caption{Number of orbits of facets of different sizes in the Higman-Sims configuration $\universal{100}{22}$.}\label{table:22-100facets} \end{table}
Each high-dimensional universal optimum has many types of facets of different sizes. For example, the facets of the Higman-Sims configuration $\universal{100}{22}$ form $123$ orbits under the action of the symmetry group (see Table~\ref{table:22-100facets}). The largest facets, which come from the Hoffman-Singleton graph as described above, are by far the most important, but each type of facet appears to be of interest. They are often more subtle than one might expect. For example, it is natural to guess that the facets with $42$ vertices would be regular cross polytopes, based on the number of vertices, but they are not. Instead, when rescaled to the unit sphere they have the following structure:
The facets with $42$ vertices are two-distance sets on the unit sphere in ${\mathbb R}^{21}$, with inner products $1/29$ and $-13/29$. If we define a graph on the vertices by letting edges correspond to pairs with inner product $-13/29$, then this graph is the bipartite incidence graph for points and lines in the projective plane ${\mathbb P}^2({\mathbb F}_4)$. To embed this graph in ${\mathbb R}^{21}$, represent the $21$ points in ${\mathbb P}^2({\mathbb F}_4)$ as the permutations of $(a,b,\dots,b)$, where $a^2+20b^2=1$ and $2ab+19b^2=1/29$. Specifically, take $a = 0.9977\dots$ and $b = 0.0151\dots$ (these are fourth-degree algebraic numbers). Choose $c$ and $d$ so that $5c^2+16d^2=1$ and $8cd+c^2+12d^2=1/29$ (specifically, take $c = -0.4362\dots$ and $d = 0.0550\dots$). Then embed the $21$ lines into ${\mathbb R}^{21}$ as permutations of $(c,c,c,c,c,d,\dots,d)$, where the five $c$ entries correspond to the points contained in the line. This embedding gives the inner products of $1/29$ and $-13/29$, as desired (and in fact those are the only inner products for which a construction of this form is possible).
As shown in Table~\ref{table:22-100facets}, there are $92$ different types of simplicial facets in the Higman-Sims configuration. One orbit consists of regular simplices: for each point in the configuration, the $22$ points at the furthest distance from it form a regular simplex. All the other simplices are irregular. Nine orbits consist of simplices with no symmetries whatsoever, and the remaining ones have some symmetries but not the full symmetric group.
The universal optima $\universal{552}{23}$ and $\universal{891}{22}$ have more elaborate facet structures, but we have completely classified their facets (which form $116$ and $422$ orbits, respectively). The facets corresponding to their deep holes form single orbits, consisting of $\universal{100}{22}$ in the first case and $\universal{162}{21}$ in the second. These results can all be understood in terms of the standard embeddings of these configurations into the Leech lattice, as follows:
Let $v$ be any vector with norm $6$ in the Leech lattice $\Lambda_{24}$. Among the $196560$ minimal vectors in
$\Lambda_{24}$ (those with norm $4$), there are $552$ minimal vectors $w$ satisfying $|w-v|^2=4$, and they form a copy of the $552$-point universal optimum. This shows that $\universal{552}{23}$ is a facet of $\universal{196560}{24}$. Taking kissing configurations shows that $\universal{275}{22}$ is a facet of $\universal{4600}{23}$ and that $\universal{162}{21}$ is a facet of $\universal{891}{22}$. We conjecture that each of these facets corresponds to a deep hole in the code, and that all of the deep holes arise this way, but we have not proved this conjecture beyond $\universal{891}{22}$. The $\universal{100}{22}$ facets of $\universal{552}{23}$ can also be seen in this picture: given two vectors $v_1,v_2 \in \Lambda_{24}$
with $|v_1|^2=|v_2|^2=6$ and $|v_1-v_2|^2=4$, the corresponding $\universal{552}{23}$ facets of $\universal{196560}{24}$ intersect in a $\universal{100}{22}$ facet of $\universal{552}{23}$, which corresponds to a deep hole.
\subsection{$96$ points in ${\mathbb R}^9$} \label{subsec:96in9}
Another intriguing code that arose in our computer searches is a $96$-point code in ${\mathbb R}^9$ (see Table~\ref{table:balanced}). This code was known previously: it is mentioned but not described in Table~9.2 of \cite{CS}, which refers to a paper in preparation that never appeared, and it is described in Appendix~D of \cite{EZ}. Here we describe it in detail, with a different approach from that in \cite{EZ}.
The code is not universally optimal, but it is balanced and it appears to be an optimal spherical code. What makes it noteworthy is that the cosine of its minimal angle is $1/3$. Any such code corresponds to an arrangement of unit balls in ${\mathbb R}^{10}$ that are all tangent to two fixed, tangent balls, where the interiors of the balls are not allowed to overlap (this condition forces the cosine of the minimal angle between the sphere centers to be at most $1/3$, when the angle is centered at the midpoint between the fixed balls). The largest such arrangement most likely consists of $96$ balls.
To construct the code, consider three orthogonal tetrahedra in ${\mathbb R}^9$. Call the points in the first $v_1$, $v_2$, $v_3$, $v_4$, in the second $v_5$, $v_6$, $v_7$, $v_8$, and in the third $v_9$, $v_{10}$, $v_{11}$, $v_{12}$. Within each of these tetrahedra, all inner products between distinct points are $-1/3$, and between tetrahedra they are all $0$. Call these tetrahedra the basic tetrahedra.
The points $\pm v_1, \dots, \pm v_{12}$ will all be in the code, and we will identify $72$ more points in it. Each of the additional points will have inner product $\pm 1/3$ with each of $v_1,\dots,v_{12}$, and we will determine them via those inner products. Because $v_1+\dots+v_4 = v_5+\dots+v_8 = v_9+\dots+v_{12} = 0$, the inner products with the elements of each basic tetrahedron must sum to zero. In particular, two must be $1/3$ and the other two $-1/3$. That restricts us to $\binom{4}{2}=6$ patterns of inner products with each basic tetrahedron, so there are $6^3=216$ points satisfying all the constraints so far. We must cut that number down by a factor of $3$.
The final constraint comes from considering the inner products between the new points. A simple calculation shows that one can reconstruct a point $x$ from its inner products with $v_1,\dots,v_{12}$ via $$ x = \frac{3}{4}\sum_{i=1}^{12} \langle x,v_i\rangle v_i, $$ and inner products are computed via $$ \langle x,y \rangle = \frac{3}{4} \sum_{i=1}^{12} \langle x, v_i \rangle \langle y, v_i \rangle. $$ In other words, if $x$ and $y$ have identical inner products with one of the basic tetrahedra, that contributes $1/3$ to their own inner product. If they have opposite inner products with one of the basic tetrahedra, that contributes $-1/3$. Otherwise the contribution is $0$.
The situation we wish to avoid is when $x$ and $y$ have identical inner products with two basic tetrahedra, or opposite inner products with both, and neither identical nor opposite inner products with the third. In that case, $\langle x,y \rangle = \pm 2/3$.
To rule out this situation, we assign elements of ${\mathbb Z}/3{\mathbb Z}$ to quadruples by $$ \pm (1/3,1/3,-1/3,-1/3) \mapsto 0, $$ $$ \pm (1/3,-1/3,1/3,-1/3) \mapsto 1, $$ and $$ \pm (1/3,-1/3,-1/3,1/3) \mapsto -1. $$ Consider the $72$ points with inner products $\pm 1/3$ with each of $v_1,\dots,v_{12}$ such that exactly two inner products with each basic tetrahedron are $1/3$ and furthermore the elements of ${\mathbb Z}/3{\mathbb Z}$ coming from the inner products with the basic tetrahedra sum to $0$. Given any two such points, if they have identical or opposite inner products with two basic tetrahedra, then the same must be true with the third. Thus, we have constructed $24+72=96$ points in ${\mathbb R}^9$ such that all the inner products between them are $\pm 1$, $\pm 1/3$ or $0$.
\begin{table} \begin{center} \begin{tabular}{ccc} Vertices & Automorphisms & Orbit size \\ \hline $9$ & $16$ & $27648$\\ $9$ & $48$ & $13824$\\ $9$ & $48$ & $4608$\\ $9$ & $96$ & $18432$\\ $9$ & $1440$ & $4608$\\ $12$ & $1024$ & $864$\\ $12$ & $31104$ & $512$\\ $16$ & $10321920$ & $18$\\ \\ \end{tabular} \end{center} \caption{Facets of the convex hull of the configuration of $96$ points in ${\mathbb R}^9$, modulo the action of the symmetry group.}\label{table:9-96facets} \end{table}
The facets of this code form eight orbits under the action of its symmetry group; they are listed in Table~\ref{table:9-96facets}. The most interesting facets are those with $16$ vertices, which form regular cross polytopes. These facets and the two orbits with $12$ vertices all correspond to deep holes.
\subsection{Distribution of energy levels}
Typically, there are many local minima for harmonic energy. One intriguing question is how the energies of the local minima are distributed. For example, Table~\ref{table:energy} shows the thirty lowest energies obtained in $2\cdot10^5$ trials with $120$ points in ${\mathbb R}^4$, together with how often they occurred. The regular $600$-cell is the unique universal optimum (with energy $5395$), but we found $5223$ different energy levels. This table is probably not a complete list of the lowest thirty energies (five of them occurred only once, so it is likely there are more to be found), but we suspect that we have found the true lowest ten.
\begin{table} \begin{center}
\begin{tabular}{cc|cc} Energy & Frequency & Energy & Frequency\\ \hline $5395.000000$ & $186418$ & $5402.116636$ & $1$\\ $5398.650556$ & $4393$ & $5402.152619$ & $1$\\ $5398.687876$ & $2356$ & $5402.213231$ & $2$\\ $5400.842726$ & $18$ & $5402.366164$ & $1$\\ $5400.880057$ & $149$ & $5402.922701$ & $1$\\ $5400.890460$ & $47$ & $5403.091064$ & $111$\\ $5400.894513$ & $26$ & $5403.115123$ & $1$\\ $5400.928674$ & $25$ & $5403.129076$ & $108$\\ $5400.936106$ & $41$ & $5403.271100$ & $66$\\ $5400.940237$ & $28$ & $5403.319898$ & $157$\\ $5400.940550$ & $7$ & $5403.326719$ & $84$\\ $5400.943094$ & $38$ & $5403.347209$ & $24$\\ $5402.029556$ & $7$ & $5403.455701$ & $7$\\ $5402.088248$ & $3$ & $5403.462898$ & $8$\\ $5402.093726$ & $10$ & $5403.488923$ & $4$\\ \\ \end{tabular} \end{center} \caption{Thirty lowest harmonic energies observed for local minima with $120$ points on $S^3$ ($2 \cdot 10^5$ trials, $5223$ different energy levels observed).} \label{table:energy} \end{table}
The most remarkable aspect of Table~\ref{table:energy} is the three gaps in it. There are huge gaps from $5395$ to $5398.65$, from $5398.69$ to $5400.84$, and from $5400.95$ to $5402.02$. Each gap is far larger than the typical spacing between energy levels. Perhaps one of these gaps contains some rare local minima, but they appear to be real gaps.
What could cause such gaps? We do not have a complete theory, but we believe the gaps reflect bottlenecks in the process of constructing the code by gradient descent. Figure~\ref{fig:progress} is a graph of energy as a function of time for gradient descent, starting at a random configuration of $120$ points in $S^3$. (The figure represents a single run of the optimization procedure, so it should be viewed as a case study, not a statistical argument.) The graph begins when the energy has just reached $5405$ and ends once convergence to the universal optimum is apparent. Of course the convergence is monotonic, but its speed varies dramatically. The rate of decrease is slowest near energy $5398.66$, which is indicated by a horizontal line. We do not believe it could be a coincidence that that is very nearly the energy of the two lowest-energy local minima in Table~\ref{table:energy}: the slowdown probably occurs because of a bottleneck. More precisely, in order to achieve the ground state the system must develop considerable large-scale order and symmetry. Probably short-range order develops first and then slowly extends to long-range order. During this process there may be bottlenecks in which different parts of the system must come into alignment with each other. The local minima correspond to the rare cases in which the system gets stuck in the middle of a bottleneck, but even when it does not get stuck it still slows down.
It is not completely clear why the gaps are separated by several energy levels that are surprisingly close to each other. The most likely explanation is that there are several slightly different ways to get stuck during essentially the same bottleneck, but we have no conceptual understanding of what these ways are. It would be very interesting to have a detailed theory of this sort of symmetry breaking.
\begin{figure}
\caption{Energy as a function of time under gradient descent.}
\label{fig:progress}
\end{figure}
\section{Conjectured universal optima} \label{section:newunivopt}
\subsection{$40$ points in ${\mathbb R}^{10}$}
The $40$-point code from Table~\ref{table:conjectures} consists of $40$ points on the unit sphere in ${\mathbb R}^{10}$. The only inner products that occur are $1$, $-1/2$, $-1/3$, $0$, and $1/6$; each point has these inner products with $1$, $8$, $3$, $4$, and $24$ points, respectively. Grouping pairs of points according to their inner product yields a $4$-class association scheme, which Bannai, Bannai, and Bannai \cite{BBB} have recently shown is uniquely determined by its intersection numbers.
The $40$ points form $10$ regular tetrahedra, and more specifically $5$ orthogonal pairs of tetrahedra. That accounts for all the inner products of $1$, $-1/3$, and $0$. Each point has inner product $-1/2$ with one point in each of the other $9$ tetrahedra, except for the tetrahedron orthogonal to the one containing it. All remaining inner products are $1/6$. The configuration is chiral (i.e., not equivalent to any reflection of itself under the action of $SO(10)$) and has a symmetry group of order $1920$. Specifically, the symmetry group is the semidirect product of the symmetric group $S_5$ with the subgroup of $({\mathbb Z}/2{\mathbb Z})^5$ consisting of all vectors that sum to zero, where $S_5$ acts by permuting the five coordinates.
We conjecture that this code is the unique $40$-point code in ${\mathbb R}^{10}$ with maximal inner product $1/6$, but that appears difficult to prove. For example, it is an even stronger assertion than optimality as a spherical code. We are unaware of any occurrence of this code in the published literature, but it appears in Sloane's online tables \cite{S} with the annotation that it was found by Smith and ``beautified'' by Conway and Sloane (in the sense of recognizing it using elegant coordinates). It also appears in Hovinga's online report \cite{H}.
Conway, Sloane, and Smith construct the code as an explicit list of $40$ vectors with entries $-1/\sqrt{6}$, $0$, or $1/\sqrt{6}$. Here we explain the combinatorics that underlies this construction. That may well be how Conway and Sloane beautified the code, but \cite{S} presents no details about the construction beyond the list of vectors.
Consider the $10$-dimensional vector space $V$ spanned by the orthonormal basis vectors $v_{\{i,j\}}$ for all two-element subsets $\{i,j\} \subset {\mathbb Z}/5{\mathbb Z}$ with $i \ne j$. Say a vector in $V$ has type $i$ if for every basis vector $v_S$, its coefficient vanishes if and only if $i \in S$. Each vector in the code will have type $i$ for some $i \in {\mathbb Z}/5{\mathbb Z}$, and the six nonzero coefficients will equal $\pm 1/\sqrt{6}$. Given such a vector, define a graph on the vertex set $({\mathbb Z}/5{\mathbb Z}) \setminus \{i\}$ by connecting $j$ to $k$ if the coefficient of $v_{\{j,k\}}$ is $-1/\sqrt{6}$. If the graph has $e$ edges and vertex $j$ has degree $d_j$, then the vector is in the code if and only if $d_{i+1} \equiv d_{i+2} \equiv e \pmod{2}$ and $d_{i-1} \equiv d_{i-2} \not\equiv e \pmod{2}$ (where $i$ is the type of the vector).
The facet structure of this code seems surprisingly unilluminating compared to the others we have analyzed. There are $24$ orbits of facets: $21$ orbits of irregular simplices (with symmetry groups ranging from $2$ to $768$ in size), one orbit of $11$-vertex facets, and two orbits of $12$-vertex facets. The most symmetrical facets are those in one of the two orbits of $12$-vertex facets. They have $3072$ symmetries and are given by the orthogonal union of three identical irregular tetrahedra in ${\mathbb R}^3$ (with $8$ symmetries).
Our confidence in this code's universal optimality is based on detailed numerical experiments. One reason a configuration could be universally optimal is that it has no competitors (i.e., except in degenerate cases there are no other local minima). That is not true for the $40$-point code, but there are remarkably few competitors. In particular, it appears to have only one ``serious'' competitor. We have found five other families of local minima, but four of them are rare and never seem to come close to beating our conjectured universal optimum. The best experimental evidence we can imagine for universal optimality would be to describe explicitly each competing family that has been observed and prove that it never contains the global energy minimum. That might be possible for this code, but we have not completed it. The four rare families are sufficiently complicated that we have not analyzed them explicitly (under the circumstances it did not seem worth the effort). However, we have a complete description of the serious competitor.
That family depends on a parameter $\alpha$ that must satisfy $0 < \alpha^2 \le 1/27$. The configuration always contains a fixed $16$-point subset with the following structure. If we call the $16$ points $w_{i,j}$ with $i,j \in \{1,2,3,4\}$, then $$ \langle w_{i,j}, w_{k,\ell}\rangle = \begin{cases} 1 & \textup{if $(i,j) = (k,\ell)$},\\ -1/3 & \textup{if $i=k$ or $j =\ell$ but not both, and}\\ 1/9 & \textup{otherwise.} \end{cases} $$ In other words, if we arrange the points in a $4 \times 4$ grid, then the rows and columns are regular tetrahedra and all other inner products are $1/9$. To construct such a configuration, take the tensor product in ${\mathbb R}^3 \otimes_{\mathbb R} {\mathbb R}^3 = {\mathbb R}^9$ of two regular tetrahedra in ${\mathbb R}^3$.
To describe the remaining $24$ points, we will specify their inner products with the first $16$ points. That will determine their projections into the $9$-dimensional subspace containing the $16$ points, so the only additional information needed to pin them down will be whether they are above or below that hyperplane (relative to some orientation).
Each of the $24$ points will have inner product $-3\alpha$ with four of the first $16$ points and $\alpha$ with each of the others. The only constraint is that it must have inner product $-3\alpha$ with exactly four points, one in each of the eight tetrahedra (i.e., one in each row and column of the $4\times 4$ grid). The $4\times 4$ grid exhibits a one-to-one correspondence with permutations of four elements, so there are $4!=24$ ways to satisfy these constraints. The points corresponding to even permutations will be placed above the $9$-dimensional hyperplane, and those corresponding to odd permutations will be placed below it. This construction yields a $40$-point code in ${\mathbb R}^{10}$ provided that $0 < \alpha^2 \le 1/27$. (When $\alpha=0$ some points coincide, and when $\alpha^2 > 1/27$ the inner products cannot be achieved by unit vectors.)
We have not proved that the codes in this family never improve on the conjectured universal optimum, but we are confident that it is true. The best spherical code in the family occurs when $\alpha = (\sqrt{109}-1)/54 = 0.1748\dots$; in this special case, $\alpha$ is also the maximal inner product, which is quite a bit larger than the maximal inner product $1/6$ in the conjectured universal optimum. That implies that when $k$ is sufficiently large, the conjectured optimum is better for the potential function $f(r) = (4-r)^k$ (because the energy is dominated asymptotically by the contribution from the minimal distance). In principle one could verify the finitely many remaining values of $k$ by a finite computation. We have done enough exploration to convince ourselves that it is true, but we have not found a rigorous proof.
\subsection{$64$ points in ${\mathbb R}^{14}$} \label{subsection:64}
The simplest construction we are aware of for the $64$-point configuration in ${\mathbb R}^{14}$ uses the Nordstrom-Robinson binary code \cite{NR,G}. Shortening that code twice yields a binary code of length $14$, size $64$, and minimal distance $6$, which is known to be unique (see \cite[pp.~74--75]{MS}). One can view it as a subset of the cube $\{-1,1\}^{14}$ instead of $\{0,1\}^{14}$. Then after rescaling by a factor of $1/\sqrt{14}$ to yield unit vectors, this code is the $64$-point configuration in ${\mathbb R}^{14}$ that we conjecture is universally optimal. The same process with less shortening yields the codes from Tables~\ref{table:balanced} and~\ref{table:unresolved} with $128$ points in ${\mathbb R}^{15}$ and $256$ points in ${\mathbb R}^{16}$;
the $64$-point and $128$-point codes have previously appeared in Appendix~D of \cite{EZ} via the same approach, as conjectures for optimal spherical codes.
This construction makes some of the facet structure of the code clear. There are $28$ facets with $32$ vertices that come from the facets of the cube containing the code. In fact, the code is obtained from exactly the same antiprism construction as described in Subsection~\ref{subsec:facet} (the vertices of these facets point towards deep holes of the opposite facets). There are also $66$ other orbits of facets under the action of the symmetry group, but those orbits seem to be less interesting.
An alternative construction of the code is by describing its Gram matrix explicitly. As mentioned above, this construction amounts to forming an association scheme by taking $t=1$ in Theorem~2 and Proposition~7(i) in \cite{dCvD}, and then performing a spectral embedding. Bannai, Bannai, and Bannai \cite{BBB} have shown that this association scheme is uniquely determined by its intersection numbers.
More concretely, the points correspond to elements of ${\mathbb F}_8^2$, where ${\mathbb F}_8$ is the finite field of order $8$, and the inner products are determined by Table~\ref{table:gram64}. The Gram matrix has $14$ eigenvalues equal to $32/7$ and the others equal to $0$, so it is indeed the Gram matrix of a $14$-dimensional configuration.
\begin{table} \begin{tabular}{cc} Inner product & Condition\\ \hline $1$ & $(x_1,x_2)=(y_1,y_2)$\\ $-1/7$ & $x_1=y_1$ but $x_2 \ne y_2$\\ $-3/7$ & $x_1 \ne y_1$ and $x_2+y_2 \in \{ (x_1+y_1)^3, x_1y_1(x_1+y_1)\}$\\ $1/7$ & otherwise\\ \\ \end{tabular} \caption{The inner products for the $64$-point code.} \label{table:gram64} \end{table}
Unfortunately, this $64$-point code has many competitors. We have found over two hundred local minima for harmonic energy and expect that the total number is much larger. That makes it difficult to imagine an ironclad experimental argument for universal optimality. We suspect that the code is universally optimal for two reasons: we have failed to find any counterexample, and during the process no competitor came close enough to worry us. (By contrast, in many cases in which one can disprove universal optimality, one finds worrisomely close competitors before tweaking the construction to complete the disproof.) However, we realize that the evidence is far from conclusive.
\section{Balanced, irreducible harmonic optima} \label{section:balanced}
In this section we briefly describe each of the configurations in Tables~\ref{table:balanced} and~\ref{table:unresolved}.
\begin{description}
\item[$32$ points in ${\mathbb R}^3$]
The union of a regular icosahedron and its dual dodecahedron.
\item[$10$ points in ${\mathbb R}^4$]
Both configurations are described in Subsection~\ref{subsec:diplo}.
\item[$13$ points in ${\mathbb R}^4$]
In ${\mathbb C}^2$ with the inner product $\langle x,y \rangle = \mathop{\textup{Re}} \bar x ^t y$, the points are $(\zeta/\sqrt{2},\zeta^5/\sqrt{2})$, where $\zeta$ runs over all $13$-th roots of unity. This code was discovered by Sloane, Hardin, and Cara \cite{SHC}.
For a less compact description, view ${\mathbb R}^4$ as the orthogonal direct sum of two planes, and let $R$ be the operation of rotating the first plane by $2\pi/13$ and the second by five times that angle. The unit sphere in ${\mathbb R}^4$ contains the direct product of the circles of radius $1/\sqrt{2}$ in the two planes, and the $13$-point code is the orbit of a point in this direct product under the group generated by $R$.
The factor of $5$ is special because $5^2 \equiv -1 \pmod{13}$. In particular, $R^8$ rotates the second plane by $2\pi/13$ and the first by $8$ times that amount, which is the same as $5$ times it in the opposite direction. In other words, the two planes play the same role, if one ignores their orientations. Only the square roots of $-1$ modulo $13$ (or, trivially, the square roots of $1$) have that property.
\begin{figure}
\caption{The $13$-point harmonic optimum in $S^3$, drawn on a torus by plotting $(\phi,\psi)$ for the point $(e^{i\phi}/\sqrt{2},e^{i\psi}/\sqrt{2})$.}
\label{fig:torus13}
\end{figure}
For all the points in this configuration, both complex coordinates have absolute value $1/\sqrt{2}$. Therefore the configuration is contained in a flat two-dimensional torus sitting inside $S^3$ (namely, the product of the circles of radius $1/\sqrt{2}$ in the two complex coordinate axes). Figure~\ref{fig:torus13} shows the complex phases of the $13$ points. For each $N$ we can ask whether there is an $N$-point harmonic optimum in $S^3$ that is contained in such a torus. For $1 \le N \le 10$ it seems that there is. We conjecture that $N=13$ is the only larger value of $N$ for which this happens.
For other potential functions, similar phenomena can occur in more cases. For example, for the logarithmic potential function $f(r) = -\log r$, Jaron Lanier has conjectured in a private communication that the minimal-energy configuration of $11$ points on $S^3$ also lies on a flat torus. In ${\mathbb C}^2$, the points are $(\alpha\zeta,\sqrt{1-\alpha^2}\zeta^4)$, where $\zeta$ runs over all $11$-th roots of unity and $\alpha$ is the unique root between $0$ and $1$ of $$ 5 \alpha^8 - 36 \alpha^6 + 51 \alpha^4 - 4 \alpha^2 - 7= 0. $$
\item[$15$ points in ${\mathbb R}^4$]
Described in Subsection~\ref{subsec:gramanalysis}.
\item[$24$ points in ${\mathbb R}^4$]
The regular $24$-cell (equivalently, the $D_4$ root system).
\item[$48$ points in ${\mathbb R}^4$]
Described in Subsection~\ref{subsec:48in4}.
\item[$21$ points in ${\mathbb R}^5$]
The edge midpoints and face centers of a regular simplex (rescaled to lie on the same sphere).
\item[$32$ points in ${\mathbb R}^5$]
Start with a regular simplex $v_1,\dots,v_6$ in ${\mathbb R}^5$. The $32$ points will be these six points and their negatives, along with $20$ points determined as follows by their inner products with $v_1,\dots,v_6$. Each will have inner product $\pm 1/\sqrt{5}$ with each of $v_1,\dots,v_6$, with three plus signs and three minus signs. There are $\binom{6}{3}=20$ ways to choose these signs.
\item[$2n+2$ points in ${\mathbb R}^n$ (for $n \ge 6$)]
Described in Subsection~\ref{subsec:diplo}.
\item[$42$ points in ${\mathbb R}^6$]
The edge midpoints of a regular simplex and their antipodes.
\item[$44$ points in ${\mathbb R}^6$]
This code contains plus or minus an orthonormal basis of ${\mathbb R}^6$ together with the $32$ vectors whose coordinates with respect to that basis are $\pm 1/\sqrt{6}$ and where an even number of minus signs occur. In other words, it consists of a cross polytope and a hemicube within the cube dual to the cross polytope. This code was previously conjectured to be an optimal spherical code (see Table~D.6 in \cite{EZ}).
\item[$126$ points in ${\mathbb R}^6$]
The union of the minimal vectors of the $E_6$ and $E_6^*$ lattices (rescaled to lie on the same sphere). Equivalently, one can project the $E_7$ root system orthogonally to a minimal vector in $E_7^*$, followed by rescaling as in the first construction.
\item[$78$ points in ${\mathbb R}^7$]
Like the $44$ points in ${\mathbb R}^6$, this code consists of a cross polytope and a hemicube within the cube dual to the cross polytope.
\item[$148$ points in ${\mathbb R}^7$]
The points are all the permutations of $$ \frac{(\pm1,\pm1,0,0,0,0,0)}{\sqrt{2}} $$ and the hemicube consisting of the points $$ \frac{(\pm1,\pm1,\pm1,\pm1,\pm1,\pm1,\pm1)}{\sqrt{7}} $$ that have an even number of minus signs. This is a seven-dimensional analogue of a construction of the $E_8$ root system, but it is less symmetric, because the two types of points form distinct orbits under the action of the symmetry group. This code was previously conjectured to be an optimal spherical code (see Table~D.7 in \cite{EZ}).
\item[$182$ points in ${\mathbb R}^7$]
The union of the minimal vectors of the $E_7$ and $E_7^*$ lattices (rescaled to lie on the same sphere). Equivalently, one can project the $E_8$ root system orthogonally to any root; $126$ roots are unchanged, $112$ project to $56$ nonzero points, and $2$ project to the origin. Rescaling the nonzero projections to lie on the unit sphere yields the $182$-point configuration.
\item[$72$ points in ${\mathbb R}^8$]
The edge midpoints of a regular simplex and their antipodes. This code was previously conjectured to be an optimal spherical code (see Table~D.8 in \cite{EZ}).
\item[$96$ points in ${\mathbb R}^9$]
Described in Subsection~\ref{subsec:96in9}.
\item[$42$ points in ${\mathbb R}^{14}$]
This code consists of seven disjoint, five-dimensional regular simplices, whose vertices have inner products $-1/5$ with each other. Each point has inner product $-1/2$ with a unique point in each simplex other than the one containing it, and all other inner products between points in different simplices are $1/10$. Grouping pairs of points according to their inner product yields a three-class association scheme that can be derived from the Hoffman-Singleton graph (see Subsection~5.1 of \cite{vD}). Let $G$ be the Hoffman-Singleton graph, and let $H$ be its second subconstituent. In other words, take any vertex $v$ in $G$, and let $H$ be the vertices not equal to or adjacent to $v$. The vertices in $H$ correspond to points in the $14$-dimensional configuration, with inner product $-1/2$ between adjacent vertices, $-1/5$ between non-adjacent vertices with no common neighbor, and $1/10$ between non-adjacent vertices with one common neighbor. This code was previously conjectured to be an optimal spherical code (see Table~D.14 in \cite{EZ}).
\item[$128$ points in ${\mathbb R}^{15}$]
Described in Subsection~\ref{subsection:64}.
\item[$256$ points in ${\mathbb R}^{16}$]
Described in Subsection~\ref{subsection:64}.
\end{description}
\section{Challenges}
We conclude with a list of computational and theoretical challenges:
\begin{enumerate} \item How often do $196560$ randomly chosen points on $S^{23}$ converge to the Leech lattice minimal vectors under gradient descent for harmonic energy? For $240$ points on $S^7$, one frequently obtains the $E_8$ root system ($855$ times out of $1000$ trials), and the Higman-Sims configuration of $100$ points on $S^{21}$ occurs fairly often ($257$ times out of $1000$ trials); by contrast, the universal optimum with $112$ points on $S^{20}$ occurs rarely (once in $1000$ trials). This approach could be an intriguing construction of the Leech lattice, but we have no intuition for how likely it is to work.
\item What are the potential energy barriers that separate the local minima in Table~\ref{table:27} (or any other case)? In other words, if one continuously transforms one configuration into another, how low can one make the greatest energy along the path connecting them? The lowest possible point of greatest energy will always be a saddle point for the energy function.
\item How many harmonic local minima are there for $120$ points on $S^3$ (Table~\ref{table:energy}), or even $64$ points on $S^{13}$? Is the number small enough that one could conceivably compile a complete list? Is the list for $27$ points on $S^5$ in Table~\ref{table:27} complete?
\item For large numbers of points, what can one say (experimentally, heuristically, or rigorously) about the distribution of energy levels for local minima, or about the gaps in the distribution? \end{enumerate}
\section{Local non-optimality of diplo-simplices} \label{appendix:diplo}
In this appendix we prove that (above dimension two) diplo-simplices are not even locally optimal as spherical codes. The motivation behind the calculations is the case of $n=3$, where the diplo-simplex is a cube. One can improve this spherical code by rotating one face relative to the opposite face while moving them slightly closer together. To carry out this approach in higher dimensions, we must understand the faces of the diplo-simplex (which is in general quite different from the hypercube). The diplo-simplex in ${\mathbb R}^n$ is the orthogonal projection of the vertices of the cross polytope in ${\mathbb R}^{n+1}$ onto the hyperplane on which the sum of the coordinates is zero. Its dual polytope is therefore the cross section of the hypercube in ${\mathbb R}^{n+1}$ by that hyperplane. The vertices of the cross section differ depending on whether $n$ is odd or even: $(1,\dots,1,-1,\dots,-1)$ is a vertex when $n+1$ is even, and $(1,\dots,1,0,-1,\dots,-1)$ is when $n+1$ is odd. That leads to the following description of the diplo-simplex, based on identifying its faces using this correspondence.
Let $v_1,\dots,v_k$ and $w_1,\dots,w_k$ be unit vectors forming two orthogonal, $(k-1)$-dimensional regular simplices, and let $t$ be a unit vector orthogonal to both simplices. The $(2k-1)$-dimensional diplo-simplex consists of the points $$ \pm(\alpha v_i + \sqrt{1-\alpha^2}t) $$ and $$ \pm(\alpha w_i + \sqrt{1-\alpha^2}t), $$ where $\alpha = \sqrt{(2k-2)/(2k-1)}$. To improve its minimal angle, use the points $$ \alpha v_i + \sqrt{1-\alpha^2}t, $$ $$ \alpha w_i + \sqrt{1-\alpha^2}t, $$ $$ -\big(\alpha (\beta v_i + \sqrt{1-\beta^2} w_i) + \sqrt{1-\alpha^2}t\big), $$ and $$ -\big(\alpha (\sqrt{1-\beta^2} v_i - \beta w_i) + \sqrt{1-\alpha^2}t\big), $$ with $\beta$ a small positive number and $\alpha$ slightly greater than $\sqrt{(2k-2)/(2k-1)}$. When $k=2$, choosing $\alpha$ and $\beta$ optimally leads to the optimal $8$-point code in ${\mathbb R}^3$, but when $k=3$ Sloane's tables \cite{S} show that this approach is suboptimal.
The situation is slightly more complicated for even-dimensional diplo-simplices. Again, let $v_1, \dots, v_k$ and $w_1, \dots, w_k$ be unit vectors forming two orthogonal $(k-1)$-dimensional simplices. Let $t$ and $z$ be two unit vectors orthogonal to both simplices and each other. The $2k$-dimensional diplo-simplex consists of the points $z$, $-z$, $$ \pm (\alpha v_i + \beta z + \sqrt{1-\alpha^2-\beta^2}t), $$ and $$ \pm (\alpha w_i - \beta z + \sqrt{1-\alpha^2-\beta^2}t), $$ where $\alpha=\sqrt{(2k+1)(2k-2)}/(2k)$ and $\beta=1/(2k)$. Note that $\alpha$ vanishes when $k=1$ and our construction below will not work. Indeed the hexagon, which is the diplo-simplex in the plane, is an optimal spherical code. To improve the minimal angle for $k \geq 2$ use the points $z$, $-z$, $$ \alpha v_i + \beta z + \sqrt{1-\alpha^2-\beta^2}t, $$ $$ \alpha w_i - \beta z + \sqrt{1-\alpha^2-\beta^2}t, $$ $$ -\big(\alpha(\gamma v_i + \sqrt{1-\gamma^2}w_i)-\beta z + \sqrt{1-\alpha^2 - \beta^2}t\big), $$ and $$ -\big(\alpha(\sqrt{1-\gamma^2}v_i - \gamma w_i)+\beta z + \sqrt{1-\alpha^2 - \beta^2}t\big), $$ with $\gamma$ a small positive number, $\alpha$ slightly larger than $\sqrt{(2k+1)(2k-2)}/(2k)$ and $\beta$ slightly less than $1/(2k)$. The numbers $\alpha$ and $\beta$ should be chosen such that $\alpha^2 + 2 \beta^2$ increases from its original value of $(2k-1)/(2k)$.
\end{document} |
\begin{document}
\title[Composite Wavelet Transforms]{ Composite Wavelet Transforms: Applications and Perspectives}
\author{Ilham A. Aliev} \address{Department of Mathematics, Akdeniz University, 07058 Antalya TURKEY} \email{ialiev@akdeniz.edu.tr} \author{Boris Rubin} \address{Department of Mathematics, Louisiana State University, Baton Rouge, Louisiana 70803} \email{borisr@math.lsu.edu}
\author{Sinem Sezer} \address{Faculty of Education, Akdeniz University, 07058 Antalya TURKEY} \email{sinemsezer@akdeniz.edu.tr} \author{Simten B. Uyhan} \address{Department of Mathematics, Akdeniz University, 07058 Antalya TURKEY} \email{simten@akdeniz.edu.tr} \thanks{The research was supported by the Scientific Research Project Administration Unit of the Akdeniz University (Turkey) and TUBITAK (Turkey). The second author was also supported by the NSF grants EPS-0346411 (Louisiana Board of Regents) and DMS-0556157.}
\renewcommand{\subjclassname}{
\textup{2000} Mathematics Subject Classification} \subjclass[2000]{42C40, 44A12, 47G10.}
\keywords{Wavelet transforms, potentials, semigroups, generalized translation, Radon transforms, inversion formulas, matrix spaces.}
\begin{abstract} We introduce a new concept of the so-called {\it composite wavelet transforms}. These transforms are generated by two components, namely, a kernel function and a wavelet function (or a measure). The composite wavelet transforms and the relevant Calder\'{o}n-type reproducing formulas constitute a unified approach to explicit inversion of the Riesz, Bessel, Flett, parabolic and some other
operators of the potential type generated by ordinary (Euclidean) and generalized (Bessel) translations. This approach is exhibited in the paper.
Another concern is application of the composite wavelet transforms to explicit inversion of the $k$-plane Radon transform on ${\Bbb R}^n$. We also discuss in detail a series of open problems arising in wavelet analysis of $L_p$-functions of matrix argument.
\end{abstract}
\maketitle
\centerline{Contents} \centerline{} 1. Introduction.
2. Composite wavelet transforms for dilated kernels.
3. Wavelet transforms associated to one-parametric semigroups and inversion ${}\qquad {}\quad$ of potentials.
4. Wavelet transforms with the generalized translation operator.
5. Beta-semigroups.
6. Parabolic wavelet transforms.
7. Some applications to inversion of the $k$-plane Radon transform.
8. Higher-rank composite wavelet transforms and open problems.
References.
\section{Introduction}
Continuous wavelet transforms \begin{equation*} \mathcal{W}f(x,t)=t^{-n}\int_{{\Bbb R}^n}f(y)\, w \left (\frac{x-y}{t}\right )\, dy, \qquad x\in \mathbb{R} ^{n},\ \ \ t>0, \end{equation*} where $w$ is an integrable radial function satisfying $\int_{{\Bbb R}^n}w(x)dx=0$, have proved to be a powerful tool in analysis and applications. There is a vast literature on this subject (see, e.g., \cite {Da}, \cite{16}, \cite{20}, to name just a few). Owing to the formula
\begin{equation} \int_{0}^{\infty }\mathcal{W}f(x,t)\text{ }\frac{dt}{t^{1+\alpha }} =c_{\alpha ,w}(-\Delta )^{\alpha /2}f(x),\qquad \alpha \in \mathbb{C},\quad \Delta =\sum\limits_{k=1}^{n}\frac{\partial ^{2}}{\partial x_{k}^{2}}, \label{1.2} \end{equation} that can be given precise meaning, continuous wavelet transforms enable us to resolve a variety of problems dealing with powers of differential operators. Such problems arise, e.g., in potential theory, fractional calculus, and integral geometry; see, \cite{16}, \cite{21}-\cite{27}, \cite{32}. Dealing with functions of several variables, it is always tempting to reduce the dimension of the domain of the wavelet function $w$ and find new tools to gain extra flexibility. This is actually a motivation for our article.
We introduce a new concept of the so-called {\it composite wavelet transforms}. Loosely speaking, this is a class of wavelet-like transforms generated by two components, namely, a kernel function and a wavelet. Both are at our disposal. The first one depends on as many variables as we need for our problem. The second component, which is a wavelet function (or a measure), depends only on one variable. Such transforms are usually associated with one-parametric semigroups, like Poisson, Gauss-Weierstrass, or metaharmonic ones, and can be implemented to obtain explicit inversion formulas for diverse operators of the potential type and fractional integrals. These arise in integral geometry in a canonical way; see, e.g., \cite{15, 22,
26, R9}.
In the present article we study different types of composite wavelet transforms in the framework of the $L_p$-theory and the relevant Fourier and Fourier-Bessel harmonic analysis. The main focus is
reproducing formulas of Calder\'{o}n's type and explicit inversion of Riesz, Bessel,
Flett, parabolic, and some other potentials. Apart from a brief review
of recent developments in the area, the paper contains a
series of
new results. These include wavelet transforms for
dilated kernels and wavelet transforms generated by Beta-semigroups associated
to multiplication by $\exp(-t|\xi|^{\beta}), \; \beta>0$, in terms of the Fourier transform.
Such semigroups arise in the context of stable random processes in probability
and enjoy a number of remarkable properties \cite{Ko}, \cite{La}.
Special emphasis is made on
detailed discussion of open
problems arising in wavelet analysis of functions of matrix
argument. Important results for $L_2$-functions in this ``higher-rank" set-up were obtained in \cite{OOR} using the Fourier transform
technique. The $L_p$-case for $p\neq 2$ is still mysterious. The main
difficulties are related to correct definition and handling of admissible wavelet functions on the cone of positive definite symmetric matrices.
The paper is organized according to the Contents
presented above.
\section{Composite Wavelet Transforms for Dilated Kernels}
\subsection{Preliminaries}
Let $L_{p}\equiv L_{p}(\mathbb{R}^{n}), \; 1\le p<\infty,$ be the standard space of functions with the norm \begin{equation*}
\left\| f\right\| _{p}=\Big( \int_{\mathbb{R}^{n}}\left| f(x)\right| ^{p}dx\Big )^{1/p}<\infty. \end{equation*} For technical reasons, the notation $L_{\infty }$ will be used for the space $C_{0}\equiv C_{0}(\mathbb{R}^{n})$ of all continuous functions on $ \mathbb{R}^{n}$ vanishing at infinity. The Fourier transform of a function $f$ on $\mathbb{R}^{n}$ is defined by \begin{equation*} Ff(\xi)=\int_{\mathbb{R}^{n}}f(x)\, e^{ix\cdot \xi }\,dx, \qquad x \cdot \xi=x_{1}\xi _{1}+\cdots +x_{n}\xi _{n}. \end{equation*} For $ 0\le a<b\le\infty$, we write $\int_a^b f(\eta)d\mu (\eta)$ to denote the integral of the form $\int_{[a,b)} f(\eta)d\mu (\eta)$.
\noindent\begin{definition}\label{d1} Let $q$ be a measurable function on ${\Bbb R}^n$ satisfying the following conditions:
(a) $q\in L_1\cap L_r$ for some $r>1$;
(b) the least radial decreasing majorant of $q$ is integrable, i.e.
$$\tilde q (x)=\sup_{|y|>|x|} |q(y)| \in L_1;$$
(c)$\qquad \int_{{\Bbb R}^n} q(x)\, dx =1.$
\noindent We denote \begin{equation}\label {qu} q_t (x)=t^{-n} q(x/t), \qquad Q_t f(x)=(f*q_t)(x), \qquad t>0,\end{equation} and set \begin{equation} \label{cwtr}Wf(x,t)= \int_0^\infty Q_{t\eta} f(x)\,d\mu (\eta),\end{equation} where $\mu$ is a finite Borel measure on $[0,\infty)$. If $\mu$ is {\it a wavelet measure} (i.e., $\mu$
has a certain number of vanishing moments and obeys suitable decay conditions)
then (\ref{cwtr}) will be called the {\it composite wavelet transform} of
$f$. The function $q$ will be called
a {\it kernel function} and $Q_t$ a {\it kernel operator}
of the composite transform $W$. \end {definition}
The integral (\ref{cwtr}) is well-defined for any function $f \in L_p$, and $$
||Wf(\cdot,t)||_p\le ||\mu || \, ||q||_{1} \, ||f||_p, $$
where $||\mu ||=\int_{[0,\infty)}d|\mu| (\eta)$. We will also consider a more general weighted transform
\begin{equation} \label{cwtrw}W_af(x,t)= \int_0^\infty Q_{t\eta} f(x)\,e^{-at\eta}\,d\mu (\eta),\end{equation} where $a\geq 0$ is a fixed parameter.
The kernel function $q$, the wavelet measure $\mu$, and the parameter $a \geq 0$ are at our disposal. This feature makes the new transform convenient in applications.
\subsection{Calder\'on's identity} An analog of Calder\'on's reproducing formula for $W_af$ is
given by the following theorem. \begin{theorem}\label{teo:34}
Let $\mu$ be a finite Borel measure on $[0,\infty)$ satisfying \begin{equation}\label{eq:32}
\mu([0,\infty))=0 \quad \text{and} \quad \int_0^\infty |\log \eta|\, d|\mu|(\eta) <\infty.\end{equation} If $f\in L_p, \; 1 \le p \le \infty$\footnote{We remind that $L_\infty$ is interpreted as the space $C_0$ with the uniform convergence.}, and $$c_\mu=\int_0^\infty \log \frac{1}{\eta} \,\, d\mu (\eta),$$ then \begin{equation}\label{eq33}\int_0^\infty W_af(x,t)\frac{dt}{t}\equiv \lim\limits_{\varepsilon \rightarrow 0}\int_\varepsilon^\infty W_af(x,t)\frac{dt}{t}=c_\mu f(x) \end{equation} where the limit exists in the $L_p$-norm and pointwise for almost all $x$. If $f\in C_0 $, this limit is uniform on ${\Bbb R}^n$. \end{theorem} \begin{proof}
Consider the truncated integral \begin{equation}\label{eq:38} I_\varepsilon f(x)=\int_\varepsilon^\infty W_af(x,t)\frac{dt}{t}, \qquad \varepsilon >0. \end{equation} Our aim is to represent it in the form \begin{equation}\label{eq:3100} I_\varepsilon f(x)= \int_{0}^{\infty}Q_{\varepsilon s} f(x) \,e^{-a\varepsilon s}\, k(s) \, ds \end{equation} where \begin{equation}\label{ka} k\in L_1 (0, \infty) \qquad \text{\rm and} \qquad \int_0^\infty k(s) ds=c_\mu. \end{equation} Once (\ref{eq:3100}) is established, all the rest follows from properties (a)-(c) in Definition \ref{d1} according to the standard machinery of approximation to the identity; see \cite {St}.
Equality (\ref{eq:3100}) can be formally obtained by changing the order of integration, namely, \begin{eqnarray} I_\varepsilon f(x) &=&\int_0^\infty d\mu(\eta) \int_{\varepsilon }^\infty Q_{t\eta} f(x)\,e^{-at\eta}\,\frac{dt}{t}\nonumber \\&=& \int_{0}^{\infty}d\mu(\eta) \int_{\eta}^\infty Q_{\varepsilon s} f(x)\,e^{-a\varepsilon s}\,\frac{ds}{s}\nonumber \\&=&\int_{0}^{\infty}Q_{\varepsilon s} f(x)\,e^{-a\varepsilon s} k(s)\, ds, \qquad k(s)=s^{-1}\int_{0}^{s}d\mu(\eta). \nonumber \end{eqnarray} Furthermore, since $\mu([0,\infty))=0$, then \begin{eqnarray} \int_0^\infty
|k(s)| ds&=&\int_0^1 \Big |\int_0^s d\mu(\eta)\Big
|\frac{ds}{s}+\int_1^{\infty} \Big |\int_s^{\infty} d\mu(\eta)\Big
|\frac{ds}{s}\nonumber
\\&\le&\int_0^1 d|\mu|(\eta)\int_\eta^1 \frac{ds}{s}+\int_1^{\infty}d|\mu|(\eta)\int_1^\eta \frac{ds}{s}\nonumber
\\&=&\int_0^\infty |\log \eta|\, d|\mu|(\eta) <\infty. \nonumber\end{eqnarray} Similarly we have $$\int_0^\infty k(s) ds=\int_0^\infty \log \frac{1}{\eta} \, d\mu (\eta)=c_\mu,$$ which gives (\ref{ka}). Thus, to complete the proof, it remains to justify application of Fubini's theorem leading to (\ref{eq:3100}). To this end, it suffices to show that the repeated integral $$
\int_\varepsilon^\infty \frac{dt}{t}\int_0^\infty |Q_{t\eta} f(x)|\,d|\mu| (\eta)$$ is finite for almost all $x$ in ${\Bbb R}^n$. We write it as $A(x)+B(x)$, where $$
A(x)=\int_\varepsilon^\infty \frac{dt}{t}\int_0^{1/t}|Q_{t\eta} f(x)|\,d|\mu| (\eta), \quad B(x)=\int_\varepsilon^\infty
\frac{dt}{t}\int_{1/t}^\infty|Q_{t\eta} f(x)|\,d|\mu| (\eta).$$ Since the least radial decreasing majorant of $q$ is integrable
(see property (b) in Definition \ref {d1}), then $\sup_{t>0}|Q_{t}
f(x)|\le c\, M_f (x)$ where $M_f (x)$ is the Hardy-Littlewood maximal function, which is finite for almost all $x$; see e.g., \cite[Theorem 2, Section 2, Chapter III]{St}. Hence, for almost all $x$,
$$ A(x)\le c\,M_f (x)\int_\varepsilon^\infty \frac{dt}{t}\int_0^{1/t}d|\mu| (\eta)= c\,M_f (x)\int_0^{1/\varepsilon}\big(\log \frac{1}{\eta}-\log
\varepsilon\big) d|\mu| (\eta)<\infty.$$ To estimate $B(x)$, we observe that since $q\in L_r, \; r>1$, then, by Young's inequality $$
||Q_t f||_s \le ||f||_p\, ||q_t||_r=t^{-\delta}||f||_p\, ||q||_r, \qquad \delta=n(1-1/r)>0, \quad \frac{1}{s}=\frac{1}{r}+\frac{1}{p}-1.$$ This gives $$\Big
\|\int_{1/t}^\infty|Q_{t\eta} f(x)|\,d|\mu| (\eta)\Big \|_s \le t^{-\delta}||f||_p\,\int_{1/t}^\infty \eta^{-\delta}\,d|\mu| (\eta),$$
and therefore, \begin{eqnarray} ||B||_s&\le&||f||_p\,\int_\varepsilon^\infty
\frac{dt}{t^{1+\delta}}\int_{1/t}^\infty\eta^{-\delta}\,d|\mu|
(\eta)\nonumber\\&=&\frac{||f||_p}{\delta}\Big (\,
\int_0^{1/\varepsilon}d|\mu|(\eta)+\frac{1}{\varepsilon^\delta}\int_{1/\varepsilon}^\infty
\eta^{-\delta} d|\mu|(\eta)\Big )\le \frac{||f||_p\,
||\mu||}{\delta}<\infty. \nonumber \end{eqnarray} This completes the proof. \end{proof}
\section{Wavelet Transforms Associated to One-parametric Semigroups and Inversion of Potentials}
In this section we consider an important subclass of wavelet
transforms, generated by certain one-parametric semigroups of
operators. Some composite wavelet
transforms from the previous section belong to this subclass.
\subsection{Basic examples} \begin{example}\label{e1} Consider the {\it Poisson semigroup} $\mathcal{P}_{t}$ generated by the Poisson integral \begin{equation} \mathcal{P}_{t}f(x)=\int_{\mathbb{R}^{n}}p(y,t)f(x-y)\,dy\text{ \ , \ } t>0 \label{1.1} \end{equation}
with the Poisson kernel \begin{equation} p(y,t)=\frac{\Gamma \left( (n+1)/2\right) }{
\pi ^{(n+1)/2}}\frac{t}{(t^{2}+\left| y\right| ^{2})^{(n+1)/2}}=t^{-n}p(y/t, 1); \label{1.2} \end{equation} see \cite{31}, \cite {St}. In this specific case, the kernel function of the relevant composite wavelet transform is $q(x)\equiv p(x, 1)$ and on the Fourier transform side we have \begin{equation}\label{puf}
F[\mathcal{P}_{t}f](\xi)=e^{-t\left| \xi\right| } Ff(\xi).\end{equation} \end{example} \begin{example} Another important example is the {\it Gauss-Weierstrass semigroup} $\mathcal{W}_{t}$ defined by \begin{equation}
\mathcal{W}_{t}f(x)=\int_{\mathbb{R}^{n}}w(y,t)f(x-y)\,dy,\qquad F[w(\cdot ,t)](\xi )=e^{-t|\xi |^{2}}, \quad t>0; \label{2.4} \end{equation}
see \cite{31}. The Gauss-Weierstrass kernel $w(y,t)$ is explicitly computed as \begin{equation} w(y,t)=(4\pi t)^{-n/2}\exp (-\left| y\right| ^{2}/4t).\end{equation} In comparison with (\ref{qu}), here the scaling parameter $t$ is replaced by $\sqrt{t}$, so that \begin{equation}\label{scp2}w(y,t)=(\sqrt{t})^{-n}q (y/\sqrt{t}), \qquad q (y)=w
(y, 1)=(4\pi)^{-n/2}\exp (-\left| y\right| ^{2}/4), \end{equation} and the corresponding wavelet transform has the form \begin{equation} \label{GWtr} Wf(x,t)=\int_0^\infty \mathcal{W}_{t\eta} f(x)\, e^{-at\eta}\, d\mu(\eta), \qquad x \in {\Bbb R}^n, \;\; t>0,\; \; a\ge 0.\end{equation} This
agrees with (\ref{cwtrw}) up to an obvious change of scaling parameters. \end{example} \begin{example} The following interesting example does not fall into the scope of wavelet transforms in Section 2, however, it has a very close nature. Consider the {\it metaharmonic semigroup} $\mathcal{M}_t$ defined by \begin{equation}\label{sg3}(\mathcal{M}_t f)(x) \! = \! \int\limits_{\mathbb R^n}
m(y,t)f(x \! - \! y)\,dy, \quad F[m(\cdot,t)](\xi) \! = \! e^{-t\sqrt{1+|\xi|^2}};\end{equation} see \cite[p. 257-258]{21}. The corresponding kernel has the form \begin{equation} m
(y,t)=\frac{2t}{(2\pi)^{(n+1)/2}}\, \frac{K_{(n+1)/2} (\sqrt{|y|^2
+t^2})}{(\sqrt{|y|^2 +t^2})^{(n+1)/2}},\end{equation} where $K_{(n+1)/2}(\cdot)$ is the Macdonald function. The
relevant wavelet transform is \begin{equation} \label{mh} Wf(x,t)=\int_0^\infty \mathcal{M}_{t\eta} f(x)\, d\mu(\eta), \qquad x \in {\Bbb R}^n, \;\; t>0.\end{equation} \end{example}
This list of examples can be continued \cite{8}.
\subsection{Operators of the potential type} One of the most remarkable applications of wavelet transforms associated to the Poisson, Gauss-Weierstrass, and metaharmonic semigroups is that they pave the way to a series of explicit inversion formulas for operators of the potential type arising in analysis and mathematical physics. Typical examples of such
operators are the following: \begin{eqnarray}
\qquad I^{\alpha }f &=&F^{-1}\left| \xi \right| ^{-\alpha }Ff\equiv (-\Delta )^{-\alpha /2}f \quad \text{\rm (Riesz potentials)}, \label{1.8} \\
\qquad J^{\alpha }f &=&F^{-1}(1+\left| \xi \right| ^{2})^{-\alpha /2}Ff\equiv (E-\Delta )^{-\alpha /2}f \quad \text{\rm (Bessel potentials)}, \label{1.9} \\
\qquad \mathcal{F}^{\alpha }f &=&F^{-1}(1+\left| \xi \right| )^{-\alpha }Ff\equiv (E+\sqrt{-\Delta })^{-\alpha }f \quad \text{\rm (Flett potentials)}.\label{1.10} \end{eqnarray}
Here $Re\,\alpha >0$, \ $\left| \xi \right| =\left( \xi _{1}^{2}+\cdots +\xi _{n}^{2}\right) ^{1/2},$ \ $\Delta =\sum_{k=1}^{n}\frac{\partial ^{2}}{\partial x_{k}^{2}}$ is the Laplacean, and $E$ is the identity operator. For \ $f\in L_{p}(\mathbb{R}^{n}),$ \ $1\leq p<\infty ,$ these potentials have
remarkable integral representations via the Poisson and Gauss-Weierstrass semigroups, namely, \begin{eqnarray} \label{pot1}I^{\alpha }f(x)&=&\frac{1}{\Gamma (\alpha )}\int_{0}^{\infty }t^{\alpha -1}\mathcal{P}_{t}f(x)\,dt,\qquad 0<Re\,\alpha <n/p\,;\\ \label{pot2}J^\alpha f(x) &=& \frac{1}{\Gamma (\alpha/2 )} \int_0^\infty t^{\alpha/2-1}e^{-t}\,\mathcal{W}_t f(x) \,dt,\qquad 0<Re\,\alpha <\infty ;\\ \label {fla}\mathcal{F}^{\alpha }f(x)&=&\frac{1}{\Gamma (\alpha )}\int_{0}^{\infty }t^{\alpha -1}e^{-t}\mathcal{P}_{t}f(x)\,dt,\,\qquad 0<Re\,\alpha <\infty ;\end{eqnarray} see \cite{30}, \cite{21}, \cite{12}. Regarding Flett potentials, see, in particular, \cite[p. 446-447]{12}, \cite[p. 541-542]{28}, \cite{9}. We also mention another interesting representation of the Bessel potential, which is due to Lizorkin \cite{19} and employs the metaharmonic semigroup, namely, \begin{equation} J^{\alpha }f(x)=\frac{1}{\Gamma (\alpha )}\int_{0}^{\infty }t^{\alpha -1}\mathcal{M}_{t}f(x)\,dt,\qquad 0<Re\,\alpha <\infty . \label{1.13} \end{equation}
Equalities (\ref{pot1})-(\ref{1.13}) have the same nature as classical Balakrishnan's formulas for fractional powers of operators (see \cite[p. 121]{28}).
Let us show how these equalities generate wavelet inversion formulas for the corresponding potentials. The core of the method is the following statement which is a particular case of Lemma 1.3 from \cite{23}. \begin{lemma}\label{lB} Given a finite Borel measure $\mu $ on $[0,\infty )$ and a complex number $\alpha ,$ \ $ \alpha'=Re\,\alpha \geq 0$, let \begin{equation} \lambda _{\alpha }(s)=s^{-1} I_+^{\alpha +1}\mu (s), \label{1.15} \end{equation} where \begin{equation}
I_+^{\alpha +1}\mu(s)=\frac{1}{\Gamma (\alpha +1)} \int_{0}^{s}(s-\eta )^{\alpha }d\mu (\eta ) \label{1.16} \end{equation} is the Riemann-Liouville fractional integral of order $\alpha +1$ of the measure $\mu$. Suppose that $\mu $ satisfies the following conditions: \begin{eqnarray} \label{con1}&{}&\text{ \ }\int_{1}^{\infty
}\eta^{\gamma }d|\mu |(\eta )<\infty \text{ \ \ \textit{for some} \ }\gamma >\alpha' ; \\ \label{con2}&{}&\text{ \ }\int_{0}^{\infty }\eta^{j}d\mu (\eta)=0\text{ \ \ }\forall j=0,1,\ldots ,[Re\,\alpha ]\text{ \ \textit{(the integer part of} }\alpha' \text{).} \end{eqnarray} Then
\begin{eqnarray}\label{impo} \lambda _{\alpha }(s)=\left\{ \begin{array}{lcl}
O(s^{\alpha'-1}), & \text{if} & 0<s<1,\\ O(s ^{-1-\delta})\; \text{for some $\delta >0$}, \; & \text{if} & s>1, \end{array} \right. \end{eqnarray}
and \begin{eqnarray} c_{\alpha,\mu } &\equiv &\int_{0}^{\infty }\lambda _{\alpha }(s)\,ds=\int_{0}^{\infty }\frac{\tilde{\mu}(t)}{t^{\alpha +1}}\,dt \notag \\ {}\nonumber\\ &=&\label{1.17n}\left\{ \begin{array}{lcl}\displaystyle{ \Gamma (-\alpha )\int_{0}^{\infty }\eta^{\alpha }\,d\mu (\eta)} & \text{if} & \alpha \notin \mathbb{N}_{0}=\{0,1,2,\ldots \}, \\ {}\\ \displaystyle{\frac{(-1)^{\alpha +1}}{\alpha !}\int_{0}^{\infty }\eta^{\alpha }\log \eta \, d\mu (\eta)} & \text{if} & \alpha \in \mathbb{N}_{0}, \end{array} \right. \end{eqnarray} where $\tilde{\mu}(t)=\int_{0}^{\infty }e^{-t\eta}d\mu (\eta)$ is the Laplace transform of $\mu$. \end{lemma}
The estimate (\ref{impo}) is important in proving almost everywhere convergence in forthcoming inversion formulas.
Consider, for example, Flett potential (\ref{1.10}), (\ref{fla}), and make use of the composite wavelet transform \begin{equation} \label {pwt} W\varphi (x,t)=\int_0^\infty \mathcal{P}_{t\eta}\varphi(x)\,e^{-t\eta}\,d\mu (\eta),\end{equation} cf. Example \ref{e1} and (\ref{cwtrw}) with $a=1$. \begin{theorem} \label{t1.5}\ Let $f\in L_{p},$ \ $1\leq p\leq \infty ,$ \
and let $\varphi =\mathcal{F}^{\alpha }f,$ \ $\alpha >0$, be the Flett potentials of $f.$ Suppose that $\mu $ is a finite Borel measure on $[0,\infty )$ satisfying (\ref{con1}) and (\ref{con2}).
Then \begin{equation} \int_{0}^{\infty }W_{\mu }\varphi (x,t )\,\frac{dt }{t ^{1+\alpha }}\equiv \lim_{\varepsilon \rightarrow 0}\int_{\varepsilon }^{\infty }W_{\mu }\varphi (x,t )\,\frac{dt }{t^{1+\alpha }} =c_{\alpha ,\mu }f(x), \label{1.20} \end{equation} where \ $c_{\alpha ,\mu }$ is defined by (\ref{1.17n}) and the limit is interpreted in the $L_{p}-$norm\ and pointwise a.e. on \ ${\Bbb R}^n$. If $f\in C_{0}$, the statement remains true with the limit in (\ref {1.20}) interpreted in the sup-norm. \end{theorem} \begin{proof} We sketch the proof and refer the reader to \cite{9} for details. Changing the order of integration, owing to (\ref{pwt}), (\ref{fla}), and the semigroup property of the Poisson integral, we get \begin{equation} W\varphi (x,t )=\frac{1}{\Gamma (\alpha )}\int_{0}^{\infty }d\mu (\eta)\int_{t\eta}^{\infty }(\rho -t\eta )_{+}^{\alpha -1}\,e^{-\rho }\,\mathcal{ P}_{\rho }f(x)\,d\rho . \label{1.21}\end{equation} Then further calculations give \begin{equation} \label {kuku}\int_{\varepsilon }^{\infty }W\varphi (x,t )\frac{dt }{t^{1+\alpha }}= \int_{0}^{\infty }e^{-\varepsilon s }\mathcal{P}_{\varepsilon s }f(x)\,\lambda _{\alpha}(s) \,ds, \quad \lambda _{\alpha}\left(s \right)= s^{-1}I_+^{\alpha +1} \mu (s),\end{equation} cf. (\ref{1.15}). It remains to apply Lemma \ref{lB} combined with the standard machinery of approximation to the identity. \end{proof}
Potentials (\ref{1.8})-(\ref{1.10}) and many others can be similarly inverted by making use of the wavelet transforms associated with suitable semigroups; see \cite{8}, \cite{9}.
\subsection{Examples of wavelet measures}
Examples of wavelet measures, that obey the conditions of Lemma \ref{lB} with $c_{\alpha ,\mu }\neq 0$, are the following.
1. Fix an integer $m>Re \,\alpha $ and choose an even Schwartz function $h(\eta )$ \ on $\mathbb{R}^{1}$ so that $$h^{\left( k\right) }(0)=0 \quad \forall \,k=0,1,2,...,\quad \text{\rm and}\quad \int_{0}^{\infty }\eta^{\alpha -m}h\left( \eta \right) d\eta \neq 0.$$ One can take, for instance, $h\left( \eta \right) =\exp \left( -\eta ^{2}-1/\eta ^{2}\right) ,$ \ $h\left( 0\right) =0.$ Set $d\mu \left( \eta \right) =h^{\left( m\right) }\left( \eta \right) d\eta .$ It is not difficult to show that $\int_{0}^{\infty }\eta ^{k}d\mu \left( \eta \right) =0$, $\forall \ k=0,1,...,[Re \,\alpha]$, \ and $c_{\alpha,\mu }\neq 0.$
2. Let $\mu =\sum\limits_{j=0}^{m}\binom{m}{j}\left( -1\right) ^{j}\delta _{j}$, where $m>Re \,\alpha$ \ is a fixed integer and $ \delta _{j}=\delta _{j}\left( \eta \right) $ denotes the unit mass at the point $\eta =j$, i.e., $\left\langle \delta _{j},f\right\rangle =f(j).$ It is known \cite[p. 117]{28}, \ that \begin{equation*} \int_{0}^{\infty }\eta ^{k}d\mu \left( \eta \right) \equiv \sum\limits_{j=0}^{m}\binom{m}{j}\left( -1\right) ^{j}j^{k}=0,\text{ \ } \forall \text{ \ }k=0,1,...,m-1\text{ \ }\mathrm{(we\ set\ 0^{0}=1\ ).} \end{equation*} Moreover, $c_{\alpha ,\mu }=$ $\int_{0}^{\infty }t^{-\alpha -1}\left( 1-e^{-t}\right) ^{m}dt\neq 0.$
\section{Wavelet transforms with the generalized translation operator}
Continuous wavelet transforms, studied in the previous sections,
rely on the classical Fourier analysis on ${\Bbb R}^n$. Interesting modifications of these transforms and the corresponding potential operators arise in the framework of the Fourier-Bessel harmonic analysis associated to the Laplace-Bessel differential operator \begin{equation} \label{fb} \Delta_\nu =\sum\limits_{k=1}^{n} \frac{\partial^2}{\partial x_k^2}+\frac{2\nu}{x_n}\frac{\partial}{\partial x_n}\ , \qquad \nu>0.\end{equation}
This analysis goes back to the pioneering works by Delsarte \cite{De} and Levitan \cite{18}, and was extensively developed in subsequent publications; see \cite{17}, \cite{32}, \cite{bh}, and references therein.
Let $\mathbb R^n_+=\{x: \, x=(x_1,\ldots , x_n)\in \mathbb R^n, \ x_n > 0\}$ and $x'=(x_1,\ldots , x_{n-1})$. Denote $$
L_{p,\nu}({\Bbb R}^n_+)=\Big \{f: ||f||_{p, \nu}=\Big (
\int_{{\Bbb R}^n_+}\left| f(x)\right| ^{p}x_n^{2\nu}dx\Big )^{1/p}<\infty \Big \}. $$ The Fourier-Bessel harmonic analysis is adapted to {\it the generalized convolutions} \begin{equation}\label{conv} (f \ast g)(x)=\int_{\mathbb
R^n_+} f(y)(T^y g)(x) \, y_n^{2\nu}dy, \qquad x \in \mathbb
R^n_+ ,\end{equation} with the {\it generalized translation operator} \begin{equation}\label{gtop} (T^yf)(x)=\frac{\Gamma(\nu +1/2)}{\Gamma(\nu)\Gamma(1/2)}\int_0^\pi \!\! f(x'-y',\sqrt{x_n^2-2x_ny_n{\hbox{\rm cos}} \alpha +y_n^2})\,\sin^{2\nu-1}\alpha \,d\alpha, \end{equation}
\cite {17}, \cite {18}, \cite {32}. The Fourier-Bessel transform $F_\nu$, for which $F_{\nu }\left( f* g\right) =F_{\nu }\left( f\right) F_{\nu }(g),$ \ is defined by \begin{equation}\label{eq:211} (F_\nu f)(\xi)=\int_{\mathbb R^n_+} f (x)\,e^{i\xi' \cdot x'}j_{\nu-1/2}(\xi_n x_n) \, x_n^{2\nu} dx, \qquad \xi \in \mathbb
R^n_+. \end{equation} Here $j_\lambda (\tau)=2^\lambda\Gamma (\lambda+1) \, \tau^{-\lambda} J_\lambda(\tau)$, where $J_\lambda(\tau)$ is the Bessel function of the first kind. The {\it generalized Gauss-Weierstrass, Poisson, and metaharmonic semigroups} $ \; \{\mathcal{W}_t^{(\nu)}\}, \; \{{\Cal P}_t^{(\nu)}\}, \; \{{\Cal M}_t^{(\nu)}\}$ are defined as follows: \begin{eqnarray}
\label{k11}(\mathcal{W}_t^{(\nu)}f)(x)&=&\int\limits_{\mathbb
R^n_+}w^{(\nu)}(y,t)(T^yf)(x) \, y_n^{2\nu}dy,\\
&& \hskip -1.5truecm F_\nu [w^{(\nu)}(\cdot,t)](\xi) =e^{-t|\xi|^2}; \nonumber \\
\label{k21}({\Cal P}_t^{(\nu)}f)(x)&=&\int\limits_{\mathbb
R^n_+}p^{(\nu)}(y,t)(T^yf)(x) \, y_n^{2\nu}dy, \\
&& \hskip -1.5truecm F_\nu [p^{(\nu)}(\cdot,t)](\xi) =e^{-t|\xi|}; \nonumber \\
\label{k31} ({\Cal M}_t^{(\nu)}f)(x)&=& \int\limits_{\mathbb
R^n_+}m^{(\nu)}(y,t)(T^yf)(x) \, y_n^{2\nu}dy, \\
&& \hskip -1.5truecm F_\nu [m^{(\nu)}(\cdot,t)](\xi) =e^{-t\sqrt{1+|\xi|^2}}. \nonumber \end{eqnarray} The corresponding kernels $ w^{(\nu)}(y,t), \; p^{(\nu)}(y,t)$, and $m^{(\nu)}(y,t)$ have the form \begin{eqnarray}
\label{nu1} w^{(\nu)}(y,t)&=& \frac{2\pi^{\nu +1/2}}{\Gamma(\nu +1/2)} \, (4\pi t)^{-(n+2\nu)/2} e^{-|y|^2/4t},\\ \label{nu2} p^{(\nu)}(y,t)&=& \frac{2 \, \Gamma\big((n+2\nu+1)/2\big)}{\pi^{n/2}\Gamma (\nu+1/2)}\,
\frac{t}{(|y|^2 +t^2)^{(n+2\nu +1)/2}},\\ \qquad \label{nu3} m^{(\nu)}(y,t)&=& \frac{ 2^{-\nu+3/2}t}{\Gamma
(\nu+1/2)(2\pi)^{n/2}}\, \frac{K_{(n+2\nu +1)/2} (\sqrt{|y|^2
+t^2})}{(\sqrt{|y|^2 +t^2})^{(n+2\nu +1)/2}}.\end{eqnarray}
More information about these semigroups and their modifications $$\{e^{-t}\mathcal{W}^{(\nu)}_t\}, \qquad \{e^{-t}{\Cal P}^{(\nu)}_t\}, \qquad \{ e^{-t}{\Cal M}^{(\nu)}_t\},$$ can be found in \cite {2}, \cite {3}, \cite {13}.
Modified Riesz, Bessel, and Flett potentials with the generalized translation operator (\ref{gtop}) are formally defined in terms of the Fourier-Bessel transform by \begin{eqnarray}
I_{\nu }^{\alpha }f &=&F_{\nu }^{-1}\left| \xi \right| ^{-\alpha }F_{\nu }f\equiv \left( -\Delta _{\nu }\right) ^{-\alpha /2}f, \label{2.15} \\
\mathcal{J}_{\nu }^{\alpha }f &=&F_{\nu }^{-1}(1+\left| \xi \right| ^{2})^{-\alpha /2}F_{\nu }f\equiv \left( E-\Delta _{\nu }\right) ^{-\alpha /2}f, \label{2.17}\\
\mathcal{F}_{\nu }^{\alpha }f &=&F_{\nu }^{-1}(1+\left| \xi \right| )^{-\alpha }F_{\nu }f\equiv \left( E+\sqrt{-\Delta _{\nu }}\right) ^{-\alpha }f, \label{2.16} \end{eqnarray} respectively. Here $Re\,\alpha >0$ and $\Delta _{\nu }$ is the Laplace-Bessel differential operator (\ref{fb}). These generalized potentials have analogous to (\ref{pot1})-(\ref{fla}) representations in terms of the semigroups (\ref{k11})-(\ref{k31}), namely, if $ f \in L_{p,\nu}({\Bbb R}^n_+)$ then \begin{eqnarray} \label{pot1n}I_{\nu }^{\alpha }f(x)&=&\frac{1}{\Gamma (\alpha )}\int_{0}^{\infty }t^{\alpha -1}\mathcal{P}_{t}^{(\nu)}f(x)\,dt,\qquad 0<Re\,\alpha <(n+2\nu)/p\,,\\ J_{\nu }^\alpha f(x) &=& \frac{1}{\Gamma (\alpha/2 )} \int_0^\infty t^{\alpha/2-1}e^{-t}\,\mathcal{W}_t^{(\nu)} f(x) \,dt,\qquad 0<Re\,\alpha <\infty ,\\ \label {flan}\mathcal{F}_{\nu }^{\alpha }f(x)&=&\frac{1}{\Gamma (\alpha )}\int_{0}^{\infty }t^{\alpha -1}e^{-t}\mathcal{P}_{t}^{(\nu)}f(x)\,dt,\,\qquad 0<Re\,\alpha <\infty .\end{eqnarray} Moreover, \begin{equation} J_{\nu }^{\alpha }f(x)=\frac{1}{\Gamma (\alpha )}\int_{0}^{\infty }t^{\alpha -1}\mathcal{M}_{t}^{(\nu)}f(x)\,dt,\qquad 0<Re\,\alpha <\infty . \label{1.13n} \end{equation}
We denote by $S_{t}^{(\nu)}$ any of the semigroups \begin{equation}\label{sgr} \mathcal{W}_t^{(\nu)}, \; \;e^{-t}\mathcal{W}_t^{(\nu)}, \;\; \mathcal{P}_{t}^{(\nu)},\; \;e^{-t}{\Cal P}^{(\nu )}_t, \;\; \mathcal{M}_{t}^{(\nu)}, \;\; e^{-t}{\Cal M}^{(\nu)}_t,\end{equation} and define the relevant wavelet transform (cf. (\ref{cwtr})) \begin{equation} \mathfrak{S}^{(\nu)}f(x,t)=\int_{0}^{\infty }S_{t\eta }^{(\nu)}\,f(x)\, d\mu (\eta ), \qquad t>0, \label{2.10} \end{equation} generated by a finite Borel measure $\mu$ on $[0,\infty )$.
There exist analogs of Calder\'{o}n's reproducing formula for wavelet transforms (\ref{2.10}) of functions belonging to the weighted space $L_{p,\nu}({\Bbb R}^n_+)$ and inversion formulas for potentials $I_{\nu }^{\alpha }f, \; J_{\nu }^{\alpha }f, \; \mathcal{F}_{\nu }^{\alpha }f$, when $f\in L_{p,\nu}({\Bbb R}^n_+)$. For example, the following statement holds. \begin{theorem} \label{t2.6} Let $\varphi =I_{\nu }^{\alpha }f$, $f\in L_{p,\nu}({\Bbb R}^n_+)$, $1\leq p<\left( n+2\nu \right) /\alpha $, and suppose that $\mu $ is a finite Borel measure on $[0,\infty )$ satisfying (\ref{con1}) and (\ref{con2}). If \ $\mathfrak{S}^{(\nu)}\varphi $ is the wavelet transform of $\varphi$ associated with the generalized Poisson semigroup $\mathcal{P}_{t}^{(\nu)}$, then \begin{equation} \int_{0}^{\infty }\frac{\mathfrak{S}^{(\nu)}\varphi (x,t)}{t^{1+\alpha }}dt=\lim_{\varepsilon \rightarrow 0}\int_{\varepsilon }^{\infty } \frac{\mathfrak{S}^{(\nu)}\varphi \left( x,t\right) }{t^{1+\alpha }} dt=c_{\alpha ,\mu }f(x), \label{2.19} \end{equation} where $c_{\alpha ,\mu }$ is defined by (\ref{1.17n}). The limit in (\ref{2.19}) exists in the $L_{p,\nu}({\Bbb R}^n_+)$-norm and in the a.e. sense. If $f\in C_{0}$, the convergence in (\ref{2.19}) is uniform. \end{theorem} The proof of this theorem is presented in \cite{8} in the general context of the so-called admissible semigroups. This context includes all semigroups (\ref{sgr}).
\section{ Beta-semigroups}
We recall basic formulas from Section 3.1 for the kernels of the Poisson and Gauss-Weierstrass semigroups: \begin{equation}\label{beta1}
F[p(\cdot ,t)](\xi )=e^{-t\left| \xi\right| }, \qquad p(y,t)=\frac{\Gamma \left( (n+1)/2\right) }{
\pi ^{(n+1)/2}}\frac{t}{(t^{2}+\left| y\right| ^{2})^{(n+1)/2}};\end{equation}
\begin{equation} \label{beta2} F[w(\cdot ,t)](\xi )=e^{-t|\xi |^{2}}, \qquad w(y,t)=(4\pi t)^{-n/2}\exp (-\left| y\right| ^{2}/4t).\end{equation} It would be natural to consider a more general semigroup generated by the kernel $w^{(\beta)}(y,t)$ defined by \begin{equation} \label{beta} F[w^{(\beta)}(\cdot
,t)](\xi )=e^{-t|\xi |^{\beta}}, \qquad \beta>0.\end{equation} This semigroup arises in diverse contexts of analysis, integral geometry, and probability; see, e.g., \cite{Fe}, \cite{Ko}, \cite{La}, \cite{R8}. Unlike (\ref{beta1}) and (\ref{beta2}), the kernel function $w^{(\beta)}(y,t)$ cannot be computed explicitly, however, by taking into account that \begin{equation} w^{(\beta)}(y,t) =t^{-n/\beta } w^{(\beta)}( t^{-1/\beta }y),\qquad w^{(\beta)}(y)\equiv w^{(\beta)}(y,1), \end{equation}
properties of $w^{(\beta)}(y,t)$ are well
determined by the following lemma. \begin{lemma}\label{lb} The function
\begin{equation}\label{gaql}
w^{(\beta)}(y) =F^{-1}[e^{-|\cdot|^\beta}](y)=(2\pi)^{-n}
\int_{{\Bbb R}^n}e^{-|\xi|^\beta} e^{i y\cdot \xi}\, d\xi, \qquad \beta>0,\end{equation} is uniformly continuous on ${\Bbb R}^n$. If $\beta$ is an even integer, then
$w^{(\beta)}(y)$ is infinitely smooth
and rapidly decreasing. More generally, if $\beta\neq 2,4,\ldots $, then $w^{(\beta)}(y)$
has the following behavior when $|y| \to \infty$: \begin{equation}\label{cbe}
w^{(\beta)}(y) =c_\beta |y|^{-n-\beta} (1+o(1)), \quad c_\beta=-\frac{2^{\beta}\pi^{-n/2} \Gamma ((n+\beta)/2)}{ \Gamma (-\beta/2)}.\end{equation} If $0<\beta\le 2$, then $w^{(\beta)}(y)>0$ for all $y \in {\Bbb R}^n$. \end{lemma}
\begin{proof} (Cf. \cite[p. 44, for $n=1$]{Ko}). The uniform continuity of $w^{(\beta)}(y)$ follows immediately from (\ref{gaql}). Note that if $\beta$ is an even integer, then $e^{-|\cdot|^\beta}$ is a Schwartz
function and therefore, $w^{(\beta)}(y)$ is infinitely smooth
and rapidly decreasing. Let us prove positivity of $w^{(\beta)}(y)$ when $0<\beta\le 2$. For $y=0$ and for the cases $\beta=1$ and $\beta=2$, this is obvious.
Let $0<\beta< 2$. By Bernstein's theorem \cite[Chapter 18, Sec. 4]{Fel}, there is a non-negative finite measure $\mu_\beta$ on $[0,\infty)$ so that $ e^{-z^{\beta/2}}=\int_0^\infty e^{-tz}\,d\mu_\beta (t)$, $z\in
[0,\infty)$. Replace $z$ by $|\xi|^2$ to get \begin{equation}\label{751}
e^{-|\xi|^{\beta}}=\int_0^\infty e^{-t|\xi|^2}\,d\mu_\beta (t).\end{equation} Then the equality \begin{equation}\label{75}
[e^{-t|\cdot\,|^2}]^{\wedge}(y)=\pi^{n/2}t^{-n/2}e^{-|y|^2/4t}, \qquad t>0,\end{equation} yields \begin{eqnarray}
(2\pi)^{n}\,w^{(\beta)}(y)&=&\int_{{\Bbb R}^n}e^{i \xi\cdot
y}d\xi\int_0^\infty e^{-t|\xi|^2}\,d\mu_\beta (t)= \int_0^\infty d\mu_\beta
(t)\int_{{\Bbb R}^n}e^{i \xi\cdot y} e^{-t|\xi|^2}\,d\xi\nonumber\\&=&
\pi^{n/2}\int_0^\infty t^{-n/2}e^{-|y|^2/4t}\,d\mu_\beta (t)>0.\nonumber\end{eqnarray} The Fubini theorem is applicable here, because, by (\ref{751}), $$
\int_{{\Bbb R}^n}|e^{i \xi\cdot y}|d\xi\int_0^\infty e^{-t|\xi|^2}\,d\mu_\beta (t)=\int_{{\Bbb R}^n}
e^{-|\xi|^{\beta}}d\xi<\infty.$$
Let us prove (\ref{cbe}). It suffices to show
that
\begin{equation}\label{73p}
\lim\limits_{|y| \to \infty}|y|^{n +\beta}w^{(\beta)}(y)=2^{\beta}\pi^{-n/2-1}\Gamma (1+\beta/2)\Gamma ((n+\beta)/2)\, \sin (\pi \beta/2)\end{equation} (we leave to the reader to check that the right-hand side coincides with $c_\beta$). For $n=1$, this statement can be found in \cite [Chapter 3, Problem 154]{PS} and in \cite [p. 45]{Ko}. In the general case, the proof is more sophisticated and relies on the properties of Bessel functions. By the well-known formula for the Fourier transform of a radial function (see, e.g., \cite{31}), we write
$(2\pi)^{n}\,w^{(\beta)}(y)=I(|y|)$, where \begin{eqnarray} I(s)&=&(2\pi)^{n/2}s^{1-n/2}\int_0^\infty e^{-r^{\beta}} r^{n/2} J_{{n/2-1}} (rs)\, dr\nonumber\\&=&(2\pi)^{n/2}s^{-n}\int_0^\infty e^{-r^{\beta}} \frac{d}{dr}\,[(rs)^{n/2} J_{{n/2}} (rs)]\, dr.\nonumber\end{eqnarray} Integration by parts yields $$ I(s)=\beta(2\pi)^{n/2}s^{-n/2}\int_0^\infty e^{-r^{\beta}} r^{n/2+\beta-1} J_{{n/2}} (rs)\, dr.$$ Changing variable $z=s^\beta r^\beta$, we obtain $$ s^{n +\beta}I(s)=(2\pi)^{n/2} A(s^{-\beta}), \qquad A(\delta)= \int_0^\infty e^{-z\delta} z^{n/2\beta} J_{n/2} (z^{1/\beta})\,dz. $$ We actually have to compute the limit $A_0=\lim\limits_{\delta \to 0} A(\delta)$. To this end, we invoke Hankel functions $H_\nu^{(1)} (z)$, so that $ J_\nu (z)=Re \,H_\nu^{(1)} (z)$ if $z$ is real \cite{Er}.
Let $h_\nu (z)=z^\nu H_\nu^{(1)} (z)$. This is a single-valued analytic function in the $z$-plane with cut $(-\infty, 0]$. Using the properties of the Bessel functions \cite{Er}, we get
\begin{equation}\label {as} \lim\limits_{z \to 0}h_\nu (z)=2^\nu \Gamma (\nu)/\pi i,\end{equation} \begin{equation}\label {as1} h_\nu (z) \sim \sqrt{2/\pi} \, z^{\nu -1/2}e^{iz-\frac{\pi i }{2}(\nu +\frac{1}{2})}, \qquad z \to \infty.\end{equation} Then we write $A(\delta)$ as $ A(\delta)= Re \,\int_0^\infty e^{-z\delta} h_{n/2} (z^{1/\beta})\,dz$ and change the line of integration from $[0,\infty)$ to $n_\theta=\{z: z=re^{i\theta}, \; r>0\}$ for small $\theta<\pi \beta/2$. By Cauchy's theorem, owing to (\ref{as}) and (\ref{as1}), we obtain $ A(\delta)= Re \,\int_{n_\theta} e^{-z\delta} h_{n/2} (z^{1/\beta})\,dz$. Since for $z=re^{i\theta}$, $ h_{n/2}
(z^{1/\beta})=O(1)$ when $r=|z|\to 0$ and $ h_{n/2} (z^{1/\beta})=O(r^{(n -1)/2\beta} e^{-r^{1/\beta}\sin (\theta /\beta)})$ as $r\to \infty$, by the Lebesgue theorem on dominated convergence, we get $ A_0=Re \,\int_{n_\theta} h_{n/2} (z^{1/\beta})\,dz$. To evaluate
the last integral, we again use analyticity and replace $n_\theta$ by $n_{\pi \beta/2}=\{z: z=re^{i\pi \beta/2}, \; r>0\}$ to get $$ A_0=Re \,\Big [e^{i\pi \beta/2}\int_0^\infty h_{n/2} (r^{1/\beta}e^{i\pi/2})\,dr\Big ].$$ To finalize calculations, we invoke McDonald's function $K_\nu (z)$ so that $$ h_\nu (z)=z^\nu H_\nu^{(1)} (z)=-\frac{2i}{\pi}(z e^{-i\pi/2})^\nu K_\nu (z e^{-i\pi/2}).$$ This gives $$ A_0=\frac{2\beta}{\pi}\, \sin (\pi \beta/2) \int_0^\infty s^{n/2 +\beta-1} K_{n/2} (s)\, ds. $$ The last integral can be explicitly evaluated by the formula 2.16.2 (2) from \cite {PBM}, and we obtain the result. \end{proof}
The Beta-semigroup $\mathcal{B}_{t}$ generated by the kernel $w^{(\beta)} (y,t)$ (see (\ref{beta})) is defined by \begin{equation} \mathcal{B}_{t}f(x)=\int_{\mathbb{R}^{n}}w^{\left( \beta \right) }\left( y,t\right) f\left( x-y\right) dy, \qquad t>0, \label{2.21} \end{equation} and the corresponding weighted wavelet transform has the form \begin{equation}\label{bew} W_a f(x,t)=\int_0^\infty {\Cal B}_{t\eta} f (x)\, e^{-at\eta}\,d\mu (\eta),\end{equation}
where $a\ge 0$ is a fixed number which is at our disposal; cf.
(\ref{cwtrw}).
Following \cite{A}, we introduce Beta-potentials
\begin{equation}\label{bpot}
J_\beta^\alpha f = (E+ (-\Delta)^{\beta/2})^{-\alpha/\beta}f,\qquad \alpha >0, \quad \beta >0,\end{equation}
that can be realized through the Beta-semigroup as
\begin{equation}\label{bpott} J_\beta^\alpha f (x)=\frac{1}{\Gamma (\alpha /\beta )} \int_{0}^{\infty }t^{\alpha/\beta-1}\text{ }e^{-t}\text{ } \mathcal{B}_{t}f(x)\text{ }dt. \end{equation} For $\beta=2$, (\ref{bpot}) coincides with the classical Bessel potential (\ref{1.9}), and (\ref{bpott}) mimics (\ref{pot2}). Similarly, for $\beta=1$, the Beta-potentials coincide with the Flett potential (\ref{fla}).
Explicit inversion formulas for Beta-potentials can be obtained with the aid of the wavelet transform (\ref{bew}) as follows. \begin{theorem} \label{t2.7} Let $f\in L_{p}(\mathbb{R}^{n})$, \ $1\leq p< \infty $, $\alpha >0, \; \beta>0$. Suppose that $\mu $ is a finite Borel measure on $[0,\infty )$ satisfying \begin{eqnarray*} &\text{(a) \ }&\int_{1}^{\infty }\eta^{\gamma }\text{
}d\left| \mu \right| \left( \eta\right) <\infty \text{ \ \ for some \ }\gamma >\alpha /\beta ; \\ &\text{(b) \ }&\int_{0}^{\infty }\eta^{j}\text{ }d\mu \left( \eta\right) =0, \quad \forall \,j=0,1,...,[\alpha /\beta ]. \end{eqnarray*} If $\varphi =J_{\beta }^{\alpha }f$, then \begin{equation}\label{form} \int_{0}^{\infty }W\varphi \left( x,t\right) \frac{dt}{t^{1+\alpha /\beta }}\equiv \lim_{\varepsilon \rightarrow 0}\int_{\varepsilon }^{\infty }W\varphi \left( x,t\right) \frac{dt}{t^{1+\alpha /\beta }}=c_{\alpha /\beta ,\mu }f(x), \end{equation} where $c_{\alpha/\beta,\mu }$ \ is defined by (\ref{1.17n}) (with $\alpha$ replaced by $\alpha/\beta$). The limit in (\ref{form}) exists in the $L_{p}$-norm and pointwise
for almost all $x$. If $f\in C_{0}$, the convergence is uniform. \end{theorem}
The proof of this theorem mimics that of Theorem \ref {t1.5}; see \cite {A} for details.
\begin{remark} The classical Riesz potential $I^{\alpha}f$ has an integral representation via the Beta-semigroup, namely, \begin{equation}\label{formr} I^{\alpha}f(x)= \frac{1}{\Gamma (\alpha /\beta )} \int_{0}^{\infty }t^{{\alpha/\beta}-1}\text{ } \mathcal{B}_{t}f(x)\text{ }dt.\end{equation} Here $f \in L_{p}(\mathbb{R}^{n}), \ 1\leq p < \infty$, and \ $ 0< Re \alpha < n/p$. For the cases $\beta=1$ and $\beta=2$ we have the representations in terms of the Poisson and Gauss-Weierstrass semigroups, respectively.
The potential $I^{\alpha}f$ can be inverted in the framework of the $L_{p}$-theory by making use of (\ref{formr}) and the composite wavelet transform (\ref{bew}) with $a=0$.
\end{remark}
\section{Parabolic Wavelet Transforms}
The following anisotropic wavelet transforms of the composite type, associated with the heat operators \begin{equation}\label{Ho} \partial /\partial t -\Delta, \qquad
E+\partial /\partial t -\Delta, \end{equation} were introduced by Aliev and Rubin \cite{7}. These transforms are constructed using the Gauss-Weierstrass kernel $w(y, t) = (4\pi t)^{-n/2} \exp (- |y|^2/4t)$ as follows. Let ${\Bbb R}^{n+1}$ be the $(n+1)$-dimensional Euclidean space of points $(x, t)$, $x = (x_1, \ldots, x_n) \in {\Bbb R}^n, \, $ $t \in {\Bbb R}^1$. We pick up a wavelet measure $\mu$ on $[0,\infty)$, a scaling parameter $a>0$, and set \begin{equation} \label{pwtr} P_\mu f(x, t;a) = \int_{{\Bbb R}^n \times (0, \infty)} f(x-\sqrt{a} y, t-a\tau) \,w(y, \tau) \, dyd\mu (\tau), \end{equation} \begin{equation} \label{pwtrw}{\Cal P}_\mu f(x, t; a) = \int_{{\Bbb R}^n \times (0, \infty)} f(x - \sqrt{a} y, t- a\tau) \,w(y, \tau)\, e^{-a\tau} \,dyd\mu(\tau)\end{equation} (to simplify the notation, without loss of generality we can assume $\mu (\{0\})=0$). We call (\ref{pwtr}) and (\ref{pwtrw}) the {\it parabolic wavelet transform} and the {\it weighted parabolic wavelet transform}, respectively.
Parabolic potentials $H^\alpha f$ and ${\Cal H}^\alpha f$, associated to differential operators in (\ref{Ho}), are defined in the Fourier terms by \begin{equation}\label{ppo} F[H^\alpha f](\xi, \tau) = (|\xi|^2 + i
\tau)^{-\alpha/2} F[f] (\xi, \tau),\end{equation} \begin{equation}\label{ppo1} F[\mathcal H^\alpha f](\xi, \tau) = (1+ |\xi|^2 + i \tau)^{-\alpha/2} F[f](\xi, \tau),\end{equation} where $F$ stands for the Fourier transform in ${\Bbb R}^{n+1}$. These potentials were introduced by Jones \cite{Jo} and Sampson \cite{Sa} and used as a tool for characterization of anisotropic function spaces of fractional smoothness; see \cite{7} and references therein. For $\alpha>0$, potentials $H^\alpha f$ and ${\Cal H}^\alpha f$ are representable by the integrals \begin{eqnarray} \qquad H^\alpha f(x, t) &=& {1\over \Gamma(\alpha/2)} \int_{{\Bbb R}^n \times (0,\infty)} \tau^{\alpha/2-1} w(y, \tau) \,f(x-y, t-\tau)\, dyd\tau,\\
{\Cal H}^\alpha f(x, t)&=&{1\over \Gamma (\alpha/2)} \int_{{\Bbb R}^n \times (0, \infty)} \tau^{\alpha/2-1} e^{-\tau} w(y, \tau) \,f(x-y, t-\tau) \, dyd\tau.\end{eqnarray} Their behavior on functions $f \in L_p \equiv L_p ({\Bbb R}^{n+1})$ is characterized by the following theorem.
\begin{theorem} \cite {Ba}, \cite {Ra} \newline {\rm I.} \ Let $f \in L_{p}, \; 1 \le p < \infty, \; 0 < \alpha < (n+2)/p, \quad q = (n+2-\alpha p)^{-1} (n+2) p$.
{\rm (a)} \ The integral $(H^\alpha f)(x, t)$ converges absolutely for almost all $(x, t) \in {\Bbb R}^{n+1}$.
{\rm (b)} \ For $p > 1$, \ \ the operator $H^\alpha$ is bounded from $L_{p}$ into $L_{q}$.
{\rm (c)} \ For $p = 1$, $H^\alpha$ is an operator of the weak $(1, q)$ type: $$
|\{ (x, t): |(H^\alpha f) (x, t) | > \gamma\}| \le \left({c\| f \|_1 \over \gamma}\right)^q. $$ \newline {\rm II.} \ The operator ${\Cal H}^\alpha$ is bounded on $L_{p}$ for all $\alpha \ge 0, \quad 1 \le p \le \infty$. \end{theorem}
Explicit inversion formulas for parabolic potentials in terms of wavelet transforms (\ref{pwtr}) and (\ref{pwtrw}) are given by the following theorem. \begin{theorem} \cite{7} Let $\mu$ be a finite Borel measure on $[0, \infty)$ satisfying the following conditions:
\begin{eqnarray} \label{conn1}&{}&\text{ \ }\int_{1}^{\infty }t^{\gamma
}d|\mu |(t )<\infty \text{ \ \ \textit{for some} \ }\gamma >\alpha/2 ; \\ \label{conn2}&{}&\text{ \ }\int_{0}^{\infty }t^{j}d\mu (t)=0\text{ \ , \ }\forall j=0,1,\ldots ,[\alpha/2 ]. \end{eqnarray} Suppose that $\varphi = H^\alpha f, \; \; f \in L_p, \; \; 1 \le p < \infty, \; \; 0 < \alpha < (n+2)/p$. Then
\begin{equation}\label {inf}\int_0^\infty P_\mu \varphi(x, t; a) \; {da\over a^{1+\alpha/2}} \, \equiv \, \lim_{\varepsilon \to 0} \int^\infty_\varepsilon (\dots) = c_{{\alpha}/2, \mu} \ f(x, t),\end{equation} where $c_{{\alpha}/2, \mu}$ is defined by (\ref{1.17n}) (with $\alpha$ replaced by $\alpha /2$).
The limit in (\ref {inf}) is interpreted in the $L_{p}$-norm for $1 \le p < \infty$ and a.e. on ${\Bbb R}^{n+1}$ for $1 < p < \infty$.
The same statement holds for all $\alpha > 0$ and $1 \le p \le \infty$ ($L_\infty$ is identified with $C_0$)
provided that $H^\alpha$ and $P_\mu$ are replaced by ${\Cal H}^\alpha$ and ${\Cal P}_\mu$, respectively. \end{theorem}
More general results for parabolic wavelet transforms with the generalized translation associated to singular heat operators \begin{equation}\label{Hos} \partial /\partial t -\Delta_{\nu}, \qquad
E+\partial /\partial t -\Delta_{\nu}, \qquad \qquad\Big (\Delta _{\nu }=\sum\limits_{k=1}^{n}\frac{\partial ^{2}}{\partial x_{k}^{2}}+\frac{2\nu}{x_{n}}\,\frac{\partial}{\partial x_{n}}\Big ),\end{equation}
were obtained in \cite{6}. These include the Calder\'{o}n-type reproducing formula and explicit $L_p$-inversion formulas for parabolic potentials with the generalized translation defined by \begin{eqnarray*}
H_{\nu }^{\alpha }f(x,t) &=&F_{\nu }^{-1}[(\left| x\right| ^{2}+it)^{-\alpha /2}\text{ }F_{\nu }f(x,t)], \\
\mathcal{H}_{\nu }^{\alpha }f(x,t) &=&F_{\nu }^{-1}[(1+\left| x\right| ^{2}+it)^{-\alpha /2}\text{ }F_{\nu }f(x,t)]. \end{eqnarray*} In the last two expressions, $x\in \mathbb{R}_{+}^{n}=\{x\in \mathbb{R}^{n}:$ \ $ x_{n}>0\}$, $\; t\in \mathbb{R}^{1}$, and $F_{\nu}$ \ is the Fourier-Bessel transform, i.e., the Fourier transform with respect to the variables $ t $ and $x^{\prime }=(x_{1},...,x_{n-1}),$ and the Bessel transform with respect to $x_{n}>0.$ These results were applied in \cite{6, 7} to wavelet-type
characterization of the parabolic Lebesgue spaces.
\section{Some Applications to Inversion of the $k$-plane Radon Transform}
We recall some basic definitions. More information can be found in \cite{GGG, 15, 10, 24, 25}. Let $\ \mathcal{G}_{n,k}$ \ and $ G_{n,k}$ be the affine Grassmann manifold of all non-oriented $k$-dimensional planes ($k$-planes) $\tau $ \ in $\mathbb{R}^{n}$ and the ordinary Grassmann manifold of $k$-dimensional linear subspaces $\zeta $ of $ \mathbb{R}^{n}$, respectively. Each $k$-plane $\tau \in \mathcal{G}_{n,k}$ is parameterized as $\tau =\left( \zeta \text{, }u\right) $, where $\zeta \in G_{n,k}$ and $u\in \zeta ^{\perp }$ (the orthogonal complement of $\zeta $ in $\mathbb{R}^{n}$). We endow $\mathcal{G}_{n,k}$ with the product measure $d\tau =d\zeta du$, where $d\zeta $ is the $O(n)$-invariant measure on $G_{n,k}$ of total mass $1,$ and $du$ denotes the Euclidean volume element on $\zeta ^{\perp }$. The \textit{\ k-plane Radon transform }of a function $f$ on $\mathbb{R}^{n}$ is defined by \begin{equation} \hat f (\tau )\equiv \hat f (\zeta \text{, } u)=\int_{\zeta }f(y+u)\,dy, \label{3.1} \end{equation} where $dy$ is the induced Lebesgue measure on the subspace \ $\zeta \in G_{n,k}.$ \ This transform assigns to a function $f$ a collection of integrals of $f$ over all $k$-planes in ${\Bbb R}^n$. The corresponding \textit{dual $k$-plane transform} of a function $\varphi$ on $ \mathcal{G}_{n,k}$ is defined as the mean value of $\varphi \left( \tau \right) $ over all $k$-planes $\tau $ through $x\in \mathbb{R}^{n}$: \begin{equation} \check{\varphi}\left( x\right) =\int_{O(n)}\varphi (\sigma \zeta _{0}+x) \, d\sigma , \qquad x\in \mathbb{R}^{n}. \label{3.2} \end{equation} Here $\zeta _{0}\in G_{n,k}$ \ is an arbitrary fixed $k-$plane through the origin. If $f\in L_{p}(\mathbb{R}^{n}),$ then $\hat f $ is finite a.e. on $\mathcal{G}_{n,k}$ \ if and only if $1\leq p<n/k.$
Several inversion procedures are known for $\hat f$. One of the most popular, which goes back to Blaschke and Radon, relies on the Fuglede formula \cite[p. 29]{15}, \begin{equation} ( \hat f)^{\vee }=d_{k,n}I^{k}f, \qquad d_{k,n}=\left( 2\pi \right) ^{k}\sigma _{n-k-1}/\sigma _{n-1}, \label{3.3} \end{equation} and reduces reconstruction of $f$ to inversion of the Riesz potentials $I^{k}f.$ The latter can also be inverted in a number of ways \cite{29}, \cite {28}, \cite{21}. In view of considerations in Sections 3.2 and 5, one can employ a composite wavelet transform generated by the Poisson, Gauss-Weierstrass, or Beta semigroup and thus obtain new inversion formulas for the $k$-plane transform on ${\Bbb R}^n$ in terms of a wavelet measure on the one-dimensional set $[0,\infty)$. For instance, this way leads to the following \begin{theorem} \label{t3.1} Let $\varphi =\hat f$ be the $k$-plane Radon transform of a function $f\in L_{p}$, $1\leq p<n/k$. $\ $Let $\mu $ be a finite Borel measure on $ [0,\infty )$ satisfying \begin{eqnarray*}
&\text{(a) \ }&\int_{1}^{\infty }\eta ^{\gamma }d\left| \mu \right| \left( \eta \right) <\infty \text{ \ for some }\gamma >k; \\ &\text{(b) \ }&\int_{0}^{\infty }\eta ^{j}d\mu \left( \eta \right) =0 \quad \forall \text{ }j=0,1,...,k. \end{eqnarray*} Let $W\check{\varphi}$ be the wavelet transform of $\check{\varphi}$, associated with the Poisson semigroup (\ref{1.1}), namely, \begin{equation} \label{pWtr} W\check{\varphi}(x,t)=\int_0^\infty \mathcal{P}_{t\eta} \check{\varphi}(x)\, d\mu(\eta), \qquad x \in {\Bbb R}^n, \;\; t>0.\end{equation}
Then \begin{equation} \int_{0}^{\infty }W\check{\varphi}\left( x,t\right) \frac{dt}{t^{1+k}}\equiv \lim_{\varepsilon \rightarrow 0}\int_{\varepsilon }^{\infty }W\check{\varphi} \left( x,t\right) \frac{dt}{t^{1+k}}=c_{k,\mu }f(x), \label{3.4} \end{equation} where (cf. (\ref{1.17n})), \begin{equation*} c_{k,\mu }=\frac{\left( -1\right) ^{k+1}}{k!}\int_{0}^{\infty }t^{k}\log t\text{ }d\mu \left( t\right) . \end{equation*} The limit in (\ref{3.4}) exists in the $L_{p}$-norm and pointwise almost everywhere. If $f\in C_{0}\cap L_{p}$, the convergence is uniform on ${\Bbb R}^n$. \end{theorem}
\begin{remark} The following observation might be interesting. Let \begin{equation} I_-^\alpha u(t)=\frac{1}{\Gamma (\alpha)}\int_t^\infty (s-t)^{\alpha -1} u(s)\,ds, \qquad t>0,\end{equation} be the Riemann-Liouville integral of $u$. It is known \cite[formula (16.9)]{21} that the Poisson integral takes the Riesz potential $I^\alpha f$ to the Riemann-Liouville integral of the function $t \to {\Cal P}_t f$, namely, \begin{equation} \label {comb4} {\Cal P}_t I^\alpha f=I_-^\alpha {\Cal P}_{(\cdot)} f.\end{equation}
Denoting by $R$ and $R^{\ast }$ the Radon $k$-plane transform and its dual, owing to Fuglede's formula
(\ref{3.3}), we have \begin{equation} R^{\ast }Rf=d_{k,n}\,I^{k}f. \label{3.5} \end{equation} Combining (\ref{3.5}) and (\ref{comb4}), we get \begin{equation} R_{t}^{\ast }Rf=d_{k,n}\,I_-^k {\Cal P}_{(\cdot)} f, \qquad R_{t}^{\ast }\varphi (x)=(\mathcal{P}_{t}R^{\ast }\varphi )(x) . \label{3.7} \end{equation} This formula has the same nature as the following one in terms of the spherical means, that lies in the scope of the classical Funk-Radon-Helgason theory: \begin{equation}\label{frh} (\hat f)^\vee_r (x)=\sigma_{k-1}\int_r^\infty (\fr{M}} \def\frm{\fr{m}_t f)(x) (t^2 -r^2)^{k/2 -1} t \, dt; \end{equation} see Lemma 5.1 in \cite{24}. Here $\sigma_{k-1}$ is the volume of the $(k-1)$-dimensional unit sphere, \begin{equation} (\fr{M}} \def\frm{\fr{m}_t f)(x)=\frac{1}{\sigma_{n-1}}\int_{S^{n-1}} f(x+t\theta ) \,d \theta, \qquad t>0, \end{equation} and $(\hat f)^\vee_r (x)$ is the so-called {\it shifted dual $k$-plane transform}, which is the mean value of $\hat f (\tau)$ over all $k$-planes $\tau$ at distance $r$ from $x$.
\end{remark}
\section{Higher-rank Composite Wavelet Transforms and Open Problems}
Challenging perspectives and open problems for composite wavelet transforms are connected with functions of matrix argument and their application to integral geometry. This relatively new area encompasses the so-called higher-rank problems, when traditional scalar notions, like distance or scaling, become matrix-valued.
\subsection{Matrix spaces, preliminaries} We recall basic notions, following \cite{R9}. Let $\fr{M}} \def\frm{\fr{m}_{n,m} \sim {\Bbb R}^{nm}$ be the space of real matrices $x=(x_{i,j})$ having $n$ rows and $m$
columns, $n\geq m$; $dx=\prod^{n}_{i=1}\prod^{m}_{j=1}
dx_{i,j}$ is the volume element on $\fr{M}} \def\frm{\fr{m}_{n,m}$, $x'$ denotes the transpose of $x$, and $I_m$
is the identity $m \times m$
matrix. Given a square matrix $a$, we denote by
${\hbox{\rm det}}(a)$
the determinant of $a$, and by $|a|$ the absolute value of
${\hbox{\rm det}}(a)$;
${\hbox{\rm tr}} (a)$ stands for the trace of $a$. For $x\in \fr{M}} \def\frm{\fr{m}_{n,m}$, we denote \begin{equation}\label{xm}|x|_m ={\hbox{\rm det}} (x'x)^{1/2}.\end{equation} If $m=1$, this is the usual Euclidean norm on ${\Bbb R}^n$. For $m>1$,
$|x|_m$ is the volume of the parallelepiped spanned by the column-vectors of $x$.
We use standard
notations $O(n)$ and $SO(n)$ for the orthogonal group and the
special orthogonal group of ${\Bbb R}^{n}$ with the normalized
invariant measure of total mass 1. Let ${\Cal S}_m \sim {\Bbb R}^{m(m+1)/2}$ be the space of $m \times m$ real symmetric matrices $s=(s_{i,j})$
with the volume element $ds=\prod_{i \le j} ds_{i,j}$. We denote by $\P_m$ the cone of positive definite matrices in ${\Cal S}_m$; $\overline\P_m$ is the closure of $\P_m$, that is, the set of all positive semi-definite $m\times m$ matrices. For $r\in\P_m$ ($r\in\overline\P_m$), we write $r>0$ ($r\geq 0$). Given
$a$ and $b$ in ${\Cal S}_m$, the inequality $a >b$ means $a - b \in
\P_m$ and the symbol $\int_a^b f(s) ds$ denotes
the integral over the set $(a +\P_m)\cap (b -\P_m)$.
The group $G=GL(m,{\Bbb R})$ of
real non-singular $m \times m$ matrices $g$ acts transitively on $\P_m$
by the rule $r \to g rg'$. The corresponding $G$-invariant
measure on $\P_m$ is \begin{equation}\label{2.1}
d_{*} r = |r|^{-d} dr, \qquad |r|={\hbox{\rm det}} (r), \qquad d= (m+1)/2 \end{equation}
\cite[p. 18]{Te}.
\begin{lemma}\label{12.2} \cite[pp. 57--59] {Mu}\hskip10truecm
\noindent
{\rm (i)} \ If $ \; x=ayb$ where $y\in\fr{M}} \def\frm{\fr{m}_{n,m}, \; a\in GL(n,{\Bbb R})$, and $ b \in GL(m,{\Bbb R})$, then
$dx=|a|^m |b|^ndy$. \\
{\rm (ii)} \ If $ \; r=q'sq$ where $s\in {\Cal S}_m$, and $q\in GL(m,{\Bbb R})$,
then $dr=|q|^{m+1}ds$. \\
{\rm (iii)} \ If $ \; r=s^{-1}$ where $s\in \P_m$, then $r\in
\P_m$,
and $dr=|s|^{-m-1}ds$. \end{lemma}
For $Re
\, \alpha >d-1$, the Siegel gamma function of $\P_m$
is defined by \begin{equation}\label{2.444}
\Gamma_m (\alpha)=\int_{\P_m} \exp(-{\hbox{\rm tr}} (r)) |r|^{\alpha } d_*r
=\pi^{m(m-1)/4}\prod\limits_{j=0}^{m-1} \Gamma (\alpha- j/2), \end{equation} \cite{FK, Te}. The relevant beta function has the form \begin{equation}\label{2.6}
B_m (\alpha ,\beta)=\int_0^{I_m} |r|^{\alpha -d} |I_m-r|^{\beta -d} dr=
\frac{\Gamma_m (\alpha)\Gamma_m (\beta)}{\Gamma_m (\alpha+\beta)}, \quad d= (m+1)/2. \end{equation} This integral converges absolutely if and only if $Re
\, \alpha, Re \, \beta >d-1$.
All function spaces on $\fr{M}} \def\frm{\fr{m}_{n,m}$ are identified with the corresponding spaces on ${\Bbb R}^{nm}$. For instance, ${\Cal S}(\fr{M}} \def\frm{\fr{m}_{n,m})$ denotes the Schwartz space of infinitely differentiable rapidly decreasing functions. The Fourier transform
of a function $f\in L_{1}(\fr{M}} \def\frm{\fr{m}_{n,m})$ is defined by \begin{equation}\label{ft} {\Cal F} f(y)=\int_{\fr{M}} \def\frm{\fr{m}_{n,m}} \exp({\hbox{\rm tr}}(iy'x)) f (x) dx,\qquad y\in\fr{M}} \def\frm{\fr{m}_{n,m} \; .\end{equation}
The {\it Cayley-Laplace operator} $\Delta$
on $ \fr{M}} \def\frm{\fr{m}_{n,m}$ is defined by \begin{equation}\label{K-L} \Delta={\hbox{\rm det}}(\partial '\partial), \qquad \partial=(\partial/\partial x_{i,j}). \end{equation} In terms of the Fourier transform, the action of $\Delta$ represents a multiplication by the homogeneous polynomial
$(-1)^m |y|_m^2$ of degree $2m$ in $nm$ variables $y_{i,j}$.
For the sake of simplicity, for some operators on functions of matrix argument we will use the same notation as in the previous sections.
The G{\aa}rding-Gindikin integrals of functions $f$ on $\P_m$
are defined by
\begin{equation}\label{3.1} (I_{+}^\alpha f)(s) \! = \! \frac {1}{\Gamma_m(\a)} \int\limits_0^s \! f(r)|s \! - \! r|^{\alpha-d} dr, \quad (I_{-}^\alpha f)(s) \! = \! \frac
{1}{\Gamma_m(\a)} \int\limits_s^\infty \! f(r)|r \! - \! s|^{\alpha-d} dr,\end{equation} where $s \in \P_m$ in the first integral and $s \in \overline\P_m$ in the second one. We assume $Re \, \alpha > d-1$, $ d=(m+1)/2$ (this condition is necessary for absolute convergence of these integrals). The first integral exists a.e. for arbitrary locally integrable function $f$. Existence of the second integral requires extra assumptions for $f$ at infinity.
The {\it Riesz potential} of a function $f\in{\Cal S}(\fr{M}} \def\frm{\fr{m}_{n,m})$
is defined by \begin{equation}\label{rie} (I^\alpha f)(x)=\frac{1}{\gamma_{n,m} (\alpha)} \int_{\fr{M}} \def\frm{\fr{m}_{n,m}}
f(x-y) |y|^{\alpha-n}_m dy;\end{equation} \begin{equation}\label{gam} \gamma_{n,m} (\alpha)=\frac{2^{\alpha m} \, \pi^{nm/2}\, \Gamma_m (\alpha/2)}{\Gamma_m ((n-\alpha)/2)}, \ \ \ Re \, \alpha>m-1, \ \ \alpha\neq n-m+1, \, n-m+2, \ldots \end{equation} This integral is finite a.e. for $f \in L_{p}(\fr{M}} \def\frm{\fr{m}_{n,m})$ provided $ 1 \le p <n(Re \, \alpha +m-1)^{-1}$ \cite[Theorem 5.10]{R9}.
An application of the Fourier transform gives \begin{equation}\label{hek} {\Cal F}
[I^\alpha f](\xi)=|\xi|_m^{-\alpha} {\Cal F} f(\xi)\end{equation} (as in the case of ${\Bbb R}^n$), so that $I^\alpha$ can be formally identified with the negative power of the Cayley-Laplace operator (\ref{K-L}), namely, $I^\alpha=(-\Delta_m)^{-\alpha/2}$. Discussion of precise meaning of the equality (\ref{hek}) and related references can be found in \cite{R9}, \cite {OR2}.
\begin{definition} For $x \in \fr{M}} \def\frm{\fr{m}_{n,m}, \; n \ge m$, and $t \in \P_m$, we define the (generalized) heat kernel $h_t (x)$ by the formula
\begin{equation}\label{heat} h_t (x)=(4\pi)^{-nm/2}|t|^{-n/2} \exp (-{\hbox{\rm tr}} (t^{-1}
x'x)/4), \qquad |t|={\hbox{\rm det}} (t), \end{equation} and set \begin{equation}\label{ga} H_t f(x)=\int\limits_{\fr{M}} \def\frm{\fr{m}_{n,m}} h_t (x-y) f(y) dy=\int\limits_{\fr{M}} \def\frm{\fr{m}_{n,m}} h_{I_m} (y) f(x-yt^{1/2}) \, dy. \end{equation} \end{definition}
Clearly, $H_t f(x)$ is a generalization of the Gauss-Weierstrass integral (\ref{2.4}).
\begin{lemma}\label{hke}\cite{R9}{}\hfil
\noindent {\rm (i)} For each $ \; t \in \P_m$, \begin{equation}\label{ed} \int_{\fr{M}} \def\frm{\fr{m}_{n,m}} h_t (x) \,dx =1.\end{equation}
\noindent {\rm (ii)} The Fourier transform of $ \; h_t (x)$ has the form \begin{equation}\label{ft}{\Cal F} h_t(y)= \exp (-{\hbox{\rm tr}} (ty'y)), \end{equation} which implies the semi-group property \begin{equation}\label{cnv} h_t \ast h_\tau=h_{t+\tau}, \qquad t, \tau \in \P_m. \end{equation}
\noindent {\rm (iii)} If $f \in L_{p}(\fr{M}} \def\frm{\fr{m}_{n,m}), \; 1\le p \le \infty$, then \begin{equation}\label{gw} ||H_t f||_p \le ||f||_p \, , \qquad \quad H_t H_\tau f=H_{t+\tau}f, \end{equation} and \begin{equation}\label{lim}\lim\limits_{t \to 0}(H_t f)(x)=f(x) \end{equation}
in the $L_{p}$-norm. If $f$ is a continuous function vanishing at infinity, then
(\ref{lim}) holds in the $\sup$-norm. \end{lemma} \begin{theorem}\label{rrg} \cite{R9} \ Let $ \; m-1<Re \, \alpha<n-m+1$, $d=(m+1)/2$. Then \begin{equation}\label{rg} (I^\alpha f)(x) = \frac {1}{\Gamma_m(\alpha/2)} \int_{\P_m}
|t|^{\alpha/2}H_t f(x) \,d_*t, \qquad d_*t=|t|^{-d}\, dt,\end{equation} \begin{equation}\label{rgg} H_t [I^\alpha f](x) =I_{-}^{\alpha/2}[H_{(\cdot)} f(x)](t),\end{equation} provided that integrals on either side of the corresponding equality exist in the Lebesgue sense. \end{theorem}
\subsection{Composite wavelet transforms: open problems}
Formula (\ref{rg}) provokes a natural construction of the relevant
composite wavelet transform on $\fr{M}} \def\frm{\fr{m}_{n,m}$ associated with the heat
kernel and containing a $\P_m$-valued scaling parameter. To find
this construction, we first obtain an auxiliary integral
representation of a power function of the form $|t|^{\lambda -d}$, $d=(m+1)/2$.
\begin{definition} A function $w$ on $\P_m$ is said to be symmetric
if \begin{equation}\label{defs} w(g\eta g^{-1})= w (\eta) \quad \text{for all} \quad g \in
GL(m,{\Bbb R}), \quad \eta\in \P_m.\end{equation} \end{definition} Note that if $w$ is symmetric, then for any $s,t \in \P_m$, \begin{equation}\label{wsy} w(t^{1/2} s t^{1/2})= w(s^{1/2} t s^{1/2})\quad \text{and} \quad w(ts)=w(st).\end{equation} Indeed, the second equality follows from (\ref{defs}) if we set $\eta= ts, \; g=t^{-1}$. The first equality in (\ref{wsy}) is a consequence of the second one:
$$ w(t^{1/2} s t^{1/2})= w(t^{-1/2}[t^{1/2} s
t^{1/2}]t^{1/2})=w(st)=w(ts)=w(s^{1/2} t s^{1/2}).$$ \begin{lemma} Let $w$ be a symmetric function on $\P_m$ satisfying \begin{equation}
\int_{\P_m}\frac{|w(\eta)|}{|\eta|^\lambda}\, d\eta < \infty, \qquad c=\int_{\P_m}\frac{w(\eta)}{|\eta|^\lambda}\, d\eta\neq 0, \qquad
|\eta|={\hbox{\rm det}} (\eta). \end{equation} Then for $t \in \P_m$, \begin{equation}\label{gra}
|t|^{\lambda -d}=c^{-1}\, \int_{\P_m}\frac{w(a^{-1}t)}{|a|^{m+1-\lambda}}\, da, \qquad d=(m+1)/2.\end{equation} \end{lemma} \begin{proof} By (\ref{wsy}) we have (set $a=\rho^{-1}, \;da=
\rho^{-2d}d\rho$ ) \begin{eqnarray} \int_{\P_m}\frac{w(a^{-1}t)}{|a|^{m+1-\lambda}}\, da&=&\int_{\P_m}\frac{w(t^{1/2}a^{-1}t^{1/2})}{|a|^{m+1-\lambda}}\, da=\int_{\P_m}\frac{w(t^{1/2}\rho t^{1/2})}{|\rho|^{\lambda-d}}\, d_*\rho \nonumber\\
&=&|t|^{\lambda -d}\int_{\P_m}\frac{w(\eta)}{|\eta|^\lambda}\, d\eta=c\,
|t|^{\lambda -d}.\nonumber\end{eqnarray} \end{proof}
Now we replace a power function in (\ref{rg}) according to (\ref{gra}) with $\lambda=\alpha/2$. For $Re \, \alpha> (m-1)/2$, we obtain
\begin{eqnarray} (I^\alpha f)(x) &=& \frac {c^{-1}}{\Gamma_m(\alpha/2)} \int_{\P_m} H_t f(x)\, dt \int_{\P_m}\frac{w(a^{-1}t)}{|a|^{m+1-\alpha/2}}\, da\nonumber \\ &=&\frac {c^{-1}}{\Gamma_m(\alpha/2)} \int_{\P_m}\frac{d_*
a}{|a|^{d-\alpha/2}}\int_{\P_m} H_t f(x) \,w (a^{-1}t)\, dt.\nonumber \end{eqnarray} This gives \begin{equation} \label {prot}(I^\alpha f)(x) = \frac
{c^{-1}}{\Gamma_m(\alpha/2)}\int_{\P_m} {\Cal H} f(x,a)|a|^{\alpha/2} \,d_*a,\qquad Re \, \alpha> (m-1)/2,\end{equation} with \begin{equation} {\Cal H} f(x,a)=|a|^{-d}\int_{\P_m} H_t f(x)\,w (a^{-1}t)\, dt\end{equation} or, by the symmetry of $w$, after changing variable, \begin{equation} \label {hewtr}{\Cal H} f(x,a)=\int_{\P_m} H_{a^{1/2}\eta a^{1/2}} f(x) w(\eta)\, d\eta, \qquad x \in \fr{M}} \def\frm{\fr{m}_{n,m}, \quad a \in \P_m.\end{equation} Taking into account an obvious similarity between (\ref{hewtr})
and the corresponding ``rank-one" formula for $m=1$, we call ${\Cal H} f(x,a)$ the {\it composite wavelet transform of $f$ associated to the heat semigroup} $H_t$. Here $w$ is a symmetric integrable function on $\P_m$ (that will be endowed later with some cancelation properties) and $a$ is a $\P_m$-valued scaling parameter. One can replace $w$ by a more general {\it wavelet measure}, as we did in the previous sections, but here we want to minimize technicalities.
Owing to (\ref{hek}), it is natural to expect that the inverse of $I^\alpha$ has the same form (\ref{prot}) with $\alpha$ formally replaced by $-\alpha$, and the case $\alpha=0$ gives a variant of Calder\'{o}n's reproducing formula.
Thus, we encounter the following open problem:
{\bf Problem A.} {\it Give precise meaning to the inversion formula \begin{equation}\label{rghy} f(x) =c_{m,\alpha} \int_{\P_m} \frac{{\Cal H} \varphi
(x,a)}{|a|^{\alpha/2}} \,d_*a, \qquad \varphi=I^\alpha f,\end{equation} and the reproducing formula \begin{equation}\label{rghr} f(x) =c_{m} \int_{\P_m} {\Cal H} f (x,a)\, d_*a,\end{equation} say, for $ f\in L_p$ or any other ``natural" function space. Give examples of wavelet functions $w$ for which (\ref{rghy}) and (\ref{rghr}) hold. Find explicit formulas for the normalizing coefficients $c_{m,\alpha}$ and $c_{m}$, depending on $w$.}
Solution of this problem would give a series of pointwise inversion formulas for diverse Radon-like transforms on matrix spaces; see, e.g., \cite {OR2}, \cite {OR3}, \cite {R9},
where such formulas are available in terms of distributions.
Justification of (\ref{rghy}) and (\ref{rghr}) would also bring new light
to a variety of inversion formulas for Radon transforms on
Grassmannians, cf. \cite {GRu}.
\subsection{Some discussion} Trying to solve Problem A, we come across new problems that are of independent interest.
Let $Re \, \alpha>d-1, \; d=(m+1)/2$. Suppose, for instance, that $f(x), \; x \in \fr{M}} \def\frm{\fr{m}_{n,m}$, is
a Schwartz function and $w (\eta), \; \eta \in \P_m$, is ``good
enough". We anticipate the following equality:
\begin{equation}\label{ant} I_\varepsilon f(x)\equiv \int_{\varepsilon I_m}^\infty\frac{{\Cal H} [I^\alpha
f]
(x,a)}{|a|^{\alpha/2}} \,d_*a= \int_{\P_m} \Lambda_{\alpha/2}(s)\, H_{\varepsilon s} f(x)\, ds,\end{equation} where $\Lambda_{\alpha/2}(s)$ expresses through the G{\aa}rding-Gindikin integral in (\ref{3.1}) as \begin{equation}\label {ky}
\Lambda_{\alpha/2}(s)=\frac{\Gamma_m (d)}{|s|^d}\, I_+^{\alpha/2 +d} w (s), \qquad s\in \P_m.\end{equation} If $m=1$ and $\alpha/2$ is replaced by $\alpha$, then (\ref{ky}) coincides with the function $\lambda _{\alpha }(s)=s^{-1} I_+^{\alpha +1}\mu (s)$ in Lemma \ref{lB}. Now, we give the following \begin{definition} An integrable symmetric function $w$ on $\P_m$ is called an {\it admissible wavelet} if \begin{equation}\label {wad}
\Lambda_{\alpha/2}(s)\equiv \frac{\Gamma_m (d)}{|s|^d}\, I_+^{\alpha/2 +d} w (s) \in L_1 (\P_m)\quad \text{\rm and} \quad c_\alpha=\int_{\P_m}\Lambda_{\alpha/2}(s)\,ds\neq 0.\end{equation} \end{definition} If $w$ is admissible, then, by Lemma \ref {hke}, the $L_p$-limit as $\varepsilon \to 0$ of the right-hand side of (\ref{ant}) is $c_\alpha \,f$, and we are done. This discussion includes the case $\alpha=0$ corresponding to the reproducing formula.
Thus, our attempt to solve Problem A rests upon the following
{\bf Problem B.} {\it Find examples of admissible wavelets (both for $\alpha\neq 0$ and $\alpha=0$) and compute $c_\alpha$.}
Now, let us try to prove (\ref{ant}). We say ``try", because along the way, we come across one more open problem related to application of the Fubini theorem; cf. justification of interchange of the order of integration in the proof of Theorem \ref{teo:34}.
By (\ref{hewtr}) and (\ref{rgg}), \begin{eqnarray} {\Cal H} I^\alpha f(x,a)&=&\int_{\P_m} H_{a^{1/2}\eta a^{1/2}} I^\alpha f(x) \,w(\eta)\, d\eta\nonumber \\ &=&\int_{\P_m} I_{-}^{\alpha/2}[H_{(\cdot)} f(x)](a^{1/2}\eta a^{1/2})\,w(\eta)\, d\eta.\nonumber \end{eqnarray} Assume that $x$ is fixed and denote $\psi (s)=H_{s} f(x)$. Then \begin{eqnarray}
{\Cal H} I^\alpha f(x,a)&=&\int_{\P_m}w(\eta)\,I_{-}^{\alpha/2}\psi (a^{1/2}\eta a^{1/2})\,
d\eta\nonumber \\ &=& \frac {1}{\Gamma_m(\alpha/2)} \int_{\P_m}w(\eta)\,d\eta \int_{a^{1/2}\eta
a^{1/2}}^\infty \psi (s) |s-a^{1/2}\eta a^{1/2}|^{\alpha/2 -d}\,
ds\nonumber \\
&=&\frac {1}{\Gamma_m(\alpha/2)} \int_{\P_m}\psi (s)\, ds\int_0^{a^{-1/2}s a^{-1/2}}w(\eta)\,|s-a^{1/2}\eta a^{1/2}|^{\alpha/2 -d}\,d\eta\nonumber \\
&=&|a|^{\alpha/2 -d} \int_{\P_m}\psi (s)\,I_{+}^{\alpha/2}w (a^{-1/2}s a^{-1/2})\, ds. \nonumber \end{eqnarray} Hence, the left-hand side of (\ref{ant}) transforms as follows.
\begin{eqnarray} I_\varepsilon f(x)&=& \int_{\varepsilon I_m}^\infty \frac{da}{|a|^{m+1}} \int_{\P_m}\psi (s)\,I_{+}^{\alpha/2}w (a^{-1/2}s a^{-1/2})\, ds\nonumber \\
&=&\int_{\P_m}\psi (s)\,\int_{\varepsilon I_m}^\infty I_{+}^{\alpha/2}w (a^{-1/2}s a^{-1/2})\frac{da}{|a|^{m+1}} \qquad \text{\rm (set $a=\tau^{-1}$)}\nonumber \\ &=&\int_{\P_m}\psi (s)\,\int_0^{\varepsilon^{-1}I_m} I_{+}^{\alpha/2}w (\tau^{1/2}s \tau^{1/2})\, d\tau\nonumber \\ &=&\varepsilon^{md}\int_{\P_m}\psi (\varepsilon s)\, ds\int_0^{\varepsilon^{-1}I_m} I_{+}^{\alpha/2}w (\tau^{1/2}\varepsilon^{1/2}s\varepsilon^{1/2} \tau^{1/2})\, d\tau.\nonumber\end{eqnarray} Thus we have \begin{equation} I_\varepsilon f(x)=\int_{\P_m}\psi (\varepsilon s)\,k(s)\, ds=\int_{\P_m}H_{\varepsilon s} f(x) \,k(s)\, ds, \end{equation} where $$k(s)=\int_0^{I_m} I_{+}^{\alpha/2}w(\lambda^{1/2}s\lambda^{1/2})\, d\lambda.$$ To get (\ref{ant}), it remains to show that $k(s)$ coincides with the function (\ref{ky}). We have \begin{eqnarray} k(s)&=&\frac {1}{\Gamma_m(\alpha/2)}\int_0^{I_m}d\lambda \int_0^{\lambda^{1/2}s\lambda^{1/2}}
w(r) |\lambda^{1/2}s\lambda^{1/2} -r|^{\alpha/2 -d}\, dr\nonumber \\ &&\text{(set $r=\lambda^{1/2}z\lambda^{1/2}$ and note that $w(\lambda^{1/2}z\lambda^{1/2})=w(z^{1/2}\lambda z^{1/2})$)}\nonumber \\
&=&\frac {1}{\Gamma_m(\alpha/2)}\int_0^{I_m} |\lambda|^{\alpha/2}d\lambda \int_0^s
|s-z|^{\alpha/2 -d}w(z^{1/2}\lambda z^{1/2})\, dz\nonumber \\
&=&\frac {1}{\Gamma_m(\alpha/2)} \int_0^s |s-z|^{\alpha/2 -d}\, dz\int_0^{I_m}
|\lambda|^{\alpha/2}\,w(z^{1/2}\lambda z^{1/2})\,d\lambda\nonumber \\
&=&\frac {1}{\Gamma_m(\alpha/2)} \int_0^s |s-z|^{\alpha/2 -d}\,
\frac{dz}{|z|^{\alpha/2 +d}}\int_0^z w(b) |b|^{\alpha/2}\, db\nonumber \\
&=&\frac {1}{\Gamma_m(\alpha/2)} \int_0^s w(b) |b|^{\alpha/2}\,u(b,s)\, db,\nonumber \end{eqnarray} where
\begin{eqnarray} u(b,s)&=& \int_b^s |s-z|^{\alpha/2 -d}\,
\frac{dz}{|z|^{\alpha/2 +d}} \qquad \text{(set $z=r^{-1}$)}\nonumber \\
&=& \int_{s^{-1}}^{b^{-1}}|sr -I_m|^{\alpha/2 -d}\, dr=|s|^{\alpha/2
-d}\int_{s^{-1}}^{b^{-1}}|r -s^{-1}|^{\alpha/2 -d}\, dr.\nonumber \end{eqnarray} The last integral can be easily computed using the well-known formula for Siegel Beta functions \begin{equation}\int\limits_a^b |r-a|^{\alpha -d}
|b-r|^{\beta -d} dr= B_m (\alpha ,\beta) |b-a|^{\alpha+\beta -d}\end{equation} (many such formulas can be found, e.g., in \cite{OR2}), and we have \begin{equation} u(b,s)=
B_m (\alpha/2, d)\, \frac{|s-b|^{\alpha/2}}{|s|^{d}\, |b|^{\alpha/2}}, \qquad B_m (\alpha/2, d)=\frac{\Gamma_m(\alpha/2)\, \Gamma_m(d)}{\Gamma_m(\alpha/2 +d)}.\end{equation} Finally, we get
$$ k(s)=\frac{\Gamma_m(d)}{|s|^{d}\,\Gamma_m(\alpha/2 +d)} \int_0^s w(b)|s-b|^{\alpha/2}\, db=\frac{\Gamma_m(d)}{|s|^{d}}\, I^{\alpha/2 +d}_+ w(s)= \Lambda_{\alpha/2}(s).$$
{\bf Problem C.} Although all calculations above go through smoothly, interchange of the order of integration remains unjustified. We do not know how to justify it and what additional requirements on the wavelet $w$ should be imposed (if any). One of the obstacles is that $\int_0^\infty \neq \int_0^s +\int_s^\infty$, when we integrate over the higher-rank cone.
\end{document} |
\begin{document}
\begin{frontmatter}
\title{Epidemics on critical random graphs with heavy-tailed degree distribution}
\runtitle{Epidemics on critical graphs}
\begin{aug}
\author[A]{\fnms{David} \snm{Clancy, Jr. }\ead[label=e1]{djclancy@uw.edu}},
\address[A]{University of Washington, Department of Mathematics, \printead{e1}}
\end{aug}
\begin{abstract}
We study the susceptible-infected-recovered (SIR) epidemic on a random graph chosen uniformly over all graphs with certain critical, heavy-tailed degree distributions. For this model, each vertex infects all its susceptible neighbors and recovers the day after it was infected. When a single individual is initially infected, the total proportion of individuals who are eventually infected approaches zero as the size of the graph grows towards infinity. Using different scaling, we prove process level scaling limits for the number of individuals infected on day $h$ on the largest connected components of the graph. The scaling limits contain non-negative jumps corresponding to some vertices of large degree; that is, these vertices are super-spreaders. Using weak convergence techniques, we can describe the height profile of the $\alpha$-stable continuum random graph \cite{GHS.18, CKG.20}, extending results known in the Brownian case \cite{MS.19}. We also prove abstract results that can be used on other critical random graph models.
\end{abstract}
\begin{keyword}[class=MSC2020]
\kwd[Primary ]{92D30}
\kwd{60F17}
\kwd[; secondary ]{05C80}
\end{keyword}
\begin{keyword}
\kwd{configuration model}
\kwd{stable excursions}
\kwd{random graphs}
\kwd{Lamperti transform}
\kwd{SIR model}
\end{keyword}
\end{frontmatter}
\section{Introduction}
Consider the following simple susceptible-infected-recovered (SIR) model of disease spread in discrete time. On day $0$, a single individual becomes infected with a disease. On day 1, that single infected individual comes into contact with some random number (possibly zero) of non-infected individuals and transmits the disease. After transmitting the disease to others, this initial infected individual is cured and can never catch the disease again. On subsequent days each infected individual does the same thing: they come into contact with some non-infected individuals, transmit the disease but then are cured. The study of how the disease spreads over time naturally gives rise to a graph \cite{BM.90} constructed in a breadth-first order, see Figure \ref{fig:examplepicture1} for an example of a small outbreak and Figure \ref{fig:bigoutbreak1} for an example of a larger outbreak. The individuals are represented by vertices, and an edge between two vertices represents that a vertex closer to the source transmitted the disease to the other. Knowing the graph and the source tells us more information than the number of individuals infected on a particular day, it tells us the history of how the disease spread from individual to individual.
The size of the outbreak then corresponds to the size of a connected component in the graph and, more importantly for our work, the number of people infected on day $h = 0,1,\dotsm$ is just the number of vertices at distance $h$ from a root vertex corresponding to the initially infected individual. Let $Z_n(h)$ represent the number of people infected on day $h\ge 0$ when the total population is of size $n$. The process $Z_n(h)$ is just the \textit{height profile} of the component containing the initially infected individual. We are interested in describing the $n\to\infty$ scaling limits of $Z_{n}(h)$ for the macroscopic outbreaks for certain critical random graphs which exhibit a ``super-spreader'' phenomenon---that is, they possess vertices with large degree.
\begin{figure}
\caption{A small outbreak. Here, on day 0 the vertex labeled 1 is infected. The vertex 1 transmits the disease to vertices 2, 3 and 4 (in blue) who become the infected population on day 1. The vertices infected on day 1 will infect the green vertices (5 through 9) who are infected on day 2. This continues with the yellow vertices becoming infected on day 3, and the grey vertices on day 4. }
\label{fig:examplepicture1}
\end{figure}
A classical probabilistic model in this area is the so-called Reed-Frost model, where each individual comes into contact with every non-infected individual independently with probability $p$. It is not hard to see that the corresponding graph is the Erd\H{o}s-R\'{e}nyi random graph $G(n,p)$ where each edge is independently added with probability $p$. This object is well-studied, and we know that in the critical window $p = p(n) = n^{-1}+\lambda n^{-4/3}$ the size of the macroscopic outbreaks are of order $n^{2/3}$ \cite{Aldous.97}. Within this critical window each vertex has approximately Poisson(1) many neighbors, so in particular it has light tails. In turn, the process $Z_n(h)$ corresponding to the largest component has a scaling limit and that limit is a continuous process \cite{MS.19}. We stress that this is not because we are looking only at an epidemic started from a single individual. The same can be said if we infect $O(n^{1/3})$ individuals on day $0$ \cite{Clancy.20}.
To capture some super-spreading phenomena we focus mostly on the configuration model with a heavy-tailed degree distribution: $\mathbb{P}(\deg(i) = k) \sim c k^{-(2+\alpha)}$ for some $\alpha\in(1,2)$, along with some other technical assumptions dealing with criticality. The configuration model is a graph on $n$ vertices chosen randomly over all graphs with a prescribed degree sequence. See Chapter 7 of \cite{vanderHofstad.17} for an introduction to this model. We omit the case $\alpha = 2$ because this model falls within the same universality class as the critical Erd\H{o}s-R\'{e}nyi random graph $G(n,n^{-1}+\lambda n^{-4/3})$ \cite{BBSW.14,CKG.20} and so, up to some scaling factors, the structure of the processes $Z_{n}(h)$ on largest components (which correspond to the largest possible outbreaks) will be asymptotically the same as those in the Erd\H{o}s-R\'{e}nyi random graph. In the asymptotic regime we study, the largest outbreaks are of order $O(n^{\frac{\alpha}{\alpha+1}})$ and scaling limits of $Z_{n}(h)$ will possess positive jumps. These positive jumps come from presence of the super-spreading individuals.
We also restrict our focus to critical regimes. One reason is general principle that what happens at a phase transition is often interesting. Another is that while there are some important results on the structure of the largest components of the critical heavy-tailed configuration model \cite{CKG.20,Joseph.14}, there is not much information on the structure of the disease outbreaks. In this vein, there are results in the literature on the behavior of the largest outbreak when initially only a single individual is infected. While studying a model similar to ours where edges are kept with probability $p\in[0,1]$ but are otherwise deleted, the authors of \cite{BJML.07} show that there is a parameter $R_0$ such that if $R_0\le 1$ then only outbreaks of size $o(n)$ as $n\to\infty$ can occur whereas if $R_0> 1$ there is a positive probability that an outbreak of size $O(n)$ occurs as $n\to\infty$. See also \cite{MR.95,MR.98,JL.09}. A continuous time analog of that model was studied in \cite{BP.12} and there the authors show that there is a similar phase transition between outbreaks of size $o(n)$ and outbreaks which are of size $O(n)$ with positive probability. Those authors also describe some of the large $n$ behavior of $Z_n(t)$ (the number of individuals infected at a continuous time $t \ge 0$) conditionally on having an outbreak of size $O(n)$, but they do not provide information for what happens at the phase transition. We hope to fill in this gap in the literature.
\begin{figure}
\caption{A simulation of the largest outbreak on a configuration model with heavy-tailed degree distribution with $\alpha = 3/2$. This component has 735 vertices, while the entire graph has 70,000. The black node is the first vertex to be infected, and then darker shades indicate that the corresponding vertex infected earlier in the outbreak. Most of the vertices have small degree ($\le 3$); however, there are some vertices with large degree. The large red blob in the middle of the image comes from a vertex of relatively large degree, i.e. a super-spreader. We can also see that there is another super-spreader depicted just below that red blob.}
\label{fig:bigoutbreak1}
\end{figure}
\subsection{Weak convergence results}
Let us discuss a little more formally the configuration model. Before doing so, we recall that a multi-graph can have multiple edges and self-loops while a simple graph does not contain multiple edges nor self-loops. In terms of our approach to studying epidemics, self-loops and multiple edges do not make any physical sense because, for example, an infected individual cannot reinfect themself.
Given $\mathbf{d}^{n} = (d_1,\dotsm,d_n)$ a finite sequence of strictly positive integers $d_j\ge 1$, the configuration model $M(\mathbf{d}^n)$ is the random multi-graph chosen randomly over all multi-graphs $G$ on the vertex set $[n]:=\{1,\dotsm,n\}$ where the degree (counted with multiplicity) of vertex $j$ is $\deg(j) = d_j$. In order to construct such a multi-graph we need $\sum_{j=1}^n d_j$ to be even, and two algorithms for its construction will be discussed in Section \ref{sec:configuration}. We say that any such graph $G$ has degree sequence $\mathbf{d}^n$.
A priori it may not be possible to construct a simple graph with degree sequence $\mathbf{d}^n$ because, for example, a single vertex may have degree $d_i> \sum_{j\neq i} d_j$. However, if there is a simple graph with degree sequence $\mathbf{d}^n$, then conditionally on the event $\{M(\mathbf{d}^n) \text{ is simple}\}$ the graph is uniformly distributed over all simple graphs with degree sequence $\mathbf{d}^n$ \cite[Proposition 7.15]{vanderHofstad.17}. Moreover for the asymptotic regime we study it makes no difference \cite{CKG.20} whether or not we examine simple graphs or multi-graphs so we will just say ``graph.''
One aspect of randomness for the configuration model comes from taking the graph to be randomly constructed over all graphs with a fixed deterministic degree sequence. Another comes from taking the degree sequence itself to be random, say, with a common distribution $\nu$ on $\{1,2,\dotsm\}$. We then generate the graph conditionally given this degree distribution. That is we generate $M(\mathbf{d}^n)$ where $d_j$ are i.i.d. with common law $\nu$. We may have to replace $d_n$ with $d_n+1$ to obtain the proper parity; however, this does not affect the analysis \cite{CKG.20}. To distinguish between these two situations we will write $M_n(\nu)$ instead of $M(\mathbf{d}^n)$.
We focus on the degree distributions studied by Joseph \cite{Joseph.14} and Conchon-Kerjan and Goldschmidt \cite{CKG.20}:
\begin{equation}
\label{eqn:cdelta1}
\lim_{k\to\infty} k^{(\alpha+2)}\nu(k) = c\in (0,\infty),\qquad \mathbb{E}[d_1] = \delta\in (1,2),\qquad \mathbb{E}[d_1^2] = 2\mathbb{E}[d_1],
\end{equation} for some $\alpha\in (1,2)$. The third statement about the second moment and the mean imply that we are examining the random graph at criticality \cite{MR.95,MR.98,JL.09}. This means that there is no giant component, i.e. there is no single component which contains a positive proportion of the total number of vertices. Instead, there are macroscopic components which are of order $O(n^{\frac{\alpha}{\alpha+1}})$.
In order to obtain scaling limits for a height profile representing the number of people infected on day $h$, we would either need to look at the case where a significant number of individuals are infected on day zero, or focus on the largest possible outbreaks. We focus on the latter situation and hence decompose the graph $M_n(\nu)$ into its connected components $G_n^1, G_n^2,\dotsm$ where they are indexed so
\begin{equation*}
\#G_n^1\ge \#G_n^2\ge \dotsm.
\end{equation*} In order to know how a disease spreads through $G_n^i$, we need to know its source. We will start the spread from a single vertex $\rho_n^i$ chosen with probability proportional to its degree, and we will say that the component $G_n^i$ is \textit{rooted} at the vertex $\rho_n^i$.
The selection of $\rho_n^i$ is a size-biased sample and not a uniform sample, but this is for good reason. In terms of how a disease spreads through a community, vertices with higher degree have more neighbors from whom they can catch the disease and so we should expect these vertices to be infected earlier in the outbreak. This has been observed in a survey of how influenza (seasonal or the H1N1 variant) spread through Harvard in 2009 \cite{CF.10}. Researchers surveyed two sets of students twice-weekly to see when they developed flu-like symptoms. One set was a random sample of all students and the other was a sample of friends nominated by this original set. The set of friends was size-biased sample of the students at Harvard and not a uniform sample. Sometimes called ``the friendship paradox,'' this is just the observation that the average number of friends of friends is always greater than the average number of friends \cite{Feld.91}. In the study of influenza, the set of friends showed flu-like symptoms earlier than the uniform random sample. See also \cite{GHMCCF.14}.
Of course, there are only a finite number $K_n$, say, of connected components which correspond to each of the outbreaks. To simplify the presentation we set $G_n^i$ for $i>K_n$ as the graph on a single vertex with no edges and rooted at its only vertex.
The $i^\text{th}$ largest possible outbreak is then described by the process $Z_{n,i} = (Z_{n,i}(h) : h = 0,1,\dotsm)$ defined by
\begin{equation}\label{eqn:discProfile}
Z_{n,i}(h) = \#\{v\in G_n^i: \operatorname{{dist}}(\rho_n^i, v) = h\},
\end{equation} where $\operatorname{{dist}}(-,-)$ is the graph distance on $G_n^i$. In terms of the graph, $Z_{n,i}$ is the {height profile} of the component $G_{n}^i$.
Our first result is the joint convergence of the processes $Z_{n,i}$ to a time-change of some excursion processes $\tilde{e}_i = (\tilde{e}_i(t);t\ge 0)$. The processes $\tilde{e}_i$, for $i\ge 1$, are the excursions above past minima of a certain stochastic process $\tilde{X}$ obtained by an exponential tilting of a spectrally positive $\alpha$-stable processes. See Section \ref{sec:levy} for more information on these processes.
\begin{thm}\label{thm:Epidemic3}
Fix some $\alpha\in (1,2)$, and some distribution $\nu$ satisfying \eqref{eqn:cdelta1}. In the product Skorohod topology on $\mathbb{D}(\mathbb{R}_+)^\infty$, the following convergence holds
\begin{equation*}
\left( \left(n^{-\frac{1}{\alpha+1}} Z_{n,i} (\fl{n^{\frac{\alpha-1}{\alpha+1}}t});t\ge 0 \right);i\ge 1\right) \weakarrow ((Z_i(t);t\ge 0);i\ge 1),
\end{equation*} where $Z_i$ is the unique c\`adl\`ag solution to
\begin{equation*}
Z_i(t) = \tilde{e}_i\circ C_i(t) ,\qquad C_i(t) = \int_0^t Z_i(s)\,ds,\qquad \inf\{t>0: C_i(t)>0\} = 0,
\end{equation*} where $\tilde{e}_i =(\tilde{e}_i(t): t\ge 0)$ are defined in equation \eqref{eqn:defEi} and depend on the value $\alpha$, $c$ and $\delta$ in \eqref{eqn:cdelta1}.
\end{thm}
\subsection{A single macroscopic outbreak and the $\alpha$-stable graph}
More has been said about the graph components $G_{n}^i$ in the literature. Joseph \cite{Joseph.14} has argued that the size of the component $G_{n}^i$ scaled down by $n^{\frac{\alpha}{\alpha+1}}$ converges to a random variable $\zeta_i$ for each $i\ge 1$, which in fact can be seen to be $\zeta_i =\inf \{t>0: \tilde{e}_i(t) = 0\}$. Conchon-Kerjan and Goldschmidt \cite{CKG.20} generalize Joseph's results and show that the graph $G_n^i$ itself has a scaling limit which is a random rooted compact measured metric space $\mathcal{M}_i = (\mathcal{M}_i,\operatorname{d}_i,\rho_i,\mu_i)$. Here $\operatorname{d}_i$ is a metric on $\mathcal{M}_i$, $\rho_i\in\mathcal{M}_i$ is a specified element and $\mu_i$ is a finite Borel measure on $\mathcal{M}_i$.
This means that not only does the height profile (the number of infected people on day $h$) converge, as in Theorem \ref{thm:Epidemic3}, but there is some limiting continuum structure of the components $G_n^i$ which is represented by these continuum spaces $(\mathcal{M}_i;i\ge 1)$. The standard construction of these continuum limits, first obtained in the critical Erd\H{o}s-R\'{e}nyi case in \cite{ABBG.10,ABBG.12}, are constructions in a depth-first manner: the spaces are obtained by gluing together $k$ pairs of points on a continuum random tree with a depth-first selection of the pairs. This gluing procedure changes the distance from the origin and therefore it is non-trivial to argue convergence of the height profiles similar to Theorem \ref{thm:Epidemic3} from the results of \cite{CKG.20}.
The processes $\tilde{e}_i = (\tilde{e}_i(t); t\ge 0)$ in Theorem \ref{thm:Epidemic3} and the graph $\mathcal{M}_i$ are of a random length and mass, respectively. That is
\begin{equation*}
\tilde{e}_i(t) > 0 \qquad \text{if and only if} \quad t\in (0,\zeta_i)
\end{equation*} for some random $\zeta_i$ and, moreover, $\zeta_i= \mu_i(\mathcal{M}_i)$. This does complicate the analysis somewhat; however, conditionally given the values $(\zeta_i;i\ge 1)$ the excursions $\tilde{e}_i$ (resp. the spaces $\mathcal{M}_i$) are independent and are described by a scaling of an excursion (resp. metric measure space) of unit length (resp. unit mass) \cite{CKG.20}. Therefore in order to understand the scaling limit $Z_1 = (Z_1(t); t\ge 0)$ of a single macroscopic outbreak $Z_{n,1}$, we can study the structure of the process $\tilde{e}_1$ conditioned on $\zeta_1 = 1$.
To do this we let $\mathbf{e} = (\mathbf{e}(t);t\in[0,1])$ denote a standard excursion \cite{Chaumont.97} of a spectrally positive $\alpha$-stable L\'{e}vy process $X = (X(t);t\ge 0)$. To simplify our proofs, we will work with the situation where the Laplace transform of $X$ satisfies \begin{equation}\label{eqn:Adef}
\mathbb{E}\left[\exp(-\lambda X(t) ) \right] = \exp\left(A \lambda^\alpha t\right),\qquad \forall \lambda, t\ge 0,\quad \text{where}\quad
A = \frac{c \Gamma(2-\alpha)}{\delta \alpha (\alpha-1)},
\end{equation} for $c,\delta$ defined in \eqref{eqn:cdelta1}. We remark that this excursion depends on the value $A$; however, the results also hold for any value of $A$ by using scaling properties of L\'{e}vy processes and their associated height processes.
We also recall from above that $\mathcal{M}_i$ are obtained by gluing together a finite collection of pairs of points in a continuum tree. This is the surplus of the continuum random graph $\mathcal{M}_i$. We will let $\mathscr{G}^{(\alpha,k)} = (\mathscr{G}^{(\alpha,k)}, d,\rho,\mu)$ denote the graph $\mathcal{M}_1$ conditioned on $\mu_1(\mathcal{M}_1) = 1$ and having surplus $k$. A precise construction of this object will be delayed until Section \ref{sec:realGraphs}, but it suffices to say that it will be constructed from an excursion $\mathbf{e}^{(k)} = (\mathbf{e}^{(k)}(t): t\in[0,1])$ defined by the polynomial tilting
\begin{equation}\label{eqn:ktilt}
\mathbb{E}\left[ f(\mathbf{e}^{(k)};t\in[0,1]) \right] \propto \mathbb{E}\left[ \left(\int_0^1 \mathbf{e}(t)\,dt\right)^{k} f(\mathbf{e};t\in[0,1])\right].
\end{equation}
The continuum object $\mathscr{G}^{(\alpha,k)}$, in our case, represents the limiting structure of the history of the disease spread. With this, we can ask several questions in the hope that this will shed light on the structure of $M_n(\nu)$. What is the structure of the disease outbreak, i.e. what is the height profile of the graph $\mathscr{G}^{(\alpha,k)}$? When does a uniformly chosen person get infected, or when do a finite number of uniformly chosen individuals get infected? When does the outbreak die out, that is, when is the last person infected? In terms of the graph $\mathscr{G}^{(\alpha,k)}$ this is asking what is the radius of the graph $\mathscr{G}^{(\alpha,k)}$. What is the largest number of people infected at any one time or, in terms of the continuum graph, what is the distribution of the width of $\mathscr{G}^{(\alpha,k)}$? There are many more questions one could ask, but these are the questions we answer here.
We start by answering the first question: what is the height profile of the graph $\mathscr{G}^{(\alpha,k)}$, from which the others will follow by analysis of an integral equation.
\begin{thm}\label{thm:ctsAlpha}
Fix $k\ge 0$, and $\alpha\in(1,2)$. Let $\mathscr{G}^{(\alpha,k)}$ be the $\alpha$-stable continuum random graph, constructed from a spectrally positive L\'{e}vy process with Laplace exponent \eqref{eqn:Adef} and rooted at a point $\rho\in \mathscr{G}^{(\alpha,k)}$.
\begin{enumerate}
\item Let $B(x,t)$ be the closed ball of radius $t$ centered at $x$. The process $\mathbf{c} = (\mathbf{c}(t); t\ge 0)$ defined by $\mathbf{c}(t) = \mu(B(\rho,t))$ is absolutely continuous and
\begin{equation*}
\mathbf{c}(t) = \int_0^t \mathbf{z}(s)\,ds
\end{equation*} for a c\`adl\`ag process $\mathbf{z} = (\mathbf{z}(t);t\ge 0)$.
\item The process $(\mathbf{z}(t);t\ge 0) \overset{d}{=} (z(t);t\ge 0)$ where $z$ is the unique c\`adl\`ag solution to
\begin{equation*}
z(t) = \mathbf{e}^{(k)}\left( \int_0^t z(s)\,ds\right) ,\qquad \inf\left\{t>0: \int_0^t z(s)\,ds >0\right\} = 0.
\end{equation*}
\end{enumerate}
\end{thm}
We can now answer all of the other questions once we know the height profile.
\begin{cor}\label{cor:1}
\begin{enumerate}
\item The radius of the graph $\mathscr{G}^{(\alpha,k)}$ is given by
\begin{equation*}
\sup_{v\in \mathscr{G}^{(\alpha,k)}} d(\rho,v) \overset{d}{=} \int_0^1 \frac{1}{\mathbf{e}^{(k)}(s)}\,ds.
\end{equation*}
\item The width of the graph $\mathscr{G}^{(\alpha,k)}$ is given by
\begin{equation*}
\sup_{t\ge 0} \mathbf{z}(t) \overset{d}{=} \sup_{t\in[0,1]} \mathbf{e}^{(k)}(t).
\end{equation*}
\item Let $V\in \mathscr{G}^{(\alpha,k)}$ be distributed according to the mass measure $\mu$, and let $U$ denote a uniform random variable on $(0,1)$. Then
\begin{equation*}
d(\rho,V) \overset{d}{=} \int_0^U \frac{1}{\mathbf{e}^{(k)}(s)}\,ds.
\end{equation*}
\item More generally, for any $n\ge 1$, let $V_1,\dotsm, V_n$ denote random points distributed according to $\mu$ on $\mathscr{G}^{(\alpha,k)}$. Let $R_{(1)}\le R_{(2)}\le\dotsm \le R_{(n)}$ denote the order statistics of $d(\rho,V_1),\dotsm, d(\rho,V_n)$. Let $U_{(1)}\le U_{(2)}\le \dotsm \le U_{(n)}$ denote the order statistics for an i.i.d. sample of $n$ uniform random variables. Then
\begin{equation*}
\left(R_{(1)},\dotsm, R_{(n)}\right) \overset{d}{=} \left(\int_0^{U_{(1)}}\frac{1}{\mathbf{e}^{(k)}(s)}\,ds,\dotsm, \int_0^{U_{(n)}} \frac{1}{\mathbf{e}^{(k)}(s)}\,ds \right).
\end{equation*}
\end{enumerate}
\end{cor}
After discussing the integral equation involved in the statement of part 2 of Theorem \ref{thm:ctsAlpha}, which is called the Lamperti transform in the literature, we will show how Corollary \ref{cor:1} follows from Theorem \ref{thm:ctsAlpha} in Section \ref{sec:lamperti}.
\subsection{Relation to other works and proof structure}
Epidemics on random graphs are important for many areas in the applied sciences, see \cite{BBPV.05,Volz.08,VM.07,PSCMV.15} and references therein for a non-exhaustive collection of such works. One difficulty in describing the limiting behavior comes from analyzing the influence that the specific degree distribution has on the local structure of the graph.
One approach to overcoming this issue is by using a \textit{mean-field} approach \cite{BBPV.05}. A typical approximation is in continuous time where each infected vertex $v$ is infected for an exponential time, and infects its neighbors at independent exponential rates. On homogeneous networks the behavior of $Z(t)$, the number of people infected at time $t\in\mathbb{R}_+$, is modeled by the ordinary differential equation \begin{equation*}
\frac{dz(t)}{dt} = \lambda \mu z(t) (1-z(t)) \qquad \text{where}\quad z(t) = \frac{1}{n}Z(t).
\end{equation*} A more careful analysis can be done on heterogeneous networks, where one can track the proportion of vertices with degree $k$ infected at a certain time. A remarkable thing is that this approach, while losing a lot of information about the specific local structure, can be used to find heuristics on the proper scaling of the graphs or epidemic, see \cite{PSCMV.15}.
A more detailed approach to studying heterogeneous networks was taken by Volz in \cite{Volz.08}, and rigorously proved in \cite{DDMT.12} under a fifth moment condition. In these works the population of size $n$ is broken into 3 compartments---the susceptible, the infected and the recovered---and individuals in one compartment are moved to another compartment (i.e. an infected individual recovers or a susceptible individual is infected) at certain exponential rates. The global changes in the proportional size of the outbreak are described, to first order, by just the size of the respective compartments and the degree distribution. Here the limiting structure is described by a system of deterministic ordinary differential equations, which depend on the degree distribution. Deterministic limiting equations, perhaps after some random time $T_0$, were also obtained in \cite{JMW.14} under a second moment condition.
Our approach is different and takes its idea from studies of height profiles of random trees and branching processes. Particularly, we focus on the approach implicit in \cite{AMP.04}, and later studied in \cite{CPGUB.13,CPGUB.17,AUB.20}, and we use the so-called Lamperti transform. This transform was originally used for a path-by-path bijection between continuous state branching processes and a certain class of L\'{e}vy processes. The transform was originally stated by Lamperti in \cite{Lamperti.67}, but was proved later by Silverstein \cite{Silverstein.67}. See also \cite{CLU.09}.
We can describe the discretized version in our situation as follows. Instead of looking at the total number of people infected on day $h$, we look at the number of individuals that person $v_j$ infects, when $v_j$ is the $j^\text{th}$ individual who contracts the disease. This corresponds to a breadth-first ordering of the underlying connected component of the graph. Call this number of newly infected individuals $\chi_j$, and let $X$ be the breadth-first walk
\begin{equation*}
X(k) = \sum_{j=1}^k (\chi_j-1).
\end{equation*}
It was this walk on the Erd\H{o}s-R\'{e}nyi random graph that Aldous used in \cite{Aldous.97} to describe the scaling limits of the component sizes of $G(n,p)$ in the critical window, and an analogous walk was used by Joseph \cite{Joseph.14} for the configuration model.
An interesting property of this walk is that the number of people infected on day $h$ solves the difference equation
\begin{equation*}
Z(h) = Z(0) + X\left( C(h-1)\right),\qquad C(h) = \sum_{j= 0}^h Z(j).
\end{equation*} As far as the author is aware, the first instance of this identity can be found in \cite{AMP.04} with a slightly more complicated formulation. See the Introduction of \cite{CPGUB.13} for a proof of this equality.
The authors of \cite{CPGUB.13,CPGUB.17} studied the scaled convergence of solutions of the above equation (with the addition of an immigration term) to its continuum analog
\begin{equation*}
Z(t) =X\left( \int_0^t Z(s)\,ds\right).
\end{equation*}
Unfortunately, there is not a unique solution to this integral equation when $X(0) = 0$ and so proving weak convergence is quite difficult. For certain models of random trees one can prove a weak convergence result \cite{AMP.04,Kersting.11,AUB.20}, and such a result also holds for the Erd\H{o}s-R\'{e}nyi random graph when $X(0) > 0$ \cite{Clancy.20}.
We overcome the uniqueness problem by arguing that the rescaled processes $Z_{n,i}(h)$ are tight, and we further show that each subsequential weak limit must be of a particular form. This approach to overcoming the uniqueness problem was used in \cite{AUB.20} to study trees with a certain degree distribution, as opposed to graphs with a given degree distribution in the present situation. While, at first, these two discrete models may seem related, the proofs are quite different. In \cite{AUB.20}, the authors use a combinatorial transformation of the tree to show that subsequential limits must be of a particular form. We, instead, show that this follows automatically once we know the underlying graph converges to a measured metric space. In turn, in Section \ref{sec:disc} we discuss how our abstract convergence results described in Section \ref{sec:weakGen} can be applied to the rank-1 inhomogeneous model \cite{BDW.18,BDW.20,AL.98,BvdHRS.18}.
\section{General Weak Convergence Results} \label{sec:weakGen}
\subsection{General Weak Convergence Approach}
Let us now discuss the general set up for our weak convergence arguments.
In the introduction we discussed the epidemic, which can be realized as the height profile of a connected component of a random graph. Explicitly those graphs were viewed as a metric space, but we implicitly equipped them with the counting measure. We phrase our results in terms of more general measures on random graphs, which will likely be useful in inhomogeneous models in \cite{AL.98,BDW.18,BDW.20}. The epidemiological interpretation of considering non-uniform measures is not immediately clear; however, we could think of the unequal mass of vertices as measuring the size of a clique in a community which was reduced to a single vertex.
A major assumption of these results is the convergence of graphs as measured metric spaces. We delay a more detailed discussion of this topic until Section \ref{sec:ghp}. For now it suffices to say that we can equip the space $\mathfrak{X}$ of (equivalence classes) of pointed measured metric spaces with additional boundedness assumptions with a metric which turns $\mathfrak{X}$ into a Polish space. This metric is called the {Gromov-Hausdorff-Prokhorov } metric, and we will denote it by $d_{\operatorname{GHP}}$.
We will denote a generic element of $\mathfrak{X}$ as $\mathcal{M} = (\mathcal{M},\rho,\operatorname{d},\mu)$ where $(\mathcal{M},\operatorname{d})$ is a metric space such that bounded sets have compact closure, $\rho\in \mathcal{M}$ is a specified point and $\mu$ is a Borel measure on $\mathcal{M}$ such that bounded sets have finite mass. For each $\alpha>0, \beta\ge 0$ define the scaling operation by
\begin{equation*}
\operatorname{scale}(\alpha,\beta)\mathcal{M} := (\mathcal{M},\rho,\alpha\cdot \operatorname{d},\beta\cdot \mu).
\end{equation*}
Now let $G$ denote a connected graph on, say, $n$ vertices, with $\rho\in G$ a specified vertex. We view $G$ as a measured metric space with graph distance and $\mathfrak{m}$ a finite measure such that each vertex has strictly positive mass. As we did implicitly before, we explore the graph in a breadth-first manner. The precise way in which this is done can vary depending on the graph model, but we assume that the vertices are labeled by $v_1,\dotsm, v_n$ such that if $i< j$ then $d(\rho,v_i)\le d(\rho,v_j)$. This trivially implies that $\rho = v_1$. This labeling can be viewed as an indexing of each individual who gets infected, so that if person $B$ got infected after person $A$, then person $A$ has a smaller index than person $B$.
We now discuss an underlying tree structure and breadth-first walk for the graph, which draws inspiration from the breadth-first tree and walk in \cite{AL.98}. The tree is constructed by looking at which vertices $v_i$ infects in the graph $G$. More formally, we will say that vertex $v_j$ is the child of $v_i$ if $\{v_i,v_j\}$ is an edge in $G$, but $\{v_l,v_j\}$ is \textit{not} an edge for all $l<i$. This implies $i<j$. In most models with a breadth-first exploration, $v_j$ will be a child of $v_i$ if vertex $v_j$ is discovered while exploring the vertex attached to $v_i$.
We also suppose that there is some breadth-first walk $X^{\operatorname{BF}}_G$ as well:
\begin{equation}\label{eqn:BFWass1}
X^{\operatorname{BF}}_G(\tau(k)) = X_G^{{\operatorname{BF}}}(\tau(k-1)) - \mathfrak{m}(v_k) + \sum_{u \text{ child of }v_k} \mathfrak{m}(u),\qquad \tau(k) = \sum_{j\le k} \mathfrak{m}(v_j),
\end{equation} and with $X_G^{\operatorname{BF}}(0) = 0$. How the process $X^{\operatorname{BF}}_G$ behaves on the intervals $(\tau(k-1),\tau(k))$ will play no important role in this paper. The breadth-first walk used by Aldous and Limic in their classification of the multiplicative coalescence \cite{AL.98} satisfies equation \eqref{eqn:BFWass1}. Later an analogous walk satisfying \eqref{eqn:BFWass1} was used in \cite{AMP.04} to describe the inhomogeneous continuum random tree and extend Jeulin's identity \cite{JY.85}. When $\mathfrak{m}$ is a uniform measure on $G$, this walk will be the breadth-first {\L}ukasiewicz path \cite{LeGall.05}.
Importantly for us, the walk $X^{\operatorname{BF}}_G$ encodes the masses and tree structure of $v_1,\dotsm, v_n$. However there is no clean functional amenable to scaling limits which allows us to reconstruct the genealogical structure from this breadth-first walk.
We now define the height profile of $G$ by
\begin{equation*}
Z_G(h) = \mathfrak{m}\left\{v\in G: d(\rho,v) = h\right\}.
\end{equation*} It will be useful to define its cumulative sum as well:
\begin{equation*}
C_G(h) = \sum_{j=0}^h Z_{G}(j) = \mathfrak{m}\left\{ v\in G: d(\rho,v)\le h\right\}.
\end{equation*}
As observed in \cite[equations (13-14)]{AMP.04}, $Z_G(h)$ solves the following difference equation:
\begin{equation}\label{eqn:discLamperti2}
Z_G(h+1) = Z_G(0) + X_G^{\operatorname{BF}} \circ C_G(h).
\end{equation}
To describe what happens in the $n\to\infty$ limit, let $(G_n;n\ge 1)$ be a sequence of connected random graphs on a finite number of vertices, viewed as measured metric spaces where $G_n$ is equipped with the measure $\mathfrak{m}_n$. We write $X_n^{\operatorname{BF}}$ for the breadth-first walk $X_{G_n}^{\operatorname{BF}}$.
We prove the following in Section \ref{sec:proofOfMainThm}.
\begin{thm}\label{thm:conv1}
Suppose that there exists a sequence $\alpha_n\to\infty$, and that $\gamma_n:=\mathfrak{m}_n(G_n)\to\infty$ a.s. In addition assume:
\begin{enumerate}
\item In the Skorohod space $\mathbb{D}([0,1],\mathbb{R})$, the following weak convergence holds
\begin{equation*}
\left(\frac{\alpha_n}{\gamma_n} X_n^{\operatorname{BF}} (\gamma_nt) ;t\in[0,1] \right) \weakarrow \left(X(t);t\in[0,1] \right),
\end{equation*} where $X$ is a process such that almost surely $X(0) = X(1) = 0$, $X(t)>0$ for all $t\in(0,1)$ and $X(t)-X(t-)\ge 0$ for all $t$;
\item There exists a random pointed measured metric space $\mathcal{M} = (\mathcal{M},\rho,\operatorname{d},\mu)$ which is locally compact and has a boundedly finite measure such that
\begin{equation*}
\operatorname{scale}(\alpha_n^{-1},\gamma_n^{-1})G_n\weakarrow \mathcal{M},
\end{equation*} weakly in the {Gromov-Hausdorff-Prokhorov } topology.
\item $\mu(B(\rho,\varepsilon)\setminus\{\rho\})> 0$ for all $\varepsilon>0$.
\item $\frac{\alpha_n}{\gamma_n}\sup_{v\in G_n} \mathfrak{m}_n(v)\to 0$ as $n\to\infty$ in probability.
\end{enumerate}
Then
\begin{enumerate}
\item There is joint convergence in $\mathbb{D}(\mathbb{R}_+,\mathbb{R})^2$:
\begin{equation*}
\left(\left(\frac{\alpha_n}{\gamma_n} Z_n(\fl{\alpha_n t});t\ge 0\right),\left( \frac{1}{\gamma_n} C_n(\fl{\alpha_nt});t\ge 0 \right)\right) \weakarrow \left((Z(t);t\ge 0),(C(t);t\ge 0)\right)
\end{equation*} where $Z$ and $C$ are the unique c\`adl\`ag solution to
\begin{equation}\label{eqn:limit1}
Z(t) = X\circ C(t) ,\qquad C(t) = \int_0^t Z(s)\,ds,\qquad \inf\{t: C(t)>0\} = 0;
\end{equation}
\item The measure $\mu$ on $\mathcal{M}$ satisfies
\begin{equation*}
\left(\mu(B(\rho,t));t\ge 0 \right)\overset{d}{=} \left(C(t);t\ge 0\right)
\end{equation*}
\end{enumerate}
\end{thm}
Let us make some important remarks on the assumptions in Theorem \ref{thm:conv1}. Assumption (1) is the convergence of the breadth-first walk, which is required in order to have a description of the limiting process $Z$ as described above, barring some stochastic analysis tools that can be used in particular cases \cite{Pitman.99}. Assumptions (2) and (3) are how we overcome any possible uniqueness problems that were identified in \cite{AUB.20} (see Proposition \ref{prop:AUB} below). Particularly, assumption (3) allows for the classification of the limit $C$ satisfying $\inf\{t:C(t)>0\} = 0$. Lastly, assumption (4) is so that the term $Z_n(0) \weakarrow 0$ as $n\to\infty$. Without this assumption we are left to deal with a simpler situation in which we can apply the known weak convergence results in \cite{CPGUB.13}.
As the reader may guess, this formulation will not be helpful for the proof of Theorem \ref{thm:Epidemic3} nor in the study of any of the macroscopic outbreaks for random graphs. Instead, the above theorem works only with a single macroscopic component. In order to prove Theorem \ref{thm:Epidemic3} we must develop a joint convergence result where each of the macroscopic components of a graph converges to some limiting graph structure. This is something that appears quite often in the literature on continuum random graphs, dating back to the celebrated result of Addario-Berry, Broutin and Goldschmidt \cite{ABBG.12}. We now suppose that we have a sequence of graphs $(G_n;n\ge 1)$ on a finite number of vertices with a measure $\mathfrak{m}_n$. For each $n$ we denote the connected components of $G_n$ as $(G_n^i;i\ge 1)$, ordered so that
\begin{equation*}
\mathfrak{m}_n\left( G_n^1\right)\ge \mathfrak{m}_n\left(G_n^2\right)\ge\dotsm .
\end{equation*} Again, for convenience we will say that $G_n^i$ is a graph on a single vertex where the vertex has mass $0$ for all $i>K_n$. We view each of the components as a measured metric space with graph distance, and we select a vertex $\rho_n^i$ from each component to start the breadth-first walks. Here we write $X_{n,i}^{\operatorname{BF}}$ for the breadth-first walk on $G_n^i$ which, by assumption, satisfies equation \eqref{eqn:BFWass1} with the obvious notation changes. Additionally we extend it by constancy to be a function on all of $\mathbb{R}_+$:
\begin{equation*}
X_{n,i}^{\operatorname{BF}}(t) = X_{n,i}^{\operatorname{BF}}(\mathfrak{m}_n(G_n^i)) \qquad \forall t\ge \mathfrak{m}_n(G_n^i).
\end{equation*}
Let $Z_{n,i} = (Z_{n,i}(h))$ be the height profile of the $i^\text{th}$ component $G_n^i$. They solve an equation analogous to \eqref{eqn:discLamperti2} with the obvious notation change.
We prove the following
\begin{thm}\label{thm:conv2}
Suppose there exist two sequences $\alpha_n\to\infty$ and $\gamma_n\to\infty$ such that
\begin{enumerate}
\item In the product Skorohod space $\mathbb{D}^\infty$ the following weak convergence holds:
\begin{equation*}
\left(\left(\frac{\alpha_n}{\gamma_n}X_{n,i}^{\operatorname{BF}}(\gamma_nt);t\ge 0\right);i\ge 1\right)\weakarrow \left( \left(X_i(t);t\ge 0 \right);i\ge 1\right)
\end{equation*} where almost surely, $X_i$ does not possess negative jumps and there exists a $\zeta_i>0$ such that $X_i(t)>0$ if and only if $t\in(0,\zeta_i)$.
\item There exists a sequence of pointed measured metric spaces $\mathcal{M}_i = (\mathcal{M}_i,\rho_i,d_i,\mu_i)$ which are locally compact and have boundedly finite measures such that
\begin{equation*}
\left(\operatorname{scale}(\alpha_n^{-1},\gamma_n^{-1})G_n^i;i\ge 1\right) \weakarrow (\mathcal{M}_i;i\ge 1)
\end{equation*} weakly in the product {Gromov-Hausdorff-Prokhorov } topology.
\item Suppose that $\mu_i(B(\rho_i,\varepsilon)\setminus \{\rho_i\})>0$ for all $\varepsilon>0$.
\item $\frac{\alpha_n}{\gamma_n}\sup_{v\in G_n} \mathfrak{m}_n(v)\to 0$ as $n\to\infty$ in probability.
\end{enumerate}
Then
\begin{enumerate}
\item In the product Skorohod topology \begin{equation*}
\left(\left(\frac{\alpha_n}{\gamma_n} Z_{n,i}(\fl{\alpha_nt}), \frac{1}{\gamma_n}C_{n,i}(\fl{\alpha_n t}); t\ge 0\right) ;i \ge 1\right)\weakarrow \left(\left( Z_i(t),C_i(t);t\ge 0\right);i\ge 1\right),
\end{equation*} where $(Z_i,C_i)$ is the unique c\`adl\`ag solution to
\begin{equation*}
Z_i(t) = X_i\circ C_i(t),\qquad C_i(t) = \int_0^t Z_i(s)\,ds,\qquad \inf\{t: C_i(t)>0\} =0.
\end{equation*}
\item For each $i\ge 1$,
\begin{equation*}
\left(\mu_i(B(\rho_i,t));t\ge 0\right) \overset{d}{=}\left( C_i(t);t\ge 0\right).
\end{equation*}
\end{enumerate}
\end{thm}
\subsection{Compactness Corollaries}
Let us begin with the first corollary, which follows from Theorem \ref{thm:conv1} and a result in \cite{AUB.20} recalled in Proposition \ref{prop:AUB} below.
\begin{cor}\label{cor:6}
If the hypotheses of Theorem \ref{thm:conv1} are met, then
\begin{equation*}
\int_{0+} \frac{1}{X(s)}\,ds <\infty \qquad a.s.
\end{equation*}
\end{cor}
The above corollary avoids a hypothesis in Theorem 1 in \cite{AUB.20}, but this comes at the expense of assuming convergence in the {Gromov-Hausdorff-Prokhorov } topology of an underlying metric space, which is a difficult hypothesis to verify. The converse of Corollary \ref{cor:6} is interesting, because it gives a necessary condition for convergence in the {Gromov-Hausdorff-Prokhorov } topology.
For certain models of random trees and random graphs, determining compactness of the candidates for limiting metric space is difficult. This has been a particular problem for the inhomogeneous continuum random trees introduced by Aldous, Camarri and Pitman \cite{AP.00,CP.00}. These trees are characterized by a parameter $\theta = (\theta_0,\theta_1,\dotsm)$ and in \cite{AMP.04}, the authors showed that boundedness of the continuum random tree is equivalent to the almost sure finiteness of an integral $\int_{0}^{1} \frac{1}{X(s)}\,ds$. A question was posed in \cite{AMP.04} to develop useful criteria for compactness of the ICRT and determine if boundedness implied compactness. This problem was open for 16 years, but appears to be solved very recently in \cite{BlancRenaudie.20}.
It is in this vein that we state the next corollary. It is a more abstract version of part (1) of Corollary \ref{cor:1} above, and follows from part (2) in Theorem \ref{thm:conv1} and Proposition \ref{prop:AUB}.
\begin{cor}
Let $\mathcal{M}$ and $X$ be as in Theorem \ref{thm:conv1}, assume the hypotheses of Theorem \ref{thm:conv1} are met, and let $\operatorname{spt}(\mu)\subset\mathcal{M}$ denote the topological support of the measure $\mu$. Then
\begin{equation*}
\sup_{v\in \operatorname{spt}(\mu)} d(\rho,v) \overset{d}{=} \int_0^1\frac{1}{X(s)}\,ds.
\end{equation*}
\end{cor}
\section{Preliminaries}\label{sec:prelims}
\subsection{L\'{e}vy processes, height processes, excursions}\label{sec:levy}
In this section we recall the construction of $\Psi$-height processes and their excursions. For more in depth discussion on the height processes and their excursions see the works of Le Gall, Le Jan and Duquesne in \cite{DL.02,LL.98a,LL.98b}. For information about spectrally positive L\'{e}vy processes, see Bertoin's monograph \cite{Bertoin.96}.
Let $X = (X(t);t\ge 0)$ denote a spectrally positive, i.e. no negative jumps, L\'{e}vy process, and let $\Psi$ denote its Laplace exponent:
\begin{equation*}
\mathbb{E} \left[\exp\left\{-\lambda X(t)\right\} \right] = \exp\left(t\Psi(\lambda)\right).
\end{equation*} In order to discuss $\Psi$-height processes, we restrict our attention to the situation where $\Psi$ is of the form
\begin{equation*}
\Psi(\lambda) = \alpha\lambda +\beta \lambda^2 + \int_{(0,\infty)} (e^{-\lambda r} - 1+ \lambda r1_{[r<1]})\,\pi(dr),
\end{equation*} where $\alpha \ge 0$, $\beta \ge 0$, $(r\wedge r^2)\,\pi(dr)$ is a finite measure along with
\begin{equation*}
\beta>0 \qquad\text{or}\qquad \int_{(0,1)} r\pi(dr) = \infty.
\end{equation*} The last assumption occurs if and only if the paths of $X$ have infinite variation almost surely.
The $\Psi$-height process $H = (H(t);t\ge 0)$ is a way to give a measure (in a local time sense) to the set
\begin{equation}\label{eqn:heightProcess}
\{s\in[0,t]: X(s) = \inf_{s\le r\le t} X(r)\}.
\end{equation}
Slightly more formally, under the additional assumption that
\begin{equation*}
\int_{1}^\infty \frac{1}{\Psi(\lambda)}\,d\lambda < \infty,
\end{equation*} there exists a continuous process $H = (H(t);t\ge 0)$ such that for all $t\ge 0$
\begin{equation} \label{eqn:limitDefH}
H(t) = \lim_{\varepsilon\to 0} \frac{1}{\varepsilon}\int_0^t 1_{[X(s) - \inf_{r\in [s,t]} X(r)\le \varepsilon]} \,ds,
\end{equation} where the limit is in probability. See \cite{LL.98b} and \cite[Section 1.2]{DL.02} for more details. In the case where $\beta>0$, the process $H$ can be seen \cite[equation (1.7)]{DL.02} to satisfy
\begin{equation*}
H(t) = \frac{1}{\beta} \operatorname{Leb} \left\{\inf_{s\le r\le t} X(r): s\in[0,t]\right\}.
\end{equation*} In particular, when $X$ is a standard Brownian motion then
\begin{equation*}
H(t) = 2 \left( X(t) - \inf_{s\le t} X(s) \right)
\end{equation*} is twice a reflected Brownian motion.
One can also do this same procedure to the excursions of $X$. That is, if $I(t) = \inf_{s\le t} X(s)$ is the running infimum of $X$ then the process $-I$ acts as a (Markovian) local time at level $0$ for the reflected process $X-I$ \cite[Chapter IV]{Bertoin.96}. Moreover, by looking at $T(y) = \inf\{t\ge 0: I(t) < -y\}$, we can talk about the excursions of $X-I$ between times $T(y-)$ and $T(y)$. Moreover, as in \cite[Section 1.1.2]{DL.02}, it is possible to define the height process $H$ for the excursions of $X$ above its running infimum. The associated excursion measure will be denoted by $N$. To avoid confusion, we will write $(e,h)$ for $(X,H)$ under the excursion measure $N$.
\subsubsection{Stable Processes and Tilting}
We now restrict our attention to the stable case where
\begin{equation} \label{eqn:psiStable}
\Psi(\lambda) = A \lambda^\alpha, \qquad\alpha\in(1,2].
\end{equation} The process $X$ satisfies the scaling \cite{Bertoin.96}
\begin{equation*}
(X(t);t\ge 0) \overset{d}{=} \left(k^{-1/\alpha} X(kt);t\ge 0 \right),\qquad \forall k >0.
\end{equation*} Similarly, the height process $H$ satisfies the scaling
\begin{equation*}
(H(t);t\ge 0) \overset{d}{=} \left(k^{(1-\alpha)/\alpha} H(kt);t\ge 0 \right),\qquad \forall k >0,
\end{equation*} which can be derived from \eqref{eqn:limitDefH}.
\begin{remark}\label{rem:Aremark}
By scaling the L\'evy process $X$, the constant $A$ in \eqref{eqn:psiStable} can be taken to equal 1 and this is typically done in the literature. We will not do this when proving Theorems \ref{thm:Epidemic3} or \ref{thm:ctsAlpha} in order to simplify the presentation. By using scaling properties for both $X$ and $H$, it is possible to prove the results in Theorem \ref{thm:ctsAlpha} and Corollary \ref{cor:1} continue to hold when $A = 1$.
\end{remark}
As originally observed by Aldous \cite{Aldous.97}, one can encode the size of components of a random graph by a certain walk which possesses a scaling limit of the form $X(t)+f(t)$ where $X$ is a L\'evy process and $f(t)$ is a deterministic drift term. Aldous first proved this \cite{Aldous.97} within the critical window of Erd\H{o}s-R\'{e}nyi random graph where $X$ is a Brownian motion and $f$ is a quadratic function. This later extended to the $\alpha$-stable case by Joseph \cite{Joseph.14} on the configuration model where $X$ is a stable L\'{e}vy process.
For the $\alpha$-stable case $(\alpha\in (1,2))$, Conchon--Kerjan and Goldschmidt \cite{CKG.20} described the process in \cite{Joseph.14} via an exponential tilting of a L\'{e}vy process. That is, they examine an $\alpha$-stable process $X$ with Laplace exponent of the form \eqref{eqn:psiStable} and its associated height process $H$, and define $\tilde{X}$ and $\tilde{H}$ by
\begin{equation}\label{eqn:processStableTilt}
\mathbb{E}\left[F(\tilde{X},\tilde{H}; [0,t]) \right] = \mathbb{E}\left[\exp\left(-\frac{1}{\delta}\int_0^t s\,dX(s) - A \frac{t^{\alpha+1}}{(\alpha+1)\delta^\alpha} \right) F({X},{H};[0,t]) \right]
\end{equation} where $A$ is as in \eqref{eqn:psiStable} and $F$ is a function of the paths up to time $t$. The $A$ in our notation is $\frac{C_\alpha}{\delta}$ in the notation of \cite{CKG.20}.
The excursions of the process $\tilde{X}$ before time $t>0$ can be described via the absolute continuity relationship in \eqref{eqn:processStableTilt} and the excursions of $X$ prior to time $t$. What is very useful for us is that all the excursions of \begin{equation*} \tilde{R}(t) = \tilde{X}(t)-\tilde{I}(t)
\end{equation*} above zero can be ordered by decreasing length \cite[Lemma 3.5]{CKG.20}. That is, the lengths of the excursion intervals, $(\zeta_i;i\ge 1)$, can be indexed such that $\zeta_1\ge \zeta_2\ge \dotsm\ge 0$. Corresponding to the values $\zeta_i$, there is an excursion interval $(g_i,d_i)$ of length $d_i-g_i = \zeta_i$ such that $\tilde{R}(g_i) = \tilde{R}(d_i) = 0$ and $\tilde{R}(t)>0$ for all $t\in(g_i,d_i)$. We define the excursion $\tilde{e}_i = (\tilde{e}_i(t);t\ge 0)$ by
\begin{equation} \label{eqn:defEi}
\tilde{e}_i(t) = \tilde{R}((g_i+t)\wedge d_i),\qquad t\ge 0.
\end{equation} These are the excursions which appear in Theorem \ref{thm:Epidemic3}.
We also let $\tilde{h}_i = (\tilde{h}_i(t);t\ge 0)$ be the excursion of $\tilde{H}$ which straddles $(g_i,d_i)$ defined by
\begin{equation*}
\tilde{h}_i(t) = \tilde{H}\left((g_i+t)\wedge d_i\right).
\end{equation*}
\subsubsection{Normalized excursions and tilting} \label{sec:stableEx}
We now recall Chaumont's path construction of a normalized excursion of a spectrally positive $\alpha$-stable L\'{e}vy process $X$. See \cite{Chaumont.97} or \cite[Chapter VIII]{Bertoin.96} for more details on this.
This allows for a simple description of the conditioned excursion measure $N(\cdot | \zeta = x)$, for a fixed constant (deterministic) $x>0$, where $\zeta$ is the duration of the excursion. These results also hold in the Brownian case $\alpha = 2$, and we refer to Chapter XII of \cite{RY.99} for that treatment.
Define $\widehat{g}_1$ and $\widehat{d}_1$ by
\begin{equation*}
\widehat{g}_1 = \sup\{s\le 1: X(s) = I(s)\},\qquad \widehat{d}_1 = \inf\{s>1 : X(s) = I(s)\},
\end{equation*} and define
\begin{equation}\label{eqn:alphaExcursion}
\mathbf{e}(t) = \frac{1}{(\widehat{d}_1 - \widehat{g}_1)^{1/\alpha}} \left( X(\widehat{g}_1 +(\widehat{d}_1-\widehat{g}_1) t) - X(\widehat{g}_1)\right),\qquad t\in [0,1].
\end{equation}
The normalized excursion $\mathbf{e} = (\mathbf{e}(t);t\in[0,1])$ has duration $\zeta =1$, and its law is $N(\cdot | \zeta = 1)$. We obtain the law $N(\cdot | \zeta = x)$ by scaling. Namely, set
\begin{equation*}
\mathbf{e}_x(t) = x^{1/\alpha} \mathbf{e}( x^{-1} t ),\qquad t\in [0,x],
\end{equation*} and then $\mathbf{e}_x = (\mathbf{e}_x(t);t\in[0,x])$ has law $N(\cdot|\zeta = x)$.
This can also be done under the conditioning on the lifetime of the excursion of the height process $H$. See \cite{Duquesne.03} or \cite{Miermont.03} for more information. We denote $\mathbf{h} = (\mathbf{h}(t);t\in[0,1])$ as the height process under the measure $N(\cdot | \zeta = 1)$ and (by the scaling for the height process) we write
\begin{equation*}
\mathbf{h}_x (t) = x^{(\alpha-1)/\alpha} \mathbf{h}(x^{-1} t),\qquad t\in[0,x].
\end{equation*}
The normalized excursions of $\tilde{X}$ and $\tilde{H}$ are trickier to handle because the process $\tilde{X}$ does not have stationary increments. However, there is a relatively simple way of describing these in terms of an exponential tilting of the excursions $\mathbf{e}$ and $\mathbf{h}$ similar to Aldous' description in \cite{Aldous.97} in the Brownian case. We define the tilted processes, denoted by $\tilde{\mathbf{e}}^{(\delta)}_x$ and $\tilde{\mathbf{h}}^{(\delta)}_x$, by
\begin{equation}\label{eqn:tilting}
\mathbb{E}\left[F(\tilde{\mathbf{e}}_x^{(\delta)},\tilde{\mathbf{h}}^{(\delta)}_x) \right] = \frac{\mathbb{E}[\exp(\frac{1}{\delta}\int_0^x \mathbf{e}_x(t)\,dt) F({\mathbf{e}}_x,{\mathbf{h}}_x)]}{\mathbb{E}[\exp(\frac{1}{\delta}\int_0^x \mathbf{e}_x(t)\,dt)]}
\end{equation} When $x = 1$ or $\delta = 1$ we omit it from the notation. The excursions $\tilde{\e}_x^{(\delta)}$ and $\tilde{\mathbf{h}}^{(\delta)}_x$ are shown in \cite{CKG.20} to be the excursions $(\tilde{e}_i,\tilde{h}_i)$ conditioned on their duration being exactly $x$.
\begin{remark}
To clear up any confusion between $\tilde{\e}^{(\delta)}$ defined in \eqref{eqn:tilting} and $\mathbf{e}^{(k)}$ defined in \eqref{eqn:ktilt}, we note that we use the tilde $\tilde{\,}$ to denote tilting associated with an exponential tilting of an excursion. We do not include a tilde when discussing the polynomial tilting in \eqref{eqn:ktilt}.
\end{remark}
\subsection{Lamperti Transform} \label{sec:lamperti}
The Lamperti transform relates continuous state branching processes and L\'{e}vy processes via a time-change. This relationship dates back to a path-by-path relationship observed by Lamperti \cite{Lamperti.67}, although only proved later by Silverstein \cite{Silverstein.67}. More recently the authors of \cite{CPGUB.13} gave a path-by-path transformation between certain pairs of L\'{e}vy processes and continuous state branching processes with immigration. The bijective relationship was known before the path-by-path connection as well, see \cite{KW.71}.
For more information on this transformation see \cite{CLU.09} for a description in the continuum, see \cite{CPGUB.13,CPGUB.17} for scaling limits related to continuous state branching processes and their generalizations, affine processes, and see \cite{AUB.20} for scaling limits involving a similar situation of non-uniqueness of the limiting equation.
We will focus on the transform applied to excursions. Given a non-decreasing function $c:\mathbb{R}_+\to \mathbb{R}$ denote its right-hand derivative by $D_+c$, i.e.
\begin{equation*}
D_+c(t) = \lim_{\varepsilon\downarrow 0} \frac{c(t+\varepsilon)- c(t)}{\varepsilon}.
\end{equation*}
We now define the Lamperti transform and the Lamperti pair.
\begin{definition}
Given a c\`adl\`ag function $f\in \mathbb{D}(\mathbb{R}_+,\mathbb{R}_+)$ let
\begin{equation*}
\iota (t) = \int_0^t \frac{1}{f(s)}\,ds.
\end{equation*} Define the right-continuous inverse of $\iota$, denoted by $c^0$, by
\begin{equation*}
c^0(t) = \inf\{s\ge 0: \iota(s)>t\},
\end{equation*} with the convention $\inf\emptyset = \inf\{t>0:f(t) = 0\}$. The \textit{Lamperti transform} of $f$ is the function $h^0 = f\circ c^0$ and we call the pair $(h^0,c^0)$ the \textit{Lamperti pair} associated to $f$.
\end{definition}
Hopefully the choice of notating the Lamperti pair by $(h^0,c^0)$ will be clear after the statement of the next proposition, which we recall from \cite{AUB.20} while introducing a trivial scaling argument and fixing a typo:
\begin{prop}(\cite[Proposition 2]{AUB.20}) \label{prop:AUB}
Let $f\in \mathbb{D}(\mathbb{R}_+,\mathbb{R}_+)$ with non-negative jumps. Assume that $f(t) = 0$ if and only if $t\in\{0\}\cup[\zeta,\infty)$ for some $\zeta\in(0,\infty)$. Let $(h^0,c^0)$ denote the Lamperti pair associated to $f$. Then, solutions to
\begin{equation}\label{eqn:lampertiODE}
c(0) = 0, \qquad D_+ c = f\circ c
\end{equation} can be characterized as follows:
\begin{enumerate}
\item[1.] If $\displaystyle \int_{0+} \frac{1}{f(s)}\,ds = \infty$ then $h = c = 0$ is the unique solution to \eqref{eqn:lampertiODE}.
\item[2.] If $\displaystyle \int_{0+} \frac{1}{f(s)}\,ds <\infty$ then $c^0$ is not identically zero, $D_+ c^0 = h^0$, and $c^0$ solves \eqref{eqn:lampertiODE}. Furthermore, solutions to \eqref{eqn:lampertiODE} are a one-parameter family $(c^\lambda; \lambda\in [0,\infty])$ given by
\begin{equation*}
c^\lambda(t) = c^0 \left((t-\lambda)_+ \right), \qquad (x)_+ := x\vee 0.
\end{equation*}
In addition,
\begin{enumerate}
\item If $\displaystyle\int^{\zeta-} \frac{1}{f(s)}\,ds = \infty$ then $c^0$ is strictly increasing with $\lim_{t\to\infty} c^0(t) = \zeta$.
\item If $\displaystyle\int^{\zeta-} \frac{1}{f(s)}\,ds <\infty$ then $c^0$ is strictly increasing until reaching $\displaystyle\iota(\zeta) = \int_0^{\zeta} \frac{1}{f(s)}\,ds$.
\end{enumerate}
\end{enumerate}
\end{prop}
The above proposition states that all the solutions to \eqref{eqn:lampertiODE} are determined by time-shifts of the Lamperti pair associated with $f$, or are identically zero. As we will see in the sequel, a major part of the proof of Theorem \ref{thm:conv1} is showing that every subsequential (weak) limit of the $\tilde{C}_n$ is of the form $C^0$ and not a time-shift, $C^\Lambda$, of $C^0$ for some random $\Lambda$.
With Proposition \ref{prop:AUB} recalled, we can prove Corollary \ref{cor:1} from Theorem \ref{thm:ctsAlpha}.
\begin{proof}[Proof of Corollary \ref{cor:1}]
We begin by observing that
\begin{equation*}
\sup_{v\in \operatorname{spt}(\mu)} \operatorname{d}(\rho,v) \overset{d}{=}\int_0^1 \frac{1}{{\mathbf{e}}^{(k)}(s)}\,ds
\end{equation*} follows from Theorem \ref{thm:ctsAlpha} by an application of conclusion (2)(b) in Proposition \ref{prop:AUB}. To replace the support of the measure $\mu$ with the graph $\mathscr{G}^{(\alpha,k)}$ we observe that \begin{equation*}
\sup_{v\in \operatorname{spt}(\mu)} \operatorname{d}(\rho,v) = \sup_{v\in \mathscr{G}^{(\alpha,k)}} \operatorname{d}(\rho,v).
\end{equation*} Indeed, to prove this equality, observe that the leaves of the graph $\mathscr{G}^{(\alpha,k)}$ are dense in both the support of the measure $\mu$ and the graph $\mathscr{G}^{(\alpha,k)}$, which follows from analogous results for the continuum random trees \cite{Duquesne.03,DL.05} and the observation that the exponential tilting in the construction of the graphs does not change this almost sure statement.
Part (2) trivially follows from Theorem \ref{thm:ctsAlpha} and the observation that $\mathbf{c}$ increases from $0$ to $1$ as $t$ ranges from $0$ to $\infty$. We restrict the rest of our proof to part (3), the argument of which will imply part (4) with minor modifications.
We recall the well-known fact that if $X$ is a real random variable taking values in $(a,b)$ with cumulative distribution function $F$ which is strictly increasing on $(a,b)$, then $X \sim F^{-1}(U)$ where $U$ is a standard uniform random variable and $F^{-1}(y) = \inf\{t: F(t)>y\}$ is the right-continuous inverse. Typically this is stated with the left-continuous inverse of $F$; however, when $F$ is strictly increasing these two inverses agree on $(a,b)$.
Now, conditionally given $\mathscr{G}^{(\alpha,k)}$, Theorem \ref{thm:ctsAlpha} implies that
\begin{equation*}
\mathbb{P}(\operatorname{d}(\rho,V)\le t| \mathscr{G}^{(\alpha,k)}) = \mu(B(\rho,t)) = \mathbf{c}(t).
\end{equation*} Thus,
\begin{equation*}
\operatorname{d}(\rho,V) \overset{d}{=} \inf\{t: \mathbf{c}(t) > U\} ,\qquad U\sim \operatorname{Unif}(0,1).
\end{equation*}
However, the process $\mathbf{c}$ is equal in distribution to $c(t) = \int_0^t z(s)\,ds$ where $z$ is as in part (2) of Theorem \ref{thm:ctsAlpha}. It is easy to see by examining the discussion of the Lamperti transform above, that
\begin{equation*}
\mathbf{c}(t) = \inf\left\{u: \int_0^u \frac{1}{\mathbf{e}^{(k)}(s)}\,ds>t\right\} \wedge 1.
\end{equation*} See also the discussion preceding Proposition 2 in \cite{AUB.20} and Chapter 6 of \cite{EK.86} as well. The result now follows by taking another inverse.
The proof of part (4) is a trivial generalization involving order statistics.
\end{proof}
\subsection{Convergence of Metric Spaces} \label{sec:ghp} In this section we discuss how to topologize the collection of pointed measured metric spaces with some additional compactness assumptions. We start with a definition:
\begin{definition}
A collection $\mathcal{M} = (\mathcal{M},\rho,\operatorname{d},\mu)$ is a \textit{pointed measured metric} (PMM) space if $(\mathcal{M},\operatorname{d})$ is a metric space, $\mu$ is a Borel measure on $\mathcal{M}$ and $\rho\in \mathcal{M}$ is a distinguished point. We say that $\mathcal{M}$ is \textit{boundedly compact} if bounded sets are pre-compact and we say that $\mu$ is a \textit{boundedly finite} measure if bounded sets have finite mass. We say that $\mathcal{M}$ and $\mathcal{M}'$ are \textit{isomorphic} if there exists a bijective isometry $f:\mathcal{M}\to \mathcal{M}'$ such that $f(\rho) = \rho'$ and $f_{\#}\mu = \mu'$ and $f^{-1}_{\#} \mu' = \mu$. We denote the collection of all (isomorphism classes of) boundedly compact PMM spaces equipped with boundedly finite measures by $\mathfrak{X}$. Let $\mathfrak{X}_c\subset \mathfrak{X}$ consist of all compact elements of $\mathfrak{X}$.
\end{definition}
We leave a more detailed accounting of the metric space structure of $\mathfrak{X}$ to the texts \cite{ADH.13,Lei.19}. We do recall some useful properties which will be used in the sequel.
\begin{thm} (\cite{ADH.13,Lei.19})
\begin{enumerate}
\item There exists a metric $d_{\operatorname{GHP}}$ on the space $\mathfrak{X}$ which makes $(\mathfrak{X},d_{\operatorname{GHP}})$ a Polish space.
\item There exists a metric $d_{\operatorname{GHP}}^c$ on the space $\mathfrak{X}_c$ which makes $(\mathfrak{X}_c,d_{\operatorname{GHP}}^c)$ a Polish space.
\end{enumerate}
\end{thm}
We now prove the following simple lemma, which we cannot find in the existing literature. This will be used in the proof of Theorem \ref{thm:conv1}. We denote by $B(y,r)$, the closed ball of radius $r$ centered at $y$ in the appropriate metric space.
\begin{lem}\label{lem:measrueOfBalls} Let $\mathcal{M}_n = (\mathcal{M}_n,\rho_n,\operatorname{d}_n,\mu_n)$, $\mathcal{M} = (\mathcal{M},\rho,\operatorname{d},\mu)$ be random elements of $\mathfrak{X}$ such that
\begin{equation*}
\mathcal{M}_n\weakarrow \mathcal{M},\qquad\text{ as random elements of }(\mathfrak{X},d_{{\operatorname{GHP}}}),
\end{equation*} and $\mu$ on $\mathcal{M}$ is almost surely not the zero measure.
Then for all but countably many $r\in(0,\infty)$:
\begin{equation*}
\mu_n(B(\rho_n,r))\weakarrow \mu(B(\rho,r)),\qquad\text{as real numbers}.
\end{equation*}
Both convergences above can be replaced with almost sure convergence as well.
\end{lem}
We prove this by first appealing to a deterministic lemma.
\begin{lem}\label{lem:ballCont}
Let $\mathcal{M}_n\to \mathcal{M}$ in $d_{{\operatorname{GHP}}}$ and suppose that the measure $\mu$ on $\mathcal{M}$ is not the zero measure. Let $r$ be a radius such that
\begin{equation*}
\mu\left\{x\in \mathcal{M}: \operatorname{d}(\rho,x) = r\right\} = 0.
\end{equation*}
Then
\begin{equation*}
\mu_n(B(\rho_n,r))\to \mu(B(\rho,r)).
\end{equation*}
\end{lem}
\begin{proof}
By Theorem 3.16 in \cite{Lei.19}, it suffices to consider the compact case where $\mathcal{M}_n,\mathcal{M}\in \mathfrak{X}_c$ and $\mathcal{M}_n\to \mathcal{M}$ with respect to the $d_{{\operatorname{GHP}}}^c$ metric and that $r = \sup_{x\in \mathcal{M}} \operatorname{d}(\rho,x)$.
We recall that, for metric spaces $X$ and $Y$, a function $f:X\to Y$ is an $\varepsilon$-isometry if $f$ is measurable and
\begin{equation*}
\sup\{|d(x_1,x_2) - d(f(x_1),f(x_2))| :x_1,x_2\in X\}\le \varepsilon
\end{equation*} and
for all $y\in Y$ there exists some $x\in X$ such that $d(y,f(x))<\varepsilon$.
By Theorem 3.18 in \cite{Lei.19}, there exists a sequence $\varepsilon_n\to 0$ and a sequence of functions $f^n:\mathcal{M}_n\to \mathcal{M}$ such that $f^n$ is an $\varepsilon_n$-isometry and such that
\begin{equation*}
f_\#^n\mu_n \to \mu,
\end{equation*} with respect to the weak-* topology of measures on $\mathcal{M}$, that is, convergence of the integrals against compactly supported continuous functions. However, $1_{\mathcal{M}}$ is continuous and compactly supported since $\mathcal{M}$ is compact. So the following convergence holds because of the convergence in the weak-* topology:
\begin{equation*}
\begin{split}
\mu_n(\mathcal{M}_n) &= \int_{\mathcal{M}_n} 1_{\mathcal{M}_n} \,d\mu_n = \int_{\mathcal{M}_n} 1_{\mathcal{M}}\circ f^n(x)\,\mu_n(dx) \\
&= \int_\mathcal{M} 1_{\mathcal{M}} (x) (f_\#^n\mu_n)(dx)\to \int_\mathcal{M} 1_{\mathcal{M}}\,d\mu = \mu(\mathcal{M}).
\end{split}
\end{equation*}
Therefore, there is no loss in generality in assuming that the measures $\mu_n$ and $\mu$ are probability measures, since we can just rescale the measures by their (non-zero) total mass. Since weak-* convergence of probability measures on a compact space is simply weak convergence of probability measures, the desired convergence holds by Portmanteau.
\end{proof}
\begin{proof}[Proof of Lemma \ref{lem:measrueOfBalls}]
By Lemma \ref{lem:ballCont}, we have shown that the map $\Phi_r:\mathfrak{X}\to \mathbb{R}$ by
\begin{equation*}
\Phi_r(\mathcal{M}) = \mu(B(\rho,r))
\end{equation*} is continuous at each $\mathcal{M}$ such that $\mu(\{x\in \mathcal{M}: \operatorname{d}(\rho,x) = r\}) = 0$.
Now given a random element $\mathcal{M}$ with law $\mathbb{P}$, we just need to show
\begin{equation*}
\left\{r: \mathbb{P}\left[\mu(\{x\in \mathcal{M}: \operatorname{d}(\rho,x) = r\})> 0 \right]>0\right\} \quad\text{is countable}.
\end{equation*} This follows from the same argument that random processes in $\mathbb{D}(\mathbb{R}_+,\mathbb{R})$ cannot have uncountably many jump-times which occur with strictly positive probability. The proof of that latter statement can be found in Section 13 of \cite{Billingsley.99}, but is omitted here.
\end{proof}
\subsection{Continuum random trees and continuum random graphs} \label{sec:realGraphs}
In this section we briefly recall the definition of continuum random trees and continuum random graphs. This will not be a full description of what these metric spaces are, but will be enough to define the metric spaces we use in the sequel. For a more abstract account of these metric spaces see Section 2.2 of \cite{ABBGM.17}, for example.
We briefly describe a real tree encoded by a continuous function $h$, see \cite{LeGall.05} and references therein for more information. Let $h:[0,x]\to [0,\infty)$ be a continuous function such that $h(0) = h(x) = 0$ and $h(t)>0$ for all $t\in (0,x)$. We can define a pseudo-distance $d_h$ on $[0,x]$ by
\begin{equation*}
d_h(s,t) = h(s)+h(t) - 2 \inf_{s\wedge t\le r \le s\vee t} h(r).
\end{equation*} We then define an equivalence relation by $s\sim_h t$ if $d_h(s,t) = 0$. The real tree $\mathscr{T}_h$ is defined as the quotient space
\begin{equation*}
\mathscr{T}_h = [0,x]/\sim_h.
\end{equation*} We let $q_h:[0,x]\to \mathscr{T}_h$ denote the canonical quotient map. The topological space $\mathscr{T}_h$ can be made into a PMM by setting the specified point as $\rho:= q_h(0) = q_h(x)$, the distance as $d(q_h(s),q_h(t)) = d_h(s,t)$ which is a well-defined metric and the measure as $\mu = (q_h)_{\#}\operatorname{Leb}|_{[0,x]}$. We call $\mathscr{T}_h$ the tree encoded by the function $h$, and call $h$ the height function (or process) of $\mathscr{T}_h$.
The spaces $\mathscr{T}_h$ are tree-like in the sense that given any two elements $a,b\in \mathscr{T}_h$, there exists a unique isometry $f_{a,b}:[0,d(a,b)]\to \mathscr{T}_h$ such that $f_{a,b}(0) = a$ and $f_{a,b}(d(a,b)) = b$ and every continuous injection $f:[0,1]\to \mathscr{T}_h$ such that $f(0) = a$ and $f(1) = b$ is a reparametrization of $f_{a,b}$.
Let us now describe how to add \textit{shortcuts} to the tree $\mathscr{T}_h$ in order to form a graph-like metric space. We fix a c\`adl\`ag function $g:[0,x]\to [0,\infty)$ such that $g(0) = g(x) = 0$ and $g$ doesn't jump downwards, i.e. $g(t)-g(t-)\ge 0$ for all $t$. We also suppose that we have a finite set $Q = \{(t_j,y_j):j=1,\dotsm, s\}$ of points in $[0,x]\times \mathbb{R}_+$ such that $y_j\le g(t_j)$. For each of these values $t_j$, we define the value $\tilde{t}_j$ by
\begin{equation*}
\tilde{t}_j = \inf\{u\ge t_j: g(u) = y_j\}.
\end{equation*} The infimum above is taken over a non-empty set because $g$ does not jump downwards. These times $t_j$ and $\tilde{t}_j$ will come to represent the points in the tree that are glued together.
Let us now go back into the continuum tree $\mathscr{T}_h$. Define the vertices $u_j = q_h(t_j)$ and $v_j = q_h(\tilde{t}_j)$ where $q_h$ is the canonical quotient map. We define a new equivalence relation $\sim$ on $\mathscr{T}_h$ which depends on both $g$ and $Q$ by setting $u_j\sim v_j$ for each $j = 1,2,\dotsm,s$. We define the set
\begin{equation*}
\mathscr{G}(h,g,Q) = \mathscr{T}_h /\sim.
\end{equation*}
It is straightforward to turn $\mathscr{G}(h,g,Q)$ into a PMM in which the distance between $u_j$ and $v_j$ is zero.
In the description of the construction of the graph $\mathscr{G}(h,g,Q)$ it is easier to consider only the case where $Q$ consists of points $(t,y)$ such that $0\le y\le g(t)$ and $t\in[0,x]$. We can equally as well consider the situation where $Q$ is a discrete set in $\mathbb{R}_+\times \mathbb{R}_+$ with finitely many elements in any compact set, and define
\begin{equation*}
\mathscr{G}(h,g,Q) = \mathscr{G}(h,g,g\cap Q),\quad\text{where}\quad g\cap Q = \{(t,y)\in Q: y\le g(t)\}.
\end{equation*}
Let us now describe the graphs $\mathscr{G}^{(\alpha,k)}$ that we mentioned in the introduction. See \cite{GHS.18, CKG.20} for more information on these graphs. We first define the $\alpha$-stable graph $\mathscr{G}^{(\alpha)}$ where we let the surplus be a random non-negative integer.
The graphs $\mathscr{G}^{(\alpha)}$ are the graphs $\mathscr{G}(\tilde{\mathbf{h}}^{(1)}_1,\tilde{\e}^{(1)}_1,\mathscr{P})$ for a Poisson random measure $\mathscr{P}$ on $\mathbb{R}_+^2$ with Lebesgue intensity. The Poisson point process $\mathscr{P}$ has only a finite number of points $(t,y)$ such that $0\le y\le \tilde{\e}^{(1)}_1(t)$, and this is the surplus of the random graph $\mathscr{G}^{(\alpha)}$. The graph $\mathscr{G}^{(\alpha,k)}$ is just the graph $\mathscr{G}^{(\alpha)}$ conditioned on having fixed surplus $k$. This conditioning on the number of points of $\mathscr{P}$ which lie under the curve $\tilde{\e}^{(1)}_1$ changes the exponential tilting in \eqref{eqn:tilting} to the polynomial tilting in \eqref{eqn:ktilt}.
For more information on random trees and graphs see \cite{Aldous.91,Aldous.91a,Aldous.93, LeGall.05} for the Brownian CRT, see \cite{Miermont.03,Duquesne.03,DL.02} for the stable-trees, see \cite{ABBG.12,BBSW.14} for the Brownian random graph and \cite{CKG.20,GHS.18} for the stable graph.
\section{Proofs of Weak Convergence Results} \label{sec:proofOfMainThm}
We now turn our attention to proving the abstract weak convergence results: Theorems \ref{thm:conv1} and \ref{thm:conv2}. To simplify the notation in the proof of Theorem \ref{thm:conv1}, we write $X_n(\cdot) = Z_n(0) + X_n^{\operatorname{BF}}(\cdot)$. By assumption (4) in Theorem \ref{thm:conv1}, $Z_n(0)\to 0$ in probability and so assumption (1) in Theorem \ref{thm:conv1} holds with $X_n$ replacing $X_n^{\operatorname{BF}}$ by Slutsky's theorem. Moreover, changing \eqref{eqn:discLamperti2} to match this notation, the process $Z_n$ solves
\begin{equation*}
Z_n(h+1) = X_{n}\circ C_n(h),\qquad C_n(h) = \sum_{j=0}^h Z_n(j), \qquad C_n(-1) = 0.
\end{equation*}
We define the rescalings:
\begin{equation*}
\begin{split}
\tilde{Z}_n(t) &= \frac{\alpha_n}{\gamma_n} Z_n(\fl{\alpha_nt})\\
\tilde{C}_n(t) &= \frac{1}{\gamma_n} C_n(\fl{\alpha_nt}) \\
\tilde{X}_n(t) &= \frac{\alpha_n}{\gamma_n}X_n(\gamma_nt).
\end{split}
\end{equation*}
We begin by proving the tightness of $\tilde{X}_n$ and $\tilde{C}_n$.
\begin{prop}\label{prop:jointCX}
Under the assumptions of Theorem \ref{thm:conv1}, and the above notation, the sequence $((\tilde{C}_n,\tilde{X}_n);n\ge 1)$ is tight in $\mathbb{D}(\mathbb{R}_+,\mathbb{R})^2$. Moreover, any subsequential limit of $(\tilde{C}_n,\tilde{X}_n;n\ge 1)$, say $(C,X)$, must satisfy \begin{equation*}C(t) = \int_0^t X\circ C(s)\,ds.\end{equation*}
\end{prop}
\begin{proof}
We alter the proof of Proposition 7 in \cite{AUB.20}. That proof involves a linear interpolation of $C_n$ instead, which makes their proof slightly simpler. The differences are easily overcome using compactness results in Billingsley's monograph \cite{Billingsley.99}.
Because tightness of marginals implies tightness of the pair of random elements, in order to show the tightness claimed, it suffices to show that $(\tilde{C}_n;n\ge 1)$ is tight, since we assume that $\tilde{X}_n$ converges weakly and is therefore tight.
Towards this end, observe that $\tilde{C}_n$ is uniformly bounded:
\begin{equation}\label{eqn:uniformlyBounded}
0\le \tilde{C}_n(t) \le \frac{1}{\gamma_n} \mathfrak{m}_n(G_n) =1.
\end{equation}
We now set $t>s$. We have
\begin{align*}
\tilde{C}_n(t) - \tilde{C}_n(s) &= \frac{1}{\gamma_n} (C_{n}(\fl{\alpha_nt}) - C_n(\fl{\alpha_ns}))\\
&= \frac{1}{\gamma_n} \sum_{h=\fl{\alpha_ns}+1}^{\fl{\alpha_nt}} Z_n(h)\\
&\le \frac{1}{\gamma_n} \int_{\alpha_ns-1}^{\alpha_nt+1} Z_n(\fl{u})\,du\\
&= \frac{1}{\gamma_n} \int_{\alpha_ns-1}^{\alpha_nt+1} X_n\circ C_n(\fl{u})\,du\\
&= \frac{\alpha_n}{\gamma_n} \int_{s-\alpha_n^{-1}}^{t+\alpha_n^{-1}} X_n\circ C_n(\fl{\alpha_nu})\,du\\
&\le (t-s+2\alpha_n^{-1}) \|\tilde{X}_n\|,
\end{align*} where
\begin{equation*}
\|f\| = \sup_{t\in[0,1]} |f(t)|.
\end{equation*}
Define the functions
\begin{equation*}
w(f; I):= \sup_{s,t\in I} |f(t)-f(s)|,\qquad f\in \mathbb{D}(\mathbb{R}_+), I\subset \mathbb{R},
\end{equation*}
and, for $\delta>0$,
\begin{equation*}
w_N'(f;\delta):= \inf_{\{t_i\}} \max_{1\le i\le v} w(f;[t_{i-1},t_i)), \qquad N = 1,2,\dotsm
\end{equation*} where the infimum is taken over all partitions $0= t_0< t_1<\dotsm < t_v= N$ such that $t_i-t_{i-1}>\delta$ for $1\le i< v$.
From the above string of inequalities, for any integer $N>0$ and any $\delta>0$, we have
\begin{equation*}
w'_N(\tilde{C}_n;\delta)\le 2\left(\delta+\alpha_n^{-1} \right) \|\tilde{X}_n\|, \qquad \forall n\ge 1.
\end{equation*} Moreover, for any fixed $\delta>0$, there exists an $n_0$ sufficiently large such that
\begin{equation*}
2(\delta+ \alpha_n^{-1}) \|\tilde{X}_n\| \le 4 \delta \|\tilde{X}_n\|,\qquad \forall n\ge n_0,
\end{equation*} since $\alpha_n\to\infty$.
Fix $\varepsilon>0$ and an integer $N$. Applying Theorem 13.2 in \cite{Billingsley.99} gives
\begin{align*}
\lim_{\delta\downarrow 0}\limsup_{n\to\infty} \mathbb{P}\left(w_N'(\tilde{C}_n;\delta) \ge \varepsilon \right) \le \lim_{\delta\downarrow 0} \limsup_{n\to\infty} \mathbb{P} \left(\|\tilde{X}_n\| \ge \frac{\varepsilon}{4\delta} \right) = 0.
\end{align*} Hence, by Theorem 16.8 in \cite{Billingsley.99}, the process $\tilde{C}_n$ is tight.
The statement about the form of the subsequential weak limits follows as in the proof of Proposition 7 in \cite{AUB.20} with little alteration.
\end{proof}
\subsection{Proofs of Theorems \ref{thm:conv1} and \ref{thm:conv2}}
We now move to describe more accurately the possible subsequential limits in Proposition \ref{prop:jointCX}.
By Proposition \ref{prop:AUB} and Proposition \ref{prop:jointCX}, the subsequential limits $(\tilde{C}_{n_\ell},\tilde{X}_{n_\ell}) \weakarrow (C,X)$ must be of the form $C(t) = C^0((t-\Lambda)_+)$ for some (random) $\Lambda:= \inf\{t: C(t)>0\}\in[0,\infty]$ where $C^0$ is the Lamperti transform of $X$. We desire to show that $\Lambda =0$ almost surely.
By the Skorokhod representation theorem and by possibly taking a further subsequence, we can assume that we are working on a probability space such that both
\begin{equation*}
\operatorname{scale}(\alpha_{n_\ell}^{-1},\gamma_{n_\ell}^{-1})G_{n_\ell}\to \mathcal{M},\qquad \text{and}\qquad (\tilde{C}_{n_\ell},\tilde{X}_{n_\ell}) \to (C , X)
\end{equation*} occur almost surely in their respective topologies: the first convergence is with respect to the pointed {Gromov-Hausdorff-Prokhorov } topology and the second convergence is with respect to the product topology on $\mathbb{D}\times\mathbb{D}$. We write $\tilde{G}_{n_\ell} = (\tilde{G}_{n_\ell}, \rho_{n_\ell}, \tilde{d}, \tilde{\mathfrak{m}}_{n_\ell})$ for $\operatorname{scale}(\alpha_{n_\ell}^{-1},\gamma_{n_\ell}^{-1})G_{n_\ell}$. By Lemma \ref{lem:measrueOfBalls}, we have for all but countably many $t>0$,
\begin{equation*}
\begin{split}
\tilde{C}_{n_\ell}(t) &= \frac{1}{\gamma_{n_\ell}} \mathfrak{m}_{n_\ell}\{v\in G_{n_\ell}: d(v,\rho_{n_\ell})\le \alpha_{n_\ell} t\} \\
&= \tilde{\mathfrak{m}}_{n_\ell}\left(\{v\in \tilde{G}_{n_\ell}: \tilde{d}(v,\rho_{n_\ell}) \le t\} \right) \longrightarrow \mu(\{x\in \mathcal{M}: d(\rho,x)\le t\}).
\end{split}
\end{equation*} Similarly, by the convergence of $\tilde{C}_{n_\ell}$ in $\mathbb{D}$ we have for all but countably many $t>0$
\begin{equation*}
\tilde{C}_{n_\ell}(t) \rightarrow C(t).
\end{equation*} By a standard diagonalization argument there exists a sequence $t_{m}\downarrow 0$ such that
\begin{equation*}
C(t_{m}) = \mu \left( \bar{B}(\rho,t_{m})\right)> 0.
\end{equation*} The inequality follows from Assumption (3) in Theorem \ref{thm:conv1}.
Hence
\begin{equation*}
\Lambda = \inf\{t: C(t)>0\} = 0.
\end{equation*}
Therefore every subsequential weak limit for $(\tilde{C}_n,\tilde{X}_n)$ must be of the form $(C^0,X)$ where $C^0$ is part of the Lamperti pair associated with $X$. Along with looking at Proposition \ref{prop:AUB}, we have proved the following:
\begin{prop}\label{prop:jointCXwithLimit}
Let $(Z^0,C^0)$ be the Lamperti pair of $X$. Then, under the assumptions of Theorem \ref{thm:conv1}, the following weak convergence holds
\begin{equation*}
\left(\tilde{C}_n,\tilde{X}_n\right) \Longrightarrow \left(C^0,X\right),
\end{equation*} in the product Skorokhod space $\mathbb{D}\times\mathbb{D}$.
Moreover, since $C^0$ is not identically zero $X$ must satisfy
\begin{equation*}
\int_{0+} \frac{1}{X(s)}\,ds <\infty.
\end{equation*}
\end{prop}
We now finish the proof of Theorem \ref{thm:conv1}.
\begin{proof}[Proof of Theorem \ref{thm:conv1}]
The proof of Proposition \ref{prop:jointCXwithLimit} gives the proof of conclusion (2) of Theorem \ref{thm:conv1}, and so we finish the proof of part (1).
By Proposition \ref{prop:jointCX} and Proposition \ref{prop:jointCXwithLimit}, and the Skorohod representation theorem, we can assume that we are working on a probability space such that
\begin{equation*}
\left(\tilde{C}_n,\tilde{X}_n\right) \longrightarrow (C^0,X)\qquad \text{a.s.}
\end{equation*}
Then, by a result of Wu \cite[Theorem 1.2]{Wu.08}, which extends a result of Whitt \cite{Whitt.80}, the following convergence holds in $\mathbb{D}$:
\begin{equation*}
\tilde{Z}_n = \tilde{X}_n\circ \tilde{C}_n \longrightarrow X\circ C^0 \qquad \text{a.s.}
\end{equation*}
Using Proposition \ref{prop:AUB} part (2), we observe that
\begin{equation*}
Z^0:=X\circ C^0 = D_+ C^0.
\end{equation*} That is $(Z^0,C^0)$ is the Lamperti pair associated with $X$ and in $\mathbb{D}^2$
\begin{equation*}
(\tilde{Z}_n,\tilde{C}_n) \weakarrow (Z^0,C^0).
\end{equation*}
\end{proof}
The proof of Theorem \ref{thm:conv1} can be easily extended to joint convergence of finitely many graphs $G_n^i$ with random masses $\mathfrak{m}_n(G_n^i)$ as $n\to\infty$. Since the graphs $G_n^{i}$ are ordered by decreasing mass, the excursion lengths also decrease: $\zeta_1\ge \zeta_2\ge \dotsm$. The only part that changes is that \eqref{eqn:uniformlyBounded} is replaced with an analogous tightness bound on \begin{equation*}
\max_{j \le N} \frac{1}{\gamma_n} \mathfrak{m}_n(G_n^j) =
\frac{1}{\gamma_n} \mathfrak{m}_n(G_n^1)
\end{equation*} This will yield a proof of Theorem \ref{thm:conv2}. The details are omitted.
\section{The Configuration Model}
In this section we focus on the applications to the configuration model when one specifies a critical degree distribution $\nu$ in the domain of attraction of a stable law. We will focus on the case $\alpha\in(1,2)$, although the Brownian case $\alpha=2$ can be obtained by these methods. The results can easily be altered to cover the case $\alpha = 2$ as well, by instead considering the case where $\nu$ has finite third moment at the critical point (see the definition of $\theta$ in \eqref{eqn:thetaDef} below) and omitting the cases $\nu(2)<1$ and $\nu(0)> 0$.
We will be using the results of Joseph \cite{Joseph.14} and Conchon-Kerjan and Goldschmidt \cite{CKG.20} on scaling limits related to the configuration model. The latter reference provides a metric space scaling limit for the components of the graph at the point of criticality $\theta = 1$, where $\theta$ is defined in \eqref{eqn:thetaDef}. This allows us to utilize Theorem \ref{thm:conv2}. Similar results in the $\alpha=2$ case were obtained prior to Joseph, see Riordan's work \cite{Riordan.12} and also \cite{BS.20}.
\subsection{Preliminaries: The configuration model and convergence}\label{sec:configuration}
Let us describe briefly the configuration model, some of the associated walks on the graphs, and their scaling limits. For a more detailed account of the configuration model, see Chapter 7 of \cite{vanderHofstad.17}.
The multigraph $M(\mathbf{d}^n)$ is a random graph on vertex set $(i;i\in[n])$ where the vertex $i$ has degree (counted with multiplicity) $d_i$. We can construct this graph by viewing the vertices $i$ as hubs with $d_i$ \textit{half-edges} jutting out from the vertex $i$. We then pair half-edges uniformly at random to create a multigraph. Given a multigraph $G$, we have \cite[Prop 7.7]{vanderHofstad.17}
\begin{equation*}
\mathbb{P}(M(\mathbf{d}^n) = G) = \frac{1}{\left( -1+\sum_{j=1}^n d_j \right)!!}\times \frac{ \prod_{j=1}^n d_j!}{ \prod_{i=1}^n 2^{\operatorname{loop}(i)}\operatorname{loop}(i)! \times \prod_{1\le i<j\le n} \operatorname{mult}(i,j)! },
\end{equation*} where $\operatorname{loop}(i)$ is the number of self-loops at vertex $i$ and $\operatorname{mult}(i,j)$ is the number of edges between $i$ and $j$. Below we describe two different algorithms for how to construct the multigraph and describe associated walks. It is also convenient to assume that the $d_i$ half-edges connected to a vertex $i$ are ordered, so that we can talk about the ``least'' half-edge. We remark that the random construction described above starts from a deterministic degree sequence $\mathbf{d}^n$; later on we will take the vertex degrees to be random.
We describe two algorithms for the construction in a manner quite similar to Joseph \cite{Joseph.14}. We partition the $\sum_{j=1}^n d_j$ half-edges into three disjoint subsets: the set $\mathcal{S}$ of sleeping half-edges, the set $\mathcal{A}$ of active half-edges, and the set $\mathcal{D}$ of dead half-edges. We call the set $\mathcal{S}\cup \mathcal{A}$ the collection of alive half-edges. Initially all half-edges are sleeping.
\subsubsection{Breadth-first construction}
We construct a graph $M^{\operatorname{BF}}(\mathbf{d}^n)$ (we initially include a ${\operatorname{BF}}$ to specify the construction) as follows:
To initialize at step 1, we pick a sleeping half-edge uniformly at random. Label the corresponding vertex as $v_1$ and declare all of the half-edges attached to $v_1$ as active.
Suppose that we have just finished step $j$. There are three possibilities: (1) $\mathcal{A} \neq \emptyset$, (2) $\mathcal{A} = \emptyset$ and $\mathcal{S}\neq \emptyset$, (3) all half-edges are dead.
In case 1, we proceed as follows:
\begin{enumerate}
\item Let $i$ be the \textbf{smallest} integer $k$ such that there exists an active half-edge attached to $v_k$.
\item Pick the least half-edge $l$ from all active half-edges attached to $v_i$.
\item Kill $l$, that is, remove it from $\mathcal{A}$ and add it to $\mathcal{D}$.
\item Choose a half-edge $r$ uniformly at random from all alive half-edges and pair it with $l$, that is, add an edge between the vertex $v_i$ (which is attached to $l$) and the vertex attached to $r$.
\item If $r$ is sleeping, then we have discovered a new vertex. Label this new vertex $v_{m+1}$, where $v_{1},\dotsm, v_m$ are the vertices discovered up to this point. Declare all the half-edges of $v_{m+1}$ as active.
\item Kill $r$.
\end{enumerate}
In case 2, we have finished exploring a connected component of $M^{\operatorname{BF}}(\mathbf{d}^n)$. We proceed by picking a sleeping half-edge uniformly at random. We then label the corresponding vertex $v_{m+1}$ if we've discovered vertices $v_1,\dotsm, v_m$ up to this point, and we declare all the half-edges connected to $v_{m+1}$ as active.
In case 3, we have explored the entire graph and we are done.
The above is the breadth-first construction of the multigraph $M^{\operatorname{BF}}(\mathbf{d}^n)$. In the sequel, we denote the ordering of the vertices in the exploration/construction above by $v_1^{\operatorname{BF}},\dotsm, v_n^{\operatorname{BF}}$.
\begin{remark}
While the above algorithm gives a breadth-first construction of the graph $M(\mathbf{d}^n)$, observe that it can also be used to explore the graph $M(\mathbf{d}^n)$. Indeed, if in step (4) we selected the half-edge $r$ which is connected to $l$ instead of sampling it uniformly, then we would have explored the graph and obtained an ordering of the vertices that is equal in distribution.
\end{remark}
\begin{figure}
\caption{Left: The initial collection of 11 vertices with half-edges appearing from the center. Right: The structure of the breadth-first constructed graph after initially selecting a half-edge connected to vertex $11$. The edges were added in this order: $\{11,1\}, \{11,2\}, \{1,4\}, \{1,10\}, \{2,2\}$. The next half-edge to be explored is the remaining half-edge jutting out from vertex 2.}
\end{figure}
In case 1 above, it is possible that we match two half-edges $l$ with $r$ where $r$ is already active. We call the corresponding edge in the multigraph $M^{\operatorname{BF}}(\mathbf{d}^n)$ a \textit{breadth-first (bf) backedge}.
We let $F^{\operatorname{BF}}(\mathbf{d}^n)$ denote the forest constructed from the multigraph $M^{\operatorname{BF}}(\mathbf{d}^n)$ obtained by splitting all bf backedges into two half-edges and adding two leaves to each of these half-edges. More formally, if the multigraph $M^{\operatorname{BF}}(\mathbf{d}^n)$ has a bf backedge between vertices $v_l, v_r$, remove that edge from the multigraph, add two vertices $v_l'$ and $v_r'$, and add an edge between both pairs $(v_l,v_l')$ and $(v_r,v_r')$. Continue this until all bf backedges are removed and replaced.
\begin{remark} \label{rem:forestlabel}
This algorithm can also be used to mark where the new leaves occur within a breadth-first exploration of the forest $F^{\operatorname{BF}}(\mathbf{d}^n)$. When we first find a bf backedge between half-edges $l$ and $r$ in $M^{\operatorname{BF}}(\mathbf{d}^n)$ we replace it with two new leaves. Then as we are exploring the half-edge $l$, we find a new leaf and do not ``see'' the half-edge $r$. This means we do not kill that half-edge in step (6), and hence we will eventually choose the half-edge $r$ in step (2) and find the second new leaf for this bf backedge. We can then label these vertices $u_1^{\operatorname{BF}},\dotsm, u_p^{\operatorname{BF}}$ for some $p\ge n$. See Figure \ref{fig:bfforestlabeling} for an example of how this is done for the breadth-first construction.
\end{remark}
\begin{figure}\label{fig:bfforestlabeling}
\end{figure}
\subsubsection{Depth-first construction}
We construct a graph $M^{\operatorname{DF}}(\mathbf{d}^n)$ (we initially include a ${\operatorname{DF}}$ to specify the construction) as follows:
We initialize at step 1 as before, we pick a sleeping half-edge uniformly at random. Label the corresponding vertex as $v_1$ and declare all of the half-edges attached to $v_1$ as active. The only thing that changes on subsequent steps is that in Case 1 we replace part (1) with
\begin{enumerate}
\item[(1')] Let $i$ be the \textbf{largest} integer $k$ such that there exists an active half-edge attached to $v_k$.
\end{enumerate}
The above is the depth-first construction of the multigraph $M^{\operatorname{DF}}(\mathbf{d}^n)$. This changes the order in which we find vertices and label them, so we will denote the new ordering and labeling by $v_1^{\operatorname{DF}},\dotsm, v_n^{\operatorname{DF}}$. We analogously construct the depth-first forest, by removing depth-first (df) backedges and replacing them with leaves.
\subsubsection{Symmetry between constructions}
Recall that the multigraph $M(\mathbf{d}^n)$ is taken to be uniform over all possible pairings of half-edges. The following claim is trivial.
\begin{claim}
For any degree sequence $\mathbf{d}^n = (d_1,\dotsm, d_n)$ the graphs $M^{\operatorname{BF}}(\mathbf{d}^n)$ and $M^{\operatorname{DF}}(\mathbf{d}^n)$ are equal in distribution. We write both as $M(\mathbf{d}^n)$.
\end{claim}
The symmetry between the constructions allows us to look at two random walks which turn out to be equal in distribution. We write $\deg(v)$ for the degree (counted with multiplicity) of the vertex $v$ in a graph, $M(\mathbf{d}^n), F^{\operatorname{DF}}(\mathbf{d}^n),$ etc., which is clear from context. Recall that $(v_j^{\operatorname{BF}};j\in[n])$ and $(v_j^{\operatorname{DF}};j\in[n])$ are the vertices in the multigraph $M(\mathbf{d}^n)$ labeled in two distinct ways according to the breadth-first and depth-first explorations, respectively. Define the following two walks:
\begin{equation}\label{eqn:swalks}
S_{\mathbf{d}^n}^{\operatorname{BF}}(k) = \sum_{j=1}^k (\deg (v^{\operatorname{BF}}_j) - 2) ,\qquad S_{\mathbf{d}^n}^{\operatorname{DF}}(k) = \sum_{j=1}^k (\deg(v^{\operatorname{DF}}_j) - 2).
\end{equation}
We wish to define analogous walks on the forests $F^{{\operatorname{BF}}}(\mathbf{d}^n)$ and $F^{\operatorname{DF}}(\mathbf{d}^n)$, for which we remind the reader of Remark \ref{rem:forestlabel}. Hence we have a labeling of all the vertices of the forests: $(u_j^{\operatorname{BF}})$ for the forest $F^{\operatorname{BF}}(\mathbf{d}^n)$ with the breadth-first exploration and $(u_j^{\operatorname{DF}})$ for the forest $F^{\operatorname{DF}}(\mathbf{d}^n)$ with the depth-first exploration.
For each connected component of the multigraph, and hence of the forest, there is a vertex discovered when the collection of active half-edges $\mathcal{A}$ was empty; we call those vertices \textit{roots}. If $u$ is a root in a forest, write $\chi(u) = \deg(u)$, otherwise write $\chi(u) = \deg(u) - 1$. The value of $\chi(u)$ is precisely the number of children that vertex $u$ has in the forest in which it lives. For $j$ sufficiently large, there is no vertex $u_j^{\operatorname{BF}}$ or $u_j^{\operatorname{DF}}$. This will not matter for our scaling limits, but for completeness we define $u_j^{\operatorname{BF}}$ and $u_j^{\operatorname{DF}}$ as root vertices of components with a single vertex, and therefore $\chi(u_j^{\operatorname{BF}}) = \chi(u_j^{\operatorname{DF}}) = 0$ for sufficiently large $j$. Define the walks
\begin{equation}\label{eqn:xwalks1}
X^{\operatorname{BF}}_{\mathbf{d}^n}(k) = \sum_{j=1}^k (\chi(u^{\operatorname{BF}}_j) -1),\qquad X^{\operatorname{DF}}_{\mathbf{d}^n}(k) = \sum_{j=1}^k (\chi(u^{\operatorname{DF}}_j)-1).
\end{equation}
As is shown in Section 5.1 of \cite{CKG.20}, the distribution of the depth first walk $X^{\operatorname{DF}}_{\mathbf{d}^n}$ can be reconstructed from $S_{\mathbf{d}^n}^{\operatorname{DF}}$. This was done for when the degree sequence is taken to be random; however it works for a deterministic degree sequence as well. A trivial alteration of that algorithm can be used to construct $X^{{\operatorname{BF}}}_{\mathbf{d}^n}$ from the walk $S_{\mathbf{d}^n}^{\operatorname{BF}}$ and moreover, we can couple these two constructions to see that backedges have a particular correspondence. We summarize this construction later in the appendix. We write this as the following lemma.
\begin{lem}\label{lem:convBFDFequal}
\begin{enumerate}
\item For any degree sequence $\mathbf{d}^n$, the breadth-first walks and the depth-first walks are equal in distribution. That is,
\begin{equation*}
(S_{\mathbf{d}^n}^{\operatorname{BF}}(k);k\ge 0) \overset{d}{=} (S_{\mathbf{d}^n}^{\operatorname{DF}}(k); k\ge 0) ,\qquad\text{and}\qquad (X_{\mathbf{d}^n}^{\operatorname{BF}}(k);k\ge 0) \overset{d}{=} (X_{\mathbf{d}^n}^{\operatorname{DF}}(k); k\ge 0).
\end{equation*}
\item There exists a coupling of $F^{\operatorname{DF}}(\mathbf{d}^n)$ and $F^{\operatorname{BF}}(\mathbf{d}^n)$ such that for all $j<i$ a df-backedge appears between $u_j^{\operatorname{DF}}$ and $u_i^{\operatorname{DF}}$ if and only if a bf-backedge appears between $u_j^{\operatorname{BF}}$ and $u_i^{\operatorname{BF}}$.
\end{enumerate}
In particular, there exists a coupling of $X_n^{\operatorname{BF}}$ and $X_n^{\operatorname{DF}}$ such that
\begin{equation*}
\inf_{i\le k} X_n^{\operatorname{BF}}(i) = \inf_{i\le k} X_n^{\operatorname{DF}}(i).
\end{equation*}
\end{lem}
The last part of the above lemma tells us something that will be used several times in the sequel: under this coupling the excursions of $X_{\mathbf{d}^n}^{\operatorname{BF}}$ and $X_{\mathbf{d}^n}^{\operatorname{DF}}$ above the running infimum have the same lengths.
\subsubsection{Random degree distribution}
In this subsection we describe some of what happens when we take the sequence $\mathbf{d}^n = (d_1,\dotsm,d_n)$ to be i.i.d. samples from a distribution $\nu$ on $\mathbb{N} = \{1,2,\dotsm\}$. We take $(d_j;j\ge 1)$ to be an i.i.d. sequence with common distribution $\nu$. In order to guarantee that a multigraph with degree sequence $\mathbf{d}^n = (d_1,\dotsm,d_n)$ exists, we replace $d_n$ with $d_{n}+1$ if the sum has the wrong parity.
Recall from the introduction that we write $M_n(\nu)$ for the random multigraph with a random degree distribution. We do the same when referencing the forests, i.e. we will write $F^{\operatorname{BF}}_n(\nu)$ instead of $F^{\operatorname{BF}}(\mathbf{d}^n)$ when the degree sequence is random. We do not emphasize this dependence on $\nu$ when describing the random walks; instead we replace the subscript $\mathbf{d}^n$ with just $n$, i.e. we write $X_n^{\operatorname{DF}}$ instead of $X_{\mathbf{d}^n}^{\operatorname{DF}}$. If $\nu$ has finite variance, then it can be shown (see \cite[Section 7.6]{vanderHofstad.17}) that there is positive probability that the multigraph is simple, i.e. contains no self-loops nor multiple edges, and there is an explicit asymptotic formula
\begin{equation}\label{eqn:thetaDef}
\lim_{n\to\infty} \mathbb{P}(M_n(\nu) \text{ is simple}) = \exp\left(-\frac{\theta}{2} - \frac{\theta^2}{4} \right), \qquad\text{where} \quad \theta = \theta(\nu) = \frac{\mathbb{E}[d_1(d_1-1)]}{\mathbb{E}[d_1]}.
\end{equation} Moreover, when $M_n(\nu)$ is simple, it is uniformly distributed over all simple graphs with degree distribution $\mathbf{d}^n$. We write $G_n(\nu)$ for the graph $M_n(\nu)$ conditioned on it being simple.
Under certain conditions on $\nu$, the value of $\theta$ also tells us something about the critical behavior of the graph which dates back to Molloy and Reed's work \cite{MR.95,MR.98}. See also \cite{JL.09}. That is if $V_n$ denotes the size of the largest component of the multigraph $M_n(\nu)$ there is a phase-transition which occurs:
\begin{enumerate}
\item If $\theta(\nu)>1$ then $V_n/n \overset{p}{\to} a(\nu)$ for a deterministic constant $a(\nu)>0$.
\item If $\theta(\nu)\le 1$ then $V_n/n \overset{p}{\to} 0$.
\end{enumerate}
We will now restrict our attention to the case where $\nu$ is as in \eqref{eqn:cdelta1} and observe that the right-most assumption in \eqref{eqn:cdelta1} is $\mathbb{E}[d_1^2] = 2\mathbb{E}[d_1]$, which is equivalent to $\theta = 1$.
In this setting Joseph \cite{Joseph.14} gives a scaling limit of a depth-first walk for the multigraph $M_n(\nu)$, which is very slightly different than what we wrote as $S_n^{\operatorname{DF}}$. That work was extended by Conchon-Kerjan and Goldschmidt in \cite{CKG.20}. We now recall the scaling limit in the latter reference. Let $\tilde{X} = (\tilde{X}(t);t\ge 0)$ and $\tilde{H} = (\tilde{H}(t);t\ge 0)$ be defined by the change of measure in \eqref{eqn:processStableTilt}. We write $X$ for a L\'{e}vy process with Laplace exponent \eqref{eqn:psiStable} and $H$ is its associated height process. We define the process $J_n = (J_n(k);k\ge 0)$
\begin{equation}\label{eqn:SnJn}
J_n(k) = \#\{j\in \{0,1,\dotsm,k-1\} : S_n^{\operatorname{DF}}(j) = \inf_{j\le \ell \le k} S_n^{\operatorname{DF}}(\ell)\},
\end{equation} which is a discretization of \eqref{eqn:heightProcess}.
\begin{thm} [Joseph \cite{Joseph.14}, Conchon-Kerjan - Goldschmidt \cite{CKG.20}] \label{thm:alphaStableLimitLiterature}
Fix some $\alpha\in (1,2)$. Let $\nu$ be a distribution satisfying \eqref{eqn:cdelta1} and write $A = \frac{c \Gamma(2-\alpha)}{\delta\alpha(\alpha-1)}$.
Using the notation above, the following joint convergence holds in $\mathbb{D}^2$:
\begin{equation*}
\left(n^{-\frac{1}{\alpha+1}} S_n^{\operatorname{DF}}(\fl{n^{\frac{\alpha}{\alpha+1}} t}) , n^{-\frac{\alpha-1}{\alpha+1}} J_n(\fl{n^{\frac{\alpha}{\alpha+1}} t});t\ge 0 \right) \weakarrow \left(\tilde{X}(t),\tilde{H}(t);t\ge 0\right).
\end{equation*}
\end{thm}
A similar result is obtained in \cite{BvdHavL.10} under a finite third moment condition on the measure $\nu$, where the limiting process is a rescaling of Brownian motion with a different parabolic drift.
Crucially for their descriptions of the limiting graphs, the authors of \cite{CKG.20} also develop the excursion theory for the process $\tilde{X}$ (which is notated as $\tilde{L}$ in that work). Proposition 3.9 in \cite{CKG.20} shows that the excursions of $\tilde{X},\tilde{H}$, conditioned on their length being exactly $x$ are distributed as $(\tilde{\mathbf{e}}_x^{(\delta)},\tilde{\mathbf{h}}^{(\delta)}_x)$ defined in \eqref{eqn:tilting}.
\subsubsection{Continuum Graph Limits}
We now heuristically describe how the authors of \cite{CKG.20} obtain their metric space scaling limit. Let $H_n$ be the height process on the forest $F_n^{\operatorname{DF}}(\nu)$. That is $H_n(k)$ is the distance in $F^{\operatorname{DF}}_n(\nu)$ from vertex $u_k^{\operatorname{DF}}$ to the root in its connected component. This process $H_n$ satisfies \cite{LeGall.05}:
\begin{equation*}
H_n(k) = \#\{j\in \{0,\dotsm , k-1\}: X_n^{\operatorname{DF}}(j) = \inf_{j\le \ell\le k} X_n^{\operatorname{DF}}(\ell)\}.
\end{equation*}
To examine the components of the graph $M_n(\nu)$, the authors of \cite{CKG.20} look at the collection of excursions of the processes $X_n^{\operatorname{DF}}$ and $H_n$. These are defined as follows:
\begin{equation*}
\begin{split}
\widehat{X}_{n,i}^{\operatorname{DF}}(k) &= X_n^{\operatorname{DF}} (\sigma_{n}(i-1) + k) - X_n^{\operatorname{DF}}(\sigma_n(i-1)),\\
\widehat{H}_{n,i}(k) &= H_n(\sigma_n(i-1)+k),
\end{split} \qquad k = 0,\dotsm, \sigma_{n}(i)-\sigma_n(i-1)
\end{equation*} where $\sigma_n(i) = \inf\{j: X_n^{\operatorname{DF}}(j) = -i\}$ is the first hitting time of level $-i$. The process $\widehat{X}_{n,i}^{\operatorname{DF}}$ starts at zero and is non-negative until it hits level $-1$ at time $k = \sigma_n(i)-\sigma_n(i-1)$. The process $\widehat{H}_{n,i}$ is strictly positive for $k = 1,\dots, \sigma_n(i)-\sigma_n(i-1)-1$. We extend both of these by constancy for $k>\sigma_n(i)-\sigma_n(i-1)$. These processes encode the tree structure \cite{LeGall.05} of the $i^\text{th}$ connected component of the forest $F_n^{\operatorname{DF}}(\nu)$. By the construction of $M_n^{\operatorname{DF}}(\nu)$, this orders the components of the forest $F_n^{\operatorname{DF}}(\nu)$ in a manner size-biased by the number of edges in the component. There are further only a finite number of indexes $i$ such that $\sigma_n(i)-\sigma_n(i-1)\neq 1$ since for sufficiently large $i$ the $i^\text{th}$ component of $F_n^{\operatorname{DF}}(\nu)$ is simply an isolated vertex.
To study the large components of the graph, we instead reorder the excursions by decreasing lengths with ties broken arbitrarily. Denote this new ordering by omitting the ``widehat'' notation: $(X_{n,i}^{\operatorname{DF}};i\ge 1) = \left((X_{n,i}^{\operatorname{DF}}(k);k\ge 0);i\ge 1\right)$ and $(H_{n,i};i\ge 1) = \left((H_{n,i}(k);k\ge 0);i\ge 1\right)$.
The $i^\text{th}$ excursion $X_{n,i}^{\operatorname{DF}}$ may not tell us information about the $i^\text{th}$ largest component of $M_n(\nu)$, $G_n^i$. This is because the forest $F_n^{\operatorname{DF}}(\nu)$ contains additional vertices which could change the ordering of the components. For example, if $G_n^i$ had $10$ vertices and $0$ df backedges and component $G_n^{i+1}$ had $9$ vertices and 2 df backedges, then the corresponding components in $F_n^{\operatorname{DF}}(\nu)$ would have $10$ and $13$ vertices, respectively. In turn, their indices will appear in the opposite order. We also note that the excursions of the process $X_n^{\operatorname{DF}}$ do not identically correspond to the excursions of the process $S_n^{\operatorname{DF}}$ discussed previously. While this may cause some problems in the discrete setting, in the large $n$ limit neither of these problems is relevant, as we will shortly explain.
Before turning to the scaling limits, let $T_n^1$ be the largest connected component, i.e. tree, contained in $F_n^{\operatorname{DF}}(\nu)$. This is encoded by $X_{n,1}^{\operatorname{DF}}$ and $H_{n,1}$. This tree may contain df backedges and these backedges appear in pairs. For concreteness, suppose that there are $m\ge 1$ of these pairs. These can be indexed by $(l_1,r_1),\dotsm, (l_m,r_m)$. This means that the $l_i^\text{th}$ vertex explored in the depth-first exploration of the largest component of $T_n^1$ will be paired with the $r_i^\text{th}$ vertex explored in the corresponding component of $M_n(\nu)$. See Figure \ref{fig:excursionsbfwithmarks} for the analogous pairs for the breadth-first labeling of the component in Figure \ref{fig:bfforestlabeling}. We now define $\mathscr{P}_{n,1}^{\operatorname{DF}}$ as the collection of points
\begin{equation*}
\mathscr{P}_{n,1}^{\operatorname{DF}} = \left\{ \left( n^{-\frac{\alpha}{\alpha+1}} l_i, n^{-\frac{\alpha}{\alpha+1}} r_i\right): i =1,\dotsm, m \right\}.
\end{equation*} When there are no df backedges present, just define the set as the empty set. We call this set the set of \textit{marks}, and we can do the same thing to each of the other connected components as well to get sets $\mathscr{P}_{n,i}^{\operatorname{DF}}$.
\begin{figure}\label{fig:excursionsbfwithmarks}
\end{figure}
An important step in how the authors of \cite{CKG.20} proved the components $G_n^1, G_n^2,\dotsm$ have scaling limits was showing scaling limits of the processes $X_{n,i}^{\operatorname{DF}}$ and $H_{n,i}$ and of the set of marks $\mathscr{P}_{n,i}^{\operatorname{DF}}$. The convergence of the sets $\mathscr{P}_{n,i}^{\operatorname{DF}}$ is with respect to the vague topology of its associated counting measure. Namely they prove \cite[Proposition 5.16]{CKG.20}
\begin{equation}\label{eqn:markExcursionConv}
\begin{split}
&\left(\left(n^{-\frac{1}{\alpha+1}} X_{n,i}^{\operatorname{DF}}(\fl{n^{\frac{\alpha}{\alpha+1}} t});t\ge 0\right),\left( n^{-\frac{\alpha-1}{\alpha+1}} H_{n,i}(\fl{n^{\frac{\alpha}{\alpha+1} }t}); t\ge 0\right), \mathscr{P}_{n,i}^{\operatorname{DF}} ;i\ge 1\right) \\
&\qquad\qquad\qquad \weakarrow \left( \tilde{e}_i,\tilde{h}_i,\mathscr{P}_i;i\ge 1\right),
\end{split}
\end{equation} for some discrete sets $(\mathscr{P}_i;i\ge 1)$. Here the convergence in the first two coordinates is with respect to the Skorohod topology and the convergence in the third coordinate is with respect to the vague topology of its associated counting measure and then the product topology is taken over the index $i\ge 1$.
The limiting set $\mathscr{P}_i$ can be described as follows. Let $(\mathscr{Q}_i;i\ge 1)$ denote an i.i.d. collection of Poisson point processes on $\mathbb{R}_+\times\mathbb{R}_+$ with intensity $\frac{1}{\delta}\operatorname{Leb}$. Only finitely many of these points $(s,y)\in \mathscr{Q}_i$ will satisfy $0\le y\le \tilde{e}_i(s)$, and index these as $(s^1,y^1),\dotsm, (s^m,y^m)$ for some $m$. The set $\mathscr{P}_i$ is then the collection
\begin{equation*}
\mathscr{P}_i = \{(s^p,t^p): p = 1,\dotsm, m\}, \qquad\text{where}\quad t^p = \inf\{u\ge s^p: \tilde{e}_i(u)\le y^p\}.
\end{equation*}
This, in turn, allowed them to show that the ordered sequence of components of $M_n(\nu)$ (and of $G_n(\nu)$, by conditioning) converges after proper rescaling in a product {Gromov-Hausdorff-Prokhorov } topology to the sequence of continuum random graphs
\begin{equation} \label{eqn:graphSequenceAlpha}
\left( \mathcal{M}_i;i\ge 1\right) = \left(\mathscr{G}(\tilde{h}_i, \tilde{e}_i , \mathscr{Q}_i );i\ge 1\right),
\end{equation} where $\mathscr{Q}_i$ were defined above.
Let us summarize these results in a theorem for easy reference.
\begin{thm}[Conchon-Kerjan - Goldschmidt \cite{CKG.20}] \label{thm:ckgThm2}
Let $(G_n^i;i\ge 1)$ denote the components of the critical random graph $M_n(\nu)$ ordered by decreasing number of vertices and viewed as pointed measured metric spaces.
Under the assumptions of Theorem \ref{thm:alphaStableLimitLiterature}, the convergence in \eqref{eqn:markExcursionConv} holds in the product topology (product over the index $i$). Jointly with \eqref{eqn:markExcursionConv}, the weak convergence
\begin{equation}\label{eqn:graphStableconv}
\left(\operatorname{scale}(n^{-\frac{\alpha-1}{\alpha+1}} , n^{-\frac{\alpha}{\alpha+1}}) G_n^i ; i \ge 1 \right) \weakarrow (\mathcal{M}_i; i\ge 1)
\end{equation} holds with respect to the product {Gromov-Hausdorff-Prokhorov } topology, where the sequence $(\mathcal{M}_i;i\ge 1)$ is distributed as \eqref{eqn:graphSequenceAlpha}.
\end{thm}
\begin{remark}
Theorem 1.1 in \cite{CKG.20} does not state the joint convergence between equations \eqref{eqn:markExcursionConv} and \eqref{eqn:graphStableconv}; however, the proof of said theorem shows there is joint convergence.
\end{remark}
These recalled results from \cite{CKG.20} are on the convergence for \textit{depth-first} objects. However, the symmetry between depth-first and breadth-first constructions described above in Lemma \ref{lem:convBFDFequal} allows us to obtain similar results for the analogous \textit{breadth-first} objects. Consequently, if we let $X_{n,i}^{\operatorname{BF}}$ be the breadth-first walk of the $i^\text{th}$ largest component of $F_n^{\operatorname{BF}}(\nu)$, or, equivalently stated, the $i^\text{th}$ longest excursion of $X_n^{\operatorname{BF}}$ above its running minimum, then
\begin{equation}\label{eqn:excursionBFstable}
\left( \left( n^{-\frac{1}{\alpha+1}} X_{n,i}^{\operatorname{BF}} (\fl{n^{\frac{\alpha}{\alpha+1}} t}) ;t\ge 0\right) ;i\ge1 \right) \weakarrow \left( \tilde{e}^*_i; i\ge 1\right),
\end{equation} where $\left(\tilde{e}_i^*;i\ge 1\right) \overset{d}{=} \left(\tilde{e}_i;i\ge 1 \right)$. More importantly for our work, there are auxiliary processes described in \cite[pg. 30, 32]{CKG.20} and recalled in the appendix that can easily be defined in the same way for a breadth-first construction. In particular, these auxiliary processes include the collection of marks $\mathscr{P}^{\operatorname{DF}}_{n,k}$ recalled above. Therefore, we can extend the convergence \eqref{eqn:excursionBFstable} by using Lemma \ref{lem:convBFDFequal} to include collection of marks in an analogous way to the depth-first marks:
\begin{lem}\label{lem:18}
There exists a finite set $\mathscr{P}_{n,i}^{\operatorname{BF}}\subset\mathbb{R}_+^2$ of marks corresponding to the $i^\text{th}$ largest component of $F_n^{\operatorname{BF}}(\nu)$ which keep track of the bf backedges such that
\begin{equation*}
\left( X_{n,i}^{\operatorname{BF}} , \mathscr{P}_{n,i}^{\operatorname{BF}} ; i \ge 1\right) \overset{d}{=} \left(X_{n,i}^{\operatorname{DF}}, \mathscr{P}_{n,i}^{\operatorname{DF}};i\ge 1 \right).
\end{equation*}
\end{lem}
We now state the following lemma:
\begin{lem} \label{lem:stableExcursionsSameSize} Couple $X_n^{\operatorname{BF}}$ and $X_n^{\operatorname{DF}}$ as in Lemma \ref{lem:convBFDFequal} so they have the same excursion intervals. Then, under the assumptions of Theorem \ref{thm:ckgThm2},
any joint subsequential limit of \eqref{eqn:markExcursionConv}, \eqref{eqn:graphStableconv} and \eqref{eqn:excursionBFstable} satisfies for each fixed $i\ge 1$:
\begin{equation}\label{eqn:stableExcursionsSameSize}
\zeta(\tilde{e}_i^*) = \zeta(\tilde{e}_i)= \mu_i(\mathcal{M}_i),
\end{equation} almost surely.
\end{lem}
\begin{proof}
Under this coupling, the excursion intervals of $X_n^{\operatorname{DF}}$ and $X_n^{\operatorname{BF}}$ have the same length. The equality $\zeta(\tilde{e}_i) = \mu_i(\mathcal{M}_i)$ follows from \cite{CKG.20}, and that implies $\zeta(\tilde{e}_i^*) = \mu_i(\mathcal{M}_i)$ as well. See also \cite{Joseph.14}.
\end{proof}
We have now gathered most of the required ingredients and background to prove Theorems \ref{thm:Epidemic3} and \ref{thm:ctsAlpha} using the approach in Theorem \ref{thm:conv2}. The last thing we'll verify is that Assumption 3 holds in Theorems \ref{thm:conv1} and \ref{thm:conv2}. By scaling of the $\alpha$ stable graph \cite{CKG.20}, we focus on the case that the total mass equals 1.
\begin{prop}\label{prop:ass3Foralpha} Fix $\alpha\in (1,2)$.
Let $(\tilde{\e}^{(\delta)}_x,\tilde{\mathbf{h}}^{(\delta)}_x)$ be defined as in \eqref{eqn:tilting}. Let $\mathscr{G} = \mathscr{G}(\tilde{\mathbf{h}}^{(\delta)}_x,\tilde{\e}^{(\delta)}_x, \mathscr{Q})$ denote the continuum random graph where $\mathscr{Q}$ is a Poisson point process with intensity $\frac{1}{\delta}\operatorname{Leb}$ for some $\delta>0$. Then, almost surely,
\begin{equation*}
\mu(B(\rho,t)\setminus\{\rho\})>0 ,\qquad\forall t>0.
\end{equation*}
The same holds for the graphs $\mathcal{M}_i$ appearing in \eqref{eqn:graphSequenceAlpha}.
\end{prop}
\begin{proof}
The same statement for the graphs $\mathcal{M}_i$ holds by conditioning on their mass $\mu_i(\mathcal{M}_i)$ \cite[Theorem 1.2]{CKG.20}.
We use the tree $\mathscr{T}_{\tilde{\mathbf{h}}^{(\delta)}_x}$ as a measured metric space, and when confusion might arise we will use subscripts to specify whether we are dealing with the tree or the graph.
We observe that the quotient map
\begin{equation*}
q:\mathscr{T}_{\tilde{\mathbf{h}}^{(\delta)}_x} \longrightarrow \mathscr{G}(\tilde{\mathbf{h}}^{(\delta)}_x,\tilde{\e}^{(\delta)}_x,\mathscr{Q})
\end{equation*} in the construction of the random graph satisfies the following:
\begin{equation*}
\operatorname{d}_{\mathscr{G}}(\rho,q(x))\le \operatorname{d}_{\mathscr{T}_{\tilde{\mathbf{h}}^{(\delta)}_x}} (\rho,x),\qquad \forall x\in \mathscr{T}_{\tilde{\mathbf{h}}^{(\delta)}_x}.
\end{equation*}
Consequently,
\begin{equation*}
\mu_{\mathscr{G}}\left( B_{\mathscr{G}}(\rho,t)\right) \ge \mu_{\mathscr{T}_{\tilde{\mathbf{h}}^{(\delta)}_x}}\left(B_{\mathscr{T}_{\tilde{\mathbf{h}}^{(\delta)}_x}}(\rho,t)\right) = \int_0^1 1_{[\tilde{\mathbf{h}}^{(\delta)}_x(s)\in (0,t)]}\,ds.
\end{equation*} Since the process $\tilde{\mathbf{h}}^{(\delta)}_x$ is non-negative and almost surely not identically zero, the result follows easily.
\end{proof}
\subsection{Proof of Theorem \ref{thm:Epidemic3}} \label{sec:epidemic3}
We now prove Theorem \ref{thm:Epidemic3}.
\begin{proof}[Proof of Theorem \ref{thm:Epidemic3}]
Throughout the proof all limits will be as $n$ or a subsequence of $n$ goes towards infinity.
We regard the components $G_n^i$ of the graph as measured metric spaces with the graph distance, where each vertex has measure one.
The processes $Z_{n,i}$ measure the number of vertices infected on day $h$, which is simply the number of vertices at distance $h$ from $\rho_i$ in $G_n^i$:
\begin{equation*}
Z_{n,i}(h) = \#\{v\in G_n^i : d(v,\rho_i) = h\}
\end{equation*} and the process $C_{n,i} = (C_{n,i}(h);h\ge 0)$ denotes its running sum:
\begin{equation*}
C_{n,i}(h) = \sum_{j=0}^h Z_{n,i}(j) = \mathfrak{m}_i(\{v\in G_n^i : d(v,\rho_i)\le h\}).
\end{equation*} These processes measure something close to the height profile on the components of the forests $F_{n}^{\operatorname{BF}}(\nu)$; however it is not exactly the same because of the addition of new leaves. This complicates a direct application of Theorem \ref{thm:conv2}.
Let us write $Z^*_{n,i}(h)$ for the discrete Lamperti transform of $X_{n,i}^{\operatorname{BF}}$:
\begin{equation} \label{eqn:lampZstar}
Z^*_{n,i}(h+1) = 1+ X_{n,i}^{\operatorname{BF}}\circ C_{n,i}^*(h) ,\qquad C_{n,i}^*(h) = \sum_{j=0}^h Z^*_{n,i}(j).
\end{equation} This process $Z_{n,i}^*$ measures the number of vertices at height $h$ in the $i^\text{th}$ largest component of the forest $F_{n}^{\operatorname{BF}}(\nu)$; see the discussion around \eqref{eqn:discLamperti2} and more generally \cite{CPGUB.13}. Said another way, the values of $Z_{n,i}^*(h)$ and $Z_{n,i}(h)$ for a fixed $h$ only differ by the number of new leaves at height $h$ in the component of the forest $F_n^{{\operatorname{BF}}}(\nu)$.
The total number of such additional vertices is twice the number of bf backedges (which is the number of df backedges as well). Therefore, for a fixed index $i$, the number of bf backedges in $G_n^i$ is a tight sequence in the index $n$ of random variables. Indeed, a stronger statement is true. By Proposition 5.12 in \cite{CKG.20} the weak convergence
\begin{equation*}
\#\{\text{bf backedges in }G_n^i\} \weakarrow \text{Poisson}\left(\frac{1}{\delta} \int_0^{\zeta(\tilde{e}_i)} \tilde{e}_i(t)\,dt \right),
\end{equation*} where $\tilde{e}_i$ is as in \eqref{eqn:markExcursionConv}. Now for each $i$ we can bound the difference between $Z_{n,i}(h)$ and $Z_{n,i}^*(h)$ uniformly in $h$. Indeed
\begin{equation*}
\begin{split}
\sum_{h\ge 0} |Z_{n,i}(h) - Z_{n,i}^*(h)| &= \sum_{h\ge 0} \left|\#\{v\in G_n^i : d(v,\rho_i) = h\} - Z^*_{n,i}(h) \right|\\
&= 2\cdot \#\{\text{bf backedges in }G_n^i\}\le \kappa_{n,i}
\end{split}
\end{equation*} where for each $i\ge 1$ the sequence $(\kappa_{n,i};n\ge 1)$ is a tight sequence of random variables.
By Slutsky's theorem, in order to prove the rescaled convergence of $(Z_{n,i};i\ge 1)$ to the desired limit, we just need to prove the convergence of $(Z^*_{n,i};i\ge 1)$ under the same scaling regime to the same limiting processes. Indeed, their difference, when rescaled by $n^{-\frac{1}{\alpha+1}}$ converges in probability to the zero path in the Skorohod space:
\begin{equation*}
\left(n^{-\frac{1}{\alpha+1}} \kappa_{n,i} ;t \ge 0\right) \weakarrow \left(0; t\ge 0\right)\qquad\text{as }n\to\infty.
\end{equation*}
By Proposition \ref{prop:jointCX}, for any fixed integer $N$
\begin{equation*}
\left(\left(\left(\left(n^{-\frac{\alpha}{\alpha+1}}C^*_{n,i}(\fl{n^{\frac{\alpha-1}{\alpha+1}} t});t\ge 0\right),\left( n^{-\frac{1}{\alpha+1}} X_{n,i}^{\operatorname{BF}} (\fl{n^{\frac{\alpha}{\alpha+1}}t});t\ge 0\right) \right); i\in[N]\right) ;n\ge 1\right)
\end{equation*} viewed as a sequence in $\mathbb{D}(\mathbb{R}_+,\mathbb{R})^{2N}$, is tight. Consequently, in the product topology over the index $i\ge 1$ the sequence
\begin{equation}\label{eqn:cxcombo}
\left(\left(\left(\left(n^{-\frac{\alpha}{\alpha+1}}C^*_{n,i}(\fl{n^{\frac{\alpha-1}{\alpha+1}} t});t\ge 0\right),\left( n^{-\frac{1}{\alpha+1}} X_{n,i}^{\operatorname{BF}} (\fl{n^{\frac{\alpha}{\alpha+1}}t});t\ge 0\right) \right); i\ge 1\right) ;n\ge 1\right)
\end{equation} is tight in $(\mathbb{D}^2)^\infty$. Additionally, by Proposition \ref{prop:jointCX} any subsequential limit, say
\begin{equation} \label{eqn:lampLim1}
\left(\left(\left(C_i(t);t\ge 0\right), \left(X_i(t);t\ge 0\right)\right); i\ge 1\right),
\end{equation} must satisfy $C_i(t) = \int_0^t X_i\circ C_i(s)\,ds$. Moreover, the sequence $(X_i;i\ge 1)\overset{d}{=}(\tilde{e}_i^*;i\ge 1) \overset{d}{=}(\tilde{e}_i;i\ge 1)$. In particular subsequential limits of \eqref{eqn:cxcombo} are classified by a time-shift as in Proposition \ref{prop:AUB}.
By Theorem \ref{thm:ckgThm2} and Lemma \ref{lem:convBFDFequal}, we know that the convergences in \eqref{eqn:markExcursionConv}, \eqref{eqn:graphStableconv} and \eqref{eqn:excursionBFstable} hold. By a tightness argument, we can assume that these sequences converge jointly along a subsequence, which we will denote by the index $n$.
Observe that the sequence
\begin{equation*}
\left(\left(\left( n^{-\frac{\alpha}{\alpha+1}} C_{n,i}(\fl{n^{\frac{\alpha-1}{\alpha+1}}t}); t\ge 0\right);i\ge 1\right);n\ge 1\right)
\end{equation*} is tight in $\mathbb{D}^\infty$. Indeed, this easily follows from the tightness of
\begin{equation*}
\left(\left( \left( n^{-\frac{\alpha}{\alpha+1}} C_{n,i}^*(\fl{n^{\frac{\alpha-1}{\alpha+1}}t}); t\ge 0\right);i\ge1\right);n\ge 1\right)
\end{equation*} in $\mathbb{D}^\infty$ discussed above
and the bounds
\begin{equation*}
|C_{n,i}(h) - C_{n,i}^*(h)| \le \sum_{j\ge 0} |Z_{n,i}(j)-Z_{n,i}^*(j)| \le \kappa_{n,i}.
\end{equation*}
Let us work on a subsequence of \eqref{eqn:cxcombo} which converges to \eqref{eqn:lampLim1}. Call this index $n_j$. Then, by the previous paragraph,
\begin{equation*}
\left(\left( n_j^{-\frac{\alpha}{\alpha+1}} C_{n_j,i}( \fl{n_j^{\frac{\alpha-1}{\alpha+1}}t});t\ge 0\right) ;i\ge 1\right) \weakarrow \left(\left({C}_i(t);t\ge 0\right);i\ge 1\right)
\end{equation*} for the same processes ${C}_i$ in \eqref{eqn:lampLim1}. However, $C_{n,i}(h) = \#\{v\in G_{n}^i: d(v,\rho_i)\le h\}$ is just the measure of the ball of radius $h$ in $G_n^i$ and so an application of Lemma \ref{lem:ballCont} implies
\begin{equation}\label{eqn:cConvtoMeas}
{n_j}^{-\frac{\alpha}{\alpha+1}} C_{n_j,i}(\fl{n_j^{\frac{\alpha-1}{\alpha+1}}t}) \weakarrow \mu_i(B(\rho,t)), \qquad \text{for all but countably many }t>0,
\end{equation} where $\mu_i$ is the mass measure on the scaling limit of the graph component $G_n^i$. Hence ${C}_i(t)$ must satisfy:
\begin{equation*}
{C}_i(t) \overset{d}{=}\mu_i(B(\rho,t)), \qquad\text{for Lebesgue a.e. } t>0.
\end{equation*} It follows easily from Proposition \ref{prop:ass3Foralpha} that
\begin{equation*}
\mathbb{P}(C_i(t)>0, \forall t>0) =1,\qquad \forall i\ge 1.
\end{equation*} Indeed $C_i(t)$ is non-decreasing, and we can find a countable dense set of $t>0$ such that $\mathbb{P}(C_i(t) > 0) = 1$. Hence, by Proposition \ref{prop:AUB} and the equality in distribution $(X_i;i\ge 1) \overset{d}{=} (\tilde{e}_i;i\ge 1)$, we get $((C_i, X_i);i\ge 1) \overset{d}{=} ((C_i,\tilde{e}_i);i\ge 1)$ where $(Z_i,C_i)$ is the Lamperti pair associated with $\tilde{e}_i$. Since this works for any subsequential limit, we conclude that the original sequence converges:
\begin{equation*}
\left( \left(n^{-\frac{\alpha}{\alpha+1}} C_{n,i}(\fl{n^{\frac{\alpha-1}{\alpha+1} } t});t\ge 0 \right);i\ge 1\right) \weakarrow (C_i;i\ge 1).
\end{equation*}
A proof similar to that of Proposition \ref{prop:jointCXwithLimit} yields the joint convergence
\begin{equation}\label{eqn:t1}
\left(\left( n^{-\frac{1}{\alpha+1}} Z^*_{n,i}(\fl{n^{\frac{\alpha-1}{\alpha+1}}t});t\ge 0\right), \left( n^{-\frac{\alpha}{\alpha+1}} C_{n,i}^*(\fl{n^{\frac{\alpha-1}{\alpha+1}}t}); t\ge 0\right) ;i\ge 1\right)\weakarrow ((Z_i,C_i);i\ge 1)
\end{equation} where $(Z_i,C_i)$ is the Lamperti pair associated with the excursion $\tilde{e}_i$.
This proves the desired claim.
\end{proof}
Before turning to the proof of Theorem \ref{thm:ctsAlpha}, we state and prove the following lemma:
\begin{lem}\label{lem:20} Couple the depth-first and breadth-first walks as in Lemma \ref{lem:convBFDFequal}.
Under the assumptions of Theorem \ref{thm:alphaStableLimitLiterature}, and using the notation in \eqref{eqn:lampZstar}, there is joint convergence in distribution along a subsequence of the index $n\ge 1$ of the collection
\begin{equation*}
\begin{split}
\bigg( \bigg( \Big( n^{-\frac{1}{\alpha+1}}& Z^*_{n,i}(\fl{n^{\frac{\alpha-1}{\alpha+1}}t});t\ge 0\Big),\Big( n^{-\frac{\alpha}{\alpha+1}} C_{n,i}^*(\fl{n^{\frac{\alpha-1}{\alpha+1}}t});t\ge 0\Big),\\
& \Big(n^{-\frac{1}{\alpha+1}} X_{n,i}^{\operatorname{BF}} (\fl{n^{\frac{\alpha}{\alpha+1}} t}) ;t\ge 0\Big), \operatorname{scale}(n^{-\frac{\alpha-1}{\alpha+1}}, n^{-\frac{\alpha}{\alpha+1}} )G_n^i, \#\mathscr{P}_{n,i}^{\operatorname{BF}} \bigg);i\ge 1\bigg)_{n\ge 1}
\end{split}
\end{equation*} towards
\begin{equation*}
((Z_i,C_i,\tilde{e}^*_i, \mathcal{M}_i, \operatorname{{sur}}(\mathcal{M}_i));i\ge 1),
\end{equation*} where
\begin{enumerate}
\item $\mathcal{M}_i$ are as in Theorem \ref{thm:ckgThm2} and \eqref{eqn:graphSequenceAlpha};
\item $(Z_i,C_i)$ is the Lamperti pair associated with the excursion $\tilde{e}^*_i$;
\item The process $C_i(t) = \mu_i(B(\rho,t))$ for almost all (and hence all) $t\ge 0$;
\item The excursions $(\tilde{e}_i^*;i\ge 1)\overset{d}{=} (\tilde{e}_i;i\ge 1)$ in the construction of $\mathcal{M}_i$, and, in particular, the length of the excursion $\tilde{e}_i^*$ is the mass of the space $\mathcal{M}_i$, i.e.
\begin{equation*}
\zeta(\tilde{e}_i^*) = \mu_i(\mathcal{M}_i);
\end{equation*}
\item The random variable $\operatorname{{sur}}(\mathcal{M}_i)$, the surplus of the space $\mathcal{M}_i$ is
\begin{equation*}
\operatorname{{sur}}(\mathcal{M}_i) \overset{d}{=} \operatorname{Poisson}\left(\frac{1}{\delta}\int_0^{\zeta(\tilde{e}_i^*)}\tilde{e}_i^*(t)\,dt\right);
\end{equation*}
\item Lastly, conditionally on the length of excursion $\zeta_i:=\zeta(\tilde{e}_i^*)$ and the surplus values $R_i:=\operatorname{{sur}}(\mathcal{M}_i)$, the graph $\mathcal{M}_i$ satisfies
\begin{equation*}
\mathcal{M}_i \overset{d}{=} \operatorname{scale}\left(\zeta_i^{(\alpha-1)/\alpha} ,\zeta_i \right) \mathscr{G}^{(\alpha,R_i)}
\end{equation*}
\end{enumerate}
\end{lem}
\begin{proof}
These are tight random variables in each of the marginals, so joint convergence along a subsequence is standard.
Item 1 follows from the referenced theorem.
Item 2 follows from the proof of Theorem \ref{thm:Epidemic3} and the identity in distribution $(\tilde{e}_i;i\ge 1)\overset{d}{=}( \tilde{e}_i^*;i\ge 1)$ previously seen.
Item 3 follows from the proof of Theorem \ref{thm:Epidemic3}, particularly around \eqref{eqn:cConvtoMeas}.
Item 4 is from Lemma \ref{lem:stableExcursionsSameSize}.
Item 5 follows from Theorem 5.5 and Proposition 5.12 in \cite{CKG.20} along with the equality $\#\mathscr{P}_{n,i}^{\operatorname{DF}} = \#\mathscr{P}_{n,i}^{\operatorname{BF}}$.
Item 6 follows from the proof of Theorem 1.2 in \cite{CKG.20}.
\end{proof}
\subsection{Proof of Theorem \ref{thm:ctsAlpha}} \label{sec:ctsAlpha}
\begin{proof}[Proof of Theorem \ref{thm:ctsAlpha}]
The big content of this proof is to show that we can condition on the length $\zeta(\tilde{e}_i^*)$ of the excursion $\tilde{e}_i^*$ and the surplus of the graphs $\mathcal{M}_i$ by using Lemma \ref{lem:20} and scaling results for the excursions proved in \cite{CKG.20}.
By Lemma \ref{lem:20}, we know that we can write the height profile of the graph $\mathcal{M}_i$ (which is of random mass) as the process $Z_i$ where $(Z_i,C_i)$ is the Lamperti pair associated with the excursion $\tilde{e}^*_i$. In fact, we know
\begin{equation*}
\left( (\mu_i(B(\rho_i,v));v\ge 0), \mu_i(\mathcal{M}_i), \operatorname{{sur}}(\mathcal{M}_i)\right)_{i\ge 1} \overset{d}{=} \left((C_i(v);v\ge 0), \zeta(\tilde{e}_i^*), R_i \right)_{i\ge 1}
\end{equation*} where $(Z_i,C_i)$ is the Lamperti pair associated with the excursion $\tilde{e}_i^*$ and \linebreak $R_i\sim$ $ \text{Poisson}\left(\frac{1}{\delta}\int_0^{\zeta(\tilde{e}_i^*)} \tilde{e}^*_i(t)\,dt\right)$.
Conditioning on the values of $\zeta(\tilde{e}_1^*)$ and $R_1$ gives
\begin{equation} \label{eqn:conditEq}
\left( (\mu_1(B(\rho_1,v));v\ge 0)\bigg| \mu_1(\mathcal{M}_1) = 1, \operatorname{{sur}}(\mathcal{M}_1) = k\right) \overset{d}{=} \left((C_1(t);t\ge 0) \bigg| \zeta(\tilde{e}_1^*) = 1, R_1 = k \right).
\end{equation}
We can use the \textit{proof} of Theorem 1.2 in \cite{CKG.20} to handle this conditioning on the right-hand side and the statement of Theorem 1.2 in \cite{CKG.20} to handle the left-hand side.
The conditioning in the proof of Theorem 1.2 in \cite{CKG.20} gives
\begin{equation*}
\mathbb{E}\left[ g(\tilde{e}_i^*)\big| R_i = k, \zeta(\tilde{e}_i) = 1\right] = \mathbb{E}[g(\mathbf{e}^{(k)})]
\end{equation*} for all positive functionals $g$ and where $\mathbf{e}^{(k)} = (e^{(k)}(t);t\in[0,1])$ is defined in \eqref{eqn:ktilt}. In particular this holds for $i = 1$. Recall from Section \ref{sec:lamperti} that $C_1$ is simply a functional of $\tilde{e}_1^*$. Therefore, conditionally on the values of $\zeta(\tilde{e}^*_1)$ and $R_1$ we have
\begin{equation*}
\left((C_1(t);t\ge 0) \bigg| \zeta(\tilde{e}_1^*) = 1, R_1 = k \right) \overset{d}{=} \left(\mathbf{c}^{(k)}(t);t\ge 0 \right)
\end{equation*} where $(\mathbf{z}^{(k)},\mathbf{c}^{(k)})$ is the Lamperti pair associated with the excursion $\mathbf{e}^{(k)}$.
The left-hand side of \eqref{eqn:conditEq} is easy to condition with part (6) of Lemma \ref{lem:20}. Conditionally on the values of $\mu_1(\mathcal{M}_1)$ and $\operatorname{{sur}}(\mathcal{M}_1)$ (which is precisely the conditioning described above for $C_1$) the metric space $\mathcal{M}_1$ satisfies
\begin{equation*}
\left(\mathcal{M}_1 \bigg| \mu_1(\mathcal{M}_1) = 1, \operatorname{{sur}}(\mathcal{M}_1) = k\right) \overset{d}{=} \mathscr{G}^{(\alpha,k)}.
\end{equation*}
Hence
\begin{equation*}
\left(\mu_{\mathscr{G}^{(\alpha,k)}}(B(\rho,v));v\ge 0 \right) \overset{d}{=} \left(\mathbf{c}^{(k)}(v);v\ge 0 \right).
\end{equation*} An application of Proposition \ref{prop:AUB} completes the proof.
\end{proof}
\section{Discussion} \label{sec:disc}
In this work we showed convergence of the height profiles for the macroscopic components of a certain class of critical random graphs. We did this by looking at the height profile of these graphs and we relied on the weak convergence results that exist in the literature on some encoding stochastic processes. We observe that these techniques can likely be extended to other graph models appearing in the literature.
For example, the work of Broutin, Duquesne and Wang \cite{BDW.18,BDW.20} provides the rescaled convergence under certain conditions of the \textit{rank-1 inhomogeneous model} associated to a weight sequence $\mathtt{w} = (w_1,\dotsm,w_n)$. That graph, whose asymptotics were studied by Aldous and Limic in \cite{AL.98}, is a graph on $n$ vertices where edges are added independently with probability
\begin{equation*}
\mathbb{P}\left( \text{edge} \{i,j\} \text{ is included}\right)= 1 - \exp\left(-w_i w_j /q\right)
\end{equation*} for some parameter $q>0$. This graph goes by other names as well: the Poisson random graph \cite{BvdHavL.10,NR.06} and the Norros-Reittu model \cite{BvdHavL.10}. See also \cite{BvdHRS.18,BvdHavL.12} and Section 6.8.2 of \cite{vanderHofstad.17} for more information.
The resulting limiting processes and graphs are related to L\'{e}vy-type processes (sometimes called L\'{e}vy processes without replacement) constructed from spectrally positive L\'evy processes which {are not} stable. As in \cite{ABBG.12, CKG.20}, Broutin, Duquesne and Wang show convergence of the graphs as metric spaces by using a depth-first description. However, there is also convergence of the breadth-first walks \cite{AL.98}, and so proving convergence of the height profiles should be similar to the proofs of Theorems \ref{thm:Epidemic3} and \ref{thm:conv2}.
Using the results in the literature on Galton-Watson trees conditioned on having a fixed size \cite{LeGall.05,Duquesne.03, MM.03, Aldous.93} one can recover the Jeulin identity \cite{JY.85} and its $\alpha$-stable extension due to Miermont \cite{Miermont.03} from our Theorem \ref{thm:conv1} as well. The proofs in \cite{Miermont.03, JY.85} do not rely on weak convergence arguments. For proofs using weak-convergence arguments more in line with the results of this paper see Kersting's work \cite{Kersting.11}, or joint work of Angtuncio and Uribe Bravo \cite{AUB.20}. See also \cite{AMP.04} for a weak convergence result in a slightly weaker topology.
More generally, under certain conditions (see Theorem 2.3.1 in \cite{DL.02}) on the offspring distribution, there is convergence of Galton-Watson forests to continuum forests encoded by spectrally positive L\'{e}vy processes. Under these assumptions, using a modification of Lemma 4.8 in \cite{MR.17} or Lemma 5.8 in \cite{CKG.20}, one should be able to prove a Jeulin-type identity for excursions for non-stable L\'{e}vy processes and their associated height processes by a simple application of Theorem \ref{thm:conv2}. As far as the author is aware, such results are not present in the literature.
\begin{appendix}
\section*{}
In this appendix we recall the construction of the depth-first walks and auxiliary processes in an analogous way that Conchon-Kerjan and Goldschmidt \cite[pg. 30, 32]{CKG.20} do in their work. We do this from a deterministic degree sequence, whereas they work with a random degree sequence. That is, we fix an $n$ and $\mathbf{d}^n = (d_1,\dotsc, d_n)$ with $d_j\ge 1$. We omit reference to $\mathbf{d}^n$ from our notation.
We let $M$ denote a uniformly random multigraph with degree distribution $\mathbf{d}^n$. The vertices of $M$ can be ordered in a depth-first order, $v_1^{\operatorname{DF}},\dotsm, v_n^{\operatorname{DF}}$, or a breadth-first order $v_1^{\operatorname{BF}},\dotsm, v_n^{\operatorname{BF}}$. We let $D_j^{\operatorname{DF}} = \deg(v^{\operatorname{DF}}_j)$ and $D_j^{\operatorname{BF}} = \deg(v^{\operatorname{BF}}_j)$ counted with multiplicity. Let $F^{\operatorname{DF}}$ and $F^{\operatorname{BF}}$ be the forests constructed from $M$ by removing backedges and replacing them with two leaves and let $X^{\operatorname{DF}}$ (resp. $X^{\operatorname{BF}}$) denote the depth-first (resp. component-by-component breadth-first) walk on $F^{\operatorname{DF}}$ (resp. $F^{\operatorname{BF}}$).
We write
\begin{equation*}
S^{\operatorname{DF}}(k) = \sum_{j=1}^k( D^{\operatorname{DF}}_j - 2),\qquad S^{\operatorname{BF}}(k) = \sum_{j=1}^k (D_j^{\operatorname{BF}} - 2).
\end{equation*} These walks appeared (with slightly different notation) in \eqref{eqn:swalks}; however, they are equal in distribution.
Let us now discuss the construction of the walk $X^{\operatorname{DF}}$ in \eqref{eqn:xwalks1} from the walk $S^{\operatorname{DF}}$. We start with $N^{\operatorname{DF}}(0) = X^{\operatorname{DF}}(0) = 0$, $\mathcal{M}^{\operatorname{DF}}(0) = \emptyset$ and $\tau^{\operatorname{DF}}(0) = 0$. The process $N^{\operatorname{DF}}$ counts the number of df backedges discovered in the graph at step $k$, and the set-valued process $\mathcal{M}^{\operatorname{DF}}$ keeps track of the marks corresponding to the df backedges. The process $\tau^{\operatorname{DF}}(k)$ is a time-change relating to the new leaves that will be included in the forest $F^{\operatorname{DF}}$. For $k\ge 0$:
\begin{itemize}
\item \textbf{New component of $F^{\operatorname{DF}}$ is discovered}\\If $X^{\operatorname{DF}}(k) = \min_{0\le i\le k-1} X^{\operatorname{DF}}(i) - 1$ or $k = 0$, then we have discovered a new component. We set $\tau^{\operatorname{DF}}(k+1) = \tau^{\operatorname{DF}}(k)+1$, $N^{\operatorname{DF}}(k+1) = N^{\operatorname{DF}}(k)$, $\mathcal{M}^{\operatorname{DF}}(k+1) = \mathcal{M}^{\operatorname{DF}}(k)$, and
\begin{equation*}
X^{\operatorname{DF}}(k+1) = X^{\operatorname{DF}}(k) + S^{\operatorname{DF}}(\tau^{\operatorname{DF}}(k)+1) - S^{\operatorname{DF}}(\tau^{\operatorname{DF}}(k)) +1.
\end{equation*}
\item \textbf{Determine if we start a back-edge or not}\\
If $X^{\operatorname{DF}}(k)> \min_{0\le i\le k-1} X^{\operatorname{DF}}(i) -1$ and $X^{\operatorname{DF}}(k)\notin \mathcal{M}^{\operatorname{DF}}(k)$ then the vertex $u_{k+1}^{\operatorname{DF}}$ in $F^{\operatorname{DF}}$ is not a new-leaf paired to a previously explored new-leaf. There is still a chance that $u_{k+1}^{\operatorname{DF}}$ is a new-leaf which is paired with an undiscovered new leaf.
\begin{itemize}
\item \textbf{The vertex $u_{k+1}^{\operatorname{DF}}$ is a new-leaf}\\
The vertex $u_{k+1}^{\operatorname{DF}}$ is a new leaf with probability
\begin{equation*}
\frac{\displaystyle X^{\operatorname{DF}}(k) - \min_{0\le i\le k} X^{\operatorname{DF}}(i) - \#\mathcal{M}^{\operatorname{DF}}(k)}{ \displaystyle X^{\operatorname{DF}}(k) - \min_{0\le i\le k} X^{\operatorname{DF}}(i) - \# \mathcal{M}^{\operatorname{DF}}(k) + \sum_{j = \tau^{\operatorname{DF}}(k)+1}^n D^{\operatorname{DF}}_j}.
\end{equation*} Above, the numerator represents the number of active half-edges in the corresponding exploration of the multigraph. We subtract the $\#\mathcal{M}^{\operatorname{DF}}(k)$ term because these represent $\#\mathcal{M}^{\operatorname{DF}}(k)$ new-leaves which would have already been killed at the corresponding step in the exploration of the multigraph. The denominator counts the total number of half-edges yet to be explored in the multigraph.
In this situation, let $\tau^{\operatorname{DF}}(k+1) = \tau^{\operatorname{DF}}(k)$. We also know that $u_{k+1}^{\operatorname{DF}}$ is a new leaf with no children and so we set $X^{\operatorname{DF}}(k+1) = X^{\operatorname{DF}}(k)-1$. Let $N^{\operatorname{DF}}(k+1) = N^{\operatorname{DF}}(k)+1$ and sample $U^{\operatorname{DF}}(k+1)$ uniformly from
\begin{equation*}
\left\{ \min_{0\le i\le k} X^{\operatorname{DF}}(i), \min_{0\le i\le k} X^{\operatorname{DF}}(i) +1,\dotsm, X^{\operatorname{DF}}(k)-1\right\} \setminus \mathcal{M}^{\operatorname{DF}}(k).
\end{equation*} This new leaf $u_{k+1}^{\operatorname{DF}}$ will be connected to the vertex $U^{\operatorname{DF}}(k+1)$.
Lastly, set $\mathcal{M}^{\operatorname{DF}}(k+1) = \mathcal{M}^{\operatorname{DF}}(k)\cup\{U^{\operatorname{DF}}(k+1)\}$.
\item \textbf{The vertex $u_{k+1}^{\operatorname{DF}}$ is part of the original multigraph}\\ With the complement probability the vertex $u_{k+1}^{\operatorname{DF}}$ is not a new leaf. In which case set $\tau^{\operatorname{DF}}(k+1) = \tau^{\operatorname{DF}}(k)+1$,
\begin{equation*}
X^{\operatorname{DF}}(k+1) = X^{\operatorname{DF}}(k) + S^{\operatorname{DF}}(\tau^{\operatorname{DF}}(k) +1 ) - S^{\operatorname{DF}}(\tau^{\operatorname{DF}}(k)),
\end{equation*} $N^{\operatorname{DF}}(k+1) = N^{\operatorname{DF}}(k)$ and $\mathcal{M}^{\operatorname{DF}}(k+1) = \mathcal{M}^{\operatorname{DF}}(k)$.
\end{itemize}
\item \textbf{Ending a back-edge}\\
If $X^{\operatorname{DF}}(k) > \min_{0\le i\le k-1} X^{\operatorname{DF}}(i)-1$ and $X^{\operatorname{DF}}(k)\in \mathcal{M}^{\operatorname{DF}}(k)$ then the vertex $u_{k+1}^{\operatorname{DF}}$ is a new-leaf which is connected to a previously discovered new leaf. We let $\tau^{\operatorname{DF}}(k+1) = \tau^{\operatorname{DF}}(k)$, $X^{\operatorname{DF}}(k+1) = X^{\operatorname{DF}}(k) -1$, $N^{\operatorname{DF}}(k+1) = N^{\operatorname{DF}}(k)$ and $\mathcal{M}^{\operatorname{DF}}(k+1)= \mathcal{M}^{\operatorname{DF}}(k) \setminus\{X^{\operatorname{DF}}(k)\}$.
\end{itemize}
We note that the above construction is the discretized version of creating the continuum random graphs in Section \ref{sec:realGraphs}. The marks in the main body of this work were described by $\mathscr{P}^{\operatorname{DF}}_{n,k}$ in the $k^\text{th}$ component of the graph. These marks are the time-shifted and time-scaled pairs $(i+1,U(i+1))$ whenever $N(i+1) - N(i) = 1$.
Of course, we can do the same process replacing every ${\operatorname{DF}}$ above with a ${\operatorname{BF}}$ and see that there is a distributionally equivalent way of constructing the walk $X^{\operatorname{BF}}$ from the walk $S^{\operatorname{BF}}$. Since the breadth-first and depth-first constructed multigraphs are equal in distribution, and by the first part of Lemma \ref{lem:convBFDFequal}, which has a trivial proof, we can use the above construction to create a coupling between the forests $F^{\operatorname{DF}}$ and $F^{\operatorname{BF}}$ where the depth-first walk on the former $X^{\operatorname{DF}}$ is the same as the breadth-first walk on the latter $X^{\operatorname{BF}}$. Moreover, this shows that a df-backedge between $u_j^{\operatorname{DF}}$ and $u_i^{\operatorname{DF}}$ in this coupling corresponds precisely to a bf-backedge between $u_j^{\operatorname{BF}}$ and $u_i^{\operatorname{BF}}$. An analogous symmetry was used in \cite{AMP.04} to describe the height profile of inhomogeneous continuum random trees.
\end{appendix}
\end{document} |
\begin{document}
\title{Generation of Total Angular Momentum Eigenstates in Remote Qubits}
\author{A. Maser} \affiliation{Institut f\"ur Optik, Information und Photonik, Max-Planck Forschungsgruppe,\\Universit\"at Erlangen-N\"urnberg, 91058 Erlangen, Germany}
\author{U. Schilling} \affiliation{Institut f\"ur Optik, Information und Photonik, Max-Planck Forschungsgruppe,\\Universit\"at Erlangen-N\"urnberg, 91058 Erlangen, Germany}
\author{T. Bastin} \affiliation{Institut de Physique Nucl\'eaire, Atomique et de Spectroscopie, Universit\'e de Li\`ege, 4000 Li\`ege, Belgium}
\author{E. Solano} \affiliation{Departamento de Qu\'imica F\'isica, Universidad del Pa\'is Vasco - Euskal Herriko Unibertsitatea, Apartado 644, 48080 Bilbao, Spain}
\author{C. Thiel} \email{cthiel@optik.uni-erlangen.de} \homepage{http://www.ioip.mpg.de/jvz/} \affiliation{Institut f\"ur Optik, Information und Photonik, Max-Planck Forschungsgruppe,\\Universit\"at Erlangen-N\"urnberg, 91058 Erlangen, Germany}
\author{J. von Zanthier} \affiliation{Institut f\"ur Optik, Information und Photonik, Max-Planck Forschungsgruppe,\\Universit\"at Erlangen-N\"urnberg, 91058 Erlangen, Germany}
\date{\today}
\begin{abstract} We propose a scheme enabling the universal coupling of angular momentum of $N$ remote noninteracting qubits using linear optical tools only. Our system consists of $N$ single-photon emitters in a $\Lambda$-configuration that are entangled among their long-lived ground-state qubits through suitably designed measurements of the emitted photons. In this manner, we present an experimentally feasible algorithm that is able to generate any of the $2^N$ symmetric and nonsymmetric total angular momentum eigenstates spanning the Hilbert space of the $N$-qubit compound. \end{abstract}
\pacs{42.50.Dv,42.50.Tx,37.10.-x,03.67.-a}
\maketitle
\section{Introduction}
Since the celebrated article by Einstein, Podolsky, and Rosen in 1935~\cite{Einstein:1935:a}, it is commonly assumed that the phenomenon of entanglement between different systems occurs if the systems {\em had previously interacted with each other}. Indeed, for most experiments generating entangled quantum states interactions such as non-linear effects~\cite{Kwiat:1995:a}, atomic collisions~\cite{Osnaghi:2001:a}, Coulomb coupling~\cite{Leibfried:2005:a,Haeffner:2005:a}, or atom-photon interfaces~\cite{Wilk:2007:a}, are a prerequisite. Recent proposals considered that entanglement between systems that never interacted before can be created as a consequence of measuring photons propagating along multiple quantum paths, leaving the emitters in particular entangled states~\cite{Cabrillo:1999:a,Bose:1999:a,Skornia:2001:a,Duan:2001:a,Feng:2003:a,Duan:2003:a,Simon:2003:a,Thiel:2007:a,Bastin:2007:a}. Since then, several experiments generating {\em entanglement at a distance via projection} have been realized, first between disordered clouds of atoms~\cite{Julsgaard:2001:a,Chou:2005:a,Matsukevich:2006:a} and very recently even between single trapped atoms~\cite{Moehring:2007:a}.
On the other hand, the coupling of angular momentum is commonly utilized to account for the interaction between particles in order to retrieve the corresponding energy eigenstates and eigenvalues of the total system. This coupling of angular momentum has been fruitfully employed in fields as disparate as solid state, atomic or high-energy physics, to account for the interaction between electric or magnetic multipoles or spins of quarks, respectively~\cite{Wigner:1959}. Here again, it seems counter-intuitive that noninteracting particles, such as remotely placed spin-1/2 particles, will couple to form arbitrary total angular momentum eigenstates as if an interaction were present, including highly and weakly entangled quantum states.
In this article, we propose a method to mimic the universal coupling of angular momentum of $N$ remote noninteracting spin-1/2 particles (qubits) in an experimentally operational manner. Hereby, an arbitrary number of distant particles can be entangled in their two-level ground states providing long-lived $N$-qubit states via the use of suitably designed projective measurements. In reference to the algorithm describing the coupling of angular momentum of individual spin-1/2 particles, our method couples successively remote qubit states to a multi-qubit compound system. Thereby, it offers access to the entire coupled basis of an $N$-qubit compound system of dimension $2^N$, i.e., to any of the $2^N$ symmetric and nonsymmetric total angular momentum eigenstates.
\section{Description of the physical system}
For $N$ spin-1/2 particles, the total angular momentum eigenstates, defined as simultaneous eigenstates of the square of the total spin operator $\hat{\bf S}^2$ and its $z$-component $\hat{S}_z$, are commonly denoted by $|S_N;\!m_N\rangle$, with the corresponding eigenvalues $S_N(S_N+1)\hbar^2$ and $m_N\hbar$~\cite{Dicke:1954:a,Mandel:1995:a}. However, since the denomination $|S_N;\!m_N\rangle$ generally characterizes more than one quantum state, we will extend the notation of an $N$-qubit state by its coupling history, i.e.~by adding the values of $S_1, S_2, ..., S_{N-1}$ to those of $S_N$ and $m_N$. A single qubit state has $S_1=\frac{1}{2}$, a two-qubit system can either have $S_2=0$ or $S_2=1$, a three-qubit system $S_3=\frac{1}{2}$ or $S_3=\frac{3}{2}$, and so on. Including the coupling history we thus get the following notation $|S_1,\!S_2,...,\!S_N;\!m_N\rangle$ which describes a particular angular momentum eigenstate unambiguously.
\begin{figure}
\caption{ (Color online) Experimental setup for the angular momentum coupling of two atoms via projective measurements using optical fibers. In a successful measurement cycle, each atom emits a single photon and each detector registers exactly one photon. Note that the detectors cannot distinguish which of the atoms emitted a registered photon.}
\label{fig1}
\end{figure}
In the following, we consider a system consisting of $N$ indistinguishable single-photon emitters, e.g.~atoms, with a $\Lambda$-configuration, see Fig.~\ref{fig1}. We denote the two ground levels of the $\Lambda$-configured atoms as $|+\rangle$ and $|-\rangle$ or, using the notation introduced before, $|\frac{1}{2};\!+\frac{1}{2}\rangle\equiv|+\rangle$ and $|\frac{1}{2};\!-\frac{1}{2}\rangle\equiv|-\rangle$. Initially, all atoms are excited by a laser $\pi$ pulse towards the excited state $|e\rangle$ and subsequently decay by spontaneously emitting $N$ photons that are collected by single-mode optical fibers~\cite{Moehring:2007:a,Volz:2006:a} and transmitted to $N$ different detectors. Since each atom is connected via optical fibers to several detectors, a single photon can travel on several alternative, yet equally probable paths to be eventually recorded by one detector. After a successful measurement, where all $N$ photons have been recorded at the $N$ detectors so that each detector registers exactly one photon, it is thus impossible to determine along {\em which way} each of the $N$ photons propagated. This may cause quantum interferences of $N$th order which can be fruitfully employed to engineer particular quantum states of the emitters, e.g., to generate families of entangled states symmetric under permutation of their qubits~\cite{Thiel:2007:a,Bastin:2007:a}. Here, we will consider the generation of a more general class of quantum states, including symmetric {\em and} nonsymmetric states. By mimicking the process of spin-spin coupling, we will demonstrate how to generate any quantum state belonging to the coupled basis of an $N$-qubit compound system.
\section{Measurement based preparation of total angular momentum eigenstates}
Let us start by looking at the most basic process of our system. If one single excited atom with a $\Lambda$-configuration emits a photon, the atomic ground state and the photonic polarization states cannot be described independently. The excited state $|e\rangle$ can decay along two possible channels, $|e\rangle\rightarrow|+\rangle$ and $|e\rangle\rightarrow|-\rangle$, accompanied by the spontaneous emission of a $\sigma^-$ or a $\sigma^+$-polarized photon, respectively (consider e.g.~Zeeman sub-levels). A single decaying atom thus forms an entangled state between the polarization state of the emitted photon and the corresponding ground state of the de-excited atom~\cite{Volz:2006:a,Blinov:2004:a}. This correlation implies that the state of the atom is projected onto $|+\rangle$ ($|-\rangle$) if the emitted photon is registered by a detector with a $\sigma^-$ ($\sigma^+$) polarized filter in front.
\subsection{Preparation of $2$-qubit states}
In a next step, we consider the system shown in Fig.~\ref{fig1} where two atoms with a $\Lambda$-configuration are initially excited and subsequent measurements on the spontaneously emitted photons are performed at two different detectors. Again, if a polarization sensitive measurement is performed on the two emitted photons using two different polarization filters in front of the detectors, the state of the two atoms is projected due to the measurement. However, if the polarization of both photons is measured along orthogonal directions, the state of the atoms will be projected onto a superposition of both ground states, since it is impossible to determine which atom emitted the photon travelling to the first or the second detector by the information obtained in the measurement process. With each qubit having a total spin of $\frac{1}{2}$, a two-qubit system can have a total spin of either $1$ or $0$ and thus defines four angular momentum eigenstates given by: \begin{tabular*}{\textwidth}{cccccc}\\
spin-1 triplet & $|S_1,\!S_2;\!m\rangle$ && spin-0 singlet & $|S_1,\!S_2;\!m\rangle$ &
\\
$|\!\!+\!+\!\rangle$ & $|\frac{1}{2},\!1;\!+1\rangle$ &&&&
\\
$\frac{1}{\sqrt{2}}(|\!\!+\!-\!\rangle\!+\!|\!\!-\!+\!\rangle)$ & $|\frac{1}{2},\!1;\!0\rangle$ && $\frac{1}{\sqrt{2}}(|\!\!+\!-\!\rangle\!-\!|\!\!-\!+\!\rangle)$ & $|\frac{1}{2},\!0;\!0\rangle$
\\
$|\!\!-\!-\!\rangle$ & $|\frac{1}{2},\!1;\!-1\rangle$ &&&&
\end{tabular*}
The spin-1 triplet can be easily generated with the setup shown in Fig.~\ref{fig1} by choosing the polarization filters accordingly: For example, if both filters are oriented in such a way that only $\sigma^-$ ($\sigma^+$) polarized photons are transmitted, the emitters are projected onto the state $|\!\!+\!+\rangle$ ($|\!\!-\!-\rangle$); if the filters are orthogonal, i.e.~one is transmitting $\sigma^-$ and one $\sigma^+$ polarized photons, the system is projected onto the state $|\frac{1}{2},\!1;\!0\rangle$, since any information along {\em which way} the photons propagated is erased by the system. Finally, in order to generate the singlet state $|\frac{1}{2},\!0;\!0\rangle$, we may introduce an optical phase shift of $\pi$ in one of the optical paths shown in Fig.~\ref{fig1}, e.g., by extending or shortening the length of the optical path by $\frac{\lambda}{2}$. The generation of the four two-particle total angular momentum eigenstates with the system shown in Fig.~\ref{fig1} thus requires only the variation of two polarizer orientations and, in case of the singlet state, to introduce an optical phase shift of $\pi$.
\subsection{Preparation of $3$-qubit states}
With the two-qubit angular momentum eigenstates at hand, we can next couple an additional qubit in order to access the eight possible three-qubit total angular momentum eigenstates. In the following, we will exemplify our method for the three-qubit state $|\!\frac{1}{2},\!1,\!\frac{1}{2};\!+\frac{1}{2}\rangle$ given by \begin{eqnarray}\label{3Dicke}\textstyle
|\frac{1}{2},\!1,\!\frac{1}{2};\!+\frac{1}{2}\rangle&=&\frac{1}{\sqrt{6}}\,(2|\!+\!+-\rangle-|\!+\!-+\rangle-|\!-\!++\rangle)\\
&=&\frac{\sqrt{2}}{\sqrt{3}}\,|\frac{1}{2},\!1;+\!1\rangle\otimes|-\rangle-\frac{1}{\sqrt{3}}\,|\frac{1}{2},\!1;\!0\rangle\otimes|+\rangle\nonumber, \end{eqnarray}
where the last line in Eq.~(\ref{3Dicke}) exhibits the coupling history: In order to generate the three-qubit state $|\frac{1}{2},\!1,\!\frac{1}{2};\!+\frac{1}{2}\rangle$, the two-qubit spin-1 states $|\frac{1}{2},\!1;\!+1\rangle$ and $|\frac{1}{2},\!1;\!0\rangle$ are coupled with $|-\rangle$ and $|+\rangle$, respectively. Thereby, the prefactors $\frac{\sqrt{2}}{\sqrt{3}}$ and $-\frac{1}{\sqrt{3}}$ represent the corresponding Clebsch-Gordan coefficients as a result of changing the basis~\cite{Clebsche:2002:a}. In the following, we will make use of our knowledge of how to generate the states $|\frac{1}{2},\!1;\!+1\rangle$ and $|\frac{1}{2},\!1;\!0\rangle$ in order to generate the desired state $|\frac{1}{2},\!1,\!\frac{1}{2};\!+\frac{1}{2}\rangle$. Therefore, we have to add a third qubit and combine the two systems generating the two individual states accordingly in one setup.
\begin{figure}
\caption{(Color online) Left: Extension of the setup shown in Fig.~\ref{fig1} capable of generating the state $|\frac{1}{2},\!1;\!0\rangle\otimes|+\rangle$. Right: configuration for the generation of the state $|\frac{1}{2},\!1;\!1\rangle\otimes|-\rangle$.}
\label{fig2}
\end{figure}
The two setups individually capable of generating the three-qubit states $|\frac{1}{2},\!1;\!+1\rangle\otimes|-\rangle$ and $|\frac{1}{2},\!1;\!0\rangle\otimes|+\rangle$ are shown in Fig.~\ref{fig2}. The additional qubit is not yet coupled to the two-qubit system, i.e.~it is simply projected either onto the state $|+\rangle$ (Fig.~\ref{fig2}, left) or $|-\rangle$ (Fig.~\ref{fig2}, right), where the two-qubit systems are projected in the same way as explained in Fig.~\ref{fig1}. In order to generate the three-qubit state $|\frac{1}{2},\!1,\!\frac{1}{2};\!\frac{1}{2}\rangle$, we now have to superpose these two possibilities. The combined system is shown in Fig.~\ref{fig3}. We will explain the underlying physics by considering the possible scenarios when detecting the photon emitted by the additional third atom.
\begin{figure}
\caption{(Color online) Setup for the generation of the state $2|{+\!+\!-}\rangle-|{+\!-\!+}\rangle-|{-\!+\!+}\rangle$. The blue dashed lines indicate the quantum path which leads to $2|{+\!+\!-}\rangle$, whereas the red solid labeled path leads to $-|{+\!-\!+}\rangle-|{-\!+\!+}\rangle$. Please note that the different red solid and blue dashed lines leading from atom 1 (2) to detector~$D_1$ are drawn to indicate the different quantum paths only. Physically, there is only one fiber from atom 1 (2) to detector~$D_1$.}
\label{fig3}
\end{figure}
In a successful measurement cycle, the three emitted photons are detected at three different detectors. Thus, there are only two possible situations due to a measurement of a photon emitted by the third atom: \begin{itemize}
\item[I.] (red solid lines) The emitted photon is registered at detector~$D_3$ which has a $\sigma^-$ polarizing filter in front. In this case, emitter 3 is projected onto the state $|+\rangle$ and emitter 1 and 2 are left in the setup generating the state $|\frac{1}{2},\!1;\!0\rangle\equiv\frac{1}{\sqrt{2}}(|\!+\!-\rangle+|\!-\!+\rangle)$; as discussed in Fig.~\ref{fig2} (left).
\item[II.] (blue dashed lines) The emitted photon is registered at detector~$D_2$ which has a $\sigma^+$ polarizing filter in front. In this case, emitter 3 is projected onto the state $|-\rangle$ and emitter 1 and 2 are left in the setup generating the state $|\frac{1}{2},\!1;\!1\rangle\equiv|\!+\!+\rangle$; as discussed in Fig.~\ref{fig2} (right). \end{itemize}
In other words, the third emitter acts as a switch between the two possible quantum paths: with equal probabilities, the system is either projected onto the state $2|++-\rangle$ or onto the state $|+-+\rangle+|-++\rangle$. Note that the relative factor of two results from using an equal number of path ways (optical fibers) in both cases. In addition, we can modify the path where a photon emitted by the third atom is registered at detector~$D_3$ by implementing a relative optical phase shift of $\pi$ (c.f.~Fig.~\ref{fig3}) to obtain a minus sign for scenario II. relative to scenario I. In this case, the final state projected by the setup shown in Fig.~\ref{fig3} corresponds to the three-qubit state $|\frac{1}{2},\!1,\!\frac{1}{2};\!\frac{1}{2}\rangle$ of Eq.~(\ref{3Dicke}).
Reconsidering the state $|\frac{1}{2},\!1,\!\frac{1}{2};\!\frac{1}{2}\rangle$ in terms of our extended notation, we coupled two spin-1/2 particles to form a spin-1 compound state that was coupled again with a spin-1/2 particle to form a three-particle spin-1/2 compound state. Similarly, we could have coupled the spin-1 compound state with an additional qubit in such a way that we obtain the symmetric state $|\frac{1}{2},\!1,\!\frac{3}{2};\!\frac{1}{2}\rangle$, also known as W-state~\cite{Duer:2000:a}. For this case, we have to change the setup shown in Fig.~\ref{fig3} slightly: we remove the optical phase shift of $\pi$ and connect the third emitter also with detector~$D_1$. In this case, the totally symmetric setup generates a W-state (c.f.~\cite{Thiel:2007:a}).
\begin{figure}
\caption{ (Color online) Experimental setup for the spin-spin coupling of $N$ remote atoms via projective measurements.}
\label{fig4}
\end{figure}
\subsection{Preparation of $N$-qubit states}
Finally, let us outline how to engineer the coupling of angular momentum of $N$ remote qubits to form an arbitrary $N$-qubit total angular momentum eigenstate. In order to generate the $N$-qubit state $|{S_1,S_2,S_3,...S_N;m_s}\rangle$ we have to \begin{itemize} \item[1.] set up $\frac{N}{2}+m_s$ ($\frac{N}{2}-m_s$) detectors with $\sigma^-$ ($\sigma^+$) polarized filters in front. Hereby, we connect the first emitter with optical fibers to all $N$ detectors. \item[2.] check for each particle $i$ beginning with $i=2$ whether $S_i>S_{i-1}$ or $S_i<S_{i-1}$. If \begin{itemize} \item[a.] $S_i>S_{i-1}$; we have to connect the particle with optical fibers to all detectors except those which are mentioned in case~b.~below. \item[b.] $S_i<S_{i-1}$; we have to connect the particle with optical fibers to one detector with a $\sigma^-$ polarizer and to one with a $\sigma^+$ polarizer. The optical fiber leading to the $\sigma^-$ polarizer should induce a relative optical phase shift of $\pi$ and those two detectors should not be linked to any other subsequent particle. \end{itemize} \end{itemize}
If one wants to create a particular total angular momentum eigenstate $|{S_1,S_2,S_3,...S_N;m_s}\rangle$, the setup is determined by the total spins $S_1,S_2,S_3,...S_N$ obtained by successively coupling $N$ spin-1/2 particles. Hereby, the spin number $m_s$ determines the fraction of $\sigma^-$ and $\sigma^+$ polarized filters used in the setup (s.~Fig.~\ref{fig4}).
As examples, let us apply this algorithm for the two three-qubit total angular momentum eigenstates $|\frac{1}{2},\!1,\!\frac{1}{2};\!\frac{1}{2}\rangle$ and $|\frac{1}{2},\!1,\!\frac{3}{2};\!\frac{1}{2}\rangle$ discussed above. Since $m_s=\frac{1}{2}$ for both states, we use two detectors with $\sigma^-$ polarized filters and one with a $\sigma^+$ polarized filter. Further, in both cases we have $S_2>S_1$ which implies that the first and the second emitter are connected to all three detectors. For the state $|\frac{1}{2},\!1,\!\frac{1}{2};\!\frac{1}{2}\rangle$, we find $S_3<S_2$. Therefore, we connect the third emitter only to two detectors with $\sigma^-$ and $\sigma^+$ polarized filters in front, respectively, e.g.~detector~$D_2$ and~$D_3$, and we introduce an optical phase shift of $\pi$ for the path leading from the third emitter to detector~$D_3$. Summarizing we obtain the setup shown in Fig.~\ref{fig3} as postulated. For the state $|\frac{1}{2},\!1,\!\frac{3}{2};\!\frac{1}{2}\rangle$, we find $S_3>S_2$. Here, we connect the third emitter to all three detectors. In this case, as mentioned above, the setup will generate the symmetric W-state~\cite{Thiel:2007:a}.
The method proposed here relies on the probabilistic scattering of photons. Thereby, the efficiency of generating a particular $N$-qubit total angular momentum eigenstate decreases with increasing number of qubits $N$. If the probability to find a single photon in an angular detection window $\Delta\Omega$ is given by $P(\Delta\Omega)$, including fiber coupling and detection efficiencies, the corresponding $N$-fold counting rate is found to be $P^N(\Delta\Omega)$. This might limit the scalability of our scheme (see the discussion in~\cite{Thiel:2007:a}) as is indeed the case with other experiments observing entangled atoms~\cite{Blinov:2004:a,Volz:2006:a,Moehring:2007:a}.
\section{Conclusions}
In conclusion, we considered a system of $N$ remote noninteracting single-photon emitters with a $\Lambda$-configuration. By mimicking the coupling of angular momentum, we showed that it is possible to engineer any of the $2^N$ total angular momentum eigenstates in the long-lived ground-state qubits. Using linear optical tools only, our method employs the detection of all $N$ photons scattered from the $N$ emitters at $N$ polarization sensitive detectors. Thereby, it offers access to any of the $2^N$ states of the coupled basis of an $N$-qubit compound system. Using projective measurements we thereby form highly and weakly entangled quantum states even though no interaction between the qubits is present.
\section{Acknowledgment}
U.S. acknowledges financial support from the Elite Network of Bavaria. E.S. acknowledges financial support from Ikerbasque Foundation, EU EuroSQIP project, and UPV-EHU Grant GIU07/40. C.T.~and J.v.Z.~gratefully acknowledge financial support by the Staedtler foundation.
\end{document} |
\begin{document}
\title[Twisted Gauss sums and totally isotropic subspaces] {Higher level quadratically twisted Gauss sums and totally isotropic subspaces}
\author{Lynne Walling} \address{School of Mathematics, University of Bristol, University Walk, Clifton, Bristol BS8 1TW, United Kingdom; phone +44 (0)117 331-5245} \email{l.walling@bristol.ac.uk}
\keywords{Gauss sums, quadratic forms}
\begin{abstract} We consider a generalized Gauss sum supported on matrices over a number field. We evaluate this Gauss sum and relate it to the number of totally isotropic subspaces of related quadratic spaces. Then we consider a further generalization of such a Gauss sum, realizing its value in terms of numbers of totally isotropic subspaces of related quadratic spaces. \end{abstract}
\maketitle \def\arabic{footnote}{} \footnote{2010 {\it Mathematics Subject Classification}: Primary 11L05, 11E08 } \def\arabic{footnote}{\arabic{footnote}}
\section{Introduction}
Gauss sums and their numerous generalizations are ubiquitous in number theory. When studying the action of Hecke operators on half-integral weight Hilbert-Siegel modular forms, the generalized Gauss sum we encounter is defined as follows. Let ${\mathbb K}$ be a number field with $\mathcal O$ its ring of integers, ${\mathfrak P}$ a nondyadic prime ideal in $\mathcal O$, and $\mathbb F=\mathcal O/{\mathfrak P}$; we fix $\rho\in\partial^{-1}{\mathfrak P}^{-1}$ so that $\rho\mathcal O_{{\mathfrak P}}=\partial^{-1}{\mathfrak P}^{-1}\mathcal O_{{\mathfrak P}}$ (where $\partial$ is the different of ${\mathbb K}$). Then for $T\in\mathbb F^{n,n}_{\sym}$ (meaning that $T$ is a symmetric $n\times n$ matrix over $\mathbb F$), we set $$\mathcal G^*_T({\mathfrak P})=\sum_{S\in\mathbb F^{n,n}_{\sym}} \left(\frac{\det S}{{\mathfrak P}}\right)\e\{2TS\rho\}$$ where $\sigma$ denotes the matrix trace map, $\e\{*\}=\exp(\pi i Tr^{{\mathbb K}}_{\mathbb Q}(\sigma(*)))$, and $\left(\frac{*}{{\mathfrak P}}\right)$ is the Legendre symbol. One sees that for $M,N\in\mathcal O^{n,n}_{\sym}$ with $M\equiv N\ (\text{mod }{\mathfrak P})$, we have $\e\{2M\rho\}=\e\{2N\rho\}$; consequently, $\mathcal G^*_T({\mathfrak P})$ is well-defined, although it is dependent on the choice of $\rho$.
For our application to half-integral weight Hecke operators, we need to relate these Gauss sums to $R^*(T\perp\big<1\big>,0_a)$, which is the number of $a$-dimensional totally isotropic subspaces of the dimension $n+1$ $\mathbb F$-space $V$ whose quadratic form is given by $T\perp\big<1\big>$. (A subspace $W$ of $V$ is totally isotropic if the quadratic form restricted to $W$ is 0, and $A\perp B$ denotes the block-diagonal matrix $\diag(A,B)$.) In Theorem 1.1 we evaluate $\mathcal G^*_T({\mathfrak P})$, and in Corollary 1.2 we give $\mathcal G^*_T({\mathfrak P})$ in terms of $R^*(T\perp\big<1\big>,0_a)$.
To state the theorem, set $\varepsilon=\left(\frac{-1}{{\mathfrak P}}\right),$ and fix $\omega\in\mathbb F$ so that $\omega$ is not a square in $\mathbb F$; set $J_n=I_{n-1}\perp\big<\omega\big>$. For $T,S\in\mathbb F^{n,n}_{\sym}$, write $T\sim S$ if there is some $G\in GL_n(\mathbb F)$ so that $T=\,^tGSG$. Note that with $d=\rank T$, either $T\sim I_d\perp 0_{n-d}$ or $T\sim J_d\perp 0_{n-d}$. With this notation, we have the following.
\begin{thm} Take $T\in\mathbb F^{n,n}_{\sym}$ where $n\in\mathbb Z_+$. Suppose that $0\le d\le n$ and $T\sim I_d\perp 0_{n-d}$ or $T\sim J_d\perp 0_{n-d}$. Take $c$ so that $d=2c$ or $d=2c+1$. \begin{enumerate} \item[(a)] Suppose that $n=2m$. Then with $N({\mathfrak P})$ the norm of ${\mathfrak P}$, $$\mathcal G^*_T({\mathfrak P})=(-1)^c\varepsilon^m N({\mathfrak P})^{m^2}\cdot \prod_{i=1}^{m-c}(N({\mathfrak P})^{2i-1}-1).$$ \item[(b)] Suppose that $n=2m+1$. When $d=2c$, $\mathcal G^*_T({\mathfrak P})=0$. When $d=2c+1$, $$\mathcal G^*_T({\mathfrak P})=(-1)^c\varepsilon^{m+c}N({\mathfrak P})^{m^2+2m-c}\,\mathcal G^*_1({\mathfrak P})\cdot \prod_{i=1}^{m-c}(N({\mathfrak P})^{2i-1}-1)$$ if $T\sim I_d\perp 0_{n-d}$, and $\mathcal G^*_T({\mathfrak P})=-\mathcal G^*_{I_d\perp 0_{n-d}}({\mathfrak P})$ if $T\sim J_d\perp 0_{n-d}$. \end{enumerate} \end{thm}
\begin{cor} Take $T\in\mathbb F^{n,n}_{\sym}$ where $n\in\mathbb Z_+$. Let $V$ be the dimension $n+1$ space over $\mathbb F$ with quadratic form given by $T\perp\big<1\big>$, and let $R^*(T\perp\big<1\big>,0_a)$ be the number of $a$-dimensional totally isotropic subspaces of $V$. We have $$\left(\mathcal G_1^*({\mathfrak P})\right)^n\mathcal G^*_T({\mathfrak P}) =\sum_{a=0}^n (-1)^{n+a}N({\mathfrak P})^{n(n+1)/2+a(a-n)} R^*(T\perp\big<1\big>,0_a).$$ \end{cor}
To prove Theorem 1.1, we perform a deconstruction to reduce $\mathcal G^*_T({\mathfrak P})$ to a sum in terms of Gauss sums $\mathcal G^*_Y({\mathfrak P})$ where the $Y$ are smaller than $T$. For this we repeatedly use the elementary fact that $\sigma(AB)=\sigma(BA)$, knowledge of representation numbers over finite fields, and elementary combinatorial methods. Then using induction, we prove Theorem 1.1; Corollary 1.2 then follows from Lemma 3.1 of \cite{half-int-aps}.
In Proposition 4.1, we consider the following generalized Gauss sum: with $T\in\mathbb F^{n,n}_{\sym}$ and $0\le r\le n$, set
$$\mathcal G^*_T({\mathfrak P};r)= \sum_{S\sim I_r\perp 0_{n-r}}\e\{2TS\rho\}-\sum_{S\sim J_r\perp 0_{n-r}}\e\{2TS\rho\}$$ (so for $r=n$, this is $\mathcal G^*_T({\mathfrak P})$). We again deconstruct $\mathcal G^*_T({\mathfrak P};r)$ as a sum in terms of Gauss sums $\mathcal G^*_Y({\mathfrak P})$ with the $Y$ smaller than $T$, and then from this and Theorem 1.1, we describe $\mathcal G^*_T({\mathfrak P};r)$ in terms of numbers of totally isotropic subspaces of quadratic spaces related to $T$.
It is important for us to note that in \cite{S}, Saito studies analogues of these Gauss sums over finite fields, with an interest to applications to twists of Siegel modular forms. Although his main interest is in twists by the quadratic and the trivial characters, he considers twists by all characters, making his arguments more complicated than ours. We note that Theorem 1.3 \cite{S} includes the results of our Theorem 1.1. Saito also considers finite field analogues of the Gauss sums $\mathcal G^*_T({\mathfrak P};r)$. He develops relations between these Gauss sums, some of which are quite complicated. In Proposition 4.1 (a), we present a simple relation very similar to his relation in Proposition 1.12 \cite{S}; then in Proposition 4.1 (b) we present formulas for these Gauss sums in terms of numbers of totally isotropic subspaces. The value of this paper is to present an approach simpler than that of \cite{S}, demonstrating our deconstruction technique, and to relate these Gauss sums to representations of zeros.
Note that it is quite easy to modify our techniques to
generalized Gauss sums twisted by the trivial character, and to Gauss sums over a finite field $\mathbb F_q$ with odd characteristic $p$ where
$\e\{*\}$ is replaced by $\exp(\pi i Tr^{\mathbb F_q}_{\mathbb F_p}(\sigma(*))/p).$
\section{Notation}
Besides the notation given in the introduction, we define the following.
For $t,s\in \mathbb Z_+$ with $s\le t$, and $X\in\mathbb F^{t,t}_{\sym}$, $Y\in\mathbb F^{s,s}_{\sym}$, define the representation number $r(X,Y)$ to be $$r(X,Y)=\#\{C\in\mathbb F^{t,s}:\ ^tCXC=Y\ \},$$ and define the primitive representation number $r^*(X,Y)$ to be $$r^*(X,Y)=\#\{C\in\mathbb F^{t,s}:\ ^tCXC=Y,\ \rank C=s\ \}.$$ Let $o(X)$ denote the order of the orthogonal group of $X$; so $o(X)=r^*(X,X).$ We make great use of the following elementary functions, that help us encode formulas involving representation numbers. \begin{align*} \boldsymbol \mu(t,s)&=\prod_{i=0}^{s-1}(N({\mathfrak P})^{t-i}-1),\ \boldsymbol \delta(t,s)=\prod_{i=0}^{s-1}(N({\mathfrak P})^{t-i}+1),\\ \boldsymbol \beta(t,s)&=\frac{\boldsymbol \mu(t,s)}{\boldsymbol \mu(s,s)},\ \boldsymbol \nu(t,s)=\prod_{i=s}^{t-1}(N({\mathfrak P})^t-N({\mathfrak P})^i),\ \boldsymbol \gamma(t,s)=\frac{\boldsymbol \mu\boldsymbol \delta(t,s)}{\boldsymbol \mu\boldsymbol \delta(s,s)}. \end{align*} We agree that when $s=0$, the value of any of these functions is 1; when $s<0$, we agree that $\boldsymbol \beta(t,s)=0$. Note that $\boldsymbol \beta(t,s)$ is the number of $s$-dimensional subspaces of a $t$-dimensional space over $\mathbb F$, and $\boldsymbol \nu(t,0)$ is the number of bases for a $t$-dimensional space. Finally, for $d\in\mathbb Z_+$ and $i\in\mathbb Z$ with $0\le i\le d$, we set $U_{d,i}=I_i\perp 0_{d-i}$ and $\overline U_{d,i}=J_i\perp 0_{d-i}.$
\section{Proofs of Theorem 1.1 and Corollary 1.2}
We begin by proving Theorem 1.1. As ${\mathfrak P}$ is fixed, in this section we write $\mathcal G^*_T$ for $\mathcal G^*_T({\mathfrak P})$.
First notice that $$\mathcal G^*_{0_n}=\sum_{Y\sim I_n}1-\sum_{Y\sim J_n}1
=\frac{|GL_n(\mathbb F)|}{o(I_n)}-\frac{|GL_n(\mathbb F)|}{o(J_n)};$$ so using Lemma 5.1, when $n$ is odd we get $\mathcal G^*_{0_n}=0$, and when $n=2m$ we get $$\mathcal G^*_{0_n}=\varepsilon^mN({\mathfrak P})^{m^2}\frac{\boldsymbol \mu(2m,2m)}{\boldsymbol \mu\boldsymbol \delta(m,m)}.$$
For the rest of this section, take $d$ so that $0<d<n$. With $G\in GL_n(\mathbb F)$, we have $$\e\{2\,^tGI_nGU_{n,d}\cdot\rho\} =\e\{2Y'\rho\},\ \e\{2\,^tGI_nG\overline U_{n,d}\cdot\rho\} =\e\{2Y'J_d\rho\}$$ where $Y'$
is the upper left block of $^tGI_nG$; similarly, $$\e\{2\,^tGJ_nGU_{n,d}\cdot\rho\} =\e\{2Y'\rho\},\ \e\{2\,^tGJ_nG\overline U_{n,d}\cdot\rho\} =\e\{2Y'J_d\rho\}$$
where $Y'$ is the upper left block of $^tGJ_nG$. The number of $Y\sim I_n$ with upper left $d\times d$ block $Y'$ is $\boldsymbol \nu(n,d) r^*(I_n,Y')/o(I_n),$ as for $C\in\mathbb F^{n,d}$ with $\rank C=d$, the number of ways to extend $C$ to an element of $GL_n(\mathbb F)$ is $\boldsymbol \nu(n,d)$. Similarly, the number of $Y\sim J_n$ with upper left $d\times d$ block $Y'$ is $\boldsymbol \nu(n,d) r^*(J_n,Y')/o(J_n).$ Hence we have \begin{align*} \mathcal G^*_{U_{n,d}} &=\sum_{Y\sim I_n} \e\{2YU_{n,d}\cdot\rho\} -\sum_{Y\sim J_n} \e\{2YU_{n,d}\cdot\rho\}\\ &=\sum_{G\in GL_n(\mathbb F)} \left( \frac{\e\{2\,^tGI_nGU_{n,d}\cdot\rho\}}{o(I_n)} - \frac{\e\{2\,^tGJ_nGU_{n,d}\cdot\rho\}}{o(J_n)}\right)\\ &=\boldsymbol \nu(n,d)\sum_{Y'\in\mathbb F^{d,d}_{\sym}} \left(\frac{r^*(I_n,Y')}{o(I_n)}-\frac{r^*(J_n,Y')}{o(J_n)}\right)\e\{2Y'\rho\}. \end{align*} Note that we can partition $\mathbb F^{d,d}_{\sym}$ into $GL_d(\mathbb F)$-orbits, and in Lemma 5.1, we compute representation numbers $r^*(\cdot,\cdot)$. We find that when $n$ is odd, we have $o(I_n)=o(J_n)$, $r^*(I_n, U_{d,2k})-r^*(J_n,U_{d,2k})=0,$ and $$r^*(I_n,U_{d,2k+1})-r^*(J_n,U_{d,2k+1}) =r^*(J_n,\overline U_{d,2k+1})-r^*(I_n,\overline U_{d,2k+1}).$$ Hence with $n=2m+1$, using Lemma 5.1 and then Lemma 5.3, we get \begin{align*} \mathcal G^*_{U_{n,d}} &=\frac{\boldsymbol \nu(n,d)}{o(I_n)} \sum_{k=0}^{d/2} 2\varepsilon^{m-k}N({\mathfrak P})^{2mk-k^2+m-k+(d-2k-1)(d-2k-2)/2}\\ &\quad\cdot \boldsymbol \mu\boldsymbol \delta(m,d-k-1) \left(\sum_{Y\sim U_{d,2k+1}}\e\{2Y\rho\} -\sum_{Y\sim\overline U_{d,2k+1}}\e\{2Y\rho\}\right)\\ &\quad=\frac{\boldsymbol \nu(n,d)}{o(I_n)} \sum_{k=0}^{d/2} 2\varepsilon^{m-k}N({\mathfrak P})^{2mk-k^2+m-k+(d-2k-1)(d-2k-2)/2}\\ &\quad\cdot \boldsymbol \mu\boldsymbol \delta(m,d-k-1) \cdot\sum_{\cls Y\in\mathbb F^{2k+1,2k+1}_{\sym}}\frac{r^*(I_d,Y)}{o(Y)}\, \mathcal G^*_Y \end{align*} (where $\cls Y$ is the isometry class of $Y$, or equivalently, the $GL_d(\mathbb F)$-orbit of $Y$). 
With $n=2m+1$, similar reasoning gives us \begin{align*} \mathcal G^*_{\overline U_{n,d}} &=\boldsymbol \nu(n,d)\sum_{Y'\in\mathbb F^{d,d}_{\sym}} \left(\frac{r^*(I_n,Y')}{o(I_n)}-\frac{r^*(J_n,Y')}{o(J_n)}\right) \e\{2Y'J_d\rho\}\\ &\quad=\frac{\boldsymbol \nu(n,d)}{o(I_n)} \sum_{k=0}^{d/2} 2\varepsilon^{m-k}N({\mathfrak P})^{2mk-k^2+m-k+(d-2k-1) (d-2k-2)/2}\\ &\quad\cdot \boldsymbol \mu\boldsymbol \delta(m,d-k-1)\cdot \sum_{\cls Y\in\mathbb F^{2k+1,2k+1}_{\sym}}\frac{r^*(J_d,Y)}{o(Y)}\, \mathcal G^*_Y. \end{align*}
Now suppose that $n=2m$. Then using Lemma 5.1 we have \begin{align*} &\frac{r^*(I_n,Y)}{o(I_n)}-\frac{r^*(J_n,Y)}{o(J_n)}\\ &\quad= \frac{1}{o(I_{n+1})} (r^*(I_{2m+1},\big<1\big>\perp Y)-r^*(J_{2m+1},\big<1\big>\perp Y)). \end{align*} So following the above reasoning, for $n=2m$ we have \begin{align*} \mathcal G^*_{U_{n,d}} &=\frac{\boldsymbol \nu(n,d)}{o(I_{n+1})} \sum_{k=0}^{d/2} 2\varepsilon^{m-k}N({\mathfrak P})^{2mk-k^2+m-k+(d-2k)(d-2k-1)/2}\\ &\quad\cdot \boldsymbol \mu\boldsymbol \delta(m,d-k)\cdot \sum_{\cls Y\in\mathbb F^{2k,2k}_{\sym}}\frac{r^*(I_d,Y)}{o(Y)}\, \mathcal G^*_Y,\\ \mathcal G^*_{\overline U_{n,d}} &=\frac{\boldsymbol \nu(n,d)}{o(I_{n+1})} \sum_{k=0}^{d/2} 2\varepsilon^{m-k}N({\mathfrak P})^{2mk-k^2+m-k+(d-2k)(d-2k-1)/2}\\ &\quad\cdot \boldsymbol \mu\boldsymbol \delta(m,d-k)\cdot \sum_{\cls Y\in\mathbb F^{2k,2k}_{\sym}}\frac{r^*(J_d,Y)}{o(Y)}\, \mathcal G^*_Y. \end{align*}
To evaluate $\mathcal G^*_{I_n}$ and $\mathcal G^*_{J_n}$, we make use of the (non-twisted) Gauss sums \begin{align*} \mathcal G_{I_n}&=\sum_{U\in\mathbb F^{n,n}}\e\{2I_n[U]\rho\},\ \mathcal G_{J_n}=\sum_{U\in\mathbb F^{n,n}}\e\{2J_n[U]\rho\},\\ \overline\mathcal G_{I_n} &=\sum_{U\in\mathbb F^{n,n}}\e\{2I_n[U]J_n\rho\},\ \overline\mathcal G_{J_n} =\sum_{U\in\mathbb F^{n,n}}\e\{2J_n[U]J_n\rho\}. \end{align*} For $Y\in\mathbb F^{n,n}$, by looking at the trace of the matrix $^tYY$, it is easy to check that
$\mathcal G_{I_n}=(\mathcal G_1^*)^{n^2}$. Similarly, we have \begin{align*} \mathcal G_{J_n}&=(\mathcal G_1^*)^{n(n-1)}\cdot(\mathcal G^*_{\omega})^{n}= \overline\mathcal G_{I_n},\ \overline\mathcal G_{J_n}=(\mathcal G^*_1)^{(n-1)^2}\cdot(\mathcal G^*_{\omega})^{2n-1}. \end{align*} Classical techniques give us $\mathcal G^*_{\omega}=-\mathcal G^*_1$ and $(\mathcal G^*_1)^2=\varepsilon N({\mathfrak P})$.
On the other hand, we have \begin{align*} \mathcal G_{I_{n}}&=\sum_{Y\in\mathbb F^{n,n}_{\sym}} r(I_{n}, Y) \e\{2Y\rho\},\ \mathcal G_{J_{n}}=\sum_{Y\in\mathbb F^{n,n}_{\sym}} r(J_{n}, Y) \e\{2Y\rho\},\\ \overline\mathcal G_{I_{n}}&=\sum_{Y\in\mathbb F^{n,n}_{\sym}} r(I_{n}, Y) \e\{2YJ_n\rho\},\ \overline\mathcal G_{J_{n}}=\sum_{Y\in\mathbb F^{n,n}_{\sym}} r(J_{n}, Y) \e\{2YJ_n\rho\}. \end{align*} Partitioning $\mathbb F^{n,n}_{\sym}$ into $GL_{n}(\mathbb F)$-orbits, we get \begin{align*} &\frac{1}{o(I_n)}\,\mathcal G_{I_n}-\frac{1}{o(J_n)}\,\mathcal G_{J_n}\\ &\quad= \frac{r(I_n,0_n)}{o(I_n)}-\frac{r(J_n,0_n)}{o(J_n)}\\ &\qquad+ \sum_{0<\ell\le n}\sum_{Y\sim U_{n,\ell}} \left(\frac{r(I_n,U_{n,\ell})}{o(I_n)}- \frac{r(J_n,U_{n,\ell})}{o(J_n)}\right) \e\{2Y\rho\}\\ &\qquad + \sum_{0<\ell\le n}\sum_{Y\sim\overline U_{n,\ell}} \left(\frac{r(I_n,\overline U_{n,\ell})}{o(I_n)}- \frac{r(J_n,\overline U_{n,\ell})}{o(J_n)}\right) \e\{2Y\rho\}. \end{align*} Notice that $r(I_n,I_{\ell}\perp 0_{d-\ell})=r^*(I_n,I_{\ell})r(I_{n-\ell},0_{d-\ell}).$ So using Lemmas 5.1 and 5.2, and then Lemma 5.3, when $n$ is odd we get \begin{align*} \mathcal G_{I_n}-\mathcal G_{J_n} &\quad= \sum_{\substack{0\le \ell\le n\\ \ell\,\text{odd}}} (r(I_{n}, U_{n,\ell})-r(J_{n}, U_{n,\ell}))\\ &\qquad\cdot \left(\sum_{Y\sim U_{n,\ell}}\e\{2Y\rho\} -\sum_{Y\sim \overline U_{n,\ell}}\e\{2Y\rho\}\right)\\ &\quad= \sum_{\substack{0\le \ell\le n\\ \ell\,\text{odd}}} (r(I_{n}, U_{n,\ell})-r(J_{n}, U_{n,\ell})) \sum_{\cls Y\in\mathbb F^{\ell,\ell}_{\sym}} \frac{r^*(I_{n},Y)}{o(Y)}\, \mathcal G^*_Y. \end{align*} Similarly, with $n$ odd, \begin{align*} \overline\mathcal G_{I_{2m+1}}-\overline\mathcal G_{J_{2m+1}} &=\sum_{\substack{0\le \ell\le n\\ \ell\,\text{odd}}} (r(I_{n}, U_{n,\ell})-r(J_{n}, U_{n,\ell})) \sum_{\cls Y\in\mathbb F^{\ell,\ell}_{\sym}} \frac{r^*(J_{n},Y)}{o(Y)}\, \mathcal G^*_Y. \end{align*}
When $n$ is even, similar arguments give us \begin{align*} &r^*(I_{n+1},1)\,\mathcal G_{I_{n}}-r^*(J_{n+1},1)\,\mathcal G_{J_{n}}\\ &= \sum_{\substack{0\le \ell\le n\\ \ell\,\text{even}}} (r(I_{n+1}, \big<1\big>\perp U_{n,\ell})-r(J_{n+1}, \big<1\big>\perp U_{n,\ell})) \sum_{\cls Y\in\mathbb F^{\ell,\ell}_{\sym}} \frac{r^*(I_{n},Y)}{o(Y)}\, \mathcal G^*_Y, \end{align*} \begin{align*} &r^*(I_{n+1},1)\,\overline\mathcal G_{I_{n}}-r^*(J_{n+1},1)\,\overline\mathcal G_{J_{n}}\\ &= \sum_{\substack{0\le \ell\le n\\ \ell\,\text{even}}} (r(I_{n+1}, \big<1\big>\perp U_{n,\ell})-r(J_{n+1}, \big<1\big>\perp U_{n,\ell})) \sum_{\cls Y\in\mathbb F^{\ell,\ell}_{\sym}} \frac{r^*(J_{n},Y)}{o(Y)}\, \mathcal G^*_Y. \end{align*}
Now we argue by induction on $m$ to prove the theorem in the case that $n=2m+1$. For $m=0$, we have $\mathcal G^*_{U_{1,1}}=\mathcal G^*_1$ (by definition of $\mathcal G^*_1$), and as we have already noted, $\mathcal G^*_{\overline U_{1,1}}=-\mathcal G^*_1$. So suppose that $m\ge1$ and that the theorem holds for all $\mathcal G^*_Y$ where $Y\in\mathbb F^{2r+1,2r+1}_{\sym}$ and $0\le r<m$. With $0<d<n$, we begin with the expression for $\mathcal G^*_{U_{n,d}}$ that we derived above. By the induction hypothesis, for $2k+1\le d$ and $Y\in\mathbb F^{2k+1,2k+1}_{\sym}$, we have $\mathcal G^*_Y=\varepsilon^k N({\mathfrak P})^{k^2+2k}\cdot h_Y$ where $h_Y$ is defined in Lemma 5.4. So by Lemma 5.4 we have $\mathcal G^*_{U_{n,d}}=0$ when $d$ is even, and \begin{align*} \mathcal G^*_{U_{n,d}} &=\frac{\boldsymbol \nu(n,d)}{o(I_n)} \sum_{k=0}^{d/2} 2\varepsilon^{m-k}N({\mathfrak P})^{2mk-k^2+m-k+(d-2k-1)(d-2k-2)/2}\\ &\quad\cdot \boldsymbol \mu\boldsymbol \delta(m,d-k-1)\cdot (-1)^k\varepsilon^{k+c} N({\mathfrak P})^{k^2+c}\,\boldsymbol \gamma(c,k) \end{align*} when $d$ is odd with $d=2c+1$.
So assume now that $d=2c+1$; then we have \begin{align*}
\mathcal G^*_{U_{n,d}} &= \frac{\boldsymbol \nu(n,d)}{o(I_n)} \,2\varepsilon^{m+c} N({\mathfrak P})^{m+2c^2}\mathcal G^*_1\cdot A(c,0) \text{ where }\\ A(t,q)&=\sum_{k=0}^t(-1)^k N({\mathfrak P})^{2k(m+k-2t-q)}\boldsymbol \mu\boldsymbol \delta(m,2t+q-k) \boldsymbol \gamma(t,k). \end{align*} Since $\boldsymbol \gamma(t,k)=N({\mathfrak P})^{2k}\boldsymbol \gamma(t-1,k)+\boldsymbol \gamma(t-1,k-1)$, we have \begin{align*} A(t,q) &=\sum_{k=0}^{t-1} (-1)^k N({\mathfrak P})^{2k(m+k+1-2t-q)} \boldsymbol \mu\boldsymbol \delta(m,2t+q-k-1)\boldsymbol \gamma(t-1,k)\\ &\quad\cdot (\boldsymbol \mu\boldsymbol \delta(m,2t+q-k)-N({\mathfrak P})^{2(m-2t-q+k+1)})\\ &=-A(t-1,q+1)=(-1)^tA(0,t)=(-1)^t \boldsymbol \mu\boldsymbol \delta(m,t+q). \end{align*} Therefore, using that $\boldsymbol \nu(n,d)=N({\mathfrak P})^{(n-d)(n+d-1)/2}\boldsymbol \mu(n-d,n-d)$, \begin{align*} \mathcal G^*_{U_{n,d}} &=(-1)^c \varepsilon^{m+c} N({\mathfrak P})^{m^2+2m-c}\cdot \frac{\boldsymbol \mu(2(m-c),2(m-c))}{\boldsymbol \mu\boldsymbol \delta(m-c,m-c)}\, \mathcal G^*_1, \end{align*} as claimed in the statement of the theorem. A virtually identical argument gives us $\mathcal G^*_{\overline U_{n,d}}=-\mathcal G^*_{U_{n,d}}$.
Now, still taking $n=2m+1$ and beginning with our earlier expression for $\mathcal G_{I_n}-\mathcal G_{J_n},$ we use Lemmas 5.1 and 5.2 to give us \begin{align*} \mathcal G_{I_n}-\mathcal G_{J_n}&=2\sum_{k=0}^{m}\sum_{s=0}^{m-k}(-1)^{m-k-s} \varepsilon^{m-k}N({\mathfrak P})^{2mk-k^2+m-k+(m-k)^2+s^2}\\ &\quad\cdot \boldsymbol \mu\boldsymbol \delta(m,k) \boldsymbol \beta\boldsymbol \delta(m-k,s) \cdot\sum_{\cls Y\in\mathbb F^{2k+1,2k+1}_{\sym}} \frac{r^*(I_{2m+1},Y)}{o(Y)}\, \mathcal G^*_Y. \end{align*} Using that $o(I_n)=2N({\mathfrak P})^{m^2}\boldsymbol \mu\boldsymbol \delta(m,m)$, and the induction hypothesis for $Y\in\mathbb F^{2k+1,2k+1}_{\sym}$ with $k<m$, we have \begin{align*} \mathcal G_{I_n}-\mathcal G_{J_n}&= o(I_{2m+1})\mathcal G^*_{I_{2m+1}} -o(I_{2m+1})\varepsilon^mN({\mathfrak P})^{m^2+2m}\mathcal G^*_1\, h_{I_{2m+1}}+2\mathcal G^*_1B \end{align*} where \begin{align*}B&=\sum_{k=0}^m\sum_{s=0}^{m-k}(-1)^{m-k-s} \varepsilon^{m-k}N({\mathfrak P})^{m^2+m-k+s^2}\boldsymbol \mu\boldsymbol \delta(m,k)\boldsymbol \beta\boldsymbol \delta(m-k,s)\\ &\quad\cdot \varepsilon^kN({\mathfrak P})^{k^2+2k} \sum_{\cls Y\in\mathbb F^{2k+1,2k+1}_{\sym}} \frac{r^*(I_{2m+1},Y)}{o(Y)}\, h_Y. \end{align*} By Lemma 5.4, we get \begin{align*} B&=\sum_{k=0}^m\sum_{s=0}^{m-k}(-1)^{m-s}N({\mathfrak P})^{m^2+2m+k(k-1)+s^2} \boldsymbol \mu\boldsymbol \delta(m,k)\boldsymbol \beta\boldsymbol \delta(m-k,s)\boldsymbol \gamma(m,k). \end{align*} Since $\boldsymbol \gamma(m,k)\boldsymbol \beta\boldsymbol \delta(m-k,s) =\boldsymbol \beta\boldsymbol \delta(m,s)\boldsymbol \gamma(m-s,k),$ we can sum on $0\le s\le m$, $0\le k\le m-s$. Then replacing $s$ by $m-s$ and using that $\boldsymbol \beta(m,m-s)=\boldsymbol \beta(m,s),$ we get \begin{align*} &B=\sum_{s=0}^m(-1)^sN({\mathfrak P})^{2m^2+2m-2ms+s^2} \boldsymbol \delta(m,m-s)\boldsymbol \beta(m,s)\cdot C(s) \text{ where}\\ &C(t)=\sum_{k=0}^t N({\mathfrak P})^{k(k-1)}\boldsymbol \mu\boldsymbol \delta(m,k)\boldsymbol \gamma(t,k). 
\end{align*} Since $\boldsymbol \gamma(t,k)=N({\mathfrak P})^{2k}\boldsymbol \gamma(t-1,k)+\boldsymbol \gamma(t-1,k-1),$ we have \begin{align*} C(t) &=N({\mathfrak P})^{2m}C(t-1)=N({\mathfrak P})^{2mt}C(0)=N({\mathfrak P})^{2mt}. \end{align*}
Therefore \begin{align*} &B=N({\mathfrak P})^{2m^2+2m}D(m,0) \text{ where}\\ &D(t,q)=\sum_{s=0}^t (-1)^s N({\mathfrak P})^{s(s+q)}\boldsymbol \delta(t+q,t-s)\boldsymbol \beta(t,s). \end{align*} Since $\boldsymbol \beta(t,s)=N({\mathfrak P})^s\boldsymbol \beta(t-1,s)+\boldsymbol \beta(t-1,s-1),$ we have \begin{align*} D(t,q) &=D(t-1,q+1)=D(0,t+q)=1. \end{align*} Therefore $B=N({\mathfrak P})^{2m^2+2m}$. Our earlier computations show that $\mathcal G_{I_n}-\mathcal G_{J_n}=2 N({\mathfrak P})^{2m^2+2m}\mathcal G^*_1$; so $$\mathcal G^*_{I_{2m+1}}=\varepsilon^mN({\mathfrak P})^{m^2+2m}\cdot h_{I_{2m+1}}\mathcal G^*_1 =(-1)^mN({\mathfrak P})^{m^2+m}\,\mathcal G^*_1.$$ A virtually identical argument gives us $\mathcal G^*_{J_n}=-\mathcal G^*_{I_n}$.
Now we argue by induction on $m$ to prove the theorem in the case that $n=2m$. Since the computation for $m=1$ is essentially identical to the induction step for $m>1$, we formally define $\mathcal G^*_{I_0}=\mathcal G^*_{J_0}=1$ (which is consistent with the formula claimed in the theorem). So now suppose that $m\ge1$ and that the theorem holds for all $\mathcal G^*_Y$ where $Y\in\mathbb F^{2r,2r}_{\sym}$ and $0\le r<m$. With $0<d<n$, we begin with the expression for $\mathcal G^*_{U_{n,d}}$ that we derived above. Take $c$ so that $d=2c$ or $d=2c+1$. Using the induction hypothesis, Lemma 5.4, and arguing as we did when $n$ was odd, we get $$\mathcal G^*_{U_{n,d}}=\frac{\boldsymbol \nu(n,d)}{o(I_{n+1})} 2\varepsilon^m N({\mathfrak P})^{m+d(d-1)/2}A(c,d-2c)$$ where $A(t,q)$ is as defined earlier in this proof; recall that $$A(t,q)=(-1)^t\boldsymbol \mu\boldsymbol \delta(m,t+q).$$ Since $\boldsymbol \mu(2(m-c),1)=\boldsymbol \mu\boldsymbol \delta(m-c,1)$, for $d=2c$ or $2c+1$ we get $$\mathcal G^*_{U_{n,d}} =(-1)^c\varepsilon^mN({\mathfrak P})^{m^2} \frac{\boldsymbol \mu(2(m-c),2(m-c))}{\boldsymbol \mu\boldsymbol \delta(m-c,m-c)}.$$ A virtually identical argument gives us $\mathcal G^*_{\overline U_{n,d}}=\mathcal G^*_{U_{n,d}}.$
Still assuming that $n=2m$ and beginning with our earlier expression for $r^*(I_{n+1},1)\mathcal G_{I_n}-r^*(J_{n+1},1)\mathcal G_{J_n}$, we use Lemmas 5.1 and 5.2 and the induction hypothesis to get \begin{align*} &r^*(I_{n+1},1)\mathcal G_{I_n}-r^*(J_{n+1},1)\mathcal G_{J_n}\\ &\quad=o(I_{n+1})\mathcal G^*_{I_n}-o(I_{n+1})\varepsilon^m N({\mathfrak P})^{m^2} \cdot h_{I_n} +2\varepsilon^m N({\mathfrak P})^{-m}B \end{align*} where $B$ is as in the case of $n$ odd. We saw that $B=N({\mathfrak P})^{2m^2+2m}$, and $$r^*(I_{n+1},1)\mathcal G_{I_n}-r^*(J_{n+1},1)\mathcal G_{J_n} =2\varepsilon^m N({\mathfrak P})^{2m^2+m};$$ so we get $$\mathcal G^*_{I_{2m}}=\varepsilon^m N({\mathfrak P})^{m^2}\cdot h_{I_{2m}} =(-1)^m\varepsilon^m N({\mathfrak P})^{m^2}.$$ The argument to evaluate $\mathcal G^*_{J_{2m}}$ is essentially identical to that of evaluating $\mathcal G^*_{J_{2m+1}}$, where for this we begin with the identity $$r^*(J_{2m+1},1)\overline{\mathcal G}_{I_{2m}}-r^*(I_{2m+1},1)\overline{\mathcal G}_{J_{2m}} =2\varepsilon^m N({\mathfrak P})^{2m^2+m}.$$ This proves the theorem.
To prove Corollary 1.2, we first note that by Theorem 1.1, $\left(\mathcal G^*_1\right)^m\mathcal G^*_T$ has no dependence on our choice of $\rho$. Thus we can follow the argument of Lemma 3.1 \cite{half-int-aps}, as the techniques are local. In \cite{half-int-aps}, all quadratic forms were assumed to be even; since $2$ is a unit in $\mathbb F$, we have $R^*(T\perp\big<1\big>,0_a)= R^*(2T\perp\big<2\big>,0_a)$, and hence Corollary 1.2 follows.
\section{Variations on quadratically twisted Gauss sums}
For $T\in\mathbb F^{n,n}_{\sym}$ and $0\le r\le n$, here we consider $\mathcal G_T^*({\mathfrak P};r)$, as defined in the introduction. For $T=0_n$, we have $\mathcal G^*_{0_n}({\mathfrak P};r)=\mathcal G^*_{0_d}({\mathfrak P})$, so we only need to consider $T\not=0_n$.
\begin{prop} Take $n\in\mathbb Z_+$, $T\in\mathbb F^{n,n}_{\sym}$ and let $d=\rank T$. \begin{enumerate} \item[(a)] Suppose that $0\le 2t+1\le n$. When $d$ is even we have $\mathcal G^*_T({\mathfrak P};2t+1)=0.$ When $d$ is odd with $d=2c+1$, we have $\mathcal G^*_{\overline U_{n,d}}({\mathfrak P};2t+1)=-\mathcal G^*_{U_{n,d}}({\mathfrak P};2t+1),$ and \begin{align*} \mathcal G^*_{U_{n,d}}({\mathfrak P};2t+1) &=\frac{\boldsymbol \nu(n,2c+1)}{\boldsymbol \nu(n-1,2c)}\varepsilon^cN({\mathfrak P})^c\mathcal G^*_1({\mathfrak P}) \mathcal G^*_{U_{n-1,2c}}({\mathfrak P};2t). \end{align*}
\item[(b)] Suppose that $0\le 2t\le n$; set $s=n-2t$. Then with $c$ so that $d=2c$ or $2c+1$, we have \begin{align*} \mathcal G^*_{T}({\mathfrak P};2t) &= \frac{\boldsymbol \nu(n,d)}{o(I_{2t+1}\perp0_s)} \sum_{k=0}^c (-1)^k\varepsilon^kN({\mathfrak P})^{s(2k+1)+2tk+t-k}\boldsymbol \mu\boldsymbol \delta(t,k) \\ &\quad\cdot \boldsymbol \gamma(c,k) A_s(t-k,c-k) \end{align*} where \begin{align*} A_s(x,y)= (N({\mathfrak P})^x+\varepsilon^x) r^*(I_{2x}\perp0_s,0_{2y})
- (N({\mathfrak P})^x-\varepsilon^x)r^*(J_{2x}\perp0_s,0_{2y}). \end{align*}
\end{enumerate} \end{prop}
\noindent{\bf Remark:} As $\boldsymbol \nu(2y,0)$ is the number of bases for any $2y$-dimensional space, $r^*(T',0_{2y})=\boldsymbol \nu(2y,0)\cdot R^*(T',0_{2y})$ for any symmetric $T'$.
\begin{proof} Throughout this proof, we follow the lines of argument used in Section 3. In this way we get \begin{align*} \mathcal G^*_{U_{n,d}}({\mathfrak P};r) &=\boldsymbol \nu(n,d)\sum_{Y\in\mathbb F^{d,d}_{\sym}} \left(\frac{r^*(U_{n,r},Y)}{o(U_{n,r},Y)} -\frac{r^*(\overline U_{n,r},Y)}{o(\overline U_{n,r},Y)}\right) \e\{2Y/p\}, \end{align*} \begin{align*} \mathcal G^*_{\overline U_{n,d}}({\mathfrak P};r) &=\boldsymbol \nu(n,d)\sum_{Y\in\mathbb F^{d,d}_{\sym}} \left(\frac{r^*(U_{n,r},Y)}{o(U_{n,r},Y)} -\frac{r^*(\overline U_{n,r},Y)}{o(\overline U_{n,r},Y)}\right) \e\{2YJ_d/p\}. \end{align*} We have $o(0_{n-r})=\boldsymbol \nu(n-r,n-r),$ and $$o(U_{n,r})=o(I_r)N({\mathfrak P})^{r(n-r)}o(0_{n-r}),\ o(\overline U_{n,r})=o(J_r)N({\mathfrak P})^{r(n-r)}\boldsymbol \nu(n-r,n-r).$$
First consider the case that $r=2t+1$. Then for even $\ell$ ($\ell\le d$), we have $$r^*(U_{n,r},U_{d,\ell})-r^*(\overline U_{n,r},U_{d,\ell}) =0=r^*(U_{n,r},\overline U_{d,\ell})-r^*(\overline U_{n,r},\overline U_{d,\ell}),$$ and for odd $\ell$ we have $$r^*(U_{n,r},U_{d,\ell})=r^*(\overline U_{n,r},\overline U_{d,\ell}),\ r^*(U_{n,r},\overline U_{d,\ell})=r^*(\overline U_{n,r},U_{d,\ell}).$$ Hence using Lemma 5.3, we have \begin{align*} \mathcal G^*_{U_{n,d}}({\mathfrak P};2t+1) &=\frac{\boldsymbol \nu(n,d)}{o(U_{n,2t+1})} \sum_{0\le 2k+1\le d} \left(\sum_{\cls Y\in\mathbb F^{2k+1,2k+1}_{\sym}}\frac{r^*(I_d,Y)}{o(Y)} \mathcal G^*_Y\right)\\ &\quad\cdot (r^*(U_{n,2t+1},U_{d,2k+1})-r^*(\overline U_{n,2t+1},U_{d,2k+1})). \end{align*} So by Theorem 1.1 and Lemmas 5.1 and 5.4, when $d$ is even we get $\mathcal G^*_{U_{n,d}}({\mathfrak P};2t+1)=0$, and when $d$ is odd with $d=2c+1$, we get \begin{align*} \mathcal G^*_{U_{n,d}}({\mathfrak P};2t+1) &=\frac{\boldsymbol \nu(n,d)}{o(U_{n,2t+1})} \sum_{k=0}^c (-1)^k\varepsilon^{k+c} N({\mathfrak P})^{k^2+c} \boldsymbol \gamma(c,k) \mathcal G^*_1\\ &\quad\cdot (r^*(U_{n,2t+1},U_{d,2k+1})-r^*(\overline U_{n,2t+1},U_{d,2k+1})). \end{align*} An almost identical argument gives us $\mathcal G^*_{\overline U_{n,d}}({\mathfrak P};2t+1)=-\mathcal G^*_{U_{n,d}}({\mathfrak P};2t+1).$
Now consider the case that $r=2t$. With $d=2c$ or $2c+1$, reasoning as in Section 3 gives us \begin{align*} \mathcal G^*_{U_{n,d}}({\mathfrak P};2t) &=\frac{\boldsymbol \nu(n,d)}{o(U_{n+1,2t+1})} \sum_{k=0}^c(-1)^k\varepsilon^k N({\mathfrak P})^{k^2} \boldsymbol \gamma(c,k)\\ &\quad\cdot\left(r^*(U_{n+1,2t+1},U_{d+1,2k+1})
- r^*(\overline U_{n+1,2t+1},U_{d+1,2k+1})\right)\\ &= \mathcal G^*_{\overline U_{n,d}}({\mathfrak P};2t). \end{align*} This gives us (a) and part of (b).
To finish proving (b), we begin with the above equation, taking $d=2c$. It is easily seen that $r^*(I_r\perp 0_s,1)=N({\mathfrak P})^s r^*(I_r,1),$ and consequently from Lemma 5.1 we get \begin{align*} & r^*(U_{n+1,2t+1},U_{d+1,2k+1})
- r^*(\overline U_{n+1,2t+1},U_{d+1,2k+1})\\ &\quad = N({\mathfrak P})^{(n-2t)(2k+1)+2tk-k^2+t-k}\boldsymbol \mu\boldsymbol \delta(t,k) A_{n-2t}(t-k,c-k) \end{align*} with $A_s(x,y)$ as in the statement of the proposition. \end{proof}
\section{Lemmas and their proofs}
\begin{lem} Take $t,d\in\mathbb Z_+$. We have: \begin{align*} r^*(I_{2t},1)&=N({\mathfrak P})^{t-1}(N({\mathfrak P})^t-\varepsilon^t)=r^*(I_{2t},\omega),\\ r^*(J_{2t},1)&=N({\mathfrak P})^{t-1}(N({\mathfrak P})^t+\varepsilon^t)=r^*(J_{2t},\omega),\\ r^*(I_{2t+1},1)&=N({\mathfrak P})^t(N({\mathfrak P})^t+\varepsilon^t)=r^*(J_{2t+1},\omega),\\ r^*(I_{2t+1},\omega)&=N({\mathfrak P})^t(N({\mathfrak P})^t-\varepsilon^t)=r^*(J_{2t+1},1); \end{align*} also, \begin{align*} r^*(I_{2t},0_d)&=N({\mathfrak P})^{d(d-1)/2}(N({\mathfrak P})^t-\varepsilon^t)\boldsymbol \mu\boldsymbol \delta(t-1,d-1) (N({\mathfrak P})^{t-d}+\varepsilon^t),\\ r^*(J_{2t},0_d)&=N({\mathfrak P})^{d(d-1)/2}(N({\mathfrak P})^t+\varepsilon^t)\boldsymbol \mu\boldsymbol \delta(t-1,d-1) (N({\mathfrak P})^{t-d}-\varepsilon^t),\\ r^*(I_{2t+1},0_d)&=N({\mathfrak P})^{d(d-1)/2}\boldsymbol \mu\boldsymbol \delta(t,d)=r^*(J_{2t+1},0_d). \end{align*} \end{lem}
\begin{proof} The first collection of formulas are from Theorems 2.59 and 2.60 of \cite{Ger}. For the second collection of formulas, we begin with Theorems 2.59 and 2.60 of \cite{Ger}, giving us formulas for $r(I_t,0)=r^*(I_t,0)+1$ and $r(J_t,0)=r^*(J_t,0)+1$. Now consider the case that $V$ is a $2t$-dimensional space over $\mathbb F$ equipped with a quadratic form $Q_V$ given by $I_{2t}$ relative to some basis for $V$. So $r^*(I_{2t},0_d)$ is the number of all (ordered) bases for $d$-dimensional, totally isotropic subspaces of $V$. (Recall that a subspace $W$ of $V$ is totally isotropic if $Q_V$ restricts to 0 on $W$.) Suppose that $d>1$; we construct all bases for $d$-dimensional, totally isotropic subspaces of $V$ as follows. Choose an isotropic vector $x$ from $V$ (so $x\not=0$ and $Q_V(x)=0$; note that this is not possible if $t=1$ and $\varepsilon=-1$). Then as $V$ is a regular space, there is some $y\in V$ so that $y$ is not orthogonal to $x$; hence (by Theorem 2.23 \cite{Ger})
$x,y$ span a hyperbolic plane, and (by Theorem 2.17 \cite{Ger}), this hyperbolic plane splits $V$, giving us $V=(\mathbb F x\oplus\mathbb F y)\perp V'$ where $V'$ is hyperbolic if and only if $V$ is. We have $\disc V=\varepsilon \disc V'$ and so the quadratic form on $V'$ is given by $I_{2(t-1)}$ if $\varepsilon=1$, and by $J_{2(t-1)}$ if $\varepsilon=-1$. The number of all bases for $d$-dimensional, totally isotropic subspaces of $V$ with $x$ as the first basis element is $N({\mathfrak P})^{d-1} r^*(I_{2(t-1)},0_{d-1})$ if $\varepsilon=1$, and $N({\mathfrak P})^{d-1} r^*(J_{2(t-1)},0_{d-1})$ otherwise. The formula claimed now follows by induction on $d$.
Virtually identical arguments yield the formulas when $I_{2t}$ is replaced by $J_{2t}$ or $I_{2t+1}$ or $J_{2t+1}$. \end{proof}
\begin{lem} Suppose that $m\ge 0$. We have \begin{align*} &\sum_{s=0}^m (-1)^s N({\mathfrak P})^{(2m+1)(m-s)+s^2}\boldsymbol \beta\boldsymbol \delta(m,m-s)\\ &\quad=r(I_{2m+1},0_{2m+1}) =r(J_{2m+1},0_{2m+1}), \end{align*} \begin{align*} &\sum_{s=0}^m(-1)^s N({\mathfrak P})^{2m(m-s)+s(s-1)}\boldsymbol \beta(m,m-s)\boldsymbol \delta(m-1,m-s)\\ &\quad=\begin{cases} r(I_{2m},0_{2m})&\text{if $\varepsilon^m=1$,}\\ r(J_{2m},0_{2m})&\text{if $\varepsilon^m=-1$,}\end{cases}\\ &\sum_{s=1}^m (-1)^{s+1} N({\mathfrak P})^{2m(m-s)+s(s-1)}\boldsymbol \beta(m-1,m-s)\boldsymbol \delta(m,m-s)\\ &\quad=\begin{cases} r(J_{2m},0_{2m})&\text{if $\varepsilon^m=1$,}\\ r(I_{2m},0_{2m})&\text{if $\varepsilon^m=-1$.}\end{cases} \end{align*} \end{lem}
\begin{proof} Suppose that $V$ is an $n$-dimensional vector space over $\mathbb F$ equipped with a quadratic form given by $Q_V=I_n$ or $J_n$. Then $r(Q_V,0_n)$ is the number of (ordered) $x_1,\ldots,x_n\in V$ so that $\text{span}\{x_1,\ldots,x_n\}$ is totally isotropic. As $\boldsymbol \nu(d,0)$ is the number of bases for any given dimension $d$ space over $\mathbb F$, the number of dimension $d$ totally isotropic subspaces of $V$ is $$\varphi_d(V)=r^*(Q_V,0_d)/\boldsymbol \nu(d,0).$$
We treat the case that $V\simeq\mathbb H^m$, meaning that $\dim V=2m$ and the quadratic form on $V$ is given by $I_{2m}$ if $\varepsilon^m=1$, and by $J_{2m}$ otherwise (analogous arguments treat the other cases). Slightly abusing notation, we write $(x_1,\ldots,x_{2m})\subseteq V$ to mean that $(x_1,\ldots,x_{2m})$ is an ordered $2m$-tuple of vectors from $V$. We set $$\mathcal W_{m-s}=\{\text{dimension } m-s \text{ totally isotropic subspaces } W \text{ of }V\},$$ and we let $${\mathbb 1}_W(x_1,\ldots,x_{2m})= \begin{cases}1&\text{if $x_1,\ldots,x_{2m}\in W$,}\\ 0&\text{otherwise}.\end{cases}$$ Thus for $(x_1,\ldots,x_{2m})\subseteq V$, $\sum_{W\in\mathcal W_{m-s}} {\mathbb 1}_W(x_1,\ldots,x_{2m})$ is the number of elements of $\mathcal W_{m-s}$ containing $x_1,\ldots,x_{2m}$, and, noting that $N({\mathfrak P})^{2m(m-s)}$ is the number of (ordered) $2m$-tuples of vectors in each $W\in\mathcal W_{m-s}$, we have $$\sum_{(x_1,\ldots,x_{2m})\subseteq V} \left(\sum_{W\in\mathcal W_{m-s}} {\mathbb 1}_W(x_1,\ldots,x_{2m})\right)
= N({\mathfrak P})^{2m(m-s)}\varphi_{m-s}(V).$$ So \begin{align*} \psi(V):= &\sum_{s=0}^m (-1)^s N({\mathfrak P})^{s(s-1)+2m(m-s)}\varphi_{m-s}(V)\\ =&\sum_{(x_1,\ldots,x_{2m})\subseteq V}\left(\sum_{s=0}^m (-1)^s N({\mathfrak P})^{s(s-1)} \sum_{W\in\mathcal W_{m-s}} {\mathbb 1}_W(x_1,\ldots,x_{2m})\right). \end{align*}
Fix $(x'_1,\ldots,x'_{2m})\subseteq V$; let $W'$ be the subspace spanned by $x'_1,\ldots,x'_{2m}$, and set $\ell=\dim W'$. If $W'$ is not totally isotropic then ${\mathbb 1}_W(x'_1,\ldots,x'_{2m})=0$ for all totally isotropic $W$. So suppose that $W'$ is totally isotropic. Then repeatedly using Theorems 2.19, 2.23, 2.52 of \cite{Ger} and the assumption that $V$ is regular, we find that there is a dimension $\ell$ subspace $W''$ so that $W'\oplus W''\simeq\mathbb H^{\ell}$ and $V=(W'\oplus W'')\perp V'$ where $V'\simeq \mathbb H^{m-\ell}$. Hence the number of $W\in\mathcal W_{m-s}$ that contain $W'$ is $\varphi_{m-s-\ell}(V')$. Therefore, using Lemma 5.1 and the above formula for $\varphi_{m-s-\ell}(V')$, we have \begin{align*} &\sum_{s=0}^{m-\ell} (-1)^s N({\mathfrak P})^{s(s-1)} \sum_{W\in\mathcal W_{m-s}} {\mathbb 1}_W(x'_1,\ldots,x'_{2m}) =A(m-\ell,m-\ell-1) \end{align*} where $$A(t,k)=\sum_{s=0}^t (-1)^sN({\mathfrak P})^{s(s+k-t)}\boldsymbol \delta(k,t-s)\boldsymbol \beta(t,t-s).$$ We argue by induction on $t$ to show that for any $k$ and $t\ge 0$, we have $A(t,k)=1$. Clearly $A(0,k)=1$ for all $k$. So fix $t\ge0$ and suppose that $A(t,k)=1$ for all $k$. Hence we have \begin{align*} 1 &=(N({\mathfrak P})^k+1)\sum_{s=0}^t(-1)^sN({\mathfrak P})^{s(s+k-1-t)} \boldsymbol \delta(k-1,t-s)\boldsymbol \beta(t,t-s)\\ &\quad -N({\mathfrak P})^k\sum_{s=0}^t (-1)^s N({\mathfrak P})^{s(s+k-t)}\boldsymbol \delta(k,t-s)\boldsymbol \beta(t,t-s). \end{align*} Notice that in the first sum in the above equality, we can allow $s$ to vary from $0$ to $t+1$ (since $\boldsymbol \beta(t,-1)=0$), and in the second sum we can allow $s$ to vary from $-1$ to $t$ (since $\boldsymbol \beta(t,t+1)=0$). 
Also, we know that $$(N({\mathfrak P})^k+1)\boldsymbol \delta(k-1,t-s)=\boldsymbol \delta(k,t-s+1);$$ so replacing $s$ by $s-1$ in the second sum and using that $$\boldsymbol \beta(t,t-s)+N({\mathfrak P})^{t+1-s}\boldsymbol \beta(t,t+1-s)=\boldsymbol \beta(t+1,t+1-s),$$ we find that $A(t+1,k)=1$ for all $k$. Hence $\psi$ counts $(x_1,\ldots,x_{2m})\subseteq V$
zero times if $\text{span}\{x_1,\ldots,x_{2m}\}$ is not totally isotropic, and once otherwise. Thus $\psi=r(Q_V,0_{2m}).$ \end{proof}
\begin{lem} Fix $d\in\mathbb Z_+$ and $\ell\in\mathbb Z$ so that $0\le \ell<d$. Then $$\sum_{Y\sim U_{d,\ell}} \e\{2Y\rho\} -\sum_{Y\sim \overline U_{d,\ell}} \e\{2Y\rho\} =\sum_{\cls Y'\in\mathbb F^{\ell,\ell}_{\sym}} \frac{r^*(I_d,Y')}{o(Y')} \mathcal G^*_{Y'}({\mathfrak P} I_{\ell})$$ and $$\sum_{Y\sim U_{d,\ell}} \e\{2YJ_d\rho\} -\sum_{Y\sim \overline U_{d,\ell}} \e\{2YJ_d\rho\} =\sum_{\cls Y'\in\mathbb F^{\ell,\ell}_{\sym}} \frac{r^*(J_d,Y')}{o(Y')} \mathcal G^*_{Y'}({\mathfrak P} I_{\ell}),$$ where $\cls Y'$ varies over a set of representatives for the $GL_{\ell}(\mathbb F)$-orbits in $\mathbb F^{\ell,\ell}_{\sym}$. \end{lem}
\begin{proof} We first consider the sum over $Y\sim \overline U_{d,\ell}$. We know that for $G\in GL_{d}(\mathbb F)$ and $G'$ in the orthogonal group of $\overline U_{d,\ell}$, we have $^t(G'G)\overline U_{d,\ell}(G'G)=\,^tG\overline U_{d,\ell}G$, so when we let $G$ vary over $GL_{d}(\mathbb F)$, each element in the orbit of $\overline U_{d,\ell}$ appears exactly $o(\overline U_{d,\ell})$ times. Also, recall that with $\sigma$ denoting the matrix trace map, we have $\sigma(^tG\overline U_{d,\ell}G)=\sigma(\overline U_{d,\ell}GI_d\,^tG)$ and $\sigma(\overline U_{d,\ell}GI_d\,^tG)=\sigma(J_{\ell}Y')$ where $Y'$ is the upper left $\ell\times \ell$ block of $GI_d\,^tG$. So we have \begin{align*} \sum_{Y\sim\overline U_{d,\ell}}\e\{2Y\rho\} &=\frac{1}{o(\overline U_{d,\ell})} \sum_{G\in GL_d(\mathbb F)} \e\{2\,^tG\overline U_{d,\ell}G\rho\}\\ &=\frac{\boldsymbol \nu(d,\ell)}{o(\overline U_{d,\ell})} \sum_{Y'\in\mathbb F^{\ell,\ell}_{\sym}} r^*(I_d,Y') \e\{2 J_{\ell} Y'\rho\} \end{align*} since $$r^*(I_d,Y')=\#\{C\in\mathbb F^{d,\ell}: \ ^tCC=Y',\ \rank C=\ell\ \},$$ and the number of ways to extend $C$ to an element of $GL_{d}(\mathbb F)$ is $\boldsymbol \nu(d,\ell)$. Now, as $G$ varies over $GL_{\ell}(\mathbb F)$, $^tGY'G$ varies $o(Y')$ times over the elements in $\cls Y'$. Also, by Lemma 5.1, we have $o(\overline U_{d,\ell})=o(J_{\ell})\boldsymbol \nu(d,\ell)$. Hence \begin{align*} \sum_{Y\sim\overline U_{d,\ell}} \e\{2Y\rho\} &= \frac{1}{o(J_{\ell})} \sum_{G\in GL_d(\mathbb F)} \sum_{\cls Y'\in\mathbb F^{\ell,\ell}_{\sym}} \frac{r^*(I_d,Y')}{o(Y')}\e\{2G J_{\ell}\,^tGY'\rho\}\\ &= \sum_{\cls Y'\in\mathbb F^{\ell,\ell}_{\sym}}\frac{r^*(I_d,Y')}{o(Y')} \sum_{X\sim J_{\ell}}\e\{2XY'\rho\} \end{align*} where for the last equality we used that as $G$ varies over $GL_{\ell}(\mathbb F)$, $GJ_{\ell}\,^tG$ varies $o(J_{\ell})$ times over the elements in the orbit of $J_{\ell}$.
The analysis of $$\sum_{Y\sim U_{d,\ell}}\e\{2Y\rho\},\ \sum_{Y\sim U_{d,\ell}}\e\{2YJ_d\rho\},\text{ and } \sum_{Y\sim \overline U_{d,\ell}}\e\{2YJ_d\rho\}$$ follow in a virtually identical manner. Then we note that $$\sum_{X\sim I_{\ell}}\e\{2XY'\rho\}-\sum_{X\sim J_{\ell}}\e\{2XY'\rho\} =\mathcal G^*_{Y'},$$ completing the proof. \end{proof}
\begin{lem} Suppose that $0< \ell\le d$; take $c$ so that $d$ is $2c$ or $2c+1$. Take $Y\in\mathbb F^{\ell,\ell}_{\sym}$, and take $b$ so that $\rank Y$ is $2b$ or $2b+1$. \begin{enumerate} \item[(a)] Suppose that $\ell=2k$; set $$h_Y=(-1)^b\cdot\frac{\boldsymbol \mu(2(k-b),2(k-b))}{\boldsymbol \mu\boldsymbol \delta(k-b,k-b)}.$$ Then $$\sum_{\cls Y\in\mathbb F^{\ell,\ell}_{\sym}}\frac{r^*(I_d,Y)}{o(Y)}\,h_Y= \sum_{\cls Y\in\mathbb F^{\ell,\ell}_{\sym}}\frac{r^*(J_d,Y)}{o(Y)}\,h_Y= (-1)^k\boldsymbol \gamma(c,k).$$ \item[(b)] Suppose that $\ell=2k+1$; set $$h_Y=\begin{cases} (-1)^b\varepsilon^b N({\mathfrak P})^{-b}\cdot\frac{\boldsymbol \mu(2(k-b),2(k-b))}{\boldsymbol \mu\boldsymbol \delta(k-b,k-b)} &\text{if $Y\sim I_{2b+1}\perp 0_{2(k-b)}$,}\\ (-1)^{b+1}\varepsilon^b N({\mathfrak P})^{-b}\cdot\frac{\boldsymbol \mu(2(k-b),2(k-b))}{\boldsymbol \mu\boldsymbol \delta(k-b,k-b)} &\text{if $Y\sim J_{2b+1}\perp 0_{2(k-b)}$,}\\ 0&\text{if $\rank Y=2b$.} \end{cases}$$ Then when $d=2c$, $$\sum_{\cls Y\in\mathbb F^{\ell,\ell}_{\sym}}\frac{r^*(I_d,Y)}{o(Y)}\,h_Y= \sum_{\cls Y\in\mathbb F^{\ell,\ell}_{\sym}}\frac{r^*(J_d,Y)}{o(Y)}\,h_Y= 0,$$ and when $d=2c+1$, \begin{align*} \sum_{\cls Y\in\mathbb F^{\ell,\ell}_{\sym}}\frac{r^*(I_d,Y)}{o(Y)}\,h_Y &= -\sum_{\cls Y\in\mathbb F^{\ell,\ell}_{\sym}}\frac{r^*(J_d,Y)}{o(Y)}\,h_Y\\ &= (-1)^k\varepsilon^cN({\mathfrak P})^{c-2k} \boldsymbol \gamma(c,k). \end{align*} \end{enumerate} \end{lem}
\begin{proof} We have \begin{align*} &\sum_{\cls Y\in\mathbb F^{\ell,\ell}_{\sym}}\frac{r^*(I_d,Y)}{o(Y)}\,h_Y =\frac{r^*(I_d,0_{\ell})}{o(0_{\ell})}\\ &\quad +\sum_{a=1}^{\ell}\left(\frac{r^*(I_d,I_a\perp 0_{\ell-a})}{o(I_a\perp0_{\ell-a})} \,h_{I_a\perp 0_{\ell-a}} +\frac{r^*(I_d,J_a\perp 0_{\ell-a})}{o(J_a\perp0_{\ell-a})} \,h_{J_a\perp 0_{\ell-a}}\right). \end{align*} Also, using Lemma 5.1, one checks that \begin{align*} &o(I_{2b+1})N({\mathfrak P})^{2bs}\boldsymbol \nu(s,0)=N({\mathfrak P})^{2b}\boldsymbol \mu(s,1)o(I_{2b+1}\perp 0_{s-1})\\ &\quad=r^*(I_{2b+1},1)o(I_{2b}\perp0_s) =r^*(J_{2b+1},1)o(J_{2b}\perp0_s). \end{align*}
(a) Suppose that $\ell=2k$. Then using Lemma 5.1, when $d=2c$ we get \begin{align*} &r^*(I_{2b+1},1)r^*(I_d,I_{2b}\perp 0_{2(k-b)}) +r^*(J_{2b+1},1)r^*(I_d,J_{2b}\perp 0_{2(k-b)})\\ &\quad= 2N({\mathfrak P})^{(k-b)(2k-2b-1)+2cb-b^2}\\ &\qquad\cdot \boldsymbol \mu\boldsymbol \delta(c-1,2k-b-1)(N({\mathfrak P})^c-\varepsilon^c)(N({\mathfrak P})^{c-2(k-b)}+\varepsilon^c) \end{align*} and \begin{align*} &N({\mathfrak P})^{2b}(N({\mathfrak P})^{2(k-b)}-1)\\ &\qquad\cdot \left(r^*(I_d,I_{2b+1}\perp 0_{2(k-b)-1}) +r^*(I_d,J_{2b+1}\perp 0_{2(k-b)-1})\right)\\ &\quad= 2N({\mathfrak P})^{(k-b)(2k-2b-1)+2cb-b^2+c-2(k-b)}\\ &\qquad\cdot \boldsymbol \mu\boldsymbol \delta(c-1,2k-b-1)(N({\mathfrak P})^c-\varepsilon^c)(N({\mathfrak P})^{2(k-b)}-1). \end{align*} So using that $\boldsymbol \mu\boldsymbol \delta(t,s+s')=\boldsymbol \mu\boldsymbol \delta(t,s)\boldsymbol \mu\boldsymbol \delta(t-s,s'),$ we have \begin{align*} \sum_{\cls Y\in\mathbb F^{\ell,\ell}_{\sym}}\frac{r^*(I_{2c},Y)}{o(Y)}\,h_Y &= \sum_{b=0}^k(-1)^b\frac{N({\mathfrak P})^{2b(b+c-2k)}\boldsymbol \mu\boldsymbol \delta(c,2k-b)\boldsymbol \mu\boldsymbol \delta(k,b)} {\boldsymbol \mu\boldsymbol \delta(b,b)\boldsymbol \mu\boldsymbol \delta(k-b,k-b)\boldsymbol \mu\boldsymbol \delta(k,b)}\\ &= \boldsymbol \gamma(c,k)\cdot S(k,c) \end{align*} where $$S(k,c)=\sum_{b=0}^k(-1)^b N({\mathfrak P})^{2b(b+c-2k)} \boldsymbol \mu\boldsymbol \delta(c-k,k-b)\boldsymbol \gamma(k,b).$$ Since $\boldsymbol \gamma(k,b)=N({\mathfrak P})^{2b}\boldsymbol \gamma(k-1,b)+\boldsymbol \gamma(k-1,b-1),$ we find that \begin{align*} S(k,c) &= -S(k-1,c-1)=(-1)^k S(0,c-k)=(-1)^k, \end{align*} proving one case of (a). We follow this same line of argument when replacing $I_{2c}$ by $J_{2c}$, and when replacing $2c$ by $2c+1$.
(b) Suppose that $\ell=2k+1$. Using the definition of $h_Y$, we have \begin{align*} \sum_{\cls Y\in\mathbb F^{\ell,\ell}_{\sym}} \frac{r^*(I_d,Y)}{o(Y)}\, h_Y &=\sum_{b=0}^k \left(r^*(I_d,I_{2b+1}\perp0_{2(k-b)}) - r^*(I_d,J_{2b+1}\perp0_{2(k-b)})\right)\\ &\quad\cdot \frac{h_{I_{2b+1}\perp0_{2(k-b)}}}{o(I_{2b+1}\perp0_{2(k-b)})}. \end{align*} When $d$ is even, $r^*(I_d,I_{2b+1}\perp 0_{2(k-b)}) =r^*(I_d,J_{2b+1}\perp0_{2(k-b)}),$ so when $d$ is even the above sum on $\cls Y$ is 0. So suppose that $d=2c+1$. Then with $S(k,c)$ as in case (a), we have \begin{align*} \sum_{\cls Y\in\mathbb F^{\ell,\ell}_{\sym}} \frac{r^*(I_d,Y)}{o(Y)}\, h_Y &=\varepsilon^c N({\mathfrak P})^{c-2k} \boldsymbol \gamma(c,k) S(k,c)\\ &=(-1)^k\varepsilon^c N({\mathfrak P})^{c-2k} \boldsymbol \gamma(c,k). \end{align*}
To evaluate the sum on $\cls Y$ when $I_d$ is replaced by $J_d$, we first note that for any $s\ge0$, when $d$ is even we have $r^*(J_d,I_{2b+1}\perp 0_{s})=r^*(J_d,J_{2b+1}\perp0_{s})$, and when $d$ is odd we have $r^*(J_d,I_{2b+1}\perp 0_{s})=r^*(I_d,J_{2b+1}\perp0_{s})$. So mimicking our above analysis, we find that when $d$ is even, the sum on $\cls Y$ is $0$, and when $d$ is odd with $d=2c+1$, we have $$\sum_{\cls Y\in\mathbb F^{\ell,\ell}_{\sym}} \frac{r^*(J_d,Y)}{o(Y)}\,h_Y = -\sum_{\cls Y\in\mathbb F^{\ell,\ell}_{\sym}} \frac{r^*(I_d,Y)}{o(Y)}\,h_Y.$$ \end{proof}
\end{document} |
\begin{document}
\title{Class invariants by the CRT method} \author{Andreas Enge$^1$\and Andrew V. Sutherland$^2$} \institute{$^1$INRIA Bordeaux--Sud-Ouest\hspace{20pt}$^2$Massachusetts Institute of Technology}
\maketitle
\begin{abstract}
We adapt the CRT approach for computing Hilbert class polynomials to handle a wide range of class invariants. For suitable discriminants $D$, this improves its performance by a large constant factor, more than 200 in the most favourable circumstances. This has enabled record-breaking constructions of elliptic curves via the CM method, including examples with $|D|>10^{15}$. \end{abstract}
\section{Introduction} Every ordinary elliptic curve $E$ over a finite field $\F_q$ has \textit {complex multiplication} by an imaginary quadratic order $\mathcal{O}$, by which we mean that the endomorphism ring $\operatorname{End}(E)$ is isomorphic to $\mathcal{O}$. The Deuring lifting theorem implies that $E$ is the reduction of an elliptic curve $\hat{E}/\mathbb{C}$ that also has complex multiplication by~$\mathcal{O}$. Let $K$ denote the fraction field of $\mathcal{O}$. The $j$-invariant of $\hat{E}$ is an algebraic integer whose minimal polynomial over $K$ is the \textit {Hilbert class polynomial} $H_D$, where $D$ is the discriminant of $\mathcal{O}$. Notably, the polynomial $H_D$ actually lies in $\mathbb{Z}[X]$, and its splitting field is the \textit {ring class field} $K_\mathcal{O}$ for the order $\mathcal{O}$.
Conversely, an elliptic curve $E/\F_q$ with complex multiplication by $\mathcal{O}$ exists whenever $q$ satisfies the norm equation $4q=t^2-v^2D$, with $t,v\in\mathbb{Z}$ and $t\not\equiv 0$ modulo the characteristic of $\F_q$. In this case $H_D$ splits completely over $\F_q$, and its roots are precisely the $j$-invariants of the elliptic curves $E/\F_q$ that have complex multiplication by~$\mathcal{O}$. Such a curve has $q+1\pm t$ points, where $t$ is determined, up to a sign, by the norm equation. With a judicious selection of $D$ and $q$ one may obtain a curve with prescribed order. This is known as the \emph{CM~method}.
The main challenge for the CM method is to obtain the polynomial $H_D$, which has degree equal to the class number $h(D)$, and total size
$O(|D|^{1+\epsilon})$. There are three approaches to computing $H_D$, all of which, under reasonable assumptions, can achieve a running time of $O(|D|^{1+\epsilon})$. These include the complex analytic method \cite{Enge:FloatingPoint}, a $p$-adic algorithm \cite{Couveignes:ClassPolynomial,Broker:pAdicClassPolynomial}, and an approach based on the Chinese Remainder Theorem (CRT) \cite{Belding:HilbertClassPolynomial}. The first is the most widely used, and it is quite efficient; the range of discriminants to which it may be applied is limited not by its running time, but by the space required. The polynomial $H_D$ is already likely to exceed available memory when
$|D|>10^9$, hence one seeks to apply the CM method to alternative class polynomials that have smaller coefficients than~$H_D$. This makes computations with $|D|>10^{10}$ feasible.
Recently, a modified version of the CRT approach was proposed that greatly reduces the space required for the CM method
\cite{Sutherland:HilbertClassPolynomials}. Under the Generalised Riemann Hypothesis (GRH), this algorithm is able to compute $H_D\bmod P$ using $O(|D|^{1/2+\epsilon}\log P)$ space and
$O(|D|^{1+\epsilon})$ time. (Here and in the following, all complexity estimates refer to bit operations.)
The reduced space complexity allows it to handle much larger discriminants, including examples with $|D| > 10^{13}$.
An apparent limitation of the CRT approach is that it depends on some specific features of the $j$-function. As noted in \cite{Belding:HilbertClassPolynomial}, this potentially precludes it from computing class polynomials other than $H_D$. The purpose of the present article is to show how these obstructions may be overcome, allowing us to apply the CRT method to many functions other than $j$, including two infinite families.
Subject to suitable constraints on $D$, we may then compute a class polynomial with smaller coefficients than $H_D$ (by a factor of up to 72), and, in certain cases, with smaller degree (by a factor of 2). Remarkably, the actual running time with the CRT method is typically \textit {better} than the size difference would suggest. Fewer CRT moduli are needed, and we may choose a subset for which the computation is substantially faster than on average.
We start~\S\ref {sec:crtj} with a brief overview of the CRT method, and then describe a new technique to improve its performance, which also turns out to be crucial for certain class invariants. After discussing families of invariants in~\S\ref {sec:invariants}, we consider CRT-based approaches applicable to the different families and give a general algorithm in~\S\ref {sec:crtinv}. Computational results and performance data appear in~\S\ref {sec:implementation}.
\section{Hilbert class polynomials via the CRT} \label {sec:crtj}
\subsection {The algorithm of Belding, Br\"oker, Enge, Lauter and Sutherland} \label {ssec:bbels}
The basic idea of the CRT-based algorithm for Hilbert class polynomials is to compute $H_D$ modulo many small primes~$p$, and then lift its coefficients by Chinese remaindering to integers, or to their reductions modulo a large (typically prime) integer~$P$, via the explicit CRT \cite[Thm. 3.1]{Bernstein:ModularExponentiation}. The latter approach suffices for most applications, and while it does not substantially reduce the running time (the same number of small primes is required), it can be accomplished using only $O(|D|^{1/2+\epsilon}\log P)$ space with the method of \cite[\S6]{Sutherland:HilbertClassPolynomials}.
For future reference, we summarise the algorithm to compute $H_D\bmod p$ for a prime $p$ that splits completely in the ring class field $K_\mathcal{O}$. Let $h=h(D)$.
\begin{algorithm}[Computing $\boldsymbol{H_D\bmod p}$] \label {alg:crtj} \quad \begin{enumerate} \item Find the $j$-invariant $j_1$ of an elliptic curve $E/\F_p$ with $\operatorname{End}(E)\cong\mathcal{O}$. \item Enumerate the other roots $j_2, \ldots, j_{h}$ of $H_D \bmod p$. \item Compute $H_D(X) \bmod p = (X-j_1) \cdots (X-j_{h})$. \end{enumerate} \end {algorithm}
The first step is achieved by varying $j_1$ (systematically or randomly) over the elements of $\F_p$ until it corresponds to a suitable curve; details and many practical improvements are given in \cite{Belding:HilbertClassPolynomial,Sutherland:HilbertClassPolynomials}. The third step is a standard building block of computer algebra. Our interest lies in Step~2.
\subsection{Enumerating the roots of $H_D\bmod p$} \label{ssec:crtjenum}
The key idea in \cite{Belding:HilbertClassPolynomial} leading to a quasi-linear complexity is to apply the Galois action of $\operatorname{Cl}(\mathcal{O}) \simeq \operatorname{Gal} (K_\mathcal{O}/K)$. The group $\operatorname{Cl}(\mathcal{O})$ acts on the roots of $H_D$, and when $p$ splits completely in $K_\mathcal{O}$ there is a corresponding action on the set $\mathrm {Ell}_{\mathcal{O}}(\F_p)=\{j_1,\ldots,j_h\}$ containing the roots of $H_D \bmod p$. For an ideal class $[\mathfrak{a}]$ in $\operatorname{Cl}(\mathcal{O})$ and a $j$-invariant $j_i\in\mathrm {Ell}_{\mathcal{O}}(\F_p)$, let us write $[\mathfrak{a}]j_i$ for the image of $j_i$ under the Galois action of $[\mathfrak{a}]$. We then have $\mathrm {Ell}_{\mathcal{O}}(\F_p)=\{[\mathfrak{a}]j_1:[\mathfrak{a}]\in\operatorname{Cl}(\mathcal{O})\}$.
As in \cite[\S 5]{Sutherland:HilbertClassPolynomials}, we use a polycyclic presentation defined by a sequence of ideals $\mathfrak{l}_1,\ldots,\mathfrak{l}_m$ with prime norms $\ell_1,\ldots,\ell_m$ whose classes generate $\operatorname{Cl}(\mathcal{O})$. The \emph{relative order} $r_k$ is the least positive integer for which $[\mathfrak{l}_k^{r_k}] \in \langle [\mathfrak{l}_1], \ldots, [\mathfrak{l}_{k-1}] \rangle$. We may then uniquely write $[\mathfrak{a}] = [\mathfrak{l}_1^{e_1}]\cdots[\mathfrak{l}_m^{e_m}]$, with $0 \leq e_k < r_k$. To maximise performance, we use a presentation in which $\ell_1<\cdots <\ell_m$, with each~$\ell_k$ as small as possible subject to $r_k > 1$. Note that the relative order $r_k$ divides the order~$n_k$ of $[\mathfrak{l}_k]$ in $\operatorname{Cl}(\mathcal{O})$, but for $k>1$ we can (and often do) have $r_k < n_k$.
For each $j_i\in\mathrm {Ell}_{\mathcal{O}}(\F_p)$ and each $\mathcal{O}$-ideal $\mathfrak{l}$ of prime norm $\ell$, the $j$-invariant~$[\mathfrak{l}]j_i$ corresponds to an $\ell$-isogenous curve, which we may obtain as a root of ${\rm\Phi}_{\ell}(j_i,X)$, where ${\rm\Phi}_\ell\in\mathbb{Z}[J,J_{\ell}]$ is the \textit {classical modular polynomial} \cite[\S 69]{Weber:Algebra}. The polynomial~${\rm\Phi}_\ell$ has the pair of functions $\bigl(j(z),j(\ell z)\bigr)$ as roots, and parameterises isogenies of degree~$\ell$.
Fixing an isomorphism $\operatorname{End}(E)\cong \mathcal{O}$, we let $\pi\in\mathcal{O}$ denote the Frobenius endomorphism. When the order~$\mathbb{Z}[\pi]$ is maximal at $\ell$, the univariate polynomial ${\rm\Phi}_\ell(j_i,X)\in\F_p[X]$ has exactly two roots $[\mathfrak{l}]j_i$ and~$[\bar\mathfrak{l}]j_i$ when $\ell$ splits in $\mathcal{O}$, and a single root $[\mathfrak{l}]j_i$ if $\ell$ is ramified \cite[Prop.~23]{Kohel:Thesis}. To simplify matters, we assume here that~$\mathbb{Z}[\pi]$ is maximal at each $\ell_k$, but this is not necessary, see \cite[\S 4]{Sutherland:HilbertClassPolynomials}.
We may enumerate $\mathrm {Ell}_{\mathcal{O}}(\F_p)=\{[\mathfrak{a}]j_1:[\mathfrak{a}]\in\langle[\mathfrak{l}_1],\ldots,[\mathfrak{l}_m]\rangle\}$ via \cite[Alg.~1.3]{Sutherland:HilbertClassPolynomials}: \begin{algorithm}[Enumerating $\boldsymbol{\mathrm {Ell}_{\mathcal{O}}(\F_p)}$ --- Step 2 of Algorithm \ref {alg:crtj}] \label {alg:crtjenum} \quad \begin{enumerate} \item Let $j_2$ be an arbitrary root of ${\rm\Phi}_{\ell_m}(j_1,X)$ in $\F_p$. \item For $i$ from $3$ to $r_m$, let $j_i$ be the root of ${\rm\Phi}_{\ell_m}(j_{i-1},X)/(X-j_{i-2})$ in $\F_p$. \item If $m > 1$, then for $i$ from $1$ to $r_m$:
\hspace{12pt} Recursively enumerate the set $\{[\mathfrak{a}]j_i:[\mathfrak{a}]\in\langle[\mathfrak{l}_1],\ldots,[\mathfrak{l}_{m-1}]\rangle\}$. \end{enumerate} \end{algorithm} In general there are two distinct choices for $j_2$, but either will do. Once $j_2$ is chosen, $j_3,\ldots,j_{r_m}$ are determined. The sequence $(j_1,\ldots,j_{r_m})$ corresponds to a path of $\ell_m$-isogenies; we call this path an $\ell_m$-\emph{thread}.
The choice of $j_2$ in Step 1 may change the order in which $\mathrm {Ell}_{\mathcal{O}}(\F_p)$ is enumerated. Three of the sixteen possibilities when $m=2$, $r_1=4$, and $r_2=3$ are shown below; we assume $[\mathfrak{l}_2^3]=[\mathfrak{l}_1]$, and label each vertex $[\mathfrak{l}_2^e]j_1$ by the exponent~$e$.
\begin{center} \begin{tikzpicture} \scriptsize
\tikzstyle{vertex}=[circle,draw=black,fill=black!15,minimum size=9pt,inner sep=0pt]
\foreach \name/\x/\y in {0/0.0/1.6, 3/0.8/1.6, 6/1.6/1.6, 9/2.4/1.6,
1/0.0/0.8, 4/0.8/0.8, 7/1.6/0.8, 10/2.4/0.8,
2/0.0/0.0, 5/0.8/0.0, 8/1.6/0.0, 11/2.4/0.0}
\node[vertex] (G-\name) at (\x,\y) {$\name$};
\foreach \from/\to in {0/1,0/3,1/4,2/5}
\draw[line width=1.0pt,->] (G-\from) -- (G-\to);
\foreach \y/\z in {1.23/black,0.43/black} \node at (-0.15,\y) {{\color{\z}$\mathfrak{l}_2$}};
\foreach \x/\z in {0.4/black,1.2/black,2.0/black} \node at (\x,1.76) {{\color{\z}$\mathfrak{l}_1$}};
\foreach \x/\z in {0.4/black,1.2/black,2.0/black} \node at (\x,0.95) {{\color{\z}$\mathfrak{l}_1$}};
\foreach \x/\z in {0.4/black,1.2/black,2.0/black} \node at (\x,0.15) {{\color{\z}$\mathfrak{l}_1$}};
\foreach \from/\to in {1/2,3/6,6/9,4/7,7/10,5/8,8/11}
\draw[black,->] (G-\from) -- (G-\to);
\foreach \name/\x/\y in {0/4.4/1.6, 9/5.2/1.6, 6/6.0/1.6, 3/6.8/1.6,
1/4.4/0.8, 10/5.2/0.8, 7/6.0/0.8, 4/6.8/0.8,
2/4.4/0.0, 5/5.2/0.0, 8/6.0/0.0, 11/6.8/0.0}
\node[vertex] (G-\name) at (\x,\y) {$\name$};
\foreach \from/\to in {0/1,0/9,1/10,2/5}
\draw[line width=1.0pt,->] (G-\from) -- (G-\to);
\foreach \y/\z in {1.23/black,0.43/black} \node at (4.25,\y) {{\color{\z}$\mathfrak{l}_2$}};
\foreach \x/\z in {4.8/black,5.6/black,6.4/black} \node at (\x,1.79) {{\color{\z}$\bar\mathfrak{l}_1$}};
\foreach \x/\z in {4.8/black,5.6/black,6.4/black} \node at (\x,0.98) {{\color{\z}$\bar\mathfrak{l}_1$}};
\foreach \x/\z in {4.8/black,5.6/black,6.4/black} \node at (\x,0.15) {{\color{\z}$\mathfrak{l}_1$}};
\foreach \from/\to in {1/2,9/6,6/3,10/7,7/4,5/8,8/11}
\draw[black,->] (G-\from) -- (G-\to);
\foreach \name/\x/\y in {0/8.8/1.6, 3/9.6/1.6, 6/10.4/1.6, 9/11.2/1.6,
11/8.8/0.8, 8/9.6/0.8, 5/10.4/0.8, 2/11.2/0.8,
10/8.8/0.0, 1/9.6/0.0, 4/10.4/0.0, 7/11.2/0.0}
\node[vertex] (G-\name) at (\x,\y) {$\name$};
\foreach \from/\to in {0/11,0/3,11/8,10/1}
\draw[line width=1.0pt,->] (G-\from) -- (G-\to);
\foreach \y/\z in {1.25/black,0.45/black} \node at (8.65,\y) {{\color{\z}$\bar\mathfrak{l}_2$}};
\foreach \x/\z in {9.2/black,10.0/black,10.8/black} \node at (\x,1.76) {{\color{\z}$\mathfrak{l}_1$}};
\foreach \x/\z in {9.2/black,10.0/black,10.8/black} \node at (\x,0.98) {{\color{\z}$\bar\mathfrak{l}_1$}};
\foreach \x/\z in {9.2/black,10.0/black,10.8/black} \node at (\x,0.15) {{\color{\z}$\mathfrak{l}_1$}};
\foreach \from/\to in {11/10,3/6,6/9,8/5,5/2,1/4,4/7}
\draw[black,->] (G-\from) -- (G-\to);
\normalsize \end{tikzpicture} \end{center}
Bold edges indicate where a choice was made. Regardless of these choices, Algorithm~\ref {alg:crtjenum} correctly enumerates $\mathrm {Ell}_{\mathcal{O}}(\F_p)$ in every case \cite[Prop.~5]{Sutherland:HilbertClassPolynomials}.
\subsection {Finding roots with greatest common divisors (gcds)} \label {ssec:gcd}
The potentially haphazard manner in which Algorithm~\ref {alg:crtjenum} enumerates $\mathrm {Ell}_{\mathcal{O}}(\F_p)$ is not a problem when computing $H_D$, but it can complicate matters when we wish to compute other class polynomials. We could distinguish the actions of $\mathfrak{l}$ and $\bar\mathfrak{l}$ using an Elkies kernel polynomial \cite{Elkies98}, as suggested in \cite[\S 5]{Broker:pAdicClassPolynomial}, however this slows down the algorithm significantly. An alternative approach using polynomial gcds turns out to be much more efficient, and actually speeds up Algorithm~\ref {alg:crtjenum}, making it already a useful improvement when computing $H_D$.
We need not distinguish the actions of $\mathfrak{l}$ and $\bar\mathfrak{l}$ at this stage, but we wish to ensure that our enumeration of $\mathrm {Ell}_{\mathcal{O}}(\F_p)$ makes a consistent choice of direction each time it starts an $\ell$-thread. The first $\ell$-thread may be oriented arbitrarily, but for each subsequent $\ell$-thread $(j_1',j_2',\ldots,j_r')$, we apply Lemma~\ref{gcdlemma} below. This allows us to ``square the corner'' by choosing $j_2'$ as the unique common root of ${\rm\Phi}_\ell(X,j_1')$ and ${\rm\Phi}_{\ell'}(X,j_2)$, where $(j_1,\ldots,j_r)$ is a previously computed $\ell$-thread and $j_1$ is $\ell'$-isogenous to $j_1'$. The edge $(j_1,j_1')$ lies in an $\ell'$-thread that has already been computed, for some $\ell'>\ell$.
\begin{center} \begin{tikzpicture} \scriptsize
\tikzstyle{vertex}=[circle,draw,fill=black!15,minimum size=12pt,inner sep=0pt]
\foreach \name/\lab/\x/\y in {1/j_1/0.0/1.6, 2/j_2/0.8/1.6, 3/j_3/1.6/1.6, r/j_r/3.3/1.6,
11/j_1'/0.0/0.8, 22/j_2'/0.8/0.8}
\node[vertex] (G-\name) at (\x,\y) {$\lab$};
\node (G-4) at (2.3,1.6) {};
\node (G-dots) at (2.5,1.6) {$\cdots$};
\node (G-rm1) at (2.6,1.6) {};
\foreach \from/\to in {1/11,1/2}
\draw[->] (G-\from) -- (G-\to);
\foreach \x in {-0.15,0.65} \node at (\x,1.22) {$\mathfrak{l}'$};
\foreach \x in {0.4,1.2,2.0,2.9} \node at (\x,1.75) {$\mathfrak{l}$};
\node at (0.4,0.95) {{\color{black}$\mathfrak{l}$}};
\foreach \from/\to in {2/3,3/4,rm1/r}
\draw[black,->] (G-\from) -- (G-\to);
\foreach \from/\to in {11/22,2/22}
\draw[dashed,->] (G-\from) -- (G-\to);
\foreach \name/\lab/\x/\y in {1/j_1/6.0/1.6, 2/j_2/6.8/1.6, 3/j_3/7.6/1.6, r/j_r/9.3/1.6,
11/j_1'/6.0/0.8, 22/j_2'/6.8/0.8, 33/j_3'/7.6/0.8, rr/j_r'/9.3/0.8}
\node[vertex] (G-\name) at (\x,\y) {$\lab$};
\node (G-4) at (8.3,1.6) {};
\node (G-dots) at (8.5,1.6) {$\cdots$};
\node (G-rm1) at (8.6,1.6) {};
\node (G-44) at (8.3,0.8) {};
\node (G-ddots) at (8.5,0.8) {$\cdots$};
\node (G-rrm1) at (8.6,0.8) {};
\foreach \from/\to in {1/11,1/2}
\draw[->] (G-\from) -- (G-\to);
\foreach \x in {5.85,6.65,7.45,9.15} \node at (\x,1.22) {$\mathfrak{l}'$};
\foreach \x in {6.4,7.2,8.0,8.9} \node at (\x,1.75) {$\mathfrak{l}$};
\foreach \x in {6.4,7.2,8.0,8.9} \node at (\x,0.95) {$\mathfrak{l}$};
\foreach \from/\to in {2/3,3/4,rm1/r}
\draw[black,->] (G-\from) -- (G-\to);
\foreach \from/\to in {11/22,2/22,22/33,3/33,33/ddots,rrm1/rr,r/rr}
\draw[dashed,->] (G-\from) -- (G-\to);
\normalsize \end{tikzpicture} \end{center}
Having computed $j_2'$, we could compute $j_3',\ldots,j_r'$ as before, but it is usually better to continue using gcds, as depicted above. Asymptotically, both root-finding and gcd computations are dominated by the $O(\ell^2 \textsf{M} (\log p))$ time it takes to instantiate ${\rm\Phi}_{\ell}(X,j_i)\bmod p$, but in practice $\ell$ is small, and we effectively gain a factor of $O(\log p)$ by using gcds when $\ell\approx \ell'$. This can substantially reduce the running time of Algorithm~\ref {alg:crtjenum}, as may be seen in Table~\ref {tab1} of~\S\ref {sec:implementation}.
With the gcd approach described above, the total number of root-finding operations can be reduced from $\prod_{k=1}^m r_k$ to $\sum_{k=1}^m r_k$. When $m$ is large, this is a big improvement, but it is no help when $m=1$, as necessarily occurs when $h(D)$ is prime. However, even in this case we can apply gcds by looking for an auxiliary ideal $\mathfrak{l}_1'$, with prime norm $\ell_1'$, for which $[\mathfrak{l}_1']=[\mathfrak{l}_1^{e}]$. When $r_1$ is large, such an $\mathfrak{l}_1'$ is easy to find, and we may choose the best combination of $\ell_1'$ and $e$ available. This idea generalises to $\ell_k$-threads, where we seek $[\mathfrak{l}_k']\in\langle[\mathfrak{l}_1],\ldots,[\mathfrak{l}_k]\rangle \setminus \langle[\mathfrak{l}_1],\ldots,[\mathfrak{l}_{k-1}]\rangle$.
\begin{lemma}\label{gcdlemma} Let $j_1,j_2\in\mathrm {Ell}_{\mathcal{O}}(\F_p)$, and let $\ell_1,\ell_2\ne p$ be distinct primes with
$4\ell_1^2\ell_2^2 < |D|$. Then $\gcd\bigl({\rm\Phi}_{\ell_1}(j_1,X),{\rm\Phi}_{\ell_2}(j_2,X)\bigr)$ has degree at most~$1$. \end{lemma} \begin{proof} It follows from \cite[Prop.~23]{Kohel:Thesis} that ${\rm\Phi}_{\ell_1}(X,j_1)$ and ${\rm\Phi}_{\ell_2}(X,j_2)$ have at most two common roots in the algebraic closure $\overline \F_p$, which in fact lie in $\mathrm {Ell}_{\mathcal{O}}(\F_p)$. If there are exactly two, then both $\ell_1 = \mathfrak{l}_1 \overline\mathfrak{l}_1$ and $\ell_2 = \mathfrak{l}_2 \overline\mathfrak{l}_2$ split in $\mathcal{O}$, and one of $\mathfrak{l}_1^2\mathfrak{l}_2^2$ or $\mathfrak{l}_1^2\bar\mathfrak{l}_2^2$ is principal with a non-rational generator. We thus have a norm equation $4\ell_1^2\ell_2^2=a^2-b^2D$ with $a,b \in \mathbb{Z}$ and $b\ne 0$, and the lemma follows. \end{proof}
\section{Class invariants} \label {sec:invariants}
Due to the large size of $H_D$, much effort has been spent seeking smaller generators of $K_\mathcal{O}$. For a modular function $f$ and $\mathcal{O}=\mathbb{Z}[\tau]$, with $\tau$ in the upper half plane, we call $f (\tau)$ a \textit {class invariant} if $f (\tau) \in K_\mathcal{O}$. The \textit {class polynomial} for $f$ is \[ H_D [f] (X) = \prod_{[\mathfrak{a}] \in \operatorname{Cl} (\mathcal{O})} \left( X - [\mathfrak{a}] f (\tau) \right). \] The contemporary tool for determining class invariants is Shimura's reciprocity law; see \cite[Th.~4]{Schertz02} for a fairly general result. Class invariants arising from many different modular functions have been described in the literature; we briefly summarise some of the most useful ones.
Let $\eta$ be Dedekind's function, and let $\zeta_n=\exp(2\pi i/n)$. Weber considered \[ \mathfrak{f} = \zeta_{48}^{-1} \frac {\eta \left( \frac {z+1}{2} \right)}{\eta (z)}, \qquad \mathfrak{f}_1 (z) = \frac {\eta \left( \frac {z}{2} \right)}{\eta (z)}, \qquad \mathfrak{f}_2 (z) = \sqrt 2 \, \frac {\eta (2 z)}{\eta (z)}, \] powers of which yield class invariants when $\legendre {D}{2} \neq -1$, and also $\gamma_2 = \sqrt[3] j$, which is a class invariant whenever $3\nmid D$. The Weber functions can be generalised \cite {EnMo09,EnSc04,GeSt98,Gee01,HaVi97}, and we have the simple and double $\eta$-quotients \[ \mathfrak{w}_N (z) = \frac {\eta \left( \frac {z}{N} \right)}{\eta (z)};\qquad\qquad \mathfrak{w}_{p_1, p_2} = \frac {\eta \left( \frac {z}{p_1} \right) \eta \left( \frac {z}{p_2} \right)}{\eta \left( \frac {z}{p_1 p_2} \right) \eta (z)} \text { with } N = p_1 p_2, \] where $p_1$ and $p_2$ are primes. Subject to constraints on $D$, including that no prime dividing $N$ is inert in $\mathcal{O}$, suitable powers of these functions yield class invariants, see \cite{EnMo09,EnSc04}. For $s = 24/\gcd \bigl(24, (p_1 -1)(p_2 - 1)\bigr)$, the canonical power $\mathfrak{w}_{p_1, p_2}^s$ is invariant under the Fricke involution $W|_N:z \mapsto \frac {-N}{z}$ for ${\rm\Gamma}^0 (N)$, equivalently, the Atkin-Lehner involution of level $N$, by \cite[Thm.~2]{EnSc05}.
The theory of \cite {Schertz02} applies to any functions for ${\rm\Gamma}^0 (N)$, in particular to those of prime level $N$ invariant under the Fricke involution, which yield class invariants when $\legendre {D}{N} \neq -1$. Atkin developed a method to compute such functions $A_N$, which are conjectured to have a pole of minimal order at the unique cusp \cite{Elkies98,Morain95}. These are used in the SEA algorithm, and can be found in \textsc {Magma} or \textsc {Pari/GP}.
The functions above all yield algebraic integers, so $H_D [f] \in \mathcal{O}_K[X]$. Except for $\mathfrak{w}_N^e$ or when $\gcd (N, D) \neq 1$, in which cases additional restrictions may apply, one actually has $H_D [f] \in \mathbb{Z} [X]$, cf.~\cite[Cor.~3.1]{EnSc04}. The (logarithmic) \textit{height} of
$H_D [f]=\sum a_iX^i$ is $\log\max|a_i|$, which determines the precision needed to compute the $a_i$. We let $c_D(f)$ denote the ratio of the heights of $H_D[j]$ and $H_D [f]$.
With $c(f)=\lim_{|D|\to\infty}c_D(f)$, we have: $c(\gamma_2)=3$; $c(\mathfrak{f})=72$ (when $\legendre {D}{2} = 1$); \[
c(\mathfrak{w}^e_N)=\frac{24(N+1)}{e(N-1)};\qquad c(\mathfrak{w}_{p_1,p_2}^s)=\frac{12\psi(p_1p_2)}{s(p_1-1)(p_2-1)};\qquad c(A_N)=\frac{N+1}{2|v_N|}, \] where $e$ divides the exponent~$s$ defined above, $v_N$ is the order of the pole of $A_N$ at the cusp, and $\psi(p_1p_2)$ is $(p_1+1)(p_2+1)$ when $p_1\ne p_2$, and $p_1(p_1+1)$ when $p_1=p_2$. Morain observed in \cite{Morain09} that $c(A_{71})=36$, which is so far the best value known when $\legendre {D}{2} = -1$. We conjecture that in fact for all primes $N > 11$ with $N \equiv 11 \bmod{60}$ we have $c(A_N) = 30 \frac {N+1}{N-11}$, and that for $N \equiv -1 \bmod {60}$ we have $c(A_N)=30$. This implies that given an arbitrary discriminant $D$, we can always choose $N$ so that $A_N$ yields class invariants with $c_D(A_N)\ge 30+o(1)$.
When the prime divisors of $N$ are all ramified in $K$, both $\mathfrak{w}_{p_1,p_2}$ and $A_N$ yield class polynomials that are squares in $\mathbb{Z} [X]$, see \cite[\S 1.6]{Enge07} and \cite{EnSc10}. Taking the square root of such a class polynomial reduces both its degree and its height by a factor of 2. For a composite fundamental discriminant~$D$ (the most common case), this applies to $H_D[A_N]$ for any prime $N \mid D$. In the best case, $D$ is divisible by 71, and we obtain a class polynomial that is 144 times smaller than $H_D$.
\subsection {Modular polynomials}
Each function $f(z)$ considered above is related to $j(z)$ by a modular polynomial ${\rm\Psi}_f\in\mathbb{Z} [F, J]$ satisfying ${\rm\Psi}_f (f (z), j (z)) = 0$. For primes $\ell$ not dividing the level $N$, we let ${\rm\Phi}_{\ell, f}$ denote the minimal polynomial satisfying ${\rm\Phi}_{\ell, f} (f (z), f (\ell z)) = 0$; it is a factor of $ \operatorname{Res}_{J_\ell} \bigl( \operatorname{Res}_J ({\rm\Phi}_\ell (J, J_\ell), {\rm\Psi}_f (F, J)), {\rm\Psi}_f (F_\ell, J_\ell) \bigr), $ and as such, an element of $\mathbb{Z} [F, F_\ell]$. Thus ${\rm\Phi}_{\ell, f}$ generalises the classical modular polynomial ${\rm\Phi}_\ell={\rm\Phi}_{\ell,j}$.
The polynomial ${\rm\Phi}_{\ell,f}$ has degree $d(\ell+1)$ in $F$ and $F_\ell$, where $d$ divides $\deg_J{\rm\Psi}_f$, see~\cite[\S6.8]{Broker:Thesis}, and $2d$ divides $\deg_J{\rm\Psi}_f$ when $f$ is invariant under the Fricke involution. In general, $d$ is maximal, and $d = 1$ is achievable only in the relatively few cases where $X_0 (N)$, respectively $X_0^+ (N)$, is of genus~$0$ and, moreover, $f$ is a hauptmodul, that is, it generates the function field of the curve. Happily, this includes many cases of practical interest.
The polynomial ${\rm\Psi}_f$ characterises the analytic function $f$ in an algebraic way; when $d=1$, the polynomials ${\rm\Phi}_\ell$ and ${\rm\Phi}_{\ell, f}$ algebraically characterise $\ell$-isogenies between elliptic curves given by their $j$-invariants, or by class invariants derived from $f$, respectively. These are key ingredients for the CRT method.
\section {CRT algorithms for class invariants} \label {sec:crtinv}
To adapt Algorithm~\ref{alg:crtj} to class invariants arising from a modular function $f(z)$ other than $j(z)$, we only need to consider Algorithm~\ref{alg:crtjenum}. Our objective is to enumerate the roots of $H_D[f]\bmod p$ for suitable primes $p$, which we are free to choose. This may be done in one of two ways. The most direct approach computes an ``$f$-invariant" $f_1$, corresponding to $j_1$, then enumerates $f_2,\ldots,f_h$ using the modular polynomials ${\rm\Phi}_{\ell,f}$. Alternatively, we may enumerate $j_1,\ldots,j_h$ as before, and from these derive $f_1,\ldots,f_h$. The latter approach is not as efficient, but it applies to a wider range of functions, including two infinite families.
Several problems arise. First, an elliptic curve $E/\F_p$ with CM by~$\mathcal{O}$ unambiguously defines a $j$-invariant~$j_1=j(E)$, but not the corresponding $f_1$. The $f_1$ we seek is a root of $\psi_f(X) = {\rm\Psi}_f (X, j_1) \bmod p$, but $\psi_f$ may have other roots, which may or may not be class invariants. The same problem occurs for the $p$-adic lifting algorithm and can be solved generically \cite[\S 6]{Broker:Thesis}; we describe some more efficient solutions, which are in part specific to certain types of functions.
When $\psi_f$ has multiple roots that are class invariants, these may be roots of distinct class polynomials. We are generally happy to compute any one of these, but it is imperative that we compute the reduction of ``the same" class polynomial $H_D[f]$ modulo each prime $p$.
The lemma below helps to address these issues for at least two infinite families of functions: the double $\eta$-quotients $\mathfrak{w}_{p_1,p_2}$ and the Atkin functions $A_N$.
\begin{lemma} \label{lemma:psiroots} Let $f$ be a modular function for ${\rm\Gamma}^0 (N)$, invariant under the Fricke involution $W|_N$, such that $f (z)$ and $f \left( \frac {-1}{z} \right)$ have rational $q$-expansions. Let the imaginary quadratic order $\mathcal{O}$ have conductor coprime to $N$ and contain an ideal $\mathfrak{n} = \bigl( N, \frac {B_0 + \sqrt D}{2} \bigr)$. Let $A_0 = \frac {B_0^2 - D}{4 N}$ and $\tau_0 = \frac {-B_0 + \sqrt D}{2 A_0}$, and assume that $\gcd (A_0, N) = 1$. Then $f (\tau_0)$ is a class invariant, and if $f (\tau)$ is any of its conjugates under the action of $\operatorname{Gal}(K_\mathcal{O}/K)$ we have \[ {\rm\Psi}_f\bigl(f(\tau),j(\tau)\bigr)=0\qquad{\text{and}}\qquad {\rm\Psi}_f\bigl(f(\tau),[\mathfrak{n}]j(\tau)\bigr)=0. \] \end{lemma} \begin{proof} By definition, ${\rm\Psi}_f \bigl(f (z), j (z)\bigr) = 0$. Applying the Fricke involution yields $ 0 = {\rm\Psi}_f \left( (W|_N f) (z), (W|_N j) (z) \right) = {\rm\Psi}_f \left( f (z), j \left( \frac {-N}{z} \right) \right) = {\rm\Psi}_f \left( f (z), j \left( \frac {z}{N} \right) \right). $ The value $f (\tau_0)$ is a class invariant by \cite[Th.~4]{Schertz02}. By the same result, we may assume that $\tau$ is the basis quotient of an ideal $\mathfrak{a} = \bigl( A, \frac {-B + \sqrt D}{2} \bigr)$ with $\gcd (A, N) = 1$ and $B \equiv B_0 \bmod {2 N}$. Then $\frac {\tau}{N}$ is the basis quotient of $\mathfrak{a} \overline\mathfrak{n} = \bigl( AN, \frac {-B + \sqrt D}{2} \bigr)$. It follows that $[\mathfrak{n}] j (\tau) = j \left( \frac {\tau}{N} \right)$, and replacing $z$ above by $\tau$ completes the proof. \end{proof}
If we arrange the roots of $H_D$ into a graph of $\mathfrak{n}$-isogeny cycles corresponding to the action of $\mathfrak{n}$, the lemma yields a dual graph defined on the roots of $H_D[f]$, in which vertices $f(\tau)$ correspond to edges $\bigl(j(\tau),[\mathfrak{n}]j(\tau)\bigr)$.
In computational terms, $f (\tau)$ is a root of $\gcd \big( {\rm\Psi}_f\bigl(X,j(\tau)\bigr), {\rm\Psi}_f\bigl(X,[\mathfrak{n}]j(\tau)\bigr) \big)$. Generically, we expect this gcd to have no other roots modulo primes $p$ that split completely in $K_\mathcal{O}$. For a finite number of such primes, there may be additional roots. We have observed this for $p$ dividing the conductor of the order generated by $f (\tau)$ in the maximal order of $K_\mathcal{O}$. Such primes may either be excluded from our CRT computations, or addressed by one of the techniques described in \S\ref{ssec:indirect}.
\subsection {Direct enumeration} \label {ssec:direct}
When the polynomials ${\rm\Phi}_{\ell,f}$ have degree $\ell+1$ we can apply Algorithm~\ref{alg:crtjenum} with essentially no modification; the only new consideration is that $\ell$ must not divide the level $N$, but we can exclude such $\ell$ when choosing a polycyclic presentation for $\operatorname{Cl}(\mathcal{O})$. When the degree is greater than $\ell+1$ the situation is more complex, moreover the most efficient algorithms for computing modular polynomials do not apply \cite{BrLaSu09,Enge09mod}, making it difficult to obtain ${\rm\Phi}_{\ell, f}$ unless $\ell$ is very small. Thus in practice we do not use ${\rm\Phi}_{\ell,f}$ in this case; instead we apply the methods of \S\ref{ssec:indirect} or \S\ref{ssec:general}. For the remainder of this subsection and the next we assume that we do have polynomials ${\rm\Phi}_{\ell,f}$ of degree $\ell+1$ with which to enumerate $f_1,\ldots,f_h$, and consider how to determine a starting point $f_1$, given the $j$-invariant $j_1=j(E)$ of an elliptic curve $E/\F_p$ with CM by $\mathcal{O}$.
When $\psi_f(X) = {\rm\Psi}_f (X, j_1) \bmod p$ has only one root, our choice of $f_1$ is immediately determined. This is usually not the case, but we may be able to ensure it by restricting our choice of~$p$. As an example, for $f = \gamma_2$ with $3\nmid D$, if we require that $p \equiv 2 \bmod 3$, then $f_1$ is the unique cube root of~$j_1$ in $\F_p$. If we additionally have $D\equiv 1\bmod 8$ and $p\equiv 3\bmod 4$, then the equation $\gamma_2 = (\mathfrak{f}^{24}-16)/\mathfrak{f}^8$ uniquely determines the square of the Weber $\mathfrak{f}$ function, by \cite[Lem.~7.3]{BrLaSu09}. To treat $\mathfrak{f}$ itself we need an additional trick described in \S\ref{ssec:tracetrick}.
The next simplest case occurs when only one of the roots of $\psi_f$ is a class invariant. This necessarily happens when $f$ is invariant under the Fricke involution and all the primes dividing $N$ are ramified in~$\mathcal{O}$. In the context of Lemma~\ref{lemma:psiroots}, each root of $H_D[f]$ then corresponds to an isolated edge $\bigl(j(\tau),[\mathfrak{n}]j(\tau)\bigr)$ in the $\mathfrak{n}$-isogeny graph on the roots of $H_D$, and we compute $f_1$ as the unique root of $\gcd\bigl({\rm\Psi}_f(X,j_1),{\rm\Psi}_f(X,[\mathfrak{n}]j_1)\bigr)$. In this situation $\mathfrak{n}=\bar\mathfrak{n}$, and each $f(\tau)$ occurs twice as a root of $H_D[f]$. By using a polycyclic presentation for $\operatorname{Cl}(\mathcal{O})/\langle[\mathfrak{n}]\rangle$ rather than $\operatorname{Cl}(\mathcal{O})$, we enumerate each double root of $H_D[f]\bmod p$ just once.
Even when $\psi_f$ has multiple roots that are class invariants, it may happen that they are all roots of the \emph{same} class polynomial. This applies to the Atkin functions $f=A_N$. When $N$ is a split prime, there are two $N$-isogenous pairs $(j_1,[\mathfrak{n}]j_1)$ and $([\bar\mathfrak{n}]j_1,j_1)$ in $\mathrm {Ell}_{\mathcal{O}}(\F_p)$, and under Lemma~\ref{lemma:psiroots} these correspond to roots $f_1$ and $[\bar\mathfrak{n}]f_1$ of $\psi_f$. Both are roots of $H_D[f]$, and we may choose either.
The situation is slightly more complicated for the double $\eta$-quotients $\mathfrak{w}_{p_1,p_2}$, with $N=p_1p_2$ composite. If $p_1=\mathfrak{p}_1\bar\mathfrak{p}_1$ and $p_2=\mathfrak{p}_2\bar\mathfrak{p}_2$ both split and $p_1\ne p_2$, then there are four distinct $N$-isogenies corresponding to four roots of $\psi_f$. Two of these roots are related by the action of $[\mathfrak{n}]=[\mathfrak{p}_1\mathfrak{p}_2]$; they belong to the same class polynomial, which we choose as $H_D[f]\bmod p$. The other two are related by $[\mathfrak{p}_1\bar\mathfrak{p}_2]$ and are roots of a different class polynomial. We make an arbitrary choice for $f_1$, explicitly compute $[\mathfrak{n}] f_1$, and then check whether it occurs among the other three roots; if not, we correct the initial choice. The techniques of \S\ref {ssec:indirect} may be used to efficiently determine the action of $[\mathfrak{n}]$.
Listed below are some of the modular functions $f$ for which the roots of $H_D[f]\bmod p$ may be directly enumerated, with sufficient constraints on $D$ and~$p$. In each case $p$ splits completely in $K_\mathcal{O}$ and $D<-4N^2$ has conductor $u$. \renewcommand{\labelenumi}{(\arabic{enumi})} \begin {enumerate} \setlength{\itemsep}{4pt} \item $\gamma_2$, with $3\nmid D$ and $p\equiv 2\bmod 3$; \item $\mathfrak{f}^2$, with $D\equiv1\bmod 8$, $3\nmid D$, and $p\equiv 11\bmod 12$; \item $\mathfrak{w}_N^s$, for $N\in\{3,5,7,13\}$ and $s=24/\gcd(24,N-1)$, with $N \mid D$ and $N\nmid u$; \item $\mathfrak{w}_5^2$, with $3\nmid D$, $5 \mid D$, and $5\nmid u$; \item $A_N$, for $N\in\{3,5,7,11,13,17,19,23,29,31,41,47,59,71\}$, with $\legendre{D}{N}\ne -1$ and $N\nmid u$. \item $\mathfrak{w}_{p_1,p_2}^s$, for $(p_1,p_2)\in\{(2,3),(2,5),(2,7),(2,13),(3,5),(3,7),(3,13),(5,7)\}$ and $s=24/\gcd\bigl(24,(p_1-1)(p_2-1)\bigr)$, with $\legendre{D}{p_1},\legendre{D}{p_2}\ne -1$ and $p_1,p_2\nmid u$. \item $\mathfrak{w}_{3,3}^6$ with $\legendre{D}{3}=1$ and $3\nmid u$. \end {enumerate}
\subsection {The trace trick} \label {ssec:tracetrick}
In \S\ref{ssec:direct} we were able to treat the square of the Weber $\mathfrak{f}$ function but not $\mathfrak{f}$ itself. To remedy this, we generalise a method suggested to us by Reinier Br\"{o}ker.
We consider the situation where there are two modular functions $f$ and $f'$ that are roots of ${\rm\Psi}_f(X,j(z))$, both of which yield class invariants for $\mathcal{O}$, and we wish to apply the direct enumeration approach. We assume that $p$ is chosen so that $\psi_f(X)={\rm\Psi}_f(X,j_1)\bmod p$ has exactly two roots, and depending on which root we take as $f_1$, we may compute the reduction of either $H_D[f](X)$ or $H_D[f'](X)$ modulo $p$. In the case of Weber $\mathfrak{f}$, we have $f'=-f$, and $H_D[f']$ differs from $H_D[f]$ only in the sign of every other coefficient.
Consider a fixed coefficient $a_i$ of $H_D[f](X)=\sum a_iX^i$; most of the time, the trace $t = -a_{h-1} = f_1 + \cdots + f_h$ will do (if $f'=-f$, we need to use $a_i$ with $i\not\equiv h\bmod 2$). The two roots $f_1$ and $f_1'$ lead to two possibilities $t$ and $t'$ modulo~$p$. However, the elementary symmetric functions $T_1 = t + t'$ and $T_2 = t t'$ are unambiguous modulo~$p$. Computing these modulo many primes $p$ yields $T_1$ and~$T_2$ as integers (via the CRT), from which $t$ and $t'$ are obtained as roots of the quadratic equation $X^2 - T_1 X + T_2 = 0$. If these are different, we arbitrarily pick one of them, which, going back, determines the set of conjugates $\{ f_1, \ldots, f_h \}$ or $\{ f_1', \ldots, f_h' \}$ to take modulo each of the primes $p\nmid t-t'$. In the unlikely event that they are the same (the suspicion $t = t'$ being confirmed after, say, looking at the second prime), we need to switch to a different coefficient $a_i$.
If $f$ and $f'$ differ by a simple transformation (such as $f'=-f$), the second set of conjugates and the value $t'$ are obtained essentially for free. As a special case, when $h$ is odd and the class invariants are units (as with Weber $\mathfrak{f}$), we can simply fix $t = a_0=1$, and need not compute $T_1=0$ and $T_2=-1$.
The key point is that the number of primes $p$ we use to determine $t$
is much less than the number of primes we use to compute $H_D[f]$. Asymptotically, the logarithmic height of the trace is smaller than the height bound we use for $H_D[f]$ by a factor quasi-linear in $\log |D|$, under the GRH. In practical terms, determining~$t$ typically requires less than one tenth of the primes used to compute $H_D[f]$, and these computations can be combined.
The approach described above generalises immediately to more than two roots, but this case does not occur for the functions we examine. Unfortunately it can be used only in conjunction with the direct enumeration approach of \S\ref{ssec:direct}; otherwise we would have to consistently distinguish not only between $f_1$ and $f_1'$, but also between $f_i$ and $f_i'$ for $i = 2, \ldots, h$.
\subsection {Enumeration via the Fricke involution} \label {ssec:indirect} For functions $f$ to which Lemma~\ref{lemma:psiroots} applies, we can readily obtain the roots of $H_D[f]\bmod p$ without using the polynomials ${\rm\Phi}_{\ell,f}$. We instead enumerate the roots of $H_D\bmod p$ (using the polynomials ${\rm\Phi}_\ell$), and arrange them into a graph~$G$ of $\mathfrak{n}$-isogeny cycles, where $\mathfrak{n}$ is the ideal of norm $N$ appearing in Lemma~\ref{lemma:psiroots}. We then obtain roots of $H_D[f]\bmod p$ by computing $\gcd\bigl({\rm\Psi}_f(X,j_i),{\rm\Psi}_f(X,[\mathfrak{n}]j_i)\bigr)$ for each edge $(j_i,[\mathfrak{n}]j_i)$ in $G$.
The graph $G$ is composed of $h/n$ cycles of length $n$, where $n$ is the order of $[\mathfrak{n}]$ in $\operatorname{Cl}(\mathcal{O})$. We assume that the $\mathcal{O}$-ideals of norm $N$ are all non-principal and inequivalent (by requiring $|D|>4N^2$ if needed). When every prime dividing $N$ is ramified in $\mathcal{O}$ we have $n=2$; as noted in \S\ref{ssec:direct}, every root of $H_D[f]$ then occurs with multiplicity~$2$, and we may compute the square root of $H_D[f]$ by taking each root just once.
Let $[\mathfrak{l}_1],\ldots,[\mathfrak{l}_m]$ be a polycyclic presentation for $\operatorname{Cl}(\mathcal{O})$ with relative orders $r_1,\ldots,r_m$, as in \S\ref{ssec:crtjenum}. For $k$ from 1 to $m$ let us fix $\mathfrak{l}_k=\bigl(\ell_k,\frac{-B_k+\sqrt{D}}{2}\bigr)$ with $B_k \ge 0$. To each vector $\vec{e}=(e_1,\ldots,e_m)$ with $0\le e_k < r_k$, we associate a unique root $j_{\vec{e}}$ enumerated by Algorithm~\ref{alg:crtjenum}, corresponding to the path taken from $j_1$ to~$j_{\vec{e}}$, where $e_k$ counts steps taken along an $\ell_k$-thread. For $\vec{o}=(0,\ldots,0)$ we have $j_{\vec{o}}=j_1$, and in general \[ j_{\vec{e}}=[\mathfrak{l}_1^{\sigma_1e_1}\cdots\mathfrak{l}_m^{\sigma_m e_m}]j_{\vec{o}}, \] with $\sigma_k=\pm 1$. Using the method of \S\ref{ssec:gcd} to consistently orient the $\ell_k$-threads ensures that each $\sigma_k$ depends only on the orientation of the first $\ell_k$-thread.
To compute the graph $G$ we must determine the signs $\sigma_k$. For those $[\mathfrak{l}_k]$ of order 2, we let $\sigma_k=1$. We additionally fix $\sigma_k=1$ for the least $k=k_0$ (if any) for which $[\mathfrak{l}_k]$ has order greater than 2, since we need not distinguish the actions of $\mathfrak{n}$ and $\bar{\mathfrak{n}}$. It suffices to show how to determine $\sigma_k$, given that we know $\sigma_1,\ldots,\sigma_{k-1}$. We may assume $[\mathfrak{l}_{k_0}]$ and $[\mathfrak{l}_k]$ both have order greater than 2, with $k_0 < k \le m$.
Let $\mathfrak{l}$ be an auxiliary ideal of prime norm $\ell$ such that $[\mathfrak{l}]=[\mathfrak{a}\mathfrak{b}]=[\mathfrak{l}_1^{e_1}\cdots\mathfrak{l}_k^{e_k}]$, with $0 \le e_i < r_i$, where $\mathfrak{b}=\mathfrak{l}_k^{e_k}$, and $[\mathfrak{a}]$ and $[\mathfrak{b}]$ have order greater than 2. Our assumptions guarantee that such an $\mathfrak{l}$ exists, by the \v{C}ebotarev density theorem, and under the GRH, $\ell$ is relatively small \cite{Bach:ERHbounds}. The fact that $[\mathfrak{a}]$ and $[\mathfrak{b}]$ have order greater than 2 ensures that $[\mathfrak{a}\bar\mathfrak{b}]$ is distinct from $[\mathfrak{l}]$ and its inverse. It follows that $\sigma_k=1$ if and only if ${\rm\Phi}_\ell(j_{\vec{o}},j_{\vec{e}})=0$, where $\vec{e}=(e_1,\ldots,e_k,0,\ldots,0)$.
Having determined the $\sigma_k$, we compute the unique vector $\vec{v}=(v_1,\ldots,v_m)$ for which $[\mathfrak{n}]=[\mathfrak{l}_1^{\sigma_1v_1}\cdots\mathfrak{l}_m^{\sigma_mv_m}]$. We then have $[\mathfrak{n}]j_{\vec{o}}=j_{\vec{v}}$, yielding the edge $(j_{\vec{o}},j_{\vec{v}})$ of $G$. In general, we obtain the vector corresponding to $[\mathfrak{n}]j_{\vec{e}}$ by computing $\vec{e}+\vec{v}$ and using relations $[\mathfrak{l}_k^{r_k}]=[\mathfrak{l}_1^{x_1}\cdots\mathfrak{l}_{k-1}^{x_{k-1}}]$ to reduce the result, cf. \cite[\S5]{Sutherland:HilbertClassPolynomials}.
This method may be used with any function $f$ satisfying Lemma~\ref{lemma:psiroots}, and in particular it applies to two infinite families of functions: \renewcommand{\labelenumi}{(\arabic{enumi})} \begin {enumerate} \setlength{\itemsep}{4pt} \item[(8)] $A_N$, for $N>2$ prime, with $\legendre{D}{N}\ne -1$ and $N\nmid u$. \item[(9)] $\mathfrak{w}_{p_1,p_2}^s$, for $p_1,p_2$ primes not both 2, with $\legendre{D}{p_1},\legendre{D}{p_2}\ne -1$ and $p_1,p_2\nmid u$. \end{enumerate} As above, $u$ denotes the conductor of $D<-4N^2$.
As noted earlier, for certain primes $p$ we may have difficulty computing the edges of $G$ when $\gcd\bigl({\rm\Psi}_f(X,j_i),{\rm\Psi}_f(X,[\mathfrak{n}]j_i)\bigr)$ has more than one root in $\F_p$. While we need not use such primes, it is often easy to determine the correct root. Here we give two heuristic techniques for doing so.
The first applies when $N$ is prime, as with the Atkin functions. In this case problems can arise when $H_D[f]$ has repeated roots modulo~$p$. By Kummer's criterion, this can happen only when $p$ divides the discriminant of $H_D[f]$, and even then, a repeated root $x_1$ is only actually a problem when it corresponds to two alternating edges in $G$, say $(j_1,j_2)$ and $(j_3,j_4)$, with the edge $(j_2,j_3)$ between them. In this scenario we will get two roots $x_1$ and $x_2$ of $\gcd\bigl({\rm\Psi}_f(X,j_2),{\rm\Psi}_f(X,j_3)\bigr)$. But if we already know that $x_1$ corresponds to $(j_1,j_2)$, we can unambiguously choose $x_2$. In each of the $N$-isogeny cycles of~$G$, it is enough to find a single edge that yields a unique root. If no such edge exists, then every edge must yield the \emph{same} two roots $x_1$ and~$x_2$, and we count each with multiplicity $n/2$.
The second technique applies when the roots of $H_D[f]$ are units, as with the double $\eta$-quotients \cite[Thm.~3.3]{EnSc04}. The product of the roots is then $\pm 1$. Assuming that the number of edges in $G$ for which multiple roots arise is small (it is usually zero, and rarely more than one or two), we simply test all the possible choices of roots and see which yield $\pm 1$. If only one combination works, then the correct choices are determined. This is not guaranteed to happen, but in practice it almost always does.
\subsection {A general algorithm} \label{ssec:general} We now briefly consider the case of an arbitrary modular function $f$ of level $N$, and sketch a general algorithm to compute $H_D[f]$ with the CRT method.
Let us assume that $f(\tau)$ is a class invariant, and let $D$ be the discriminant and $u$ the conductor of the order $\mathcal{O}=[1,\tau]$. The roots of ${\rm\Psi}_f(X,j(\tau))\in K_\mathcal{O}[X]$ lie in the ray class field of conductor $u N$ over $K$, and some number $n$ of these, including $f(\tau)$, actually lie in the ring class field $K_\mathcal{O}$. We may determine~$n$ using the method described in \cite[\S 6.4]{Broker:Thesis}, which computes the action of $(\mathcal{O}/N\mathcal{O})^*/\mathcal{O}^*$
on the roots of ${\rm\Psi}_f(X,j(\tau))$. We note that the complexity of this task is essentially fixed as a function of $|D|$.
Having determined $n$, we use Algorithm~\ref{alg:crtjenum} to enumerate the roots $j_1,\ldots,j_h$ of $H_D\bmod p$ as usual, but if for any $j_i$ we find that ${\rm\Psi}_f(X,j_i)\bmod p$ does not have exactly $n$ roots $f_i^{(1)},\ldots,f_i^{(n)}$, we exclude the prime $p$ from our computations. The number of such $p$ is finite and may be bounded in terms of the discriminants of the polynomials ${\rm\Psi}_f(X,\alpha)$ as $\alpha$ ranges over the roots of $H_D[f]$. We then compute the polynomial $H(X)=\prod_{i=1}^{h} \prod_{r=1}^n \bigl( X - f_i^{(r)} \bigr)$ of degree $nh$
in $\F_p[X]$. After doing this for sufficiently many primes $p$, we can lift the coefficients by Chinese remaindering to the integers. The resulting $H$ is a product of $n$ distinct class polynomials, all of which may be obtained by factoring $H$ in $\mathbb{Z}[X]$. Under suitable heuristic assumptions (including the GRH), the total time to compute $H_D[f]$ is quasi-linear in $|D|$, including the time to factor $H$.
This approach is practically efficient only when $n$ is small, but then it can be quite useful. A notable example is the modular function $g$ for which \[ {\rm\Psi}_g(X,J)=(X^{12}-6X^6-27)^3-JX^{18}. \] This function was originally proposed by Atkin, and is closely related to certain class invariants of Ramanujan \cite[Thm.~4.1]{BerndtChan:Ramanujan}. The function $g$ yields class invariants when $D\equiv13\bmod 24$. In terms of our generic algorithm, we have $n=2$, and for $p\equiv 2\bmod 3$ we get exactly two roots of ${\rm\Psi}_g(X,j_i)\bmod p$, which differ only in sign. Thus $H(X)=H_D[g^2](X^2)=H_D[g](X)H_D[g](-X)$, and from this we easily obtain $H_D[g^2]$, and also $H_D[g]$ if desired.
\section{Computational Results} \label {sec:implementation} This section provides performance data for the techniques developed above. We used AMD Phenom II 945 CPUs clocked at 3.0~GHz for our tests; the software was implemented using the \texttt {gmp} \cite{gmp} and \texttt {zn\_poly} \cite{Harvey:zn_poly} libraries, and compiled with \texttt {gcc} \cite{gcc}.
To compute the class polynomial $H_D[f]$, we require a bound on the size of its coefficients. Unfortunately, provably accurate bounds for functions~$f$ other than~$j$ are generally unavailable. As a heuristic, we take the bound $B$ on the coefficients of $H_D$ given by \cite[Lem.~8]{Sutherland:HilbertClassPolynomials}, divide $\log_2 B$ by the asymptotic height factor $c(f)$, and add a ``safety margin'' of 256 bits. We note that with the CM method, the correctness of the final result can be efficiently and unconditionally confirmed~\cite{BissonSutherland:Endomorphism}, so we are generally happy to work with a heuristic bound.
\subsection{Class polynomial computations using the CRT method}
Our first set of tests measures the improvement relative to previous computations with the CRT method. We used discriminants related to the construction of a large set of pairing-friendly elliptic curves, see \cite[\S 8]{Sutherland:HilbertClassPolynomials} for details. We reconstructed many of these curves, first using the Hilbert class polynomial $H_D$, and then using an alternative class polynomial $H_D[f]$. In each case we used the explicit CRT to compute $H_D$ or $H_D[f]$ modulo a large prime $q$ (170 to 256 bits).
Table~\ref {tab1} gives results for four discriminants with $|D|\approx 10^{10}$, three of which appear in \cite[Table~2]{Sutherland:HilbertClassPolynomials}. Each column lists times for three class polynomial computations. First, we give the total time $T_{\rm tot}$ to compute $H_D\bmod q$, including the time $T_{\rm enum}$ spent enumerating $\operatorname{Ell}_D(\F_p)$, for all the small primes $p$, using Algorithm~\ref{alg:crtjenum} as it appears in \S\ref{ssec:crtjenum}. We then list the times $T'_{\rm enum}$ and $T'_{\rm tot}$ obtained when Algorithm~\ref{alg:crtjenum} is modified to use gcd computations whenever it is advantageous to do so, as explained in \S\ref{ssec:gcd}. The gcd approach typically speeds up Algorithm~\ref{alg:crtjenum} by a factor of~$2$ or more.
For the third computation we selected a function $f$ that yields class invariants for $D$, and computed $H_D[f]\bmod q$. This polynomial can be used in place of $H_D$ in the CM method (one extracts a root $x_0$ of $H_D[f]\bmod q$, and then extracts a root of ${\rm\Psi}_f(x_0,J)\bmod q$). For each function $f$ we give a ``size factor'', which approximates the ratio of the total size of $H_D$ to $H_D[f]$ (over $\mathbb{Z}$). In the first three examples this is just the height factor $c(f)$, but in Example 4 it is $4c(f)$ because the prime 59 is ramified and we actually work with the square root of $H_D[A_{59}]$, as noted in \S\ref{ssec:direct}, reducing both the height and degree by a factor of~2.
We then list the speedup $T'_{\rm tot}/T'_{\rm tot}[f]$ attributable to computing $H_D[f]$ rather than $H_D$. Remarkably, in each case this speedup is about twice what one would expect from the height factor. This is explained by a particular feature of the CRT method: The cost of computing $H_D\bmod p$ for small primes $p$ varies significantly, and, as explained in \cite[\S 3]{Sutherland:HilbertClassPolynomials}, one can accelerate the CRT method with a careful choice of primes. When fewer small primes are needed, we choose those for which Step 1 of Algorithm~\ref{alg:crtj} can be performed most quickly.
The last line in Table~\ref {tab1} lists the total speedup $T_{\rm tot}/T'_{\rm tot}[f]$ achieved.
\begin{table} \begin{center} \begin{tabular}{@{}l@{}rrrr@{}} &Example 1& Example 2& Example 3&Example 4\\ \midrule
$|D|$ & $13569850003$ & $\quad\medspace 11039933587$ & $\quad\medspace 12901800539$ & $\quad\quad 12042704347$\\ $h(D)$& 20203 & 11280 & 54706 & 9788\\
$\left\lceil\log_2 B\right\rceil$& 2272564& 1359134 & 5469776 & 1207412\\ $(\ell_1^{r_1},\ldots,\ell_k^{r_k})$&$(7^{20203})$&$(17^{1128},19^{10})$&$(3^{27038},5^2)$&$(29^{2447},31^2,43^2)$\\ \midrule $T_{\rm enum}$ (roots) & 6440 & 10200 & 10800 & 21700\\ $T_{\rm tot}$ & 19900 & 23700 & 52200 & 42400\\ \midrule $T'_{\rm enum}$ (gcds) & 2510 & 2140 & 3440 & 4780\\ $T'_{\rm tot}$ & 15900 & 15500 & 44700 & 25300\\ \midrule Function $f$ & $A_{71}$& $A_{47}$& $A_{71}$& $A_{59}$\\ Size factor & 36 & 24 & 36 & 120*\\
$T'_{\rm tot}[f]$ & 213 & 305 & 629 & 191\\
Speedup ($T'_{\rm tot}/T'_{\rm tot}[f]$) & 75 & 51 & 71 & 132\\ Speedup ($T_{\rm tot}/T'_{\rm tot}[f]$) & {\bf 93} & {\bf 78} & {\bf 83} & {\bf 222}\\ \bottomrule \end{tabular} \\
\caption{Example class polynomial computations (times in CPU seconds)} \label {tab1} \end{center} \end{table}
\subsection{Comparison to the complex analytic method} \label{ssec:compare}
Our second set of tests compares the CRT approach to the complex analytic method. For each of the five discriminants listed in Table~\ref {tab2} we computed class polynomials $H_D[f]$ for the double $\eta$-quotient $\mathfrak{w}_{3,13}$ and the Weber $\mathfrak{f}$ function, using both the CRT approach described here, and the implementation \cite{cm} of the complex analytic method as described in \cite{Enge:FloatingPoint}. With the CRT we computed $H_D[f]$ both over $\mathbb{Z}$ and modulo a 256-bit prime~$q$; for the complex analytic method these times are essentially the same.
\begin{table} \begin{center} \begin{tabular}{@{}rrcrrcrrcrr@{}} &&&\multicolumn{2}{c}{complex analytic}&&\multicolumn{2}{c}{CRT}&&\multicolumn{2}{c}{CRT mod $q$}\\ \cmidrule(r){4-5}\cmidrule(r){7-8}\cmidrule(r){10-11}
$|D|$&$\qquad h(D)$&$\qquad$&$\mathfrak{w}_{3,13}$&$\qquad\quad\mathfrak{f}\medspace$&$\qquad\medspace$&$\mathfrak{w}_{3,13}$&$\qquad\medspace\mathfrak{f}\medspace$&$\qquad\medspace$&$\mathfrak{w}_{3,13}$&$\qquad\mathfrak{f}\medspace$\\ \midrule
6961631 & 5000 && 15 & 5.4 && 2.2 & 1.0 && 2.1 & 1.0\\
23512271 & 10000 && 106 & 33 && 10 & 4.1 && 9.8 & 4.0\\
98016239 & 20000 && 819 & 262 && 52 & 22 && 47 & 22\\
357116231 & 40000 && 6210 & 1900 && 248 & 101 && 213 & 94\\ 2093236031 & 100000 && 91000 & 27900 && 2200 & 870 && 1800 & 770\\ \bottomrule \end{tabular} \\
\caption{CRT vs. complex analytic (times in CPU seconds)} \label {tab2} \end{center} \end{table}
We also tested a ``worst case'' scenario for the CRT approach: the discriminant $D=-85702502803$, for which the smallest non-inert prime is $\ell_1=109$. Choosing the function most suitable to each method, the complex analytic method computes $H_D[\mathfrak{w}_{109,127}]$ in 8310 seconds, while the CRT method computes $H_D[A_{131}]$ in 7150 seconds. The CRT approach benefits from the attractive height factor of the Atkin functions, $c(A_{131})=33$ versus $c(\mathfrak{w}_{109,127})\approx 12.4$, and the use of gcds in Algorithm~\ref {alg:crtjenum}. Without these improvements, the time to compute $H_D$ with the CRT method is 1460000 seconds. The techniques presented here yield more than a 200-fold speedup in this example.
\subsection{A record-breaking CM construction}
To test the scalability of the CRT approach, we constructed an elliptic curve using $|D|=1000000013079299 > 10^{15}$, with $h(D)=10034174>10^7$. This yielded a curve $y^2=x^3-3x+c$ of prime order $n$ over the prime field $\F_q$, where \scriptsize \begin{align*} c &= 12229445650235697471539531853482081746072487194452039355467804333684298579047;\\ q &= 28948022309329048855892746252171981646113288548904805961094058424256743169033;\\ n &= 28948022309329048855892746252171981646453570915825744424557433031688511408013. \end{align*} \normalsize This curve was obtained by computing the square root of $H_D[A_{71}]$ modulo $q$, a polynomial of degree $h(D)/2=5017087$. The height bound of 21533832 bits was achieved with 438709 small primes $p$, the largest of which was 53 bits in size. The class polynomial computation took slightly less than a week using 32 cores, approximately 200 days of CPU time. Extracting a root over $\F_q$ took 25 hours of CPU time using NTL \cite{Shoup:NTL}.
We estimate that the size of $\sqrt{H_D[A_{71}]}$ is over 13 terabytes, and that the size of the Hilbert class polynomial $H_D$ is nearly 2 petabytes. The size of $\sqrt{H_D[A_{71}]}\bmod q$, however, is under 200 megabytes, and less than 800 megabytes of memory (per core) were needed to compute it.
\input{classinv.bbl}
\end{document} |
\begin{document}
\begin{frontmatter} \vspace*{6pt} \title{Discussion of ``Multiple Testing for Exploratory Research'' by J. J. Goeman and A. Solari} \runtitle{Discussion}
\begin{aug} \author[a]{\fnms{Nicolai} \snm{Meinshausen}\corref{}\ead[label=e1]{meinshausen@stats.ox.ac.uk}} \runauthor{N. Meinshausen}
\affiliation{University of Oxford}
\address[a]{Nicolai Meinshausen is University Lecturer, Department of Statistics, University of Oxford, UK \printead{e1}.}
\end{aug}
\end{frontmatter}
I want to congratulate the authors on this thought-provoking and important paper on multiple testing in exploratory settings.
Standard Multiple Testing procedures can appear very mechanistic. Hypotheses are ordered by increasing $p$-value. Given a Type I error criterion, the Multiple Testing procedure selects a cut-off in this list. Simply working down the list of hypotheses in order of their $p$-values is perhaps suboptimal for exploratory analysis as a lot of information is lost in this way and important discoveries might be missed. Some previous work has addressed this issue by\break changing the ranking of the hypotheses. To highlight only three examples: Tibshirani and Wasserman (\citeyear{tibshirani2006correlation}) devised a method to borrow strength across highly correlated test statistics in microarray experiments. Storey (\citeyear{storey2007optimal}) proposed an ``optimal discovery'' procedure that again leads to a different ranking of variables than the ranking implied by the marginal $p$-values. One of the authors also proposed a very powerful way of incorporating known network structure into the testing procedure [Goeman and Mansmann, \citeyear{goeman2008multiple}].
The proposed approach to exploratory multiple testing is more radical, though, than changing the cut-off or changing the ranking of hypotheses. Instead of the perhaps rather dull task of selecting a~cut-off in a list of ordered hypotheses, the researcher can reject for follow-up analysis any set of hypotheses he or she regards as interesting, using all the information at hand. The method then returns a~lower bound on the number of false null\vadjust{\goodbreak} hypotheses (true discoveries) in this set. Since the bound is valid simultaneously across all sets, an exploratory approach does not invalidate the error bound.
I think this method will be very important and useful in many fields as it allows a flexible exploration of possibly interesting sets of hypotheses, while at the same time protecting the practitioner against too many false rejections (or at least managing expectations about the number of true discoveries one can hope to make).
\begin{figure*}
\caption{Left: computational time (on log-scale) for the standard approach in regression (solid line), with the number of hypotheses ranging from 2 to 12, and computational time for the shortcut for independent tests (dotted line), with the number of hypotheses between $2\cdot10^5$ and $1.2\cdot10^6$. Only the latter is feasible for large-scale testing. Right: the average lower bound on the total number of true discoveries with the proposed approach (circles) and an alternative given in the text (diamonds). Due to the simultaneous nature of the proposed bounds, the overall power to reject is good for a few dozen hypotheses but is deteriorating as the number of hypotheses grows into the hundreds.}
\label{fig:1}
\end{figure*}
There is a price to be paid for the simultaneous nature of the bound, though. I have some doubts (hopefully unfounded) about the applicability to\break large-scale testing situations as they arise, for example, in genomics or astronomy for two reasons: computational complexity and statistical power.
It is obvious and also acknowledged by the authors that the proposed procedure without shortcuts will be impractical for even just a few dozen hypotheses. The computational complexity is simply too high. An example is shown in Figure~\ref{fig:1} for a genomics regression example with less than one hundred observations. The proposed method takes already more than half a minute for 12 predictor variables on a~standard computer with a 3~GHz CPU and the supplied \texttt{cherry} R-package and the complexity seems to be (super-)exponential in the number of hypotheses, as one would expect. The proposed shortcuts are not applicable in all settings. If they are applicable, they seem to be very effective in reducing the computational complexity, making large-scale testing feasible. Figure~\ref{fig:1} shows that even testing situations with $>10^6$ tests are handled in about a second or less.
Maybe more worrying, the statistical power of the method deteriorates with an increasing number of hypotheses. This is due to the simultaneous nature of the bound on the number of correctly rejected hypotheses among all possible sets of hypotheses. I~compared the power for a simple setting, in which there are $m$ independent $p$-values $p_i$ with $i=1,\ldots,m$ with distribution $p_i \sim U([0,c_i])$ and $c_i=1$ if $i>10$ and $c_i=0.1/ m$ if $i\le10$ (there are hence 10 false null hypotheses). If rejecting all hypotheses, the lower bound for the number of correctly rejected hypotheses is shown as a function of $m$ in Figure \ref{fig:1}, along with the bound for the same quantity proposed by Meinshausen and Rice (\citeyear{meinshausen04estimating}). The proposed approach works very well up to a few dozen hypotheses. If the number of hypotheses is in the hundreds, the number of sets the bound needs to be valid over is getting so large that the power of the method starts to deteriorate quickly.
I acknowledge that the comparison is not quite fair since the method in Meinshausen and Rice (\citeyear{meinshausen04estimating}) does much less: it only gives a lower bound on the \emph{total} number of false null hypotheses or a lower bound for the number of true discoveries in a list that is ordered by increasing $p$-values of the hypotheses. [If we were to ask only if there are any false null hypotheses at all, we could be even more sensitive to deviations from the global null hypothesis with Higher Criticism (Donoho and Jin, \citeyear{donoho04higher}).] And for fewer than 50 hypotheses, the proposed bound is remarkably good.
The power and computational cost objectives thus both indicate that the method is working very well for up to a few dozen hypotheses but will probably need refinements for large-scale testing.
A thought regarding the presentation of results. As proposed, the method acts somewhat like a black-box: if given a set of hypotheses, it returns a lower bound on the number of true discoveries within this set. While this might be the right approach in many exploratory settings, I also think that many practitioners could use some guidance as to which sets of hypotheses could be interesting (without prescribing exactly which ones to reject, so as to not fall back into the standard ranking scheme). A step in this direction is the helpful concept of \emph{defining hypotheses}, which summarizes the results of the procedure in compact form.
Each defining hypothesis is a set of hypotheses out of which at least \emph{one hypothesis} must be a false null hypothesis. In other words: the defining hypotheses have a logical AND--OR connection (with AND between the sets of hypotheses and OR between hypotheses in a set). A complementary view could be given by a logical OR--AND connection, with OR between sets of hypotheses and AND between hypotheses in a set. The results are still presented as sets of hypotheses. Among all these sets and conditional on event $E$, there is now guaranteed to be at least \emph{one set} such that \emph{all hypotheses} in this set are false. This extends the usual Multiple Testing paradigm, where the user is handed back just one set of hypotheses, which is guaranteed to be a set of false null hypotheses.
\begin{figure}
\caption{An alternative presentation of the results in the regression example. All dots on each horizontal line form a set of hypotheses. Out of all nine sets, at least one must correspond to a set of false null hypotheses (conditional on event~$E$).}
\label{fig:2}
\end{figure}
In the regression example, there are nine such sets, the first two being \{\textit{waist, height, calf, thigh}\} and \{\textit{waist, neck, calf, thigh}\}. Figure~\ref{fig:2} visualizes them. Among these nine sets, at least one must be a set where all hypotheses are false null hypotheses (always conditional on the event $E$). We can then directly read off that if \textit{height} is known to be a~null hypothesis (either by a follow-up experiment or through prior knowledge), then the results give no reason any longer to suppose that \textit{chest} was a~false null (since \textit{chest} is only part of the fifth set; and if \textit{height} is a true null, this set can be excluded and \textit{chest} will no longer be in the union of all other candidate sets). Or, if \textit{calf} can be excluded, then the results do not give reason to still suspect that \textit{neck} was a false null hypothesis. Such statements and connections are much more difficult to read off the set of defining hypotheses but might be useful in practice, when planning which hypotheses to follow up.
I want to congratulate the authors again on this very impressive and useful paper and I hope to see strong uptake of the method.\vspace*{-3pt}
\section*{Acknowledgment} The author wishes to acknowledge support from the Leverhulme Trust.\vspace*{15pt}
\end{document} |
\begin{document}
\def\spacingset#1{\renewcommand{\baselinestretch} {#1}\small\normalsize} \spacingset{1}
\if00 { \title{\bf An Objective Prior from a Scoring Rule }
\author{Stephen G. Walker\footnote{Department of Mathematics, University of Texas at Austin, USA. e-mail: s.g.walker@math.utexas.edu}\hspace{.2cm} \& \hspace{.2cm} Cristiano Villa\footnote{ School of Mathematics, Statistics \& Physics, University of Newcastle, UK. e-mail: Cristiano.Villa@ncl.ac.uk} }
\date{} \maketitle } \fi
\if10 {
\begin{center}
{\LARGE\bf } \end{center}
} \fi
\begin{abstract} In this paper we introduce a novel objective prior distribution leveraging the connections between information, divergence and scoring rules. In particular, we do so from the starting point of convex functions representing information in density functions. This provides a natural route to proper local scoring rules using Bregman divergence. Specifically, we determine the prior which solves the equation obtained by setting the score function equal to a constant. While in itself this provides motivation for an objective prior, the prior also minimizes a corresponding information criterion.
\end{abstract}
\noindent {\it Keywords:} Bregman divergence; Convex function; Euler--Lagrange equation; Objective prior
\section{Introduction}\label{sc_introduction} A major drawback of objective priors, such as Jeffreys prior \citep{Jeff1961} and the reference prior \citep{Bern:1979}, is that, in many cases, they are improper. While for a parameter that is defined over a bounded interval, such as $(0,1)$, it is generally possible to derive objective prior distributions that are proper, this is not the case for parameters on $(0,\infty)$ or $(-\infty,\infty)$. The literature provides many examples where improper prior distributions cannot be suitably employed; such as Bayes factors, mixture models and hierarchical models, to name but a few. Methods have been proposed to get around these obstacles, for example, Intrinsic Bayes Factors \citep{BergPer:1996} and Fractional Bayes factors \citep{Ohag:1995} or reparametrising mixture models \citep{GrazianRobert2018}. However, these types of results are generally valid for a limited number of specific conditions. Additionally, improper prior distributions are not too suitable to be employed where large numbers of parameters are involved as it would be difficult to establish properness of the full posterior distribution.
The idea of this paper is to present a novel objective prior distribution for continuous parameter spaces by considering the connection between information, divergence and scoring rules. In particular, the proposed prior can be defined over $(0,\infty)$ and $(-\infty,\infty)$, the latter by extending the former, and it has the appealing property of being proper.
Recently, \cite{LVW:2020}, introduced a new class of objective prior which solved a differential equation of the form $S(q,q',q'')=0$, where $S$ is a score function and the solution $q$ acts as the prior distribution. The solution is also shown to minimize an information criterion.
There are two well known relations that connect information, proper local scoring rules and divergences. The most famous of which links Shannon information, Kullback--Leibler divergence and the log--score, given by \begin{equation}\label{dis} \int p\log \frac{p}{q}=\int p\log p +\int p\,(-\log q), \end{equation} where $p$ and $q$ are two densities,
and integrals will be generally defined with respect to the Lebesgue measure. The term on the left-hand-side of \eqref{dis} is the Kullback--Leibler divergence \citep{Kull:1951} between $p$ and $q$, the first term on the right-hand-side is the Shannon information associated with density $p$, and the second term is the expectation of the log-score function.
Another way to connect information, divergence and proper local scoring rules, involves Fisher divergence, Fisher information, and the Hyv\"{a}rinen score function (\cite{Hyva:2005}): $$\int p\left(\frac{p'}{p}-\frac{q'}{q}\right)^2=\int \frac{(p')^2}{p} +\int p\left(2\frac{q''}{q}-\left(\frac{q'}{q}\right)^2\right),$$ where the final term has been obtained using an integration by parts. In general, these relationships can be expressed as \begin{equation}\label{generalrelation} D(p,q)=I(p)+\int p\,S(q), \end{equation} where $D$ denotes the divergence, $I$ the measure of information and $S$ the score.
Recently, in \cite{Parry:2012}, a new class of score function was introduced, where the starting point is the property of the score function, which is that
$$p=\arg\min_q\int p\,S(q),$$ for all densities $p$. In other words, a score is said to be proper if the above is minimised by the choice of $q=p$. Let us consider the well-known log-score, $S(q)=-\log q(x)$. Then, we have that it satisfies the above property, since for any density $p$ it holds that $\int p\,\log(p/q)\geq 0$, with equality only when $q\equiv p$. As such, we have that the log-score is a proper score. Furthermore, a score is said to be local if it only depends on $q$ through the density value $q(x)$. See \cite{Parry:2012} and \cite{EhmGnei:2012}. It has to be noted that the log-score is the only proper score that is local.
Consider the Hyv\"{a}rinen score function \citep{Hyva:2005}, which is given by $$S(q,q',q'')=2\,\frac{q''}{q}-\left(\frac{q'}{q}\right)^2.$$ Note that this score depends on $(q,q',q'')$, i.e.\ on $q$ and its first two derivatives; as such, it is not local in the above sense. However, the locality condition can be weakened \citep{Parry:2012} by allowing the score to depend on a finite number $m$ of derivatives. Therefore, the Hyv\"{a}rinen score is an order--2 proper local scoring rule.
More generally, if a proper score depends on $m$ derivatives, then it is called an \textit{order--$m$ local scoring rule}. The theory in support of this is based on the fact that the minimizer of $\int p\,S(q)$ is $p$, and this can be investigated using variational analysis. The relevant Euler--Lagrange equation of order two is \begin{equation}\label{euler} S+q\frac{\partial S}{\partial q}-\frac{d}{dx}\,q\,\frac{\partial S}{\partial q'}+\frac{d^2}{dx^2}\,q\,\frac{\partial S}{\partial q''}=0. \end{equation} The corresponding general case of \eqref{euler} is given as equation (18) in \cite{Parry:2012}.
Throughout this paper we will focus on the case $m=2$, since this is where we draw our prior from.
The Appendix provides the expression for a general $m$.
In \cite{Parry:2012}, the solution to equation \eqref{euler} is proposed using properties of differential operators and $1$--homogeneous functions. Recall that a $1$--homogeneous function $f$ is such that $f(x,\lambda q,\lambda q')=\lambda\,f(x,q,q')$ for any $\lambda>0$. In particular, the Hyv\"{a}rinen score arises with $f(x,q,q')=(q')^2/q$ and \begin{equation}\label{hyvar} S(q)=-\frac{\partial f}{\partial q}+\frac{d}{dx}\frac{\partial f}{\partial q'}. \end{equation} Furthermore, \cite{Parry:2012} and \cite{EhmGnei:2012} characterize all local and proper scoring rules of order $m=2$. In this respect, as an additional interesting result, in the Appendix we present the characterization using measures of information and the Bregman divergence \citep{Breg:1967}. The benefits of the proposed approach are that complicated mathematical analysis is avoided and the derivation of the local rule is made explicit.
Following \cite{Parry:2012} and \cite{EhmGnei:2012} and the novel derivation of their results using Bregman divergence, which is the focus of the Appendix, information, divergence and scores can be obtained as follows: For some convex function $\alpha:\mathbb{R}\to\mathbb{R}$,
\begin{description}
\item 1. \textit{Divergence}: Given the result \eqref{cond}, we get $$D(p,q)=\int p\,\alpha(p'/p)-\int p\,\frac{\partial \phi}{\partial q}-\int p'\,\frac{\partial \phi}{\partial q'},$$ where $$\frac{\partial \phi}{\partial q}=\alpha(q'/q)-(q'/q)\,\alpha'(q'/q)\quad\mbox{and}\quad \frac{\partial \phi}{\partial q'}=\alpha'(q'/q).$$ Using integration by parts on the rightmost integral, and assuming that $[p\,\cdot\,\partial\phi/\partial q']$ vanishes at the extremes of the integral, $$D(p,q)=\int p\,\alpha(p'/p)+\int p\left\{\frac{d}{dx}\alpha'(q'/q)-\alpha(q'/q)+(q'/q)\,\alpha'(q'/q)\right\}. $$
\item 2. \textit{Information}: This follows from the divergence, and from \eqref{divinfsc}, and is given by $$I(p)=\int p\,\alpha(p'/p).$$
\item 3. \textit{Score}: Again, from the form of the divergence and \eqref{score}, this is given by \begin{equation}\label{ascore} S(q,q',q'')=\frac{d}{dx}\alpha'(q'/q)-\alpha(q'/q)+(q'/q)\,\alpha'(q'/q). \end{equation}
\end{description}
\noindent The score $S(q,q',q'')$ in \eqref{ascore} generalizes the Hyv\"{a}rinen score, which arises when $\alpha(u)=u^2$.
\noindent The paper is organised as follows. Section \ref{sc_application} introduces the proposed objective prior. Section \ref{sc_mixtures} includes a thorough simulation study, and an application to mixture models that involves both simulated and real data. In Section \ref{sc_hierarchical} we discuss another critical scenario where improper priors may result in improper posteriors, namely assigning an objective prior to the variance parameter in a hierarchical model. The supporting theory is presented in the Appendix. In \ref{sc_informationandbregman} we use Bregman divergence to obtain general forms for score functions and associated divergences and following on from this in \ref{sc_divergences} we detail how we use Bregman divergences to obtain a divergence between probability density functions using their first derivatives, and show how to obtain score functions from these divergences. \ref{sc_highorder} provides the general case using $m$ derivatives. Finally, in \ref{ParryApp} we make the connection with our derivations of scores and that of \cite{Parry:2012}.
\section{New Objective prior}\label{sc_application}
\cite{LVW:2020} proposed constructing objective prior distributions on parameter spaces by solving equations of the kind $S(q)=0$. Specifically, they used a weighted mixture of the log-score and the Hyv\"{a}rinen score functions. Note that the sole use of the log-score function would result in the uniform prior, which is not appropriate in many cases and may yield improper posterior distributions. On the other hand, a weighted combination of the two score functions yields a differential equation given by \begin{equation}\label{LVWdiffeq} -w\log q(x)+\frac{q''(x)}{q(x)}-\hbox{$1\over2$} \left(\frac{q'(x)}{q(x)}\right)^2=0, \end{equation} where $q$ denotes the prior density and $w$ the weight balancing the two score functions.
Solutions to the differential equation \eqref{LVWdiffeq} can be found for different spaces, and constraints on the shape of $q$ can be considered; so as to have a prior density with desirable behaviour, such as monotone, convex, log--concave and more.
We have already seen that the Hyv\"{a}rinen score arises with $\alpha(u)=u^2$; see \eqref{ascore}.
An important property an objective prior distribution may be required to have is a heavy tail. We will consider such on $(0,\infty)$.
Mirroring the Hyv\"{a}rinen score, we adopt $\alpha(u)=u^{-2}$ with $u=q'/q$, and $q$ a decreasing density on $(0,\infty)$. In this case, equation \eqref{ascore} becomes $6\,u^\prime/u^4 -3/u^2,$ which, by setting to 0, becomes $u^\prime = \hbox{$1\over2$} u^2.$ The solution is easily seen to be $u(x) = -2/(a+x),$ for some constant $a$. In this case, the prior on the parameter space $(0,\infty)$ is \begin{equation}\label{eq_prior2} q(x) = \frac{a}{(a+x)^2}. \end{equation} Interestingly, the prior in \eqref{eq_prior2} is a Lomax distribution \citep{Lomax:1954} with scale parameter $a$ and shape parameter equal to 1. Recalling that the Lomax distribution can be directly connected to the Pareto Type I and Pareto Type II distributions, its heavy-tailed nature is immediately obvious.
Fig.~\ref{fig:prior2} shows the prior with $a=1$. \begin{figure}
\caption{Prior $q(x)=1/(1+x)^2$}
\label{fig:prior2}
\end{figure}
Making the connection more directly with the theory set out in the paper with $\alpha(u)=u^{-2}$, we have $\phi(u,v)=u^3/v^2$ which is easy to show satisfies $\phi=u\partial\phi/\partial u+v\partial \phi/\partial v$. Then using (\ref{score}) we get \begin{equation}\label{2dimscore} S(q,q',q'')=3\left(\frac{q}{q'}\right)^2\left\{2\frac{q\,q''}{(q')^2}-3\right\}. \end{equation} Setting this to zero; i.e. $2qq''=3(q')^2$, this can be solved and the solution is precisely of the form $a/(a+x)^2$. We now write this all out in a theorem.
\begin{theorem} Let $\phi(p,p')=p^3/(p')^2$ be the convex function appearing in (\ref{breg2dim}); i.e. $\phi(u,v)=u\alpha(v/u)$ with $\alpha(\xi)=\xi^{-2}$. Then $\phi$ is convex for either $\xi<0$ or $\xi>0$. The Euler equation associated with this $\phi$; i.e. $d/dx\,\, \partial \phi/\partial p'=\partial \phi/\partial p$, yields $$6p^3\,p''/(p')^4-6(p/p')^2=3(p/p')^2,$$ the solution to which can be written as $S(p,p',p'')=0$ where $S$ is the corresponding score function (\ref{2dimscore}). \end{theorem}
To obtain the corresponding prior on $(-\infty,\infty)$ through symmetry about 0, we get \begin{equation}\label{eq:cases}
q(x) =
\begin{cases}
\hbox{$1\over2$} a\,(x-a)^{-2}, & \text{for } x<0\\
\hbox{$1\over2$} a\,(x+a)^{-2}, & \text{for } x\geq0.
\end{cases} \end{equation}
Here we motivate the natural objective choice for the constant $a$ as 1. The only important transformation to be considered here is $\phi=1/\theta$. This, for example, would take variance to precision. For the prior in \eqref{eq_prior2} to be invariant, that is $p_a(\phi)=a/(a+\phi)^2$, we need to have $a=1$, since \begin{eqnarray*}
p_a(\phi) = \frac{a}{(a+1/\phi)^2}\left|\frac{1}{\phi^2}\right| = \frac{a}{(a\phi + 1)^2}, \end{eqnarray*} which yields $p_a(\theta)=a/(a+\theta)^2$ iff $a=1$. All the illustrations that follow have been made taking this choice for $a$.
\subsection{First examples}
The first simulation study was to make inference on a scale parameter; specifically the standard deviation of a normal density with mean $\mu=0$ and standard deviation $\sigma\in(0,\infty)$. We compare prior \eqref{eq_prior2} with Jeffreys prior, that is $\pi(\sigma)\propto1/\sigma$. We took 250 samples of size $n=100$, obtained the posterior distributions using standard MCMC methods (6000 iterations, with a burn--in of 1000 and a thinning of 10) and computed the following two indices. The first is the root mean squared error (MSE) of the posterior mean, divided by the true parameter value; $$\mbox{MSE} = \frac{\sqrt{\mathbb{E}(\widehat{\sigma}-\sigma)^2}}{\sigma},$$ where $\widehat{\sigma}$ is the posterior mean, and the second is the coverage of the 95\% posterior credible interval for $\sigma$. Table \ref{tab:tab1} shows the results for the MSE for $\sigma=\{0.25,0.50,1,2,5,10,20\}$, where we see little difference between the performance of the two priors. However, the important point is that the score prior is proper.
\begin{table}[h!] \centering
\begin{tabular}{|c|cc|} \hline $\sigma$ & Jeffreys Prior & Score Prior \\ \hline 0.25 & 0.0723 & 0.0720 \\
0.50 & 0.0721 & 0.0721 \\
1 & 0.0721 & 0.0716 \\
2 & 0.0719 & 0.0716 \\
5 & 0.0722 & 0.0720 \\
10 & 0.0723 & 0.0716 \\
20 & 0.0722 & 0.0718 \\ \hline \end{tabular} \caption{Posterior MSE for $\sigma$ to compare Jeffreys prior against the prior based on scores for data generated by a normal density with mean $\mu=0$ and unknown variance. This has been obtained on 250 samples of size $n=100$ and with standard deviation $\sigma=\{0.25,0.50,1,2,5,10,20\}$.} \label{tab:tab1} \end{table}
The coverage of the 95\% posterior credible interval is shown in Table \ref{tab:COVnormal}, where we can also see very similar behaviour between the two priors; although both show an average slightly lower than the nominal 0.95. \begin{table}[h!] \centering
\begin{tabular}{|c|cc|} \hline $\sigma$ & Jeffreys Prior & Score Prior \\ \hline 0.25 & 0.91 & 0.91 \\
0.50 & 0.92 & 0.91 \\
1 & 0.91 & 0.91 \\
2 & 0.93 & 0.91 \\
5 & 0.93 & 0.91 \\
10 & 0.90 & 0.92 \\
20 & 0.90 & 0.93 \\ \hline \end{tabular} \caption{Posterior coverage of the 95\% credible interval for $\sigma$ to compare Jeffreys prior against the prior based on scores for data generated by a normal density with mean $\mu=0$ and unknown variance. This has been obtained on 250 samples of size $n=100$ and with standard deviation $\sigma=\{0.25,0.50,1,2,5,10,20\}$.} \label{tab:COVnormal} \end{table}
To illustrate the frequentist properties of the prior in \eqref{eq:cases}, we have compared it to a flat prior, $\pi(\mu)\propto1$, in making inference for a location parameter of a log--normal density with unknown $\mu$ and known scale parameter $\sigma=1$. Similarly to the previous case, we have drawn 250 samples of size $n=100$ and computed the MSE and coverage of the 95\% posterior credible interval. The values of $\mu$ considered were from the set $\{0,1,5,10,50,100\}$. Table \ref{tab:tab2} shows the MSE for the two priors, where we see that, apart from a small difference for $\mu=0$, the two priors appear to perform in a very similar fashion.
\begin{table}[h!] \centering
\begin{tabular}{|c|cc|} \hline $\mu$ & Jeffreys Prior & Score Prior \\ \hline 0 & 0.0114 & 0.0007 \\
1 & 0.0086 & 0.0091 \\
5 & 0.0085 & 0.0085 \\
10 & 0.0085 & 0.0085 \\
50 & 0.0085 & 0.0087 \\
100 & 0.0085 & 0.0087 \\ \hline \end{tabular} \caption{Posterior MSE for $\mu$ to compare Jeffreys prior against the prior based on scores for data generated from a log-normal density with unknown location parameter $\mu$ and known scale parameter $\sigma=1$. This has been obtained on 250 samples of size $n=100$ and with location parameter $\mu=\{0,1,5,10,50,100\}$.} \label{tab:tab2} \end{table}
The coverage of the 95\% posterior credible interval for $\mu$ is shown in Table~\ref{tab:COVlognormal}, where we can see a very similar behaviour for the two priors, with an exception for $\mu=0$, although the two coverage levels are perfectly acceptable.
\begin{table}[h!] \centering
\begin{tabular}{|c|cc|} \hline $\mu$ & Jeffreys Prior & Score Prior \\ \hline 0 & 0.92 & 1.00 \\
1 & 0.97 & 0.98 \\
5 & 0.97 & 0.98 \\
10 & 0.97 & 0.98 \\
50 & 0.97 & 0.98 \\
100 & 0.97 & 0.98 \\ \hline \end{tabular} \caption{Posterior coverage of the 95\% credible interval for $\mu$ to compare Jeffreys prior against the prior based on scores for data generated from a log-normal density with unknown location parameter $\mu$ and known scale parameter $\sigma=1$. This has been obtained on 250 samples of size $n=100$ and with location parameter $\mu=\{0,1,5,10,50,100\}$.} \label{tab:COVlognormal} \end{table}
The general conclusion for the two experiments above, is that the prior obtained via $\alpha(u)=1/u^2$ exhibits tails which are sufficiently heavy to generate optimal frequentist performance even for large parameter values. These properties are comparable to those obtained by Jeffreys prior, which is well--known for being the objective prior yielding good frequentist properties of the posterior. The advantage with our prior is that it is always proper.
\section{Mixture models}\label{sc_mixtures} A major area of challenge for objective priors is finite mixture models, where observations are assumed to be generated by the following model; \begin{equation}\label{eq_mixmodel} f(y) =\sum_{l=1}^k \omega_l\,\,f_l(y\mid \theta_l), \qquad \sum_{l=1}^k\omega_l=1, \end{equation}
for densities $(f_l(\cdot|\theta_l))$, where $\theta_l$ is a vector of parameters characterising the densities. Given the ill-defined nature of the model in \eqref{eq_mixmodel} \citep{GrazianRobert2018}, the use of improper priors for the parameters is not acceptable. In particular, if we consider densities $f_l$ to be location-scale distributions, \cite{GrazianRobert2018} show that, under certain circumstances, Jeffreys priors cannot be used, due to their improperness. For example, if all parameters are unknown (i.e. weights, location and scale parameters), then Jeffreys prior yields improper posteriors. Even in more restrictive circumstances the use of improper priors is troublesome; if we consider only the location parameters unknown, then Jeffreys prior yields improper posteriors for $k>2$. The above represents severe limitations in Bayesian analysis. Therefore, the objective priors proposed in this paper represent a clear solution to the above problem, avoiding reparametrisation or addition of extra parameters, as proposed, for example, in \cite{GrazianRobert2018}, the latter resulting in an increased uncertainty.
\subsection{Single sample} In this first simulation study, we illustrate the performance of the proposed prior on the following three-component mixture model, from which we have drawn a sample of size $n=200$, \begin{equation}\label{example1} 0.25\,\mbox{N}(0,1.2^2) + 0.65\,\mbox{N}(-10,1) + 0.10\,\mbox{N}(7,0.8^2). \end{equation} In terms of prior distributions, we have assumed a symmetric Dirichlet prior with concentration parameters equal to one, and for the means and standard deviations of the Gaussian components the proposed prior on $(-\infty,\infty)$ and on $(0,\infty)$, respectively. We have also assumed independent information, so the priors for the parameters of the component have the following form, $$\pi(\pmb\mu,\pmb\sigma) = \prod_{l=1}^3\pi_l(\mu_l)\times\prod_{l=1}^3\pi(\sigma_l),$$ where $\pmb\mu=(\mu_1,\mu_2,\mu_3)$ and $\pmb\sigma=(\sigma_1,\sigma_2,\sigma_3).$ The histogram of the sample, together with the true density, is shown in Fig.~\ref{fig:example1_hist}.
\begin{figure}
\caption{Histogram of the sample of size $n=200$ from model \eqref{example1}, and density (red line) of the true model.}
\label{fig:example1_hist}
\end{figure}
The analysis uses a Metropolis--within--Gibbs algorithm with a total of 60000 iterations, a burn--in of 10000 and a thinning of 100. We note that the true values are within the posterior credible intervals.
\begin{table}[htbp]
\centering
\begin{tabular}{c|ccc}
\hline
Component & $\omega$ & $\mu$ & $\sigma$ \\
\hline
\multirow{2}[0]{*}{1} & 0.26 & -10.2 & 1.1 \\
& (0.21, 0.33) & (-10.5, -9.9) & (0.9, 1.4) \\
\multirow{2}[0]{*}{2} & 0.66 & 0.0 & 1.3 \\
& (0.58,0.72) & (-0.1, 0.2) & (1.1, 1.5) \\
\multirow{2}[0]{*}{3} & 0.08 & 6.7 & 0.9 \\
& (0.04, 0.12) & (6.2, 7.2) & (0.6, 1.4) \\
\hline
\end{tabular}
\caption{Posterior means and 95\% credible intervals (in brackets) for a sample of size $n=200$ from model \eqref{example1}.}
\label{tab:singlesample_100} \end{table}
\subsection{Repeated sampling} To have a more thorough understanding of the proposed prior implementation, we have performed some experiments on repeated sampling, taking into consideration different scenarios, which include different sample sizes and model structure. We have limited the analysis to mixtures of normal densities, but it is obvious that, due to the properness of the prior, its implementation can be extended to any family of densities for the mixture components. We computed the posterior distribution for $M=20$ replications of samples of size $n=(50, 100, 200)$ of mixture models with number of components $k=(3,4,5)$. For these illustrations, we report the results for the means and the standard deviations of each component, as they are estimated using the proposed prior. The models used are as follows: \begin{eqnarray} \frac{1}{3}\mbox{N}(-10,1) &+& \frac{1}{3}\mbox{N}(0,0.8^2) + \frac{1}{3}\mbox{N}(7,1.2^2) \nonumber \\ \frac{1}{4}\mbox{N}(-10,1) &+&\frac{1}{4}\mbox{N}(-3,0.9) +\frac{1}{4}\mbox{N}(0,0.8) +\frac{1}{4}\mbox{N}(7,1.2) \nonumber \\ \frac{1}{5}\mbox{N}(-10,1) &+& \frac{1}{5}\mbox{N}(-3,0.9) +\frac{1}{5}\mbox{N}(0,0.8) +\frac{1}{5}\mbox{N}(3,1) +\frac{1}{5}\mbox{N}(7,1.2). \nonumber \end{eqnarray} Note that we have not chosen variable weights as these are not associated with a proposed prior.
Fig.~\ref{fig:means} shows the boxplots of the posterior means for the $\pmb\mu$ of the mixture components, while Fig.~\ref{fig:sds} shows the boxplots of the posterior means for the standard deviations $\pmb\sigma$. As one would expect, the larger the sample size the less variability in the repeated estimates, for the same number of components. Keeping the sample size fixed, we notice an increase in the variability of the estimates as the number of components grows, which is also an expected result.
\begin{figure}
\caption{Boxplots of posterior means of the means $\pmb\mu$, using the proposed prior, for samples of size $n=(50,100,200)$, plotted by row, and number of components $k=(3,4,5)$, plotted by column.}
\label{fig:1a}
\label{fig:2a}
\label{fig:3a}
\label{fig:4a}
\label{fig:5a}
\label{fig:6a}
\label{fig:7a}
\label{fig:8a}
\label{fig:9a}
\label{fig:means}
\end{figure}
\begin{figure}
\caption{Boxplots of posterior standard deviations of the means $\pmb\sigma$, using the proposed prior, for samples of size $n=(50,100,200)$, plotted by row, and number of components $k=(3,4,5)$, plotted by column.}
\label{fig:1b}
\label{fig:2b}
\label{fig:3b}
\label{fig:4b}
\label{fig:5b}
\label{fig:6b}
\label{fig:7b}
\label{fig:8b}
\label{fig:9b}
\label{fig:sds}
\end{figure}
\subsection{Real data analysis} In this section we analyse the well--known galaxy data set, which contains the velocities of 82 galaxies in the Corona Borealis region. To support a particular theory about the formation of galaxies, the analysis aims to estimate the number of stellar populations. This is a benchmark data set, well investigated in the literature, for example in \cite{EscobWest:1995}, \cite{RichGreen:1997} and \cite{Grazianetal:2019}, among others. We consider the galaxy velocities as random variables distributed according to a mixture of $k$ normal densities. The estimation of the number of components has proved to be delicate, with estimates ranging from 3 to 7, depending on factors such as the priors for the parameters and the Bayesian method used. The histogram of the data set with a superimposed density is presented in Fig.~\ref{fig:galaxy}.
\begin{figure}
\caption{Histogram of the Galaxy data set with a smoothed density (red curve) superimposed.}
\label{fig:galaxy}
\end{figure}
To select the number of components in the mixture of normal densities, we have fitted models with $k=(2,3,4,5,6,7,8)$ components, computing the Deviance Information Criterion (DIC) under each model. The results are reported in Table \ref{tab:galaxy}. We notice that, according to the computed index, we identify as best model the mixture with $k=4$ components, which is in line with \cite{Grazianetal:2019}, and slightly more conservative than \cite{RichGreen:1997} and \cite{Grazianetal:2019}, where the number of components with non-zero weight is 5. Table \ref{tab:galaxyestimates} shows the posterior means for the parameters of the 4 components estimated.
\begin{table}[htb] \centering
\begin{tabular}{c|ccccccc} \hline $k$ & 2 & 3 & 4 & 5 & 6 & 7 & 8 \\
DIC & 476.72 & 425.20 & 371.48 & 413.68 & 446.45 & 446.54 & 458.81 \\ \hline \end{tabular} \caption{Deviance Information Criterion for mixture with number of components $k=(2,3,4,5,6,7,8)$.} \label{tab:galaxy} \end{table}
\begin{table}[htb] \centering
\begin{tabular}{c|ccc} \hline Component & $\omega$ & $\mu$ & $\sigma$ \\ \hline 1 & 0.32 & 19.16 & 0.71 \\
2 & 0.31 & 23.59 & 3.46 \\
3 & 0.29 & 22.01 & 4.59 \\
4 & 0.10 & 9.31 & 0.58 \\ \hline \end{tabular} \caption{Posterior means of the parameters for the $k=4$ components. The results are reported in descending order of the weights.} \label{tab:galaxyestimates} \end{table}
\section{Variance parameters in hierarchical models}\label{sc_hierarchical} In this section we discuss a well-known implementation of a hierarchical model that is proposed, for example, in \cite{Gelman:2006}. The basic two-level hierarchical model is as follows: \begin{eqnarray*} y_{ij}&\sim& N(\mu+\alpha_j,\sigma_y^2), \qquad i=1,\ldots,n,\,\,j=1,\ldots,J\\ \alpha_j&\sim& N(0,\sigma_\alpha^2), \qquad j=1,\ldots,J. \end{eqnarray*} This model has three parameters, namely $\mu$, $\sigma_y$ and $\sigma_\alpha$. However, our interest for this paper is in $\sigma_\alpha$ only, noting that ``regular'' objective priors can be used on the remaining parameters, like $\pi(\mu,\sigma_y)\propto1$, for example. Although improper, this prior yields a proper posterior on the parameters.
The actual concern is on the variance (scale) parameter $\sigma_\alpha$, as if we were to put an improper prior on it, then the corresponding posterior, most likely, would be improper as well. To compare the proposed prior, we assign an inverse-gamma prior on the variance with parameters set so as to define a very sparse probability distribution. This is recommended, for example, in \cite{Spieg:1994}, where the prior is $\pi(\sigma_\alpha^2)\sim IG(\varepsilon,\varepsilon)$, with $\varepsilon>0$ sufficiently small. We do not discuss in detail the appropriateness of the above choice, or other alternatives; the reader can refer to \cite{Gelman:2006}, for example, for a thorough discussion.
The data consists of $J=8$ educational testing experiments, where the parameters $\alpha_1,\ldots,\alpha_8$ represent the relative effects of Scholastic Aptitude Test coaching programs in different schools. In this example, the parameter $\sigma_\alpha$ represents the between-schools variability (standard deviation) of the effects. Table \ref{Tab:schools} shows the data.
\begin{table}[h] \centering
\begin{tabular}{c|cc} \hline School & $y_j$ & $\sigma_j$ \\ \hline A & 28 & 15 \\
B & 8 & 10 \\
C & -3 & 16 \\
D & 7 & 11 \\
E & -1 & 9 \\
F & 1 & 11 \\
G & 18 & 10 \\
H & 12 & 18 \\ \hline \end{tabular} \caption{Observed effects $(y_j)$ of special preparation on SAT scores on eight randomised experiments, where $\sigma_j$ are the standard errors of effect estimate.} \label{Tab:schools} \end{table}
We have compared the marginal posteriors $\pi(\sigma_\alpha^2|\mathbf{y})$ obtained by using the inverse-gamma prior with $\varepsilon=1$ and the proposed prior in \eqref{eq_prior2} with $a=1$. The histograms of the marginal posteriors are in Figure \ref{fig:schools}, where we note similar results. The statistics of the posteriors are reported in Table \ref{tab:schools_poststas}, where we note a less-informative distribution when the proposed prior is employed. This is expected, as the inverse-gamma distribution is considered a relatively informative one \citep{Gelman:2006}.
\begin{figure}
\caption{Histogram of the marginal posterior for $\sigma_\alpha^2$ for the School problem when using an inverse-gamma prior (left) and the proposed prior based on scores (right).}
\label{fig:1a}
\label{fig:2a}
\label{fig:schools}
\end{figure}
\begin{table}[h] \centering
\begin{tabular}{c|cc} \hline Prior & Mean & 95\% C.I. \\ \hline Inverse-Gamma & 3.8 & (0.3, 23.9) \\
Score-based & 2.3 & (0.2, 57.8) \\ \hline \end{tabular} \caption{Posterior statistics for the marginal distribution of $\sigma_\alpha^2$ for the schools problem.} \label{tab:schools_poststas} \end{table}
\section{Discussion}\label{sc_discussion} In this paper we have derived a class of objective prior distributions that have the appealing properties of being proper and heavy-tailed. These have been obtained by exploiting a straightforward approach to the construction of score functions (here proposed). In detail, using a convex function $\alpha(\cdot)$ we can find the score function with first two derivatives using \eqref{ascore}. The Hyv\"{a}rinen score arises with $\alpha(u)=u^2$; whereas we have used $\alpha(u)=u^{-2}$ and used it to construct objective prior distributions using methodology introduced in \cite{LVW:2020}.
The class of priors is heavy-tailed, behaving as $1/x^2$ for large $|x|$; this result is immediately obvious as the prior on $(0,\infty)$ is a Lomax distribution with shape 1. In this respect, it behaves similarly to standard objective priors but comes without the problems of being improper. The benefit of using a proper prior is that the posterior is automatically proper and so does not need to be checked.
We have shown that, when compared to Jeffreys prior on simulated data, the frequentist performances of the prior distribution derived from score functions are nearly equivalent. In addition, we have shown that, on both simulated and real data, the proposed prior is suitable to be used in a key scenario where improper priors (e.g. Jeffreys and reference) are not suitable (or are yet to be found). We have also illustrated the prior on a common problem for hierarchical models, that is assigning an objective prior for the variance parameter.
As a final point, we briefly discuss the case where a prior is needed on a multidimensional parameter space. So, say we have a model with $k$ parameters, that is $\pmb\theta=(\theta_1,\ldots,\theta_k)$, where $\theta_j\in\Theta_j$, for $j=1,\ldots,k$. We also assume that the uni-dimensional space for each parameter is either $(0,\infty)$ or $(-\infty,\infty)$. Assuming $k$ relatively large, besides some specific statistical models such as regression models or graphical models, a common practice to assign objective priors on $\pmb\theta$ is as follows: $$\pi(\pmb\theta) = \pi_1(\theta_1)\times\cdots\times\pi_k(\theta_k).$$ In other words, parameters are assumed to be independent a priori, so the joint prior distribution is represented by the product of the marginal priors on each parameter. We can then set $\pi_j(\theta_j)$ to be either \eqref{eq_prior2} or \eqref{eq:cases}, for $j=1,\ldots,k$, depending on $\Theta_j$.
\appendix \section{Appendix}
\subsection{Information and Bregman divergence}\label{sc_informationandbregman}
Information for a density function is assumed to be convex, based on the obvious notion that averaging reduces information \citep{Topsoe:2001}. Hence, if we write $I(p)=\int \phi(p)$, then $\phi$ is assumed convex.
For example, when $\phi(p)=p\log p$, $\phi$ is convex, as well as when $\phi(p,p')=(p')^2/p$, being convex in $(p,p')$.
With a convex function we can set up a Bregman divergence. First, recall that a Bregman divergence (or Bregman distance) is a measure of the distance between two points, and it is defined in terms of a convex function. In particular, when the two points are probability distributions, then we have a statistical distance. Probably the most elementary Bregman distance is the (squared) Euclidean distance.
Let us start by considering the case $m=2$, so $\phi$ is a function of $(p,p')$. Let $u$ and $v$ be vectors in $\mathbb{R}^d$. The Bregman divergence with convex function $\phi:\mathbb{R}^d\to\mathbb{R}$ is given by $$B_\phi(u,v)=\phi(u)-\phi(v)-\langle \nabla \phi(v),u-v\rangle,$$ where the right most term is $$\sum_{j=1}^d \frac{\partial \phi(v)}{\partial v_j}(u_j-v_j).$$ So $B_\phi(u,v)\geq 0$, and equality holds if and only if $u=v$.
In this work, we will focus on such divergences where the arguments are probability density functions \citep{StumVaj:2012}. Hence, in the one dimensional case, with $\phi:\mathbb{R}_+\to\mathbb{R}$ a convex function and $p(x)$ a density function, define \begin{equation}\label{onebreg} B_\phi(p(x),q(x))=\phi(p(x))-\phi(q(x))-\frac{\partial \phi(q)}{\partial q}(x)\,\,(p(x)-q(x)), \end{equation} and the divergence between $p$ and $q$ defined as \begin{equation}\label{bdiv} D(p,q)=\int B_\phi(p(x),q(x))\,{\rm d} x. \end{equation} For example, if $\phi(p)=p\,\log p$, which is convex in $p$, then \begin{eqnarray*} D(p,q) &=& \int p\log p-\int q\log q-\int (1+\log q)\,(p-q) \\ &=& \int p\,\log(p/q), \end{eqnarray*} which is the well known Kullback--Leibler divergence. The $L_2$ divergence arises when $\phi(p)=p^2$, with $B_\phi(p,q)=(p-q)^2$.
Note that we can write $$D(p,q)=\int \phi(p)-\int p\left[\frac{\partial \phi}{\partial q}-\int \left\{q\frac{\partial \phi}{\partial q}-\phi(q) \right\}\right],$$ and so we see that $$S(q)=\frac{\partial \phi}{\partial q}-\int \left\{q\frac{\partial \phi}{\partial q}-\phi(q) \right\}$$ is a score function, the Bregman score, and is local if $\phi$ satisfies $q\,\partial \phi/\partial q=\phi(q)$. In this case the only solution is the log-score. As mentioned above, the log-score is the sole local and proper score function, that is depending on $q$ only. So, the Bregman divergence and the Bregman score confirm this, together with the results in \cite{Bern:1979}. We will see this is also the case with order $m=2$.
The extension to the Bregman divergence and the Bregman score that we discuss in this paper, follows from a two-dimensional Bregman divergence with arguments $(p,p')$, where $p'(x)=dp/dx$, and for some two-dimensional convex function $\phi(u,v)$. It is defined by \begin{equation}\label{breg2dim} B_\phi(p(x),q(x))=\phi(p(x),p'(x))-\phi(q(x),q'(x))-\frac{\partial \phi}{\partial q}(x)\,(p(x)-q(x))- \frac{\partial \phi}{\partial q'}(x)\,(p'(x)-q'(x)). \end{equation}
For example, if $\phi(u,v)=v^2/u$, which is easily shown to be convex, we get $$D(p,q)=\int p\left(\frac{p'}{p}-\frac{q'}{q}\right)^2,$$ known as the Fisher information divergence; see, for example, \cite{Villani:2008}.\\
\subsection{Divergences and scores}\label{sc_divergences}
The two most well known measures of information are differential Shannon and Fisher. See, for example, \cite{MacKay:2003}. Associated divergences are the Kullback--Leibler and Fisher, respectively, with corresponding score functions the logarithmic and Hyv\"{a}rinen. The connection between divergence, information and score functions can be understood from the following: \begin{equation}\label{divinfsc} D(p,q)=I(p)+\int p \,S(q). \end{equation} Here $D$ denotes divergence, $I$ information and $S$ score. From this it is clear that $\int pS(q)$ is minimized at $q=p$. For \eqref{divinfsc} based on the Kullback--Leibler divergence, we have $$D(p,q)=\int p\,\log(p/q),\quad I(p)=\int p\log p\quad\mbox{and}\quad S(q)=-\log q.$$ For \eqref{divinfsc} based on Fisher information, we have \begin{equation}\label{gendiv} D(p,q)=\int p\left(p'/p-q'/q\right)^2,\quad I(p)=\int (p')^2/p\quad\mbox{and}\quad S(q)=2q''/q-(q'/q)^2. \end{equation} From \eqref{onebreg} we get $$D(p,q)=\int \phi(p)+\int p\left\{-\frac{\partial \phi}{\partial q}+\int \left[q\,\frac{\partial \phi}{\partial q}-\phi(q)\right]\right\},$$ so $$S(q)=-\frac{\partial \phi}{\partial q}+\int \left[q\,\frac{\partial \phi}{\partial q}-\phi(q)\right].$$ More generally, using the two dimensional Bregman divergence in \eqref{breg2dim}, and putting it in \eqref{bdiv}, we obtain $$D(p,q)=\int \phi(p,p')+\int p\,S(q,q',q''),
$$ where the score is \begin{equation}\label{scores} S(q,q',q'')=-\frac{\partial\phi}{\partial q}+\frac{d}{dx}\frac{\partial \phi}{\partial q'}-\left\{\int \phi(q,q')-\int q\,\frac{\partial \phi}{\partial q}-\int q'\frac{\partial\phi}{\partial q'}\right\}. \end{equation} The score $S(q)$ has been obtained by implementing integration by parts, and assuming $[p\cdot \partial \phi/\partial q']$ vanishes at the boundary points. To ensure the score is local, we use the following condition on $\phi$, \begin{equation}\label{condphi} \phi(u,v)=u\,\frac{\partial \phi}{\partial u}+v\,\frac{\partial\phi}{\partial v}. \end{equation} Hence, the proposed class of score functions, which effectively is the class introduced in \cite{Parry:2012}, is given by \begin{equation}\label{score} S(q,q',q'')=-\frac{\partial\phi}{\partial q}+\frac{d}{dx}\frac{\partial \phi}{\partial q'}. \end{equation}
The derivation just presented is arguably much simpler than the one used in \cite{Parry:2012}, as we have avoided any variational analysis and the use of differential operators.
For a specific example, consider the class of convex functions, satisfying \eqref{condphi}, given by \begin{equation}\label{diva} \phi(u,v)=u\,\alpha(v/u), \end{equation} for some convex function $\alpha:\mathbb{R}\to\mathbb{R}$. The convexity of $\alpha$ implies the convexity of function $\phi$ in \eqref{diva}, as the following Lemma \ref{lem_convex} shows.
\begin{lemma}\label{lem_convex} The function $\phi(u,v)=u\alpha(v/u)$ is convex when $\alpha$ is convex. \end{lemma}
\begin{proof} The Hessian matrix corresponding to $\phi$ is seen to be $$ \frac{\alpha''(v/u)}{u}\left( \begin{matrix} (v/u)^2 & -v/u \\ \\ -v/u & 1 \end{matrix} \right), $$ which is positive semi-definite when $\alpha''\geq0$, since its diagonal entries are non-negative and its determinant is zero. Given that $\alpha$ is assumed to be convex, the condition $\alpha''\geq0$ holds, and hence $\phi$ is convex. \end{proof}
\noindent Given the above form for $\alpha$, we are now able to find the divergence, the information and the score. However, before proceeding, we first note that $$-\phi(q,q')+q\,\frac{\partial \phi(q,q')}{\partial q}+q'\,\frac{\partial \phi(q,q')}{\partial q'}=0.$$ That is, $\phi$ satisfies the condition in \eqref{condphi}. In fact, we have \begin{equation}\label{cond} -u\,\alpha(v/u)+u\,\left[\alpha(v/u)+\alpha'(v/u)\,(-v/u^2)\right]+v\,\alpha'(v/u)\,(1/u)=0 \end{equation} for all $(u,v)$.
\subsection{Higher order score functions}\label{sc_highorder}
In this section we look at score functions using an arbitrary number of derivatives; that is, we allow $m>2$. So let $\phi$ now be a convex function on $(m+1)$ dimensions: $$\phi(u_0,\ldots,u_{m})\geq \phi(v_0,\ldots,v_{m})+\sum_{j=0}^{m}\frac{\partial \phi}{\partial v_j}(u_j-v_j).$$ The Bregman divergence can be written as $$B_\phi(q,p)=\phi(q_0,q_1,\ldots,q_m)-\phi(p_0,p_1,\ldots,p_m)-\sum_{j=0}^m \frac{\partial \phi}{\partial p_j}(q_j-p_j),$$ where the subscript $j$ indicates the order of differentiation, with, for example, $p_0=p$ and $p_j=d^j p/d x^j$. Also, we have $D(p,q)=\int B_\phi(p,q).$ In this Section, to keep a readable notation, we set $p=(p_0,p_1,\ldots,p_m)$ and $q=(q_0,q_1,\ldots,q_m)$. If we have \begin{equation}\label{const} \phi(p)=\sum_{j=0}^m \frac{\partial \phi}{\partial p_j}p_j, \end{equation} and the derivatives disappear at boundary values, using multiple integration by parts we get
$$ \begin{array}{ll} D(p,q) & =\int \phi(p)-\sum_{j=0}^m \int p_j\,\frac{\partial\phi}{\partial q_j} \\ & =\int \phi(p)-\int p\,\sum_{j=0}^m (-1)^j \frac{d^j}{d x^j} \frac{\partial\phi}{\partial q_j}. \end{array} $$
Hence, the score function is given by \begin{equation}\label{mscore} S(q)=\sum_{j=0}^m (-1)^{j+1}\,\frac{d^j}{d x^j} \frac{\partial\phi}{\partial q_j}. \end{equation} Note that, if we have $m=0$ and $\phi(u)=u\log u-u$, we recover the log-score function, that is $S(q)=-\log q$. If we have $m=1$ and $\phi(u,v)=v^2/u$, we recover the Hyv\"{a}rinen score function.
A general form of convex function satisfying the requirement \eqref{const} is $$\phi(u_0,\ldots,u_{m})=\sum_{j=0}^{m-1} u_j\,\alpha_j(u_{j+1}/u_j),$$ where $(\alpha_0,\alpha_1,\ldots,\alpha_{m-1})$ is a set of convex functions.
\subsection{Connection with \cite{Parry:2012}}\label{ParryApp}
In Section~\ref{sc_divergences}, we have shown that it is possible to derive the class of score functions using convex functions and the Bregman divergence only. Here we show that the properties we have used imply those used by \cite{Parry:2012}.
First we show that if a function $\phi$ satisfies \eqref{condphi}, then $\phi$ is $1$--homogeneous.
\begin{lemma}\label{lemonehom} Let $\phi$ be such that $\phi(u,v)=u\partial\phi/\partial u+v\partial \phi/\partial v$. Then, for any $\lambda>0$, it holds that $\phi(\lambda u,\lambda v)=\lambda\,\phi(u,v)$. \end{lemma}
\begin{proof} Let $T(\lambda)=\phi(\lambda u,\lambda v)$. Thus, $\partial T/\partial\lambda=T(\lambda)/\lambda$, which implies $T(\lambda)\propto \lambda$. \end{proof}
\noindent Next we show that the class of score functions \eqref{score} and the condition \eqref{condphi}, imply \eqref{euler}.
\begin{lemma}\label{lemeuler} If a function $\phi$ satisfies the condition in \eqref{condphi}, and a score $S$ is given by \eqref{score}, then the score $S$ satisfies equation \eqref{euler}. \end{lemma}
\begin{proof} The proof is a simple matter of algebra and calculus. It requires the key observation that $$\frac{\partial }{\partial q}\frac{d}{dx}=\frac{d}{dx}\frac{\partial}{\partial q} \quad\quad\mbox{and}\quad\quad \frac{\partial }{\partial q'}\frac{d}{dx}=\frac{d}{dx}\frac{\partial}{\partial q'}+\frac{\partial}{\partial q}. $$ Given that $\phi=q\partial \phi/\partial q+q'\partial \phi/\partial q'$ we get $$q\frac{\partial^2\phi}{\partial q^2}+q'\frac{\partial^2\phi}{\partial q\partial q'}=0\quad\mbox{and}\quad q\frac{\partial^2\phi}{\partial q\partial q'}+q'\frac{\partial^2\phi}{\partial q'^2}=0. $$ Since $S=-\partial\phi/\partial q+q'\partial^2\phi/\partial q\partial q'+q''\partial^2\phi/\partial q'^2$, we have $\partial S/\partial q'' =\partial^2\phi/\partial q'^2$. Further, $$\frac{\partial S}{\partial q'}=-\frac{\partial^2\phi}{\partial q\partial q'}+\frac{d}{dx}\frac{\partial^2\phi}{\partial q'^2}+\frac{\partial^2\phi}{\partial q\partial q'}\quad \mbox{and}\quad\frac{\partial S}{\partial q}=-\frac{\partial^2\phi }{\partial q^2}+\frac{d}{dx}\frac{\partial^2\phi}{\partial q\partial q'}.$$ Combining all the above expressions, yields \eqref{euler}. \end{proof}
\noindent Finally, in this Appendix, we show that if $\phi$ is $1$--homogeneous, then $\phi$ is convex. The result is quite straightforward, and is achieved by showing that $\partial^2\phi/\partial u^2\,\cdot \partial^2\phi/\partial v^2\geq (\partial^2\phi/\partial u\partial v)^2$.
Here we make the connection with equation (39) in \cite{Parry:2012} and the score function in \eqref{mscore}. The main result is to show that if $$\phi(u)=\sum_{j=0}^m u_j\,\frac{\partial \phi}{\partial u_j}\quad \mbox{and}\quad S(u)=\sum_{j=0}^m (-1)^{j+1}\frac{d^j}{dx^j}\frac{\partial \phi}{\partial u_j},$$ then the relevant Euler--Lagrange equation is $$\sum_{j=0}^{2m}(-1)^j\frac{d^j}{dx^j}\left(u_0\frac{\partial S}{\partial u_j}\right)=0.$$ To this end, define the operators, as in \cite{Parry:2012} $$E=\sum_{j} u_j\,\frac{\partial}{\partial u_j},\quad \Lambda=\sum_{j} (-1)^{j+1}\frac{d^j}{dx^j}\frac{\partial}{\partial u_j}\quad\mbox{and}\quad L=\sum_{j}(-1)^j\frac{d^j}{dx^j}\left(u_0\frac{\partial}{\partial u_j}\right).$$
\begin{theorem} If $S=\Lambda \phi$ and $E\phi=\phi$, then $LS=0$. \end{theorem}
\begin{proof} To prove the theorem, we consider the properties of differential operators, as discussed in Section~5 of \cite{Parry:2012}. In particular, the properties $\Lambda \,E=E\Lambda+\Lambda$ and $E\,L=L\,E$.
\noindent Hence, $L\Lambda \phi=L\,\Lambda\,E\phi=L(E\Lambda+\Lambda)\phi$, which implies that $L\,E\,\Lambda\phi=0$. This in turn implies $E\,L\Lambda\phi=0$; i.e. $E\,LS=0$. Finally, it is easy to see that if $E\psi=0$ then $\psi=0$. \end{proof}
\end{document} |
\begin{document}
\title[The Swing Lemma and $\E C_1$-diagrams] {Using the Swing Lemma and $\E C_1$-diagrams for congruences of planar semimodular lattices} \author[G.\ Gr\"atzer]{George Gr\"atzer} \email{gratzer@me.com} \urladdr{http://server.maths.umanitoba.ca/homepages/gratzer/} \address{University of Manitoba} \date{June 6, 2021}
\begin{abstract} A planar semimodular lattice $K$ is \emph{slim} if $\SM{3}$ is not a sublattice of~$K$. In a recent paper, G. Cz\'edli found four new properties of congruence lattices of slim, planar, semimodular lattices, including the \emph{No Child Property}: \emph{Let~$\mathcal{P}$ be the ordered set of join-irreducible congruences of $K$. Let $x,y,z \in \mathcal{P}$ and let $z$ be a~maximal element of $\mathcal{P}$. If $x \neq y$ and $x, y \prec z$ in $\mathcal{P}$, then there is no element $u$ of $\mathcal{P}$ such that $u \prec x, y$ in $\mathcal{P}$.}
We are applying my Swing Lemma, 2015, and a type of standardized diagrams of Cz\'edli's, to verify his four properties. \end{abstract}
\subjclass[2000]{06C10}
\keywords{Rectangular lattice, slim planar semimodular lattice, congruence lattice}
\maketitle
\section{Introduction}\label{S:Introduction}
Let $K$ be a planar semimodular lattice. We call the lattice $K$ \emph{slim} if $\SM{3}$ is not a~sublattice of~$K$. In the paper \cite[Theorem 1.5]{gG14a}, I found a property of congruences of slim, planar, semimodular lattices. In the same paper (see also Problem 24.1 in G. Gr\"atzer~\cite{CFL2}), I~proposed the following:
\tbf{Problem.} Characterize the congruence lattices of slim planar semimodular lattices.
G. Cz\'edli~\cite[Corollaries 3.4, 3.5, Theorem 4.3]{gCa} found four new properties of congruence lattices of slim, planar, semimodular lattices.
\begin{theoremn}\label{T:main} Let $K$ be a slim, planar, semimodular lattice with at least three elements and let~$\E P$ be the ordered set of join-irreducible congruences of $K$.
\begin{enumeratei} \item \emph{Partition Property:} The set of maximal elements of $\E P$ can be divided into the disjoint union of two nonempty subsets such that no two distinct elements in the same subset have a common lower cover.\label{E:LC} \item \emph{Maximal Cover Property:} If $v \in \E P$ is covered by a maximal element $u$ of $\E P$, then $u$ is not the only cover of $v$. \item \emph{No Child Property:} Let $x \neq y \in \E P$ and let $u$ be a maximal element of $\E P$. If $x,y \prec u$ in $\E P$, then there is no element $z \in \E P$ such that $z \prec x, y$ in $\E P$. \item \emph{Four-Crown Two-pendant Property:} There is no cover-preserving embedding of the ordered set $\E R$ in Figure~\ref{F:notation} into $\E P$ satisfying the property\tup{:} any maximal element of~$\E R$ is a maximal element of $\E P$. \end{enumeratei} \end{theoremn}
In this paper, we will provide a short and direct proof of this theorem using only the Swing Lemma and $\E C_1$-diagrams, see Section~\ref{S:Tools}.
\begin{figure}
\caption{The Four-crown Two-pendant ordered set $\E R$ with notation; the covering $\SfS 7$ sublattice with edge and element notation}
\label{F:notation}
\end{figure}
\subsection*{Outline} Section~\ref{S:Motivation} provides the motivation for Cz\'edli's Theorem. Section~\ref{S:Tools} provides the tools we need: the Swing Lemma, $\E C_1$-diagrams, and forks. Section~\ref{S:partition} proves the Partition Property, Section~\ref{S:Maximal} does the Maximal Cover Property, while Section~\ref{S:Child} verifies the No Child Property. Finally, the Four-Crown Two-pendant Property is proved in Section~\ref{S:Crown}.
\section{Motivation}\label{S:Motivation}
In my paper \cite{GLS98a} with H. Lakser and E.\,T. Schmidt, we proved that every finite distributive lattice $D$ can be represented as the congruence lattice of a semimodular lattice $L$. To our surprise, the semimodular lattice $K$ we constructed was \emph{planar}.
G.~Gr\"atzer and E.~Knapp~\cite{GKn07}--\cite{GK10} started the study of planar semimodular lattices. I continued it with my ``Notes on planar semimodular lattices'' series (started with Knapp): \cite{gG13}, \cite{GW10} (with T. Wares), \cite{CG12} (with G. Cz\'edli), \cite{gG19}, \cite{gG21b}. See also G. Cz\'edli and E.\,T. Schmidt \cite{CS13} and G. Cz\'edli \cite{gC14}--\cite{gCb}.
A major subchapter of the theory of planar semimodular lattices started with the observation that in the construction of the lattice $K$, as in the first paragraph of this section, $\SM{3}$ sublattices play a crucial role. It was natural to raise the question what can be said about congruence lattices of slim, planar, semimodular (SPS) lattices (see [CFL2, Problem~24.1], originally raised in G. Gr\"atzer~\cite{gG14a}). In~\cite{gG14a}, I~found the first necessary condition and G. Cz\'edli \cite{gC14a} proved that this condition is not sufficient (see also my related papers \cite{gG15a} and \cite{gG19}).
A number of papers developed tools to tackle this problem: the Swing Lemma (G. Gr\"atzer~\cite{gG15}), trajectory coloring (G. Cz\'edli \cite{gC14}), special diagrams (G. Cz\'edli \cite{gC17}), lamps (G. Cz\'edli \cite{gCa}). Some of these results require long proofs. The proof of the trajectory coloring theorem is just shy of 20 pages, while the basic theory of lamps and its application to Theorem~\ref{T:main} is 23 pages.
There are a number of surveys of this field, see the book chapters G.~Cz\'edli and G.~Gr\"atzer~\cite{CG14} and G.~Gr\"atzer~\cite{gG13b} in G.~Gr\"atzer and F.~Wehrung, eds.\,~\cite{LTS1}. My~presentation \cite{gG21a} provides a gentle review for the background of this topic.
\section{The tools we need}\label{S:Tools}
Most basic concepts and notation not defined in this paper are available in Part~I of the book \cite{CFL2}, see
\verb+https://www.researchgate.net/publication/299594715+\\ \indent {\tt arXiv:2104.06539}
\noindent It is available to the reader. We will reference it, for instance, as [CFL2, page 52]. In particular, we use the notation $C \persp D$, $C \perspup D$, and $C \perspdn D$ for perspectivity, up-perspectivity, and down-perspectivity, respectively. As usual, for planar lattices, a prime interval (or covering interval) is called an \emph{edge}. For a finite lattice $K$ and a~finite ordered set $R$, a \emph{cover-preserving} embedding $\ge \colon R \to K$ is an embedding~$\ge$ mapping edges of $R$ to edges of $K$. We define a \emph{cover-preserving} sublattice similarly. For the lattice $\SfS 7$ of Figure~\ref{F:notation}, we need a variant: an $\SfS 7$ sublattice $\SfS{}$ (a sublattice isomorphic to $\SfS 7$) is a \emph{peak sublattice} if the three top edges ($L$, $M$, and $R$ in Figure~\ref{F:notation}) are edges in $K$.
By G. Gr\"atzer and E. Knapp \cite{GKn09}, every slim, planar, semimodular lattice $K$ has a congruence-preserving extension (see [CFL2, page 43]) $\hat K$ to a slim rectangular lattice. Any of the properties (i)--(iv) holds for $K$ if{}f it holds for $\hat K$. Therefore, in~the rest of this paper, we can assume that $K$ is a slim rectangular lattice, simplifying the discussion.
\subsection{Swing Lemma}\label{S:Swing}
For an edge $E$ of an SPS lattice $K$, let $E = [0_E, 1_E]$ and define $\Col{E}$, the \emph{color of}~$E$, as $\con E$, the (join-irreducible) congruence generated by collapsing $E$ (see [CFL2, Section 3.2]). We write $\E P$ for $\Ji {\Con K}$, the ordered set of join-irreducible congruences of $K$.
As in my paper~\cite{gG15}, for the edges $U, V$ of an SPS lattice $K$, we define a binary relation: $U$~\emph{swings} to $V$, in formula, $U \swing V$, if $1_U = 1_V$, the element $1_U = 1_V$ of~$K$ covers at least three elements, and $0_V$ is neither the left-most nor the right-most element covered by $1_U = 1_V$; if also $0_U$ is such, then the swing is \emph{interior}, otherwise, it is \emph{exterior}, denoted by $U \inswing V$ and $U \exswing V$, respectively.
\begin{named}{Swing Lemma [G. Gr\"atzer~\cite{gG15}]} Let $K$ be an SPS lattice and let $U$ and $V$ be edges in $K$. Then $\Col V \leq\Col U$ if{}f there exists an edge $R$ such that $U$ is up-perspective to $R$ and there exists a sequence of edges and a~sequence of binary relations \begin{equation*}\label{E:sequence}
R = R_0 \mathbin{\gr}_1 R_1 \mathbin{\gr}_2 \dots \mathbin{\gr}_n R_n = V, \end{equation*} where each relation $\mathbin{\gr}_i$ is $\perspdn$ \pr{down-perspective} or $\swing$ \pr{swing}. In~addition, this sequence also satisfies \begin{equation*}\label{E:geq}
1_{R_0} \geq 1_{R_1} \geq \dots \geq 1_{R_n}. \end{equation*} \end{named}
The following statements are immediate consequences of the Swing Lemma, see my papers~\cite{gG15} and \cite{gG14e}.
\begin{corollary}\label{C:equal}
We use the assumptions of the Swing Lemma. \begin{enumeratei} \item The equality $\Col U = \Col V$ holds in $\E P$ if{}f there exist edges $S$ and $T$ in $K$, such that \begin{equation*}\label{E:xx}
U \perspup S,\ S \inswing T,\ T \perspdn V. \end{equation*} \item Let us further assume that the element $0_U$ is meet-irreducible. Then the equality $\Col U = \Col V$ holds in $\E P$ if{}f there exists an edge $T$ such that $U \inswing T \perspdn V$. \item If the lattice $K$ is rectangular and $U$ is on the upper boundary of $K$, then the equality $\Col U = \Col V$ holds in $\E P$ if{}f $U \perspdn V$. \end{enumeratei} \end{corollary}
Note that in (i) the edges $S, T, U, V$ need not be distinct, so we have as special cases $U = V$, $U \persp V$, $S = T$, and others.
\begin{corollary}\label{C:cov}
We use the assumptions of the Swing Lemma. \begin{enumeratei} \item The covering $\Col V \prec \Col U$ holds in $\E P$ if{}f there exist edges $R_1, \dots, R_4$ in~$K$, such that \begin{equation*}
U \perspup R_1,\ R_1 \inswing R_2,\ R_2 \perspdn R_3,\
R_3 \exswing R_4,\ R_4 \perspdn V. \end{equation*} \item If the element $0_U$ is meet-irreducible, then the covering $\Col V \prec \Col U$ holds in $\E P$ if{}f there exist edges $S, T$ in $K$, so that \begin{equation*}
U \perspdn S \exswing T \perspdn V. \end{equation*} \end{enumeratei} \end{corollary}
\begin{corollary}\label{C:covnew} Let $K$ be a slim rectangular lattice, let $U$ and $V$ be edges in $K$, and let $U$ be in the upper-left boundary of $K$. \begin{enumeratei} \item The covering $\Col V \prec \Col U$ holds in $\E P$ if{}f there exist edges $S, T$ in $K$, such that \begin{equation}\label{E:seq5}
U \perspdn S \exswing T \perspdn V. \end{equation} \item Define the element $t = 1_S = 1_T \in K$ and let $S = E_1, E_2, \dots, E_n = W$ enumerate, from left to right, all the edges $E$ of $K$ with $1_E = t$. Then \begin{align}
\col{S} &\neq \col{W},\label{E:1}\\%\eqref{E:1}
\col{E_2} = \cdots &= \col{E_{n-1}} = \col{T},\label{E:2}\\%\eqref{E:2}
\col{T} &\prec \col{S}, \col{W}.\label{E:3} \end{align} \end{enumeratei} \end{corollary}
\begin{corollary}\label{C:max} Let the edge $U$ be on the upper edge of the rectangular lattice $K$. Then $\Col U$ is a maximal element of $\E P$. \end{corollary}
The converse of this statement is stated in Corollary~\ref{C:max1}.
\subsection{$\E C_1$-diagrams}\label{S:diagrams}
In the diagram of a planar lattice $K$, a \emph{normal edge} (\emph{line}) has a~slope of $45\degree$ or $135\degree$. If it is the first, we call it a~\emph{normal-up edge} (\emph{line}), otherwise, a \emph{normal-down edge} (\emph{line}). Any edge of slope strictly between $45\degree$ and $135\degree$ is \emph{steep}.
\begin{definition}\label{D:well}
A diagram of a rectangular lattice $K$ is a \emph{$\E C_1$-diagram} if the middle edge of any covering $\SfS 7$ is steep and all other edges are normal. \end{definition}
This concept was introduced in G.~Cz\'edli~\cite[Definition 5.3(B)] {gC17}, see also G.~Cz\'edli \cite[Definition 2.1]{gCa} and G. Cz\'edli and G.~Gr\"atzer~\cite[Definition 3.1]{CG21}. The following is the existence theorem of $\E C_1$-diagrams in G. Cz\'edli \cite[Theorem 5.5]{gC17}.
\begin{theorem}\label{T:well}
Every rectangular lattice $K$ has a $\E C_1$-diagram. \end{theorem} See the illustrations in this paper for examples of $\E C_1$-diagrams. For a short and direct proof for the existence of $\E C_1$-diagrams, see my paper~\cite{gG21b}.
\emph{In this paper, $K$ denotes a slim rectangular lattice with a fixed $\E C_1$-diagram and~$\E P$ is the ordered set of join-irreducible congruences of $K$}.
Let $C$ and $D$ be maximal chains in an interval $[a,b]$ of $K$ such that $C \ii D = \set{a,b}$. If there is no element of~$K$ between $C$ and $D$, then we call $C \uu D$ a~\emph{cell}. A~four-element cell is a \text{\emph{$4$-cell}}. Opposite edges of a $4$-cell are called \emph{adjacent}. Planar semimodular lattices are $4$-cell lattices, that is, all of its cells are $4$-cells, see G.~Gr\"atzer and E. Knapp \cite[Lemmas 4, 5]{GKn07} and [CFL2,~Section 4.1] for more detail.
The following statement illustrates the use of $\E C_1$-diagrams.
\begin{lemma}\label{L:application} Let $K$ be a slim rectangular lattice with a fixed $\E C_1$-diagram and let~$X$ be a normal-up edge of $K$. Then $X$ is up-perspective either to an edge in the upper-left boundary of $K$ or to a steep edge. \end{lemma}
\begin{proof} If $X$ is neither steep nor in the upper-left boundary of $K$, then there is a~$4$-cell $C$ whose lower-right edge is $X$. If the upper-left edge is steep or it is in the upper-left boundary, then we are done. Otherwise, we proceed the same way until we reach a~steep edge or an edge in the upper-left boundary. \end{proof}
\begin{corollary}\label{C:max1} Let the edge $U$ be on the upper edge of $K$. Then $\Col U$ is a maximal element of $\E P$. Conversely, if $u$ is a maximal element of $\E P$, then there is an edge $U$ on the upper edge of $K$ so that $\Col U = u$. \end{corollary}
\subsection{Trajectories}\label{S:Trajectories} G. Cz\'edli and E.\,T. Schmidt \cite{CS11} introduced a \emph{trajectory} in $K$ as a maximal sequence of consecutive edges, see also [CFL2, Section~4.1]. The \emph{top edge}~$T$ of a trajectory is either in the upper boundary of $K$ or it is steep by Lemma~\ref{L:application}. For such an edge~$T$, we denote by $\traj T$ the trajectory with top edge~$T$.
By G.~Gr\"atzer and E. Knapp \cite[Lemma 8]{GKn07}, an element $a$ in an SPS lattice~$K$ has at most two covers. Therefore, a trajectory has at most one top edge and at most one steep edge. So we conclude the following statement.
\begin{lemma}\label{L:disj}
Let $K$ be a slim rectangular lattice with a fixed $\E C_1$-diagram. Let $X$ and $Y$ be distinct steep edges of $K$. Then $\traj X$ and $\traj Y$ are disjoint. \end{lemma}
\section{The Partition Property}\label{S:partition}
First, we verify the Partition Property for the slim rectangular lattice $K$ and with a fixed $\E C_1$-diagram. We start with a lemma.
\begin{lemma}\label{L:disjoint}
Let $X $ and $Y$ be distinct edges on the upper-left boundary of $K$. Then there is no edge $Z$ of $K$ such that $\col Z \prec \col X, \col Y$. \end{lemma}
\begin{proof} By way of contradiction, let $Z$ be an edge such that $\col Z \prec \col X, \col Y$. Since $X$ and $Y$ are on the upper-left boundary, Corollary~\ref{C:covnew}(i) applies. Therefore, there exist normal-up edges $S_X, S_Y$ and steep edges $T_X, T_Y$ such that \[
X \perspdn S_X \exswing T_X,\q Y \perspdn S_Y \exswing T_Y,\q
Z \in \traj {T_X} \ii \traj {T_Y}. \] By Lemma~\ref{L:disj}, the third formula implies that $T_X = T_Y$ and so $X = Y$, contrary to the assumption. \end{proof}
By Corollary~\ref{C:max1}, the set of maximal elements of $\E P$ is the same as the set of colors of edges in the upper boundaries. We can partition the set of edges in the upper boundaries into the set of edges~$\E L$ in the upper-left boundary and the set of edges~$\E R$ in the upper-right boundary. If $X $ and $Y$ are distinct edges in $\E L$, then there is no edge $Z$ of $K$ such that $\col Z \prec \col X, \col Y$ by Lemma~\ref{L:disjoint}. By symmetry, this verifies the Partition Property.
\section{The Maximal Cover Property}\label{S:Maximal}
Next, we verify the Maximal Cover Property for the slim rectangular lattice~$K$ and with a fixed $\E C_1$-diagram.
Let $x \in \E P$ be covered by a maximal element $u$ of $\E P$ in $K$. By Corollary~\ref{C:max1}, we can choose an edge $U$ of color $u$ on the upper boundary of $K$, by symmetry, on the upper-left boundary of $K$. By Corollary~\ref{C:covnew}(ii), we can choose the edges $S, T$ in $K$ so that $U \perspdn S \exswing T$, $\col S = u$, and $\col T = x$. By Corollary~\ref{C:covnew}(ii), specifically, by equations \eqref{E:1} and \eqref{E:3}, we have $x \prec u, \col{W}$ and $u \neq \col{W}$, verifying the Maximal Cover Property.
\section{The No Child Property}\label{S:Child}
In this section, we verify the No Child Property for the slim rectangular lattice~$K$ and with a fixed $\E C_1$-diagram.
Let $x,y,z,u \in \E P$ with $x \neq y \in \E P$, let $u$ be a maximal element of $\E P$, and let $x, y \prec u$ in $\E P$. By way of contradiction, let us assume that there is an element $z \in \E P$ such that $z \prec x,y$ in $\E P$.
By Corollary~\ref{C:max1}, the element~$u$ colors an edge~$U$ on the upper boundary of~$K$, say, in the upper-left boundary. By Corollary~\ref{C:cov}(i), for $z \prec x \in \E P$, we get a peak sublattice $\SfS 7$ in which the middle edge $Z$ is colored by $z$ and upper-left edge $X$ is colored by $x$, or symmetrically. The upper-right edge $Y$ must have color~$y$.
Now we apply Corollary~\ref{C:covnew}(ii) to the edge $U$ and middle edge $Z$ of the peak sublattice $\SfS 7$, obtaining that $U \perspdn Y \swing Z$, in particular, $U \perspdn Y$. This is a contradiction, since $U$ is normal-up and $Y$ is normal-down.
\section{The Four-Crown Two-pendant Property}\label{S:Crown}
Finally, we verify the Four-Crown Two-pendant Property for the slim rectangular lattice $K$ and with a fixed $\E C_1$-diagram.
By way of contradiction, assume that the ordered set $\E R$ of Figure~\ref{F:notation} is a cover-preserving ordered subset of $\E P$, where $a,b,c,d$ are maximal elements of $\E P$. By~Corollary~\ref{C:max1}, there are edges $A,B,C,D$ on the upper boundary of $K$, so that $\col A = a$, $\col B = b$, $\col C=c$, $\col D = d$. By left-right symmetry, we can assume that the edge $A$ is on the upper-left boundary of $K$. Since $p \prec a, b$ in $\E P$, it follows from Lemma~\ref{L:disjoint} that the edge $B$ is on the upper-right boundary of $K$, and so is $D$. Similarly, $C$ is on the upper-left boundary of $K$.
There are four cases, (i) $C$ is below $A$ and $B$ is below $D$; (ii)~$C$~is below $A$ and $D$ is below $B$; and so on. The first two are illustrated in Figure~\ref{F:CABDx}.
\begin{figure}
\caption{Illustrating the proof of The Four-Crown Two-pendant Property}
\label{F:CABDx}
\end{figure}
We consider the first case. By Corollary~\ref{C:cov}(ii), there is a peak sublattice $\SfS 7$ with middle edge $P$ (as in the first diagram of Figure~\ref{F:CABDx}) so that $A$ and $B$ are down-perspective to the upper-left edge and the upper-right edge of this peak sublattice, respectively. We define, similarly, the edge $Q$ for $C$ and $B$, the edge $S$ for $A$ and~$D$, the edge $R$ for $C$ and $D$, and the edge $U$ for $R$ and $P$.
The ordered set $\E R$ is a cover-preserving subset of $\E P$, so we get, similarly, the peak sublattice~$\SfS 7$ with middle edge $U$. Finally, $v \prec q, s$ in $\E R$, therefore, there is a peak sublattice~$\SfS 7$ with middle edge $V$ with upper-left edge $V_l$ and the upper-right edge~$V_r$ so that $Q \perspdn V_l$ and $S \perspdn V_r$, or symmetrically.
This concludes the proof of the Four-Crown Two-pendant Property and of Cz\'edli's Theorem.
Of course, the diagrams in Figure~\ref{F:CABDx} are only illustrations. The grid could be much larger, the edges $A, C$ and $B, D$ may not be adjacent, and there may be lots of other elements in $K$. However, our argument does not utilize the special circumstances in the diagrams.
The second case is similar, except that we get the edge $V$ and cannot get the edge $U$. The third and fourth cases follow the same way.
\appendix
\section{Two more illustrations for Section~\ref{S:Crown}}\label{S:appendix}
\begin{figure}
\caption{Two more illustrations for Section~\ref{S:Crown}}
\label{F:CABDx2}
\end{figure}
\end{document} |
\begin{document}
\newcommand{\ci}[1]{_{ {}_{\scriptstyle #1}}}
\newcommand{\norm}[1]{\ensuremath{\|#1\|}} \newcommand{\abs}[1]{\ensuremath{\vert#1\vert}} \newcommand{\ensuremath{\partial}}{\ensuremath{\partial}} \newcommand{\mathcal{P}}{\mathcal{P}}
\newcommand{\ensuremath{\bar{\partial}}}{\ensuremath{\bar{\partial}}} \newcommand{\overline\partial}{\overline\partial} \newcommand{\mathcal{D}}{\mathcal{D}} \newcommand{\mathbb{B}}{\mathbb{B}} \newcommand{\mathbb{S}}{\mathbb{S}} \newcommand{\mathbb{T}}{\mathbb{T}} \newcommand{\mathbb{R}}{\mathbb{R}} \newcommand{\mathbb{Z}}{\mathbb{Z}} \newcommand{\mathbb{C}}{\mathbb{C}} \newcommand{\mathbb{N}}{\mathbb{N}} \newcommand{\mathcal{H}}{\mathcal{H}} \newcommand{\mathcal{L}}{\mathcal{L}} \newcommand{\widetilde\Delta}{\widetilde\Delta}
\newcommand{\langle }{\langle } \newcommand{\rangle }{\rangle } \newcommand{\operatorname{rk}}{\operatorname{rk}} \newcommand{\operatorname{card}}{\operatorname{card}} \newcommand{\operatorname{Ran}}{\operatorname{Ran}} \newcommand{\operatorname{OSC}}{\operatorname{OSC}} \newcommand{\operatorname{Im}}{\operatorname{Im}} \newcommand{\operatorname{Re}}{\operatorname{Re}} \newcommand{\operatorname{tr}}{\operatorname{tr}} \newcommand{\varphi}{\varphi} \newcommand{\f}[2]{\ensuremath{\frac{#1}{#2}}}
\newcommand{\entrylabel}[1]{\mbox{#1}
}
\newenvironment{entry} {\begin{list}{X}
{\renewcommand{\entrylabel}{\entrylabel}
\setlength{\labelwidth}{55pt}
\setlength{\leftmargin}{\labelwidth}
\addtolength{\leftmargin}{\labelsep}
} } {\end{list}}
\numberwithin{equation}{section} \newtheorem{dfn}{Definition}[section] \newtheorem{thm}{Theorem}[section] \newtheorem{lm}[thm]{Lemma} \newtheorem{cor}[thm]{Corollary} \newtheorem{conj}[thm]{Conjecture} \newtheorem{prob}[thm]{Problem} \newtheorem{prop}[thm]{Proposition} \newtheorem*{prop*}{Proposition}
\theoremstyle{remark} \newtheorem{rem}[thm]{Remark} \newtheorem*{rem*}{Remark}
\newtheorem{quest}[thm]{Question}
\title{Multilinear Dyadic Operators And Their Commutators}
\author{Ishwari Kunwar}
\address{Ishwari Kunwar, School of Mathematics\\ Georgia Institute of Technology\\ 686 Cherry Street\\ Atlanta, GA USA 30332-0160} \email{ikunwar3@math.gatech.edu}
\subjclass[2000]{Primary } \keywords{Multilinear Paraproducts, multilinear Haar Multipliers, dyadic BMO functions, Commutators.}
\begin{abstract} We introduce multilinear analogues of dyadic paraproduct operators and Haar Multipliers, and study boundedness properties of these operators and their commutators. We also characterize dyadic $BMO$ functions via boundedness of certain paraproducts and also via boundedness of the commutators of multilinear Haar Multipliers and paraproduct operators. \end{abstract}
\maketitle \setcounter{tocdepth}{1} \tableofcontents \section{Introduction and statement of main results} \noindent Dyadic operators have attracted a lot of attention in the recent years. The proof of so-called $A_2$ theorem (see \cite{Hyt}) consisted in representing a general Calder$\acute{\text{o}}$n-Zygmund operator as an average of dyadic shifts, and then verifying some testing conditions for those simpler dyadic operators. It seems reasonable to believe that, taking a similar approach, general multilinear Calder$\acute{\text{o}}$n-Zygmund operators can be studied by studying multilinear dyadic operators. Regardless of this possibility, multilinear dyadic operators in their own right are an important class of objects in Harmonic Analysis. Statements regarding those operators can be translated into the non-dyadic world, and are sometimes simpler to prove.\\
\noindent In this paper we introduce multilinear analogues of dyadic operators such as paraproducts and Haar multipliers, and study their boundedness properties. Corresponding theory of linear dyadic operators, which we will be using very often, can be found in \cite{Per}. In \cite{BMNT}, the authors have studied boundedness properties of bilinear paraproducts defined in terms of so-called ``smooth molecules". The paraproduct operators we study are more general multilinear operators, but defined in terms of indicators and Haar functions of dyadic intervals. In \cite{CRW} Coifman, Rochberg and Weiss proved that the commutator of a $BMO$ function with a singular integral operator is bounded in $L^p$, $1<p<\infty.$ The necessity of $BMO$ condition for the boundedness of the commutator was also established for certain singular integral operators, such as the Hilbert transform. S. Janson \cite{Jan} later studied its analogue for linear martingale transforms. In this paper we study commutators of multilinear dyadic operators, and characterize dyadic $BMO$ functions via boundedness of these commutators. For the corresponding theory for general multilinear Calder$\acute{\text{o}}$n-Zygmund operators we refer to \cite{GT} and \cite{LOPTT}.\\
\noindent We organize the paper as follows:\\
\noindent In section 2, we present an overview of some of the main tools we will be using in this paper. These include: the Haar system, linear Haar multipliers, dyadic maximal/square functions, linear/bilinear paraproduct operators and the space of dyadic $BMO$ functions. For more details we refer to \cite{Per}.\\
\noindent In section 3, we obtain a decomposition of the pointwise product of $m$ functions, $m \geq 2,$ which generalizes the paraproduct decomposition of two functions. On the basis of this decomposition we define multilinear paraproducts and investigate their boundedness properties as operators on products of Lebesgue spaces. We also define a multilinear analogue of the linear paraproduct operator $\pi_b$, and characterize dyadic $BMO$ functions via boundedness of certain multilinear paraproduct operators.\\
\noindent In section 4, we define multilinear Haar multipliers in a way consistent with the definition of linear Haar multipliers and multilinear paraproducts, and then investigate their boundedness properties. We also study boundedness properties of their commutators with dyadic $BMO$ functions, and provide a characterization of dyadic $BMO$ functions via the boundedness of those multilinear commutators. In particular, we show that the commutators of the multilinear paraproducts with a function $b$ are bounded if and only if $b$ is a dyadic $BMO$ function. \\
\noindent Our main results involve the following operators:\\ \begin{itemize}
\item $\displaystyle P^{\vec{\alpha}}(f_1,f_2,\ldots,f_m) = \sum_{I\in\mathcal{D}} \left(\prod_{j=1}^m f_j(I,\alpha_j)\right) h_I^{\sigma(\vec{\alpha})}, \quad \vec{\alpha} \in \{0,1\}^m \backslash\{(1,1,\ldots,1)\}. $\\
\item $\displaystyle \pi_b^{\vec{\alpha}}(f_1, f_2, \ldots, f_m) = \sum_{I \in \mathcal{D}} \langle b , h_I \rangle \left(\prod_{j=1}^m f_j(I,\alpha_j)\right) h_I^{1+\sigma(\vec{\alpha})},\quad \vec{\alpha} \in \{0,1\}^m.$\\
\item $\displaystyle T_\epsilon^{\vec{\alpha}} (f_1,f_2, \ldots,f_m) := \sum_{I\in \mathcal{D}} \epsilon_I \left(\prod_{j=1}^m f_j(I,\alpha_j)\right) h_I^{\sigma(\vec{\alpha})},$ \\
$ \quad \vec{\alpha} \in \{0,1\}^m \backslash \{(1,1,\ldots,1)\}, \, \epsilon = \{\epsilon_I\}_{I\in \mathcal{D}} \text{ bounded}.$\\
\item $\displaystyle [b,T_\epsilon^{\vec{\alpha}}]_i(f_1,f_2,\ldots,f_m)(x) := b(x)T_\epsilon^{\vec{\alpha}}(f_1,f_2,\ldots,f_m)(x) - T_\epsilon^{\vec{\alpha}}(f_1, \ldots, bf_i,\ldots,f_m)(x),$\\
$ 1 \leq i \leq m$, $ \vec{\alpha} \in \{0,1\}^m \backslash\{(1,1,\ldots,1)\},\, \epsilon = \{\epsilon_I\}_{I\in \mathcal{D}} \text{ bounded and } b\in BMO^d.$\\ \end{itemize}
\noindent In the above definitions, $\mathcal{D} := \{[m2^{-k}, (m+1)2^{-k}): m,k\in \mathbb{Z}\}$ is the standard dyadic grid on $\mathbb{R}$ and $h_I$'s are the Haar functions defined by $h_I = \displaystyle \frac{1}{\abs{I}^{1/2}}\left(\mathsf{1}_{I_+} - \mathsf{1}_{I_-}\right),$ where $I_-$ and $I_+$ are the left and right halves of $I.$ With $\left< \;,\;\right>$ denoting the standard inner product in $L^2(\mathbb{R}),$ $f_i(I,0) := \left< f_i,h_I\right>$ and $\displaystyle f_i(I,1) := \langle f_i, h_I^2\rangle = \frac{1}{\abs{I}} \int_I f_i,$ the average of $f_i$ over $I.$ The Haar coefficient $\langle f_i, h_I\rangle $ is sometimes denoted by $\widehat{f_i}(I)$ and the average of $f_i$ over $I$ by $\langle f_i \rangle _I$. For $\vec{\alpha} \in \{0,1\}^m,$ $\sigma(\vec{\alpha})$ denotes the number of 0 components in $\vec{\alpha}$. For convenience, we will denote the set $\{0,1\}^m \backslash\{(1,1,\ldots,1)\}$ by $U_m.$\\
\noindent In the following main results $L^p$ stands for the Lebesgue space $L^p(\mathbb{R}):= \left\{f:\norm{f}_p < \infty \right\} $ with $\displaystyle\norm{f}_p = \norm{f}_{L^p} := \left(\int_\mathbb{R} \abs{f(x)}^p dx \right)^{1/p}.$ The weak $L^p$ space, also denoted by $L^{p,\infty}$, is the space of all functions $f$ such that $$ \norm{f}_{L^{p,\infty}(\mathbb{R})}:= \sup_{t>0} t \, \left\vert \{x\in \mathbb{R}: \abs{f(x)} >t \} \right\vert^{1/p} < \infty.$$ Moreover, $ \displaystyle \norm{b}_{BMO^d}:=\sup_{I\in \mathcal{D}}\frac{1}{\abs{I}}\int_I \abs{b(x) - \langle b \rangle _I} \,dx < \infty, $ is the dyadic $BMO$ norm of $b.$\\
\noindent We now state our main results:\\
\noindent \textbf{Theorem:} Let $ \vec{\alpha} = (\alpha_1,\alpha_2,\ldots,\alpha_m) \in \{0,1\}^m$ and $ 1 < p_1, p_2, \ldots, p_m < \infty$ with $\displaystyle \sum_{j=1}^m \frac{1}{p_j} = \frac{1}{r}.$ Then \begin{enumerate}[label = $(\alph*)$] \item For $\vec{\alpha} \neq (1,1,\ldots,1),$ $\displaystyle \left\Vert P^{\vec{\alpha}}(f_1,f_2,\ldots,f_m)\right\Vert_r \lesssim \prod_{j=1}^m\norm{f_j}_{p_j}.$ \item For $\sigma(\vec{\alpha}) \leq 1,$ $\displaystyle \left\Vert\pi_b^{\vec{\alpha}}(f_1,f_2,\ldots,f_m)\right\Vert_r \lesssim \norm{b}_{BMO^d}\prod_{j=1}^m\norm{f_j}_{p_j},$ if and only if $b \in BMO^d.$\\
\item For $\sigma(\vec{\alpha}) > 1,$ $\displaystyle\left\Vert\pi_b^{\vec{\alpha}}(f_1,f_2,\ldots,f_m)\right\Vert_r \leq C_b \prod_{j=1}^m\norm{f_j}_{p_j},$ if and only if $\displaystyle\sup_{I\in \mathcal{D}} \frac{\abs{\langle b,h_I\rangle }}{\sqrt{\abs{I}}} < \infty.$ \end{enumerate} In each of the above cases, the paraproducts are weakly bounded if $1\leq p_1, p_2, \ldots, p_m < \infty$.\\
\noindent \textbf{Theorem:} Let $\epsilon = \{\epsilon_I\}_{I\in\mathcal{D}}$ be a given sequence and let $\vec{\alpha} = (\alpha_1,\alpha_2, \ldots,\alpha_m) \in U_m.$ Let $1<p_1,p_2, \ldots,p_m<\infty$ with $$\displaystyle \sum_{j=1}^m \frac{1}{p_j} = \frac{1}{r}.$$ Then $T_\epsilon^{\vec{\alpha}}$ is bounded from $L^{p_1}\times L^{p_2} \times \cdots\times L^{p_m}$ to $L^r$ if and only if $\norm{\epsilon}_\infty:= \displaystyle \sup_{I \in \mathcal{D}}\abs{\epsilon_I} < \infty.$\\ Moreover, $T_\epsilon^{\vec{\alpha}}$ has the corresponding weak-type boundedness if $1 \leq p_1,p_2, \ldots,p_m<\infty.$\\
\noindent \textbf{Theorem:} Let $\vec{\alpha} = (\alpha_1,\alpha_2,\ldots,\alpha_m) \in U_m,$ $1\leq i \leq m,$ and $1<p_1,p_2, \ldots,p_m, r < \infty$ with $$\sum_{j=1}^m \frac{1}{p_j} = \frac{1}{r}.$$ Suppose $b \in L^p$ for some $p \in (1,\infty).$ Then the following two statements are equivalent. \begin{enumerate}[label = $(\alph*)$] \item $b\in BMO^d.$\\ \item $\displaystyle [b,T_\epsilon^{\vec{\alpha}}]_i:L^{p_1}\times L^{p_2} \times \cdots\times L^{p_m}\rightarrow L^r $ is bounded for every bounded sequence $\epsilon = \{\epsilon_I\}_{I\in \mathcal{D}}.$\\ \end{enumerate}
\noindent In particular, $b\in BMO^d$ if and only if $[b,P^{\vec{\alpha}}]_i:L^{p_1}\times L^{p_2} \times \cdots\times L^{p_m}\rightarrow L^r$ is bounded.\\
\noindent \textbf{Acknowledgement:} The author would like to thank Brett Wick for suggesting this research project to him, and for providing valuable suggestions.
\section{Notation and preliminaries}
\subsection{The Haar System and the Haar multipliers:} Let $\mathcal{D}$ denote the standard dyadic grid on $\mathbb{R},$ $$\mathcal{D} = \{[m2^{-k}, (m+1)2^{-k}): m,k\in \mathbb{Z}\}.$$ Associated to each dyadic interval $I$ there is a Haar function $h_I$ defined by $$h_I(x) = \frac{1}{\abs{I}^{1/2}}\left(\mathsf{1}_{I_+} - \mathsf{1}_{I_-}\right),$$ where $I_-$ and $I_+$ are the left and right halves of $I.$\\
\noindent The collection of all Haar functions $\{h_I: I \in \mathcal{D}\}$ is an orthonormal basis of $L^2(\mathbb{R}),$ and an unconditional basis of $L^p$ for $ 1 < p < \infty.$ In fact, if a sequence $\epsilon = \{\epsilon_I\}_{I \in \mathcal{D}}$ is bounded, the operator $T_\epsilon$ defined by $$T_\epsilon f(x) = \sum_{I \in \mathcal{D}} \epsilon_I \langle f, h_I \rangle h_I $$ is bounded in $L^p$ for all $1 < p < \infty.$ The converse also holds. The operator $T_\epsilon$ is called the Haar multiplier with symbol $\epsilon.$ \\
\subsection{The dyadic maximal function:} Given a function $f$, the dyadic Hardy-Littlewood maximal function $M^df$ is defined by $$M^df(x):= \sup_{x\in I\in \mathcal{D}} \frac{1}{\abs{I}} \int_I \abs{f(t)}\,dt.$$
\noindent For the convenience of notation, we will just write $M$ to denote the dyadic maximal operator. Clearly, $M$ is bounded on $L^\infty.$ It is well-known that $M$ is of weak type $(1,1)$ and strong type $(p,p)$ for all $1<p<\infty.$\\
\subsection{The dyadic square function:} The dyadic Littlewood-Paley square function of a function $f$ is defined by $$S f(x):= \left(\sum_{I \in \mathcal{D}} \frac{\abs{\langle f,h_I \rangle }^2}{\abs{I}} \mathsf{1}_I(x) \right)^{1/2}.$$ For $f\in L^p$ with $1<p<\infty,$ we have $\norm{Sf}_p \approx \norm{f}_p$ with equality when $p=2.$\\
\subsection{BMO Space} A locally integrable function $b$ is said to be of bounded mean oscillation if $$\norm{b}_{BMO}:=\sup_{I}\frac{1}{\abs{I}}\int_I \abs{b(x) - \langle b \rangle _I} \,dx < \infty, $$ where the supremum is taken over all intervals in $\mathbb{R}.$ The space of all functions of bounded mean oscillation is denoted by $BMO.$\\
\noindent If we take the supremum over all dyadic intervals in $\mathbb{R},$ we get a larger space of dyadic BMO functions which we denote by $BMO^d.$\\
\noindent For $0<r<\infty,$ define $$ BMO_r = \left\{b \in L_{loc}^r(\mathbb{R}): \norm{b}_{BMO_r} < \infty \right\},$$ where, $\displaystyle \norm{b}_{BMO_r} := \left(\sup_{I}\frac{1}{\abs{I}}\int_I \abs{b(x) - \langle b \rangle _I}^r \,dx \right)^{1/r}.$\\
\noindent For any $0<r<\infty,$ the norms $\norm{b}_{BMO_r}$ and $\norm{b}_{BMO}$ are equivalent. The equivalence of norms for $r > 1$ is well-known and follows from John-Nirenberg's lemma (see \cite{JN}), while the equivalence for $0<r<1$ has been proved by Hanks in \cite{HR}. (See also \cite{SE}, page 179.)\\
\noindent For $r=2$, it follows from the orthogonality of Haar system that $$ \norm{b}_{BMO_2^d} = \left(\sup_{I \in \mathcal{D}} \frac{1}{\abs{I}} \sum_{J \subseteq I} \abs{\widehat{b}(J)}^2\right)^{1/2}.$$
\subsection{The linear/ bilinear paraproducts:} Given two functions $f_1$ and $f_2$, the point-wise product $f_1f_2$ can be decomposed into the sum of bilinear paraproducts:
$$ f_1f_2 = P^{(0,0)}(f_1,f_2) + P^{(0,1)}(f_1,f_2) + P^{(1,0)}(f_1,f_2),$$
where for $\vec{\alpha} = (\alpha_1, \alpha_2) \in \{0,1\}^2$,
$$ P^{\vec{\alpha}}(f_1,f_2) = \displaystyle \sum_{I \in \mathcal{D}} f_1(I,\alpha_1) f_2(I, \alpha_2) h_I^{\sigma(\vec{\alpha})}$$ with $ f_i(I,0) = \langle f_i, h_I \rangle , \;\; f_i(I,1) = \langle f_i \rangle _I,\; \sigma(\vec{\alpha}) = \# \{i: \alpha_i = 0\}, \text{ and } h_I^{\sigma(\vec{\alpha})}$ being the pointwise product $h_I h_I \ldots h_I$ of $ \sigma(\vec{\alpha})$ factors. \\
The paraproduct $P^{(0,1)}(f_1,f_2)$ is also denoted by $\pi_{f_1}(f_2),$ i.e.,\\ $$ \pi_{f_1}(f_2) = \sum_{I \in \mathcal{D}} \langle f_1, h_I \rangle \langle f_2 \rangle _I h_I.$$ Observe that $$\langle \pi_{f_1}(f_2), g \rangle = \left\langle \sum_{I \in \mathcal{D}} \langle f_1, h_I \rangle \langle f_2 \rangle _I h_I, g \right\rangle = \sum_{I \in \mathcal{D}} \langle f_1, h_I \rangle \langle f_2 \rangle _I \langle g, h_I \rangle $$ which is equal to \begin{eqnarray*} \left\langle f_2, P^{(0,0)}(f_1,g) \right\rangle & = & \left\langle f_2, \;\sum_{I \in \mathcal{D}} \langle f_1, h_I \rangle \langle g, h_I \rangle h_I^2 \right\rangle \\ & = & \sum_{I \in \mathcal{D}} \langle f_1, h_I \rangle \langle g,h_I \rangle \langle f_2, h_I^2 \rangle \\ & = & \sum_{I \in \mathcal{D}} \langle f_1, h_I \rangle \langle f_2 \rangle _I \langle g, h_I \rangle . \end{eqnarray*}
\noindent This shows that $\pi_{f_1}^* = P^{(0,0)}(f_1, \cdot) = P^{(0,0)}(\cdot,f_1)$.\\
\noindent The ordinary multiplication operator $M_b: f \rightarrow bf$ can therefore be given by: $$M_b(f) = bf = P^{(0,0)}(b,f) + P^{(0,1)}(b,f) + P^{(1,0)}(b,f) = \pi_b^*(f) + \pi_b(f) + \pi_f(b).$$
\noindent The function $b$ is required to be in $L^\infty$ for the boundedness of $M_b$ in $L^p$. However, the paraproduct operator $\pi_b$ is bounded in $L^p$ for every $1 < p < \infty$ if $b \in BMO^d.$ Note that $BMO^d$ properly contains $L^\infty$. Detailed information on the operator $\pi_b$ can be found in \cite{Per} or \cite{Bla}.
\subsection{Commutators of Haar multipliers:} The commutator of $T_\epsilon$ with a locally integrable function $b$ is defined by $$ [b, T_\epsilon](f)(x) := T_\epsilon(bf)(x) - M_b (T_\epsilon(f))(x).$$ \noindent It is well-known that for a bounded sequence $\epsilon$ and $1<p<\infty$, the commutator $[b, T_\epsilon]$ is bounded in $L^p$ for all $p\in (1, \infty)$ if $b \in BMO^d.$\\ These commutators have been studied in \cite{Treil} in non-homogeneous martingale settings.\\
\section{Multilinear dyadic paraproducts}
\subsection{Decomposition of pointwise product $\displaystyle\prod_{j=1}^m f_j$}
\noindent In this sub-section we obtain a decomposition of pointwise product $\displaystyle\prod_{j=1}^m f_j$ of $m$ functions that is analogous to the following paraproduct decomposition : $$ f_1f_2 = P^{(0,0)}(f_1,f_2) + P^{(0,1)}(f_1,f_2) + P^{(1,0)}(f_1,f_2).$$ The decomposition of $\displaystyle\prod_{j=1}^m f_j$ will be the basis for defining \textit{multi-linear paraproducts} and \textit{m-linear Haar multipliers}, and will also be very useful in proving boundedness properties of multilinear commutators.\\
\noindent We first introduce the following notation:\\ \begin{itemize}
\item $f(I,0) := \widehat{f}(I) = \langle f,h_I \rangle = \displaystyle \int_\mathbb{R} f(x) h_I(x) dx. $
\item $f(I,1) := \langle f \rangle _I = \frac{1}{\abs{I}} \displaystyle \int_I f(x) dx. $\\
\item $U_m:= \left\{(\alpha_1, \alpha_2, \ldots,\alpha_m) \in \{0,1\}^m: (\alpha_1, \alpha_2,\ldots,\alpha_m) \neq (1,1, \ldots,1)\right\}.$\\
\item $\sigma(\vec{\alpha}) = \# \{i: \alpha_i =0\}$ for $\vec{\alpha} = (\alpha_1, \ldots,\alpha_m) \in \{0,1\}^m.$\\
\item $ (\vec{\alpha}, i) = (\alpha_1, \ldots,\alpha_m, i),\; (i,\vec{\alpha}) = (i,\alpha_1, \ldots,\alpha_m)$ for $\vec{\alpha} = (\alpha_1, \ldots,\alpha_m) \in \{0,1\}^m.$\\
\item $P_I^{\vec{\alpha}} (f_1, \ldots,f_m) = \prod_{j=1}^m f_j(I,\alpha_j)h_I^{\sigma(\vec{\alpha})}$ for $\vec{\alpha} \in U_m$ and $I \in \mathcal{D}.$\\
\item $P^{\vec{\alpha}}(f_1, \ldots,f_m) = \displaystyle\sum_{I\in\mathcal{D}} P_I^{\vec{\alpha}} (f_1, \ldots,f_m) = \displaystyle\sum_{I\in\mathcal{D}}\prod_{j=1}^m f_j(I,\alpha_j)h_I^{\sigma(\vec{\alpha})}$ for $\vec{\alpha} \in U_m.$
\end{itemize}
\noindent
With this notation, the paraproduct decomposition of $f_1f_2$ takes the following form:
$$ f_1f_2 = P^{(0,0)}(f_1,f_2) + P^{(0,1)}(f_1,f_2) + P^{(1,0)}(f_1,f_2) = \sum_{\vec{\alpha} \in U_2} P^{\vec{\alpha}}(f_1,f_2).\\ $$
Note that \begin{equation} \label{IndexSetUm} U_m = \{(\vec{\alpha},1): \vec{\alpha} \in U_{m-1}\} \cup \{(\vec{\alpha},0): \vec{\alpha} \in U_{m-1}\} \cup \{(1,\ldots,1,0)\}. \end{equation}
\noindent
To obtain an analogous decomposition of $\displaystyle\prod_{j=1}^m f_j,$ we need the following crucial lemma:
\begin{lm}
Given $m\geq 2$ and functions $f_1,f_2, \ldots, f_m$ with $f_i \in L^{p_i}$, $1<p_i<\infty$, we have
$$\prod_{j=1}^{m} \langle f_j \rangle _J \mathsf{1}_J = \sum_{\vec{\alpha} \in U_m} \sum_{J\subsetneq I}P_I^{\vec{\alpha}} (f_1,f_2, \ldots, f_m) \; \mathsf{1}_J, $$
for all $J\in\mathcal{D}.$
\end{lm}
\noindent
\begin{proof} We prove the lemma by induction on $m.$\\
\noindent
First assume that $m=2.$ We want to prove the following:
\begin{eqnarray} \label{AverageProduct} \langle f_1 \rangle _J \langle f_2\rangle _J\mathsf{1}_J&=& \sum_{\vec{\alpha} \in U_2} \sum_{J\subsetneq I}P^{\vec{\alpha}}_I(f_1,f_2) \; \mathsf{1}_J\\ &=& \nonumber\left(\sum_{J\subsetneq I}P^{(0,1)}_I(f_1,f_2)+\sum_{J\subsetneq I}P^{(1,0)}_I(f_1,f_2)+\sum_{J\subsetneq I}P^{(0,0)}_I(f_1,f_2)\right) \; \mathsf{1}_J\\ &=& \nonumber \left(\sum_{J\subsetneq I}\widehat{f_1}(I)\langle f_2 \rangle _I h_I+\sum_{J\subsetneq I}\langle f_1 \rangle _I \widehat{f_2}(I)h_I+\sum_{J\subsetneq I}\widehat{f_1}(I)\widehat{f_2}(I) h_I^2\right) \; \mathsf{1}_J. \end{eqnarray} \noindent We have, \begin{eqnarray*} &&\langle f_1 \rangle _J \left< f_2 \right>_J \mathsf{1}_J\\ &=& \left( \sum_{J\subsetneq I}\widehat{f_1}(I) h_I\right) \left( \sum_{J\subsetneq K}\widehat{f_2}(K) h_K\right)\mathsf{1}_J\\ &=& \sum_{J\subsetneq I}\widehat{f_1}(I) h_I \left( \sum_{I\subsetneq K}\widehat{f_2}(K) h_K + \widehat{f_2}(I) h_I+\sum_{J\subsetneq K \subsetneq I}\widehat{f_2}(K) h_K\right)\mathsf{1}_J\\ &=& \left\{\sum_{J\subsetneq I}\widehat{f_1}(I) \left< f_2\right>_I h_I + \sum_{J\subsetneq I}\widehat{f_1}(I) \widehat{f_2}(I) h_I^2 + \sum_{J\subsetneq I}\widehat{f_1}(I) h_I \left(\sum_{J\subsetneq K \subsetneq I}\widehat{f_2}(K) h_K\right)\right\}\mathsf{1}_J\\ &=& \left\{\sum_{J\subsetneq I}\widehat{f_1}(I) \left< f_2\right>_I h_I + \sum_{J\subsetneq I}\widehat{f_1}(I) \widehat{f_2}(I) h_I^2 + \sum_{J\subsetneq K}\widehat{f_2}(K) h_K \left(\sum_{K \subsetneq I}\widehat{f_1}(I) h_I\right)\right\}\mathsf{1}_J\\ &=& \left\{\sum_{J\subsetneq I}\widehat{f_1}(I) \left< f_2\right>_I h_I + \sum_{J\subsetneq I}\widehat{f_1}(I) \widehat{f_2}(I) h_I^2 + \sum_{J\subsetneq K}\widehat{f_2}(K) \left<f_1\right>_K h_K \right\}\mathsf{1}_J\\ &=& \left\{\sum_{J\subsetneq I}\widehat{f_1}(I) \left< f_2\right>_I h_I + \sum_{J\subsetneq I}\widehat{f_1}(I) \widehat{f_2}(I) h_I^2 + \sum_{J\subsetneq I}\widehat{f_2}(I)\left<f_1\right>_Ih_I \right\}\mathsf{1}_J\\ &=& 
\left(\sum_{J\subsetneq I}\widehat{f_1}(I)\langle f_2 \rangle _I h_I+\sum_{J\subsetneq I}\langle f_1 \rangle _I \widehat{f_2}(I)h_I+\sum_{J\subsetneq I}\widehat{f_1}(I)\widehat{f_2}(I) h_I^2\right) \; \mathsf{1}_J.\\ \end{eqnarray*}
\noindent Now assume $m > 2$ and that
$$\prod_{j=1}^{m-1} \langle f_j \rangle _J \mathsf{1}_J = \sum_{\vec{\alpha} \in U_{m-1}} \sum_{J\subsetneq I}P_I^{\vec{\alpha}} (f_1,f_2, \ldots, f_{m-1}) \mathsf{1}_J. $$
\noindent Then, \begin{eqnarray*} &&\prod_{j=1}^{m} \langle f_j \rangle _J \mathsf{1}_J\\ &=& \left(\prod_{j=1}^{m-1} \langle f_j \rangle _J \mathsf{1}_J \right) \langle f_m\rangle _J\mathsf{1}_J\\ &=& \sum_{\vec{\alpha} \in U_{m-1}} \sum_{J\subsetneq I}P_I^{\vec{\alpha}} (f_1,f_2, \ldots, f_{m-1}) \left( \sum_{J\subsetneq K}\widehat{f_m}(K) h_K\right) \mathsf{1}_J\\ &=& \sum_{\vec{\alpha} \in U_{m-1}} \sum_{J\subsetneq I}P_I^{\vec{\alpha}} (f_1,f_2, \ldots, f_{m-1})\left( \sum_{I\subsetneq K}\widehat{f_m}(K) h_K + \widehat{f_m}(I) h_I+\sum_{J\subsetneq K \subsetneq I}\widehat{f_m}(K) h_K\right)\mathsf{1}_J\\ \end{eqnarray*}
\noindent This gives \begin{eqnarray*} &&\prod_{j=1}^{m} \langle f_j \rangle _J \mathsf{1}_J\\ &=& \sum_{\vec{\alpha} \in U_{m-1}} \sum_{J\subsetneq I}P_I^{\vec{\alpha}} (f_1,f_2, \ldots, f_{m-1})\langle f_m \rangle _I \mathsf{1}_J + \sum_{\vec{\alpha} \in U_{m-1}} \sum_{J\subsetneq I}P_I^{\vec{\alpha}} (f_1,f_2, \ldots, f_{m-1}) \widehat{f_m}(I) h_I \mathsf{1}_J\\
&& \quad +\sum_{\vec{\alpha} \in U_{m-1}} \sum_{J\subsetneq I}P_I^{\vec{\alpha}} (f_1,f_2, \ldots, f_{m-1})\left( \sum_{J\subsetneq K \subsetneq I}\widehat{f_m}(K) h_K\right)\mathsf{1}_J\\ &=& \sum_{\vec{\alpha} \in U_{m-1}} \sum_{J\subsetneq I}P_I^{(\vec{\alpha},1)} (f_1,f_2, \ldots, f_m) \mathsf{1}_J + \sum_{\vec{\alpha} \in U_{m-1}} \sum_{J\subsetneq I}P_I^{(\vec{\alpha},0)} (f_1,f_2, \ldots, f_m) \mathsf{1}_J\\ && \quad+ \sum_{J\subsetneq K}\widehat{f_m}(K) h_K \left(\sum_{\vec{\alpha} \in U_{m-1}}\sum_{K\subsetneq I}P_I^{\vec{\alpha}} (f_1,f_2, \ldots, f_{m-1})\right) \mathsf{1}_J\\ &=& \sum_{\vec{\alpha} \in U_{m-1}} \sum_{J\subsetneq I}P_I^{(\vec{\alpha},1)} (f_1,f_2, \ldots, f_m) \mathsf{1}_J + \sum_{\vec{\alpha} \in U_{m-1}} \sum_{J\subsetneq I}P_I^{(\vec{\alpha},0)} (f_1,f_2, \ldots, f_m) \mathsf{1}_J\\
&& \quad+ \sum_{J\subsetneq K}\widehat{f_m}(K) h_K \langle f_1\rangle _K \ldots\langle f_{m-1}\rangle _K \mathsf{1}_J\\ &=&\sum_{\vec{\alpha} \in U_{m-1}} \sum_{J\subsetneq I}P_I^{(\vec{\alpha},1)} (f_1,f_2, \ldots, f_m) \mathsf{1}_J + \sum_{\vec{\alpha} \in U_{m-1}} \sum_{J\subsetneq I}P_I^{(\vec{\alpha},0)} (f_1,f_2, \ldots, f_m) \mathsf{1}_J\\ && \quad + \sum_{J\subsetneq I} P_I^{(1,\ldots,1,0)}(f_1,f_2,\ldots,f_m) \mathsf{1}_J\\ &=& \sum_{\vec{\alpha} \in U_m} \sum_{J\subsetneq I}P_I^{\vec{\alpha}} (f_1,f_2, \ldots, f_m) \mathsf{1}_J. \end{eqnarray*} The last equality follows from (\ref{IndexSetUm}). \end{proof}
\begin{lm}
Given $m\geq 2$ and functions $f_1,f_2, \ldots, f_m$ with $f_i \in L^{p_i}$, $1<p_i<\infty$, we have
$$\displaystyle\prod_{j=1}^m f_j = \sum_{\vec{\alpha} \in U_m} P^{\vec{\alpha}}(f_1,f_2, \ldots, f_m). $$
\end{lm}
\noindent \begin{proof} We have already seen that it is true for $m=2.$ By induction, assume that \begin{eqnarray*} \displaystyle\prod_{j=1}^{m-1} f_j &=& \sum_{\vec{\alpha} \in U_{m-1}} P^{\vec{\alpha}}(f_1,f_2, \ldots, f_{m-1})\\ &= & \sum_{\vec{\alpha} \in U_{m-1}} \sum_{I\in\mathcal{D}} P_I^{\vec{\alpha}}(f_1,f_2, \ldots, f_{m-1}) \end{eqnarray*}
\noindent Then, \begin{eqnarray*} \displaystyle\prod_{j=1}^m f_j &=& \left(\displaystyle\prod_{j=1}^{m-1} f_j \right) f_m\\ &=& \sum_{\vec{\alpha} \in U_{m-1}} \sum_{I\in\mathcal{D}} P_I^{\vec{\alpha}}(f_1,f_2, \ldots, f_{m-1}) \left(\sum_{J\in\mathcal{D}}\widehat{f_m}(J) h_J \right)\\ &=& \sum_{\vec{\alpha} \in U_{m-1}} \sum_{I\in\mathcal{D}} P_I^{\vec{\alpha}}(f_1,f_2, \ldots, f_{m-1}) \left( \sum_{I\subsetneq J}\widehat{f_m}(J) h_J + \widehat{f_m}(I) h_I+\sum_{J\subsetneq I }\widehat{f_m}(J) h_J\right)\\ &=& \sum_{\vec{\alpha} \in U_{m-1}} \sum_{I\in\mathcal{D}} P_I^{\vec{\alpha}}(f_1,f_2, \ldots, f_{m-1}) \langle f_m \rangle _I + \sum_{\vec{\alpha} \in U_{m-1}} \sum_{I\in\mathcal{D}} P_I^{\vec{\alpha}}(f_1,f_2, \ldots, f_{m-1}) \widehat{f_m}(I) h_I\\ && \quad + \sum_{\vec{\alpha} \in U_{m-1}} \sum_{I\in\mathcal{D}} P_I^{\vec{\alpha}}(f_1,f_2, \ldots, f_{m-1})\left(\sum_{J\subsetneq I }\widehat{f_m}(J) h_J \right)\\ &=& \sum_{\vec{\alpha} \in U_{m-1}} \sum_{I\in\mathcal{D}} P_I^{(\vec{\alpha},1)}(f_1,f_2, \ldots, f_m) + \sum_{\vec{\alpha} \in U_{m-1}} \sum_{I\in\mathcal{D}} P_I^{(\vec{\alpha},0)}(f_1,f_2, \ldots, f_m)\\ && \quad + \sum_J\widehat{f_m}(J) h_J \left(\sum_{\vec{\alpha} \in U_{m-1}} \sum_{J\subsetneq I} P_I^{\vec{\alpha}}(f_1,f_2, \ldots, f_{m-1})\right)\\ &=& \sum_{\vec{\alpha} \in U_{m-1}} \sum_{I\in\mathcal{D}} P_I^{(\vec{\alpha},1)}(f_1,f_2, \ldots, f_m) + \sum_{\vec{\alpha} \in U_{m-1}} \sum_{I\in\mathcal{D}} P_I^{(\vec{\alpha},0)}(f_1,f_2, \ldots, f_m)\\ && \quad + \sum_J\widehat{f_m}(J) h_J \langle f_1 \rangle _J \ldots\langle f_{m-1} \rangle _J\\ &=& \sum_{\vec{\alpha} \in U_{m-1}} \sum_{I\in\mathcal{D}} P_I^{(\vec{\alpha},1)}(f_1,f_2, \ldots, f_m) + \sum_{\vec{\alpha} \in U_{m-1}} \sum_{I\in\mathcal{D}} P_I^{(\vec{\alpha},0)}(f_1,f_2, \ldots, f_m)\\ && \quad + P^{(1,\ldots,1,0)}(f_1,f_2, \ldots, f_m)\\ &=& \sum_{\vec{\alpha} \in U_m} P^{\vec{\alpha}}(f_1,f_2, \ldots, f_m). 
\end{eqnarray*} Here the last equality follows from $(\ref{IndexSetUm})$. \end{proof}
\subsection{Multilinear dyadic paraproducts}
\noindent On the basis of the decomposition of pointwise product $\prod_{j=1}^m f_j$ we now define multi-linear dyadic paraproduct operators, and study their boundedness properties.\\ \begin{dfn} For $m \geq 2$ and $\vec{\alpha} = (\alpha_1, \alpha_2, \ldots, \alpha_m) \in \{0,1\}^m$, we define \textit{multi-linear dyadic paraproduct operators} by $$ P^{\vec{\alpha}}(f_1,f_2,\ldots,f_m) = \sum_{I\in\mathcal{D}} \prod_{j=1}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})} $$ where $f_i(I,0) = \langle f_i, h_I \rangle$, $f_i(I,1) = \langle f_i \rangle_I$ and $\sigma(\vec{\alpha}) = \#\{i: \alpha_i = 0\}.$\\ \end{dfn} \noindent Observe that if $\vec{\beta} = (\beta_1, \beta_2, \ldots,\beta_m)$ is some permutation of $\vec{\alpha} = (\alpha_1, \alpha_2, \ldots, \alpha_m)$ and $(g_1, g_2, \ldots, g_m)$ is the corresponding permutation of $(f_1, f_2, \ldots, f_m)$, then $$P^{\vec{\alpha}} (f_1, f_2, \ldots, f_m) = P^{\vec{\beta}} (g_1, g_2, \ldots, g_m).$$ \noindent Also note that $P^{(1,0)}$ and $P^{(0,1)}$ are the standard bilinear paraproduct operators: $$ P^{(0,1)}(f_1,f_2) = \sum_{I\in\mathcal{D}} \langle f_1, h_I \rangle \langle f_2 \rangle_I h_I = \pi_{f_1}(f_2)$$ $$ P^{(1,0)}(f_1,f_2) = \sum_{I\in\mathcal{D}} \langle f_1 \rangle_I \langle f_2, h_I \rangle h_I = \pi_{f_2}(f_1).$$
\noindent In terms of paraproducts, the decomposition of point-wise product $\displaystyle\prod_{j=1}^m f_j$ we obtained in the previous section takes the form $$\displaystyle\prod_{j=1}^m f_j = \displaystyle \sum_{\substack {\vec{\alpha} \in \{0,1\}^m\\ \vec{\alpha} \neq (1,1,\ldots,1)}} P^{\vec{\alpha}}(f_1,f_2,\ldots,f_m).$$
\noindent \begin{dfn} For a given function $b$ and $\vec{\alpha} = (\alpha_1, \alpha_2, \ldots, \alpha_m) \in \{0,1\}^m$, we define the paraproduct operators $\pi_b^{\vec{\alpha}}$ by $$\pi_b^{\vec{\alpha}}(f_1, f_2, \ldots, f_m) = P^{(0,\vec{\alpha})}(b,f_1, f_2, \ldots, f_m) = \sum_{I \in \mathcal{D}} \langle b , h_I \rangle \prod_{j=1}^m f_j(I, \alpha_j) \; h_I^{1+\sigma(\vec{\alpha})}$$ where $(0,\vec{\alpha}) = (0,\alpha_1,\ldots, \alpha_m) \in \{0,1\}^{m+1}.$\\ \end{dfn} \noindent Note that $$\pi_b^1(f) = P^{(0,1)}(b,f) = \sum_{I \in \mathcal{D}} b(I,0) f(I,1) h_I = \sum_{I \in \mathcal{D}} \langle b, h_I \rangle \langle f \rangle _I h_I = \pi_b(f).$$
\noindent The rest of this section is devoted to the boundedness properties of these multilinear paraproduct operators $P^{\vec{\alpha}}$ and $\pi_b^{\vec{\alpha}}.$\\
\noindent \begin{lm}\label{MPPTh1} Let $1 <p_1,p_2,\ldots,p_m, r < \infty$ and \,$\sum_{j=1}^m \frac{1}{p_j} = \frac{1}{r}$. Then for $\vec{\alpha} = (\alpha_1, \alpha_2, \ldots, \alpha_m) \in U_m$, the operators $P^{\vec{\alpha}}$ map $L^{p_1} \times \cdots\times L^{p_m} \rightarrow L^{r}$ with estimates of the form: $$\norm{P^{\vec{\alpha}}(f_1,f_2,\ldots,f_m)}_r \lesssim \prod_{j=1}^m\norm{f_j}_{p_j}$$ \end{lm}
\noindent \begin{proof} First we observe that, if $x\in I \in \mathcal{D},$ then $$\abs{\langle f \rangle _I} \leq \langle \abs{f}\rangle _I \leq Mf(x)$$ and that \begin{eqnarray*} \frac{\left\vert \langle f , h_I \rangle \right\vert}{\sqrt{\abs{I}}} & = & \frac{1}{\sqrt{\abs{I}}} \left\vert \int_\mathbb{R} f h_I \right\vert\\ & = & \frac{1}{\abs{I}} \left\vert \int_\mathbb{R} f \mathsf{1}_{I_+} - \int_\mathbb{R} f \mathsf{1}_{I_-} \right\vert\\ & \leq & \frac{1}{\abs{I}} \left(\int_{I_+} \abs{f} + \int_{I_-} \abs{f} \right)\\ & = & \frac{1}{\abs{I}} \int_{I} \abs{f} \\ & \leq & Mf(x). \end{eqnarray*}
\noindent \textbf{Case I:} $\sigma(\vec{\alpha}) = 1.$\\ Let $\alpha_{j_0} = 0.$ Then \begin{eqnarray*} \displaystyle P^{\vec{\alpha}}(f_1,f_2,\ldots,f_m) &=& \sum_{I\in\mathcal{D}} \prod_{j=1}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\\ &=& \sum_{I\in\mathcal{D}} \left(\prod_{\substack{j = 1\\j\neq j_0}}^m \langle f_j \rangle _I\right) \langle f_{j_0}, h_I\rangle h_I. \end{eqnarray*} \noindent Using square function estimates, we obtain \begin{eqnarray*} \left\Vert P^{\vec{\alpha}}(f_1, f_2, \ldots, f_m) \right\Vert_r &\lesssim& \left\Vert\left(\sum_{I\in \mathcal{D}} \prod_{\substack{j = 1\\j\neq j_0}}^m \left\vert\langle f_j\rangle _I \right\vert^2 \abs{\langle f_{j_0}, h_I \rangle }^2 \frac{\mathsf{1}_I}{\abs{I}}\right)^{1/2}\right\Vert_r\\ &\leq& \left\Vert\left(\prod_{\substack{j = 1\\j\neq j_0}}^m Mf_j \right) \left(\sum_{I\in \mathcal{D}} \abs{\langle f_{j_0}, h_I \rangle }^2 \frac{\mathsf{1}_I}{\abs{I}}\right)^{1/2}\right\Vert_r\\ &= & \left\Vert\left(\prod_{\substack{j = 1 \\j\neq j_0}}^m Mf_j \right) (Sf_{j_0}) \right\Vert_r\\ &\leq& \prod_{\substack{j = 1\\j\neq j_0}}^m \norm{Mf_j}_{p_j} \norm{Sf_{j_0}}_{p_{j_0}}\\ &\lesssim& \prod_{j=1}^m \norm{f_j}_{p_j}, \end{eqnarray*} where we have used H\"{o}lder's inequality, and the boundedness of the maximal and square function operators to obtain the last two inequalities.\\
\noindent \textbf{Case II:} $\sigma(\vec{\alpha}) > 1.$\\ Choose $j'$ and $j''$ such that $\alpha_{j'} = \alpha_{j''} = 0.$ Then \begin{eqnarray*} \left\vert P^{\vec{\alpha}}(f_1,f_2,\ldots,f_m)(x) \right\vert & = & \left\vert \sum_{I\in \mathcal{D}}\left(\prod_{j:\alpha_j = 1} \langle f_j \rangle _I\right) \left( \prod_{\substack{j:\alpha_j = 0 \\j \neq j', \,j''}} \frac{\langle f_j, h_I\rangle }{\sqrt{\abs{I}}} \right) \langle f_{j'}, h_I\rangle \langle f_{j''}, h_I\rangle \frac{\mathsf{1}_I(x)}{\abs{I}} \right\vert \\ & \leq & \left(\prod_{ j: j \neq j',\,j''} Mf_j(x) \right) \left( \sum_{I\in \mathcal{D}} \abs{\langle f_{j'}, h_I\rangle } \abs{\langle f_{j''}, h_I\rangle } \frac{\mathsf{1}_I(x)}{\abs{I}} \right). \end{eqnarray*} By the Cauchy--Schwarz inequality \begin{eqnarray}
\nonumber && \sum_{I\in\mathcal{D}} \left\vert\langle f_{j'},h_I\rangle \right\vert \, \left\vert\langle f_{j''},h_I \rangle \right\vert \frac{\mathsf{1}_I(x)}{\abs{I}}\\ \label{eq:sf}&\leq& \left(\sum_{I\in\mathcal{D}}\abs{\langle f_{j'},h_I\rangle }^2 \frac{\mathsf{1}_I(x)}{\abs{I}}\right)^{\frac{1}{2}} \left(\sum_{I\in\mathcal{D}}\abs{\langle f_{j''},h_I\rangle }^2 \frac{\mathsf{1}_I(x)}{\abs{I}}\right)^{\frac{1}{2}}\\
\nonumber &=& Sf_{j'}(x)\, Sf_{j''}(x). \end{eqnarray} Therefore, \begin{eqnarray*} \left\vert P^{\vec{\alpha}}(f_1,f_2,\ldots,f_m)(x) \right\vert & \leq & \left(\prod_{ j: j \neq j',\,j''} Mf_j(x) \right) Sf_{j'}(x)\, Sf_{j''}(x). \end{eqnarray*} \noindent Now using the generalized H\"{o}lder's inequality and the boundedness properties of the maximal and square functions, we get \begin{eqnarray*} \left\Vert P^{\vec{\alpha}}(f_1,f_2,\ldots,f_m) \right\Vert_r
&\leq& \left(\prod_{ j: j \neq j',\,j''} \norm{Mf_j}_{p_j}\right) \norm{Sf_{j'}}_{p_{j'}}\, \norm{Sf_{j''}}_{p_{j''}}\\ & \lesssim &\prod_{j=1}^m\norm{f_j}_{p_j}. \end{eqnarray*}
\end{proof}
\noindent \begin{lm} \label{MPPTh2} Let $\vec{\alpha} = (\alpha_1, \ldots, \alpha_m) \in \{0,1\}^m$ and $1 <p_1, \ldots,p_m,r < \infty$ with $\sum_{j=1}^m \frac{1}{p_j} = \frac{1}{r}.$ \begin{enumerate}[label = $(\alph*)$] \item For $\sigma(\vec{\alpha}) \leq 1,$ $\pi_b^{\vec{\alpha}}$ is a bounded operator from $L^{p_1} \times \cdots \times L^{p_m}$ to $L^{r}$ if and only if \qquad $b \in BMO^d.$\\ \item For $\sigma(\vec{\alpha}) > 1,$ $\pi_b^{\vec{\alpha}}$ is a bounded operator from $L^{p_1} \times \cdots \times L^{p_m}$ to $L^{r}$ if and only if $\displaystyle\sup_{I\in \mathcal{D}} \frac{\abs{\langle b,h_I\rangle }}{\sqrt{\abs{I}}} < \infty.$ \end{enumerate} \end{lm} \begin{proof} $(a)$ We prove this part first for $\sigma(\vec{\alpha}) = 0,$ that is, for $\alpha_1 = \cdots = \alpha_m = 1.$\\
\noindent Assume that $b \in BMO^d.$ Then for $(f_1, \ldots,f_m) \in L^{p_1} \times \cdots \times L^{p_m},$ we have \begin{eqnarray*} \pi_b^{\vec{\alpha}} (f_1,\ldots,f_m) &=& P^{(0,\vec{\alpha})}(b,f_1,\ldots,f_m) \\ &=& \sum_{I\in\mathcal{D}} \langle b, h_I \rangle \prod_{j=1}^m \langle f_j \rangle _I h_I\\ &=& \sum_{I\in\mathcal{D}} \langle \pi_b(f_1), h_I \rangle \prod_{j=2}^m \langle f_j \rangle _I h_I\\ &=& P^{(0, \alpha_2, \ldots,\alpha_m)} \left(\pi_b(f_1), f_2, \ldots, f_m \right). \end{eqnarray*} Since $b \in BMO^d$ and $f_1 \in L^{p_1}$ with $p_1 > 1,$ we have $\norm {\pi_b(f_1)}_{p_1} \lesssim \norm{b}_{BMO^d} \norm{f_1}_{p_1}.$ So, \begin{eqnarray*} \norm{\pi_b^{\vec{\alpha}} (f_1,\ldots,f_m)}_r &=& \norm{P^{(0,\alpha_2, \ldots, \alpha_m)}\left(\pi_b(f_1), f_2, \ldots, f_m \right)}_r \\ &\lesssim & \norm {\pi_b(f_1)}_{p_1} \prod_{j=2}^m\norm{f_j}_{p_j}\\ &\lesssim & \norm{b}_{BMO^d}\prod_{j=1}^m\norm{f_j}_{p_j}, \end{eqnarray*} where the first inequality follows from Theorem \ref{MPPTh1}.\\
\noindent Conversely, assume that $\pi_b^{(1,\ldots,1)}: L^{p_1} \times \cdots \times L^{p_m}\rightarrow L^{r}$ is bounded. Then for $f_i = \abs{J}^{-\frac{1}{p_i}}\mathsf{1}_J(x)$ with $J \in \mathcal{D},$ $$\left\Vert \pi_b^{(1,1,\ldots,1)}(f_1,f_2,\ldots,f_m)\right\Vert_r \leq \left \Vert \pi_b^{(1,1,\ldots,1)} \right \Vert_{L^{p_1}\times \cdots\times L^{p_m} \rightarrow L^r}, $$ since $\norm{f_i}_{p_i} = 1$ for all $ 1 \leq i \leq m.$ For such $f_i,$ \begin{eqnarray*} \left\Vert \pi_b^{(1,1,\ldots,1)}(f_1,f_2,\ldots,f_m)\right\Vert_r &=& \left\Vert \abs{J}^{-\left(\frac{1}{p_1}+\frac{1}{p_2}+\cdots+\frac{1}{p_m}\right)}\;\pi_b^{(1,1,\ldots,1)}(\mathsf{1}_J,\mathsf{1}_J,\ldots,\mathsf{1}_J)\right\Vert_r \\ &=& \abs{J}^{-\frac{1}{r}}\left\Vert \sum_{I\in\mathcal{D}}\widehat{b}(I) \langle \mathsf{1}_J\rangle _I^m h_I \right\Vert_r. \end{eqnarray*} \noindent Taking $\epsilon_I = 1$ if $I\subseteq J$ and $\epsilon_I = 0$ otherwise, we observe that \begin{eqnarray*} \left\Vert \sum_{J\supseteq I \in \mathcal{D}}\widehat{b}(I) h_I \right\Vert_r &=& \left\Vert \sum_{J\supseteq I \in \mathcal{D}}\widehat{b}(I) \langle \mathsf{1}_J\rangle _I^m h_I \right\Vert_r \\ &=& \left\Vert \sum_{I\in\mathcal{D}}\epsilon_I\widehat{b}(I) \langle \mathsf{1}_J\rangle _I^m h_I \right\Vert_r\\ &\lesssim& \left\Vert \sum_{I\in\mathcal{D}}\widehat{b}(I) \langle \mathsf{1}_J\rangle _I^m h_I \right\Vert_r, \end{eqnarray*} where the last inequality follows from the boundedness of Haar multiplier $T_\epsilon$ on $L^r.$ Thus, we have \begin{eqnarray*} \sup_{J\in\mathcal{D}}\abs{J}^{-1/r}\left\Vert \sum_{J\supseteq I \in \mathcal{D}}\widehat{b}(I) h_I \right\Vert_r &\lesssim & \sup_{J\in\mathcal{D}}\abs{J}^{-1/r} \left\Vert \sum_{I\in\mathcal{D}}\widehat{b}(I) \langle \mathsf{1}_J\rangle _I^m h_I \right\Vert_r\\ &\lesssim& \left \Vert \pi_b^{(1,1,\ldots,1)} \right \Vert_{L^{p_1}\times \cdots\times L^{p_m} \rightarrow L^r}, \end{eqnarray*} proving that $b\in BMO^d.$\\
\noindent Now the proof for $\sigma(\vec{\alpha}) = 1$ follows from the simple observation that $\pi_b^{\vec{\alpha}}$ is a transpose of $\pi_b^{(1,\ldots,1)}$. For example, if $\sigma(\vec{\alpha}) = 1$ with $\alpha_1 = 0$ and $\alpha_2 = \cdots = \alpha_m =1$ and if $r'$ is the conjugate exponent of $r,$ then for $g \in L^{r'}$ \begin{eqnarray*} \left\langle \pi_b^{\vec{\alpha}}(f_1,\ldots,f_m), g \right\rangle &=& \left\langle \sum_{I \in \mathcal{D}} \langle b, h_I \rangle \langle f_1, h_I \rangle \prod_{j=2}^m \langle f_j \rangle _I h_I^2, g \right\rangle \\ &=& \sum_{I \in \mathcal{D}} \langle b, h_I \rangle \langle f_1, h_I \rangle \prod_{j=2}^m \langle f_j \rangle _I \langle g, h_I^2\rangle \\ &=& \sum_{I \in \mathcal{D}} \langle b, h_I \rangle \langle f_1, h_I \rangle \prod_{j=1}^m \langle f_j \rangle _I \langle g\rangle _I\\ &=& \left\langle \sum_{I \in \mathcal{D}} \langle b, h_I \rangle \langle g \rangle _I \prod_{j=1}^m \langle f_j \rangle _I h_I, f_1 \right\rangle \\ &=& \left\langle \pi_b^{(1, \ldots, 1)}(g,f_2,\ldots,f_m), f_1 \right\rangle . \end{eqnarray*}
\noindent $(b)$ Assume that $ \norm {b}_*\equiv \displaystyle\sup_{I\in \mathcal{D}} \frac{\abs{\langle b,h_I\rangle }}{\sqrt{\abs{I}}} < \infty.$ For $m =2$ we have \begin{eqnarray*} \displaystyle \int_\mathbb{R} \left\vert \pi_b^{(0,0)}(f_1,f_2) \right\vert^r dx &=& \displaystyle\int_\mathbb{R} \left\vert \sum_{I\in\mathcal{D}}\langle b,h_I\rangle \langle f_1,h_I\rangle \langle f_2,h_I \rangle h_I^3(x) \right\vert^r dx \\ &\leq & \int_\mathbb{R} \left( \sum_{I\in\mathcal{D}}\abs{\langle b,h_I\rangle }\, \abs{\langle f_1,h_I\rangle }\, \abs{\langle f_2,h_I \rangle } \frac{\mathsf{1}_I(x)}{\abs{I}^{3/2}} \right)^r dx \\ &\leq & \int_\mathbb{R} \left( \sup_{I\in\mathcal{D}} \frac{\abs{\langle b,h_I\rangle }}{\sqrt{\abs{I}}}\sum_{I\in\mathcal{D}} \abs{\langle f_1,h_I\rangle }\, \abs{\langle f_2,h_I \rangle } \frac{\mathsf{1}_I(x)}{\abs{I}} \right)^r dx\\ &=& \norm{b}_*^r \int_\mathbb{R} \left(\sum_{I\in\mathcal{D}} \abs{\langle f_1,h_I\rangle }\, \abs{\langle f_2,h_I \rangle } \frac{\mathsf{1}_I(x)}{\abs{I}} \right)^r dx. \end{eqnarray*} Using \eqref{eq:sf} and H$\ddot{\text{o}}$lder's inequality we obtain \begin{eqnarray*} \displaystyle \int_\mathbb{R} \left\vert \pi_b^{(0,0)}(f_1,f_2) \right\vert^r dx &\leq& \norm{b}_*^r \int_\mathbb{R} (Sf_1)^r(x)\,(Sf_2)^r(x)\,dx\\ &\leq& \norm{b}_*^r \left(\int_\mathbb{R} \left\{(Sf_1)^r(x)\right\}^{p_1/r}\,dx\right)^{r/p_1}\left(\int_\mathbb{R} \left\{(Sf_2)^r(x)\right\}^{p_2/r}\,dx\right)^{r/p_2}\\ &\leq& \norm{b}_*^r \norm{Sf_1}_{p_1}^r \norm{Sf_2}_{p_2}^r\\ &\lesssim & \norm{b}_*^r \norm{f_1}_{p_1}^r \norm{f_2}_{p_2}^r. \end{eqnarray*} \noindent Thus we have, $$ \norm{\pi_b^{(0,0)}(f_1,f_2)}_r \lesssim \norm{b}_* \norm{f_1}_{p_1} \norm{f_2}_{p_2}.$$ \noindent Observe that $$\pi_b^{(0,0)}(f_1,f_2)(I,0) = \langle \pi_b^{(0,0)}(f_1,f_2), h_I \rangle = \frac{1}{\abs{I}}\langle b,h_I\rangle \langle f_1,h_I\rangle \langle f_2,h_I \rangle .$$ \noindent Now consider $m > 2$ and let $\sigma(\vec{\alpha})>1$. 
Without loss of generality we may assume that $\alpha_1 = \alpha_2 = 0.$ Then\\ \begin{eqnarray*} \norm{\pi_b^{\vec{\alpha}} (f_1,f_2, \ldots,f_m)}_r & = & \left\Vert \sum_{I\in\mathcal{D}} \langle b,h_I\rangle \langle f_1,h_I\rangle \langle f_2,h_I\rangle \prod_{j=3}^m f_j(I, \alpha_j) h_I^{1+\sigma(\vec{\alpha})}\right\Vert_r\\ & = & \left\Vert \sum_{I\in\mathcal{D}} \frac{1}{\abs{I}}\langle b,h_I\rangle \langle f_1,h_I\rangle \langle f_2,h_I\rangle \prod_{j=3}^m f_j(I, \alpha_j) h_I^{\sigma(\vec{\alpha})-1}\right\Vert_r\\ &=& \left\Vert \sum_{I\in\mathcal{D}} \langle \pi_b^{(0,0)}(f_1,f_2), h_I \rangle \prod_{j=3}^m f_j(I, \alpha_j) h_I^{\sigma(\vec{\alpha})-1}\right\Vert_r\\ &=& \left\Vert P^{\vec{\beta}}(\pi_b^{(0,0)}(f_1,f_2),f_3,\ldots,f_m) \right\Vert_r\\ &\lesssim& \norm{\pi_b^{(0,0)}(f_1,f_2)}_{q} \prod_{j=3}^m \norm{f_j}_{p_j}\\ &\lesssim& \norm{b}_* \prod_{j=1}^m\norm{f_j}_{p_j}\\ \end{eqnarray*} where $\vec{\beta} =(0,\alpha_3,\ldots,\alpha_m) \in \{0,1\}^{m-1}$ and $\pi_b^{(0,0)}(f_1,f_2) \in L^q$ with $\frac{1}{p_1}+\frac{1}{p_2}=\frac{1}{q}, q>r>1.$\\
\noindent Conversely, assume that $\pi_b^{\vec{\alpha}}: L^{p_1} \times \cdots \times L^{p_m}\rightarrow L^{r}$ is bounded and that $\sigma(\vec{\alpha}) > 1.$ Choose any $J \in \mathcal{D}$, and take $f_j = \abs{J}^{\frac{1}{2} - \frac{1}{p_j}} h_J$ if $\alpha_j = 0,$ and $f_j = \abs{J}^{-\frac{1}{p_j} } \mathsf{1}_J$ if $\alpha_j = 1$ so that $\norm{f_j}_{p_j} = 1.$ Then $$ \left \Vert \pi_b^{\vec{\alpha}} (f_1, \ldots, f_m) \right \Vert_r \leq \left \Vert \pi_b^{\vec{\alpha}} \right \Vert _{L^{p_1} \times \cdots \times L^{p_m}}. $$ We also have \begin{eqnarray*} \left \Vert \pi_b^{\vec{\alpha}} (f_1, \ldots, f_m) \right \Vert_r &=& \left \Vert \abs{J}^{\frac{\sigma{(\vec{\alpha})}}{2} - \sum_{j=1}^m \frac{1}{p_j}} \langle b, h_J \rangle h_J^{1+\sigma(\vec{\alpha})} \right \Vert_r \\ &=& \abs{J}^{\frac{\sigma{(\vec{\alpha})}}{2} - \frac{1}{r}} \abs{\langle b, h_J \rangle } \left \Vert h_J^{1+\sigma(\vec{\alpha})} \right \Vert_r \\ &=& \abs{J}^{\frac{\sigma(\vec{\alpha})}{2} - \frac{1}{r}} \abs{\langle b, h_J \rangle } \abs{J}^{-\frac{1+\sigma(\vec{\alpha})}{2}}\left \Vert \mathsf{1}_J \right \Vert_r \\ &=& \abs{J}^{\frac{\sigma(\vec{\alpha})}{2} - \frac{1}{r}} \abs{\langle b, h_J \rangle } \abs{J}^{-\frac{1+\sigma(\vec{\alpha})}{2}}\abs{J}^{\frac{1}{r}}\\ &=& \frac{\abs{\langle b, h_J \rangle }}{\sqrt{\abs{J}}}. \end{eqnarray*}
\noindent Thus $ \frac{\abs{\langle b, h_J \rangle }}{\sqrt{\abs{J}}} \leq \left \Vert \pi_b^{\vec{\alpha}}\right \Vert _{L^{p_1} \times \cdots \times L^{p_m}}.$ Since it is true for any $J \in \mathcal{D},$ we have $$ \displaystyle \sup_{J \in \mathcal{D}} \frac{\abs{\langle b, h_J \rangle }}{\sqrt{\abs{J}}} \leq \left \Vert \pi_b^{\vec{\alpha}} \right \Vert _{L^{p_1} \times \cdots \times L^{p_m}} < \infty,$$ as desired.\\ \end{proof}
\noindent Now that we have obtained strong type $L^{p_1} \times\cdots\times L^{p_m} \rightarrow L^r$ boundedness estimates for the paraproduct operators $P^{\vec{\alpha}}$ and $\pi_b^{\vec{\alpha}}$ when $1 < p_1, p_2, \ldots, p_m,r < \infty$ and $\sum_{j=1}^m \frac{1}{p_j} = \frac{1}{r}$, we are interested to investigate estimates corresponding to $\frac{1}{m} \leq r < \infty$. We will prove in Lemma $\ref{MPPL}$ that we obtain weak type estimates if one or more $p_i$'s are equal to 1. In particular, we obtain $L^{1} \times\cdots\times L^{1} \rightarrow L^{\frac{1}{m},\infty}$ estimates for those operators. Then it follows from multilinear interpolation that the paraproduct operators are strongly bounded from $L^{p_1} \times \cdots \times L^{p_m}$ to $L^r$ for $1 < p_1, p_2, \ldots, p_m < \infty$ and $\sum_{j=1}^m \frac{1}{p_j} = \frac{1}{r},$ even if $\frac{1}{m} < r \leq 1.$\\
\noindent We first prove the following general lemma, which when applied to the operators $P^{\vec{\alpha}}$ and $\pi_b^{\vec{\alpha}}$ gives aforementioned weak type estimates.\\
\noindent \begin{lm}\label{WBL} Let $T$ be a multilinear operator that is bounded from the product of Lebesgue spaces $L^{p_1} \times \cdots \times L^{p_m}$ to $L^{r,\infty}$ for some $1 < p_1, p_2, \ldots, p_m < \infty$ with $$\sum_{j=1}^m \frac{1}{p_j} = \frac{1}{r}.$$ Suppose that for every $I \in \mathcal{D}$, $T(f_1, \ldots, f_m)$ is supported in $I$ if $f_i = h_I$ for some $i \in \{1, 2, \ldots, m\}$. Then $T$ is bounded from $L^1 \times \cdots \times L^1 \times L^{p_{k+1}} \times \cdots\times L^{p_m} \rightarrow L^{\frac{q_k}{q_k + 1},\infty}$ for each $k = 1, 2, \ldots,m,$ where $q_k$ is given by $$\frac{1}{q_k} = (k-1) + \frac{1}{p_{k+1}} + \cdots+\frac{1}{p_{m}}.$$ In particular, $T$ is bounded from $L^{1} \times \cdots \times L^{1}$ to $L^{\frac{1}{m},\infty}$. \end{lm}
\noindent \begin{proof} We first prove that $T$ is bounded from $L^{1} \times L^{p_2}\times \cdots \times L^{p_m} $ to $ L^{\frac{q_1}{q_{1}+1},\infty}.$\\ Let $\lambda > 0$ be given. We have to show that $$\abs {\{ x: \abs {T(f_1, f_2, \ldots,f_m)(x)} > \lambda \}} \lesssim \left(\frac{\norm{f_1}_1 \prod_{j=2}^m\norm{f_j}_{p_j}}{\lambda}\right)^{\frac{q_1}{1+q_1}}$$ for all $(f_1, f_2, \ldots,f_m) \in L^{1} \times L^{p_2} \times \cdots \times L^{p_m}$.\\ Without loss of generality, we assume $\norm{f_1}_{1} = \norm{f_2}_{p_2}= \cdots = \norm{f_m}_{p_m} =1,$ and prove that $$\abs {\{ x: \abs {T(f_1, f_2, \ldots,f_m)(x)} > \lambda \}} \lesssim \lambda^{-\frac{q_1}{1+q_1}}.$$ For this, we apply the Calder\'{o}n--Zygmund decomposition to the function $f_1$ at height $\lambda^{\frac{q_1}{q_{1}+1}}$ to obtain `good' and `bad' functions $g_1$ and $b_1$, and a sequence $\{I_{1,j}\}$ of disjoint dyadic intervals such that $$ f_1 = g_1 + b_1, \;\;\; \norm{g_1}_{p_1} \leq \left(2 \lambda^{\frac{q_1}{q_{1}+1}}\right)^{1/p_1'} \norm{f_1}_1^{1/p_1} = \left(2 \lambda^{\frac{q_1}{q_{1}+1}}\right)^{\frac{p_1-1}{p_1}}\;\;\; \text{ and } \;\;\; b_1 = \sum_j b_{1,j},$$ where $$\text{supp}(b_{1,j}) \subseteq I_{1,j},\;\; \; \int_{I_{1,j}} b_{1,j} dx = 0,\;\; \text{ and } \;\; \sum_j\abs{I_{1,j}} \leq {\lambda}^{-\frac{q_1}{q_1+1}}\norm{f_1}_1 = {\lambda}^{-\frac{q_1}{q_1+1}}.$$\\ Multilinearity of $T$ implies that
$$\left|\left\{x:|T(f_1,\ldots,f_m)(x)| > \lambda \right\} \right|$$
$$ \leq \left|\left\{x:|T(g_1, f_2, \ldots,f_m)(x)| > \frac{\lambda}{2}\right\}\right| \; + \; \left|\left \{x:|T(b_1, f_2, \ldots,f_m)(x)| > \frac{\lambda}{2} \right \}\right|.$$ Since $g_1 \in L^{p_1}$ and $T$ is bounded from $L^{p_1} \times \cdots \times L^{p_m}$ to $L^{r,\infty}$, we have \begin{eqnarray*} \abs {\{ x: \abs {T(g_1, f_2, \ldots,f_m)(x)} > \lambda/2 \}}
& \lesssim & \left(\frac{2\norm{g_1}_{p_1} \displaystyle\prod_{j=2}^m \norm{f_j}_{p_j}}{\lambda}\right)^r\\ & \leq & \left(\frac{2\left(2 \lambda^{\frac{q_1}{q_{1}+1}}\right)^{\frac{p_1 -1}{p_1}}} {\lambda}\right)^r\\ & \lesssim & \lambda^{r\left(\frac{q_1(p_1 -1)}{p_1(q_{1}+1)} -1 \right)} \end{eqnarray*} Now, $\frac{1}{r} = \sum_{j=1}^m \frac{1}{p_j} = \frac{1}{p_1} + \frac{1}{q_1}$ implies that $r = \frac{p_1 q_1} {p_1+q_1}.$ So,\\ \begin{eqnarray*} r\left(\frac{q_1(p_1 -1)}{p_1(q_{1}+1)} -1\right) &=& \frac{p_1 q_1}{(p_1+q_1)}\left(\frac{p_1q_1 - q_1 - p_1q_1 - p_1}{p_1(q_{1}+1)}\right)\\
&=& \frac{p_1 q_1}{(p_1+q_1)}\frac{(-p_1 - q_1)}{p_1(q_{1}+1)}\\
&=& -\frac{q_1}{q_1+1}. \end{eqnarray*}
\noindent Thus we have: $$\abs {\{ x: \abs {T(g_1, f_2, \ldots,f_m)(x)} > \lambda/2 \}} \lesssim \lambda^{-\frac{q_1}{1+q_1}}.$$
\noindent From the properties of `bad' function $b_1$ we deduce that $\langle b_1, h_I \rangle \neq 0$ only if $I \subseteq I_{1,j}$ for some $j$. The hypothesis of the lemma on the support of $T(f_1, \ldots, f_m)$ then implies that $$ \text{supp}\left(T(b_1,f_2, \ldots, f_m)\right) \subseteq \cup_j I_{1,j}.$$
Thus, $$ \left|\left \{x:|T(b_1, f_2, \ldots,f_m)(x)| > \frac{\lambda}{2} \right \}\right| \leq \left| \cup_j I_{1,j} \right| \leq \lambda^{-\frac{q_1}{1+q_1}}.$$
\noindent Combining these estimates corresponding to $g_1$ and $b_1$, we have the desired estimate $$\abs {\{ x: \abs {T(f_1, f_2, \ldots,f_m)(x)} > \lambda \}} \lesssim \lambda^{-\frac{q_1}{1+q_1}}.$$
\noindent Now beginning with the $L^{1} \times L^{p_2}\times \cdots \times L^{p_m} \rightarrow L^{\frac{q_1}{q_{1}+1},\infty}$ estimate, we use the same argument to lower the second exponent to 1 proving that $T$ is bounded from $L^{1} \times L^{1}\times L^{p_3} \times \cdots \times L^{p_m} $ to $ L^{\frac{q_2}{q_{2}+1},\infty}, $ where $q_2$ is given by $\frac{1}{q_2} = 1 + \frac{1}{p_{3}} + \cdots +\frac{1}{p_{m}}.$\\
\noindent We continue the same process until we obtain $L^{1} \times L^{1}\times \cdots \times L^{1} \rightarrow L^{\frac{q_m}{q_{m}+1},\infty}$ boundedness of $T$ with $\frac{1}{q_m} = 1+1+ \cdots+1 \; (m-1 \text{ terms}) = m-1.$ This completes the proof since $\frac{q_m}{q_m+1} = \frac{1}{m}.$ \end{proof} \noindent \begin{lm} \label{MPPL}
Let $ \vec{\alpha} = (\alpha_1,\alpha_2,\ldots,\alpha_m) \in \{0,1\}^m, 1 \leq p_1, p_2, \ldots, p_m < \infty$ and $\sum_{j=1}^m \frac{1}{p_j} = \frac{1}{r}.$ Then \begin{enumerate}[label = $(\alph*)$] \item For $\vec{\alpha} \neq (1,1,\ldots,1),$ $P^{\vec{\alpha}}$ is bounded from $L^{p_1} \times \cdots \times L^{p_m}$ to $L^{r,\infty}.$ \item If $b \in BMO^d$ and $\sigma(\vec{\alpha}) \leq 1, \,\pi_b^{\vec{\alpha}}$ is bounded from $L^{p_1} \times \cdots \times L^{p_m}$ to $L^{r,\infty}.$ \item If $\displaystyle\sup_{I\in \mathcal{D}} \frac{\abs{\langle b,h_I\rangle }}{\sqrt{\abs{I}}} < \infty$ and $\sigma(\vec{\alpha}) > 1, \,\pi_b^{\vec{\alpha}}$ is bounded from $L^{p_1} \times \cdots \times L^{p_m}$ to $L^{r,\infty}.$ \end{enumerate} \end{lm} \begin{proof} By orthogonality of Haar functions, $h_I(J,0) = \langle h_I, h_J \rangle = 0 $ for any two distinct dyadic intervals $I$ and $J.$ The Haar functions have mean value 0, so it is easy to see that $$\langle h_I \rangle _J \neq 0 \text{ only if } J \subsetneq I$$ since any two dyadic intervals are either disjoint or one is contained in the other.\\
\noindent Consequently, if some $f_i = h_I,$ then
$$P^{\vec{\alpha}}(f_1, f_2,\ldots,f_m) = \sum_{J\subseteq I}\prod_{j=1}^m f_j(J,\alpha_j) h_J^{\sigma(\vec{\alpha})}$$
and, $$\pi_b^{\vec{\alpha}}(f_1, f_2,\ldots,f_m) = \sum_{J\subseteq I}\langle b,h_J \rangle \prod_{j=1}^m f_j(J,\alpha_j) h_J^{1+ \sigma(\vec{\alpha})},$$ which are both supported in $I.$ Since the paraproducts are strongly (and hence weakly) bounded from $L^{p_1} \times \cdots \times L^{p_m}\rightarrow L^r$, the proof follows immediately from Lemma $\ref{WBL}.$ \end{proof}
\noindent Combining the results of Theorems \ref{MPPTh1} and \ref{MPPTh2} and Lemma \ref{MPPL}, and using multilinear interpolation (see \cite{GLLZ}), we have the following theorem: \begin{thm} Let $ \vec{\alpha} = (\alpha_1,\alpha_2,\ldots,\alpha_m) \in \{0,1\}^m$ and $ 1 < p_1, p_2, \ldots, p_m < \infty$ with $\displaystyle \sum_{j=1}^m \frac{1}{p_j} = \frac{1}{r}.$ Then \begin{enumerate}[label = $(\alph*)$] \item For $\vec{\alpha} \neq (1,1,\ldots,1),$ $\displaystyle \left\Vert P^{\vec{\alpha}}(f_1,f_2,\ldots,f_m)\right\Vert_r \lesssim \prod_{j=1}^m\norm{f_j}_{p_j}.$ \item For $\sigma(\vec{\alpha}) \leq 1,$ $\displaystyle \left\Vert\pi_b^{\vec{\alpha}}(f_1,f_2,\ldots,f_m)\right\Vert_r \lesssim \norm{b}_{BMO^d}\prod_{j=1}^m\norm{f_j}_{p_j},$ if and only if $b \in BMO^d.$\\
\item For $\sigma(\vec{\alpha}) > 1,$ $\displaystyle\left\Vert\pi_b^{\vec{\alpha}}(f_1,f_2,\ldots,f_m)\right\Vert_r \leq C_b \prod_{j=1}^m\norm{f_j}_{p_j},$ if and only if $\displaystyle\sup_{I\in \mathcal{D}} \frac{\abs{\langle b,h_I\rangle }}{\sqrt{\abs{I}}} < \infty.$ \end{enumerate} In each of the above cases, the paraproducts are weakly bounded if $1\leq p_1, p_2, \ldots, p_m < \infty$.\\ \end{thm}
\section {Multilinear Haar multipliers and multilinear commutators} \subsection{Multilinear Haar Multipliers}
\noindent In this subsection we introduce multilinear Haar multipliers, and study their boundedness properties.\\
\noindent \begin{dfn} Given $\vec{\alpha} = (\alpha_1,\alpha_2, \ldots,\alpha_m) \in \{0,1\}^m,$ and a symbol sequence $\epsilon = \{\epsilon_I\}_{I\in\mathcal{D}},$ we define \textit{m-linear Haar multipliers} by $$ T_\epsilon^{\vec{\alpha}} (f_1,f_2, \ldots,f_m) \equiv \sum_{I\in \mathcal{D}} \epsilon_I \prod_{j=1}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}.$$ \end{dfn} \noindent \begin{thm}\label{MHMTh} Let $\epsilon = \{\epsilon_I\}_{I\in\mathcal{D}}$ be a given sequence and let $\vec{\alpha} = (\alpha_1,\alpha_2, \ldots,\alpha_m) \in U_m.$ Let $1<p_1,p_2, \ldots,p_m<\infty$ with $$\displaystyle \sum_{j=1}^m \frac{1}{p_j} = \frac{1}{r}.$$ Then $T_\epsilon^{\vec{\alpha}}$ is bounded from $L^{p_1}\times L^{p_2} \times \cdots\times L^{p_m}$ to $L^r$ if and only if $\norm{\epsilon}_\infty:= \displaystyle \sup_{I \in \mathcal{D}}\abs{\epsilon_I} < \infty.$\\ Moreover, $T_\epsilon^{\vec{\alpha}}$ has the corresponding weak-type boundedness if $1 \leq p_1,p_2, \ldots,p_m<\infty.$ \end{thm}
\noindent \begin{proof} To prove this lemma we use the fact that the linear Haar multiplier $$T_\epsilon(f) = \sum_{I\in\mathcal{D}} \epsilon_I \langle f,h_I\rangle h_I$$ is bounded on $L^p$ for all $1<p<\infty$ if $\norm{\epsilon}_\infty:= \displaystyle \sup_{I \in \mathcal{D}}\abs{\epsilon_I} < \infty,$ and that $\langle T_\epsilon(f),h_I \rangle = \epsilon_I \langle f,h_I\rangle .$\\
\noindent By assumption $\sigma(\vec{\alpha})\geq 1$. Without loss of generality we may assume that $\alpha_i = 0$ if $1\leq i \leq \sigma(\vec{\alpha})$ and $\alpha_i = 1$ if $\sigma(\vec{\alpha}) < i \leq m.$ In particular, we have $\alpha_1 = 0.$ Then
$$\epsilon_I f_1(I,\alpha_1) = \epsilon_I \langle f_1,h_I\rangle = \langle T_\epsilon(f_1),h_I \rangle = T_\epsilon(f_1)(I,\alpha_1).$$ \noindent First assume that $\norm{\epsilon}_\infty:= \displaystyle \sup_{I \in \mathcal{D}}\abs{\epsilon_I} < \infty.$\\
\noindent Then, \begin{eqnarray*}
\norm{T_\epsilon^{\vec{\alpha}} (f_1,f_2, \ldots,f_m)}_r &=& \left\Vert \sum_{I\in \mathcal{D}} \epsilon_I \prod_{j=1}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\right\Vert_r\\ &=& \left\Vert\sum_{I\in \mathcal{D}} T_\epsilon(f_1)(I,\alpha_1)\prod_{j=2}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\right\Vert_r\\ &=& \norm{P^{\vec{\alpha}}(T_\epsilon(f_1),f_2, \ldots,f_{m})}_r\\ & \lesssim & \norm{T_\epsilon(f_1)}_{p_1} \prod_{j=2}^m \norm{f_j}_{p_j}\\ & \lesssim & \prod_{j=1}^m\norm{f_j}_{p_j}.\\ \end{eqnarray*}
\noindent Conversely, assume that $T_\epsilon^{\vec{\alpha}}$ : $L^{p_1}\times L^{p_2} \times \cdots\times L^{p_m}\rightarrow L^r$ is bounded, and let $\sigma(\vec{\alpha}) = k.$ Recall that $\alpha_i = 0$ if $1\leq i \leq \sigma(\vec{\alpha}) = k$ and $\alpha_i = 1$ if $k=\sigma(\vec{\alpha}) < i \leq m.$ Taking $f_i = h_I$ if $1 \leq i \leq k$ and $f_i = \mathsf{1}_I$ if $k < i \leq m,$ we observe that\\
\begin{eqnarray*} {\norm{T_\epsilon^{\vec{\alpha}}(f_1,f_2, \ldots,f_m)}}_r &=& \left(\int_{\mathbb{R}} \abs{\epsilon_I h_I^k(x)}^r dx\right)^{1/r} \\ &=& \left(\frac{\abs{\epsilon_I}^r}{\abs{I}^{kr/2}}\int_{\mathbb{R}} \mathsf{1}_I(x) dx\right)^{1/r} \\ & = & \frac{\abs{\epsilon_I}}{\abs{I}^{k/2}} \abs{I}^{1/r} \end{eqnarray*} and, \begin{eqnarray*} \prod_{j=1}^m\norm{f_j}_{p_j} &=& \prod_{i=1}^k \left(\int_{\mathbb{R}} \abs{ h_I(x)}^{p_i} dx \right)^{1/p_i} \prod_{j=k+1}^m\left(\int_{\mathbb{R}} \abs{\mathsf{1}_I(x)}^{p_j} dx\right)^{1/p_j}\\ &=& \prod_{i=1}^k\left(\frac{1}{\abs{I}^{p_i/2}} \int_{\mathbb{R}} \mathsf{1}_I(x) dx \right)^{1/p_i}\prod_{j=k+1}^m\left(\int_{\mathbb{R}} \mathsf{1}_I(x) dx \right)^{1/p_j}\\ & = & \prod_{i=1}^k\left(\frac{1}{\abs{I}^{1/2}} \abs{I}^{1/p_i}\right) \prod_{j=k+1}^m \abs{I}^{1/p_j}\\ & = & \frac{\abs{I}^{1/r}}{\abs{I}^{k/2}} \end{eqnarray*} Since $(f_1, f_2, \ldots,f_m) \in L^{p_1}\times L^{p_2} \times \cdots\times L^{p_m}$, the boundedness of $T_\epsilon$ implies that
$${\norm{T_\epsilon^{\vec{\alpha}}(f_1,f_2, \ldots,f_m)}}_r \leq {\norm{T_\epsilon^{\vec{\alpha}}}}_{L^{p_1}\times \cdots\times L^{p_m} \rightarrow L^r} \prod_{j=1}^m\norm{f_j}_{p_j}.$$ That is,$$\frac{\abs{\epsilon_I}}{\abs{I}^{k/2}} \abs{I}^{1/r} \leq {\norm{T_\epsilon^{\vec{\alpha}}}}_{L^{p_1}\times \cdots\times L^{p_m}} \frac{\abs{I}^{1/r}}{\abs{I}^{k/2}},$$ for all $I \in \mathcal{D}.$ Consequently, $\norm{\epsilon}_\infty = \displaystyle\sup_{I\in \mathcal{D}} \abs{\epsilon_I} \leq {\norm{T_\epsilon^{\vec{\alpha}}}}_{L^{p_1}\times \cdots\times L^{p_m}} < \infty,$ as desired. \\ If $1 \leq p_1,p_2, \ldots,p_m<\infty,$ the weak-type boundedness of $T_\epsilon^{\vec{\alpha}}$ follows from Lemma \ref{WBL}. \end{proof}
\subsection{Multilinear commutators} \noindent In this subsection we study boundedness properties of the commutators of $T_\epsilon^{\vec{\alpha}}$ with the multiplication operator $M_b$ when $b\in BMO^d.$ For convenience we denote the operator $M_b$ by $b$ itself. We are interested in the following commutators: $$[b,T_\epsilon^{\vec{\alpha}}]_i(f_1,f_2,\ldots,f_m)(x) \equiv (T_\epsilon^{\vec{\alpha}}(f_1, \ldots, bf_i,\ldots,f_m) - bT_\epsilon^{\vec{\alpha}}(f_1,f_2,\ldots,f_m))(x)$$
\noindent where $1\leq i \leq m.$\\
\noindent Note that if $b$ is a constant function, $[b,T_\epsilon^{\vec{\alpha}}]_i(f_1,f_2,\ldots,f_m)(x) = 0$ for all $x.$ Our approach to study the boundedness properties of $[b,T_\epsilon^{\vec{\alpha}}]_i: L^{p_1}\times L^{p_2} \times \cdots\times L^{p_m}\rightarrow L^r$ with $1<p_1,p_2, \ldots,p_m < \infty$ and $\displaystyle \sum_{j=1}^m \frac{1}{p_j} = \frac{1}{r}$ for non-constant $b$ requires us to assume that $b \in L^p$ for some $p\in (1,\infty),$ and that $r > 1.$ However, this restricted unweighted theory turns out to be sufficient to obtain a weighted theory, which in turn implies the unrestricted unweighted theory of these multilinear commutators. We will present the weighted theory of these commutators in a subsequent paper.\\
\begin{thm}\label{boc} Let $\vec{\alpha} = (\alpha_1,\alpha_2,\ldots,\alpha_m) \in U_m.$ If $b \in BMO^d \cap L^p$ for some $1<p<\infty$ and $\norm{\epsilon}_\infty := \sup_{I\in \mathcal{D}} \abs{\epsilon_I} < \infty,$ then each commutator $[b,T_\epsilon^{\vec{\alpha}}]_i$ is bounded from $L^{p_1}\times L^{p_2} \times \cdots\times L^{p_m}\rightarrow L^r$ for all $1<p_1,p_2, \ldots,p_m, r < \infty$ with $$\sum_{j=1}^m \frac{1}{p_j} = \frac{1}{r},$$
with estimates of the form: $$ \norm{[b,T_\epsilon^{\vec{\alpha}}]_i(f_1,f_2,\ldots,f_m)}_r \lesssim \norm{b}_{BMO^d}\prod_{j=1}^m\norm{f_j}_{p_j}.$$
\end{thm}
\noindent \begin{proof} It suffices to prove boundedness of $[b,T_\epsilon^{\vec{\alpha}}]_1,$ as the others are identical. Moreover, we may assume that each $f_i$ is bounded and has compact support, since such functions are dense in the $L^p$ spaces.\\
\noindent Writing $bf_1 = \pi_b(f_1) + \pi_b^*(f_1) + \pi_{f_1}(b)$ and using multilinearity of $T_\epsilon^{\vec{\alpha}}$, we have\\ $$T_\epsilon^{\vec{\alpha}} (bf_1, f_2, \ldots,f_m) = T_\epsilon^{\vec{\alpha}} (\pi_b(f_1),f_2, \ldots,f_m) + T_\epsilon^{\vec{\alpha}} (\pi_b^*(f_1),f_2, \ldots,f_m) + T_\epsilon^{\vec{\alpha}} (\pi_{f_1}(b),f_2, \ldots,f_m).$$
\noindent On the other hand, \begin{eqnarray*} b T_\epsilon^{\vec{\alpha}} (f_1,f_2, \ldots,f_m) &=& \sum_{I \in \mathcal{D}}\epsilon_I \prod_{j=1}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\left(\sum_{J \in \mathcal{D}} \widehat{b}(J) h_J\right)\\ &=& \sum_{I \in \mathcal{D}}\epsilon_I \widehat{b}(I)\prod_{j=1}^m f_j(I,\alpha_j) h_I^{1+\sigma(\vec{\alpha})}\\ && +\sum_{I \in \mathcal{D}}\epsilon_I \prod_{j=1}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\left(\sum_{I\subsetneq J} \widehat{b}(J) h_J\right)\\ && +\sum_{I \in \mathcal{D}}\epsilon_I \prod_{j=1}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\left(\sum_{J\subsetneq I} \widehat{b}(J) h_J\right)\\ &=& \pi_b^{\vec{\alpha}} (f_1, \ldots, T_\epsilon(f_i), \ldots,f_m) \\ && + \sum_{I \in \mathcal{D}}\epsilon_I \langle b\rangle _I\prod_{j=1}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})} \\ &&+ \sum_{J\in\mathcal{D}} \widehat{b}(J) h_J\left(\sum_{J\subsetneq I}\epsilon_I \prod_{j=1}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\right) \end{eqnarray*} for some $i$ with $\alpha_i = 0.$ Indeed, some $\alpha_i$ equals 0 by assumption, and for such $i$, we have $$T_\epsilon(f_i)(I,\alpha_i) = \widehat{T_\epsilon(f_i)}(I) = \epsilon_I \widehat{f_i}(I) = \epsilon_I f_i(I,\alpha_i) .$$
\noindent For $(f_1,f_2,\ldots,f_m) \in L^{p_1}\times L^{p_2} \times \cdots\times L^{p_m},$ we have \begin{eqnarray*} \norm{T_\epsilon^{\vec{\alpha}} (\pi_b(f_1),f_2, \ldots,f_m)}_r &\lesssim & \norm{\pi_b(f_1)}_{p_1}\prod_{j=2}^m \norm{f_j}_{p_j}\\ &\lesssim & \norm{b}_{BMO^d}\prod_{j=1}^m\norm{f_j}_{p_j} \end{eqnarray*} \begin{eqnarray*} \norm{T_\epsilon^{\vec{\alpha}} (\pi_b^*(f_1),f_2, \ldots,f_m)}_r &\lesssim & \norm{\pi_b^*(f_1)}_{p_1}\prod_{j=2}^m \norm{f_j}_{p_j}\\ &\lesssim & \norm{b}_{BMO^d}\prod_{j=1}^m\norm{f_j}_{p_j}. \end{eqnarray*} and, \begin{eqnarray*} \norm{\pi_b^{\vec{\alpha}} (f_1, \ldots,T_\epsilon(f_i), \ldots,f_m)}_r &\lesssim & \norm{b}_{BMO^d}\norm{f_1}_{p_1} \cdots \norm{T_\epsilon(f_i)}_{p_i}\cdots\norm{f_m}_{p_m}\\ &\lesssim& \norm{b}_{BMO^d}\prod_{j=1}^m\norm{f_j}_{p_j}.\\ \end{eqnarray*}
\noindent So, to prove boundedness of $[b,T_\epsilon^{\vec{\alpha}}]_1$, it suffices to show similar control over the terms: \begin{equation} \label{term1}\left\Vert \sum_{J\in\mathcal{D}} \widehat{b}(J) h_J\left(\sum_{J\subsetneq I}\epsilon_I \prod_{j=1}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\right)\right\Vert_r \end{equation} and, \begin{equation}\label{term2}\left\Vert T_\epsilon^{\vec{\alpha}} (\pi_{f_1}(b),f_2, \ldots,f_m)- \sum_{I \in \mathcal{D}}\epsilon_I \langle b\rangle _I \prod_{j=1}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\right\Vert_r. \end{equation}
\noindent \textbf{Estimation of} $(\ref{term1})$:\\
\noindent Case I: $\sigma(\vec{\alpha})$ odd.\\ In this case, $$T_\epsilon^{\vec{\alpha}} (f_1,f_2, \ldots,f_m) = \sum_{I \in \mathcal{D}}\epsilon_I \prod_{j=1}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})} = \sum_{I \in \mathcal{D}}\epsilon_I\abs{I}^{\frac{1-\sigma(\vec{\alpha})}{2}} \prod_{j=1}^m f_j(I,\alpha_j) h_I. $$ So, $$ \langle T_\epsilon^{\vec{\alpha}} (f_1,f_2, \ldots,f_m), h_I \rangle h_I = \epsilon_I\abs{I}^{\frac{1-\sigma(\vec{\alpha})}{2}} \prod_{j=1}^m f_j(I,\alpha_j) h_I = \epsilon_I \prod_{j=1}^m f_j(I,\alpha_j)h_I^{\sigma(\vec{\alpha})}. $$ This implies that \begin{eqnarray*} (\ref{term1}) &=&\left\Vert \sum_{J\in\mathcal{D}} \widehat{b}(J) h_J\left(\sum_{J\subsetneq I}\langle T_\epsilon^{\vec{\alpha}} (f_1,f_2, \ldots,f_m), h_I \rangle h_I\right)\right\Vert_r\\ &= & \left\Vert \sum_{J\in\mathcal{D}} \widehat{b}(J)\langle T_\epsilon^{\vec{\alpha}} (f_1,f_2, \ldots,f_m) \rangle _J h_J\right\Vert_r\\ &=& \left\Vert \pi_b \left( T_\epsilon^{\vec{\alpha}} (f_1,f_2, \ldots,f_m) \right)\right\Vert_r\\ &\lesssim & \norm{b}_{BMO^d}\left\Vert T_\epsilon^{\vec{\alpha}} (f_1,f_2, \ldots,f_m) \right\Vert_r\\ &\lesssim & \norm{b}_{BMO^d}\prod_{j=1}^m\norm{f_j}_{p_j}. \end{eqnarray*}
\noindent Case II: $\sigma(\vec{\alpha})$ even.\\
\noindent In this case at least two $\alpha_i's$ are equal to 0. Without loss of generality we may assume that $\alpha_1=0.$ Then denoting $T_\epsilon (f_1)$ by $g_1,$ $P^{(\alpha_2,\ldots,\alpha_m)}(f_2, \ldots,f_m)$ by $ g_2,$ and using the fact that $$\langle g_1 \rangle _J \langle g_2 \rangle _J \mathsf{1}_J = \left(\sum_{J\subsetneq I}\widehat{g_1}(I) \langle {g_2}\rangle _I h_I +\sum_{J\subsetneq I}\langle g_1\rangle _I \widehat{g_2}(I) h_I +\sum_{J\subsetneq I}\widehat{g_1}(I) \widehat{g_2}(I) h_I^2 \right)\mathsf{1}_J, $$ we have \begin{eqnarray*} && \left\Vert \sum_{J\in\mathcal{D}} \widehat{b}(J) h_J\left(\sum_{J\subsetneq I}\epsilon_I \prod_{j=1}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\right)\right\Vert_r\\ &=&\left\Vert \sum_{J\in\mathcal{D}} \widehat{b}(J) h_J\left(\sum_{J\subsetneq I}\widehat{g_1}(I) \widehat{g_2}(I) h_I^2 \right)\right\Vert_r\\ &= & \left\Vert \sum_{J\in\mathcal{D}} \widehat{b}(J) h_J\left(\langle g_1 \rangle _J \langle g_2 \rangle _J \mathsf{1}_J - \sum_{J\subsetneq I}\widehat{g_1}(I) \langle {g_2}\rangle _I h_I -\sum_{J\subsetneq I}\langle g_1\rangle _I \widehat{g_2}(I) h_I \right)\right\Vert_r\\ &\leq & \left\Vert \sum_{J\in\mathcal{D}} \widehat{b}(J) \langle g_1 \rangle _J \langle g_2 \rangle _J h_J\right\Vert_r +\left\Vert \sum_{J\in\mathcal{D}} \widehat{b}(J) \langle P^{(0,1)}(g_1,g_2) \rangle _J h_J \right\Vert_r\\ && \qquad \qquad +\left\Vert \sum_{J\in\mathcal{D}} \widehat{b}(J) \langle P^{(1,0)}(g_1,g_2) \rangle _J h_J\right\Vert_r \\ &\lesssim & \norm{b}_{BMO^d}\norm{g_1}_{p_1}\norm{g_2}_{q} + \norm{b}_{BMO^d}\norm{P^{(0,1)}(g_1,g_2)}_{r} +\norm{b}_{BMO^d}\norm{P^{(1,0)}(g_1,g_2)}_r\\ &\lesssim & \norm{b}_{BMO^d}\norm{g_1}_{p_1}\norm{g_2}_{q}\\ &\lesssim & \norm{b}_{BMO^d}\prod_{j=1}^m\norm{f_j}_{p_j}. 
\end{eqnarray*} where, $q$ is given by $\displaystyle \frac{1}{q} = \sum_{j=2}^m \frac{1}{p_j}.$ Here the last three inequalities follow from Theorems $\ref{MPPTh1}$ and $\ref{MPPTh2},$ and the fact that $\norm{g_1}_{p_1} = \norm{T_\epsilon (f_1)}_{p_1} \lesssim \norm{f_1}_{p_1}.$ \\
\noindent \textbf{Estimation of} $(\ref{term2}):$\\
\noindent Case I: $\alpha_1 = 0.$\\
\noindent This case is easy as we observe that\\ \begin{eqnarray*} && T_\epsilon^{\vec{\alpha}} (\pi_{f_1}(b),f_2, \ldots,f_m)- \sum_{I \in \mathcal{D}}\epsilon_I \langle b\rangle _I \prod_{j=1}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\\ &=& \sum_{I \in \mathcal{D}}\epsilon_I \widehat{\pi_{f_1}(b)}(I) \prod_{j=2}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})} - \sum_{I \in \mathcal{D}}\epsilon_I \langle b\rangle _I \widehat{f_1}(I) \prod_{j=2}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\\ & =& \sum_{I \in \mathcal{D}}\epsilon_I \langle b\rangle _I \widehat{f_1}(I)\prod_{j=2}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})} - \sum_{I \in \mathcal{D}}\epsilon_I \langle b\rangle _I \widehat{f_1}(I) \prod_{j=2}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\\ &=& 0. \end{eqnarray*} So there is nothing to estimate.
\noindent Case II: $\alpha_1 = 1.$\\
\noindent In this case, \begin{eqnarray*} && T_\epsilon^{\vec{\alpha}} (\pi_{f_1}(b),f_2, \ldots,f_m) - \sum_{I \in \mathcal{D}}\epsilon_I \langle b\rangle _I \prod_{j=1}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\\ &=& \sum_{I \in \mathcal{D}}\epsilon_I \langle \pi_{f_1}(b) \rangle _I \prod_{j=2}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})} - \sum_{I \in \mathcal{D}}\epsilon_I \langle b\rangle _I \langle f_1\rangle _I \prod_{j=2}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\\ & =& \sum_{I \in \mathcal{D}}\epsilon_I \left(\langle \pi_{f_1}(b) \rangle _I - \langle b\rangle _I \langle f_1\rangle _I \right)\prod_{j=2}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\\ \end{eqnarray*}
\noindent Now, \begin{eqnarray*}\langle b \rangle _I \langle f_1 \rangle _I \mathsf{1}_I &=& \sum_{I\subsetneq J}\widehat{b}(J) \langle {f_1}\rangle _J h_J \mathsf{1}_I +\sum_{I\subsetneq J}\langle b\rangle _J \widehat{f_1}(J) h_J \mathsf{1}_I+\sum_{I\subsetneq J}\widehat{b}(J) \widehat{f_1}(J) h_J^2\mathsf{1}_I\\ &=& \langle \pi_b(f_1)\rangle _I \mathsf{1}_I +\langle \pi_{f_1}(b)\rangle _I \mathsf{1}_I+ \sum_{I\subsetneq J}\widehat{b}(J) \widehat{f_1}(J) h_J^2\mathsf{1}_I. \end{eqnarray*} Hence, $\langle b \rangle _I \langle f_1 \rangle _I \mathsf{1}_I -\langle \pi_{f_1}(b)\rangle _I \mathsf{1}_I= \langle \pi_b(f_1)\rangle _I \mathsf{1}_I + \displaystyle \sum_{I\subsetneq J}\widehat{b}(J) \widehat{f_1}(J) h_J^2 \mathsf{1}_I.$\\
\noindent So we have \begin{eqnarray*} && T_\epsilon^{\vec{\alpha}} (\pi_{f_1}(b),f_2, \ldots,f_m)- \sum_{I \in \mathcal{D}}\epsilon_I \langle b\rangle _I \prod_{j=1}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\\ && = -\sum_{I \in \mathcal{D}}\epsilon_I \left( \langle \pi_b(f_1)\rangle _I \mathsf{1}_I + \sum_{I\subsetneq J}\widehat{b}(J) \widehat{f_1}(J) h_J^2\right)\prod_{j=2}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\\ && = -\sum_{I \in \mathcal{D}}\epsilon_I \langle \pi_b(f_1)\rangle _I \prod_{j=2}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})} \\ && \hspace{1in} -\sum_{I \in \mathcal{D}}\epsilon_I \left( \sum_{I\subsetneq J}\widehat{b}(J) \widehat{f_1}(J) h_J^2\right)\prod_{j=2}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\\ && = - T_\epsilon^{\vec{\alpha}}(\pi_b(f_1),f_2,\ldots,f_m) - \sum_{J \in \mathcal{D}}\widehat{b}(J) \widehat{f_1}(J) h_J^2 \left(\sum_{I\subsetneq J}\epsilon_I \prod_{j=2}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})} \right).\\ \end{eqnarray*}
\noindent Since $$\norm{T_\epsilon^{\vec{\alpha}}(\pi_b(f_1),f_2,\ldots,f_m)}_r \lesssim \norm{\pi_b(f_1)}_{p_1}\prod_{j=2}^m \norm{f_j}_{p_j} \lesssim \norm{b}_{BMO^d}\prod_{j=1}^m\norm{f_j}_{p_j},$$ we are left with controlling $$\left\Vert \sum_{J \in \mathcal{D}}\widehat{b}(J) \widehat{f_1}(J) h_J^2 \left(\sum_{I\subsetneq J}\epsilon_I \prod_{j=2}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})} \right)\right\Vert_r. $$
\noindent For this we observe that $$\left\Vert T_\epsilon^{(\alpha_2, \ldots,\alpha_m)}(f_2, \ldots,f_m) \right\Vert_q \lesssim \prod_{j=2}^m \norm{f_j}_{p_j},$$ and that
\begin{eqnarray*} \pi_b^*(f_1)\; T_\epsilon^{(\alpha_2, \ldots,\alpha_m)}(f_2, \ldots,f_m) &=& \sum_{J \in \mathcal{D}}\widehat{b}(J) \widehat{f_1}(J) h_J^2 \left( \sum_{I\subsetneq J}\epsilon_I \prod_{j=2}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\right)\\ && + \sum_{J \in \mathcal{D}}\epsilon_J \widehat{b}(J) \widehat{f_1}(J) \prod_{j=2}^m f_j(J,\alpha_j) h_J^{2+ \sigma(\vec{\alpha})} \\ && +\sum_{J \in \mathcal{D}}\widehat{b}(J) \widehat{f_1}(J) h_J^2\left(\sum_{J\subsetneq I}\epsilon_I \prod_{j=2}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})} \right) \end{eqnarray*}
\noindent Now, following the same technique we used to control $(\ref{term1}),$ we obtain
$$\displaystyle \left\Vert\sum_{J \in \mathcal{D}}\widehat{b}(J) \widehat{f_1}(J) h_J^2\left(\sum_{J\subsetneq I}\epsilon_I \prod_{j=2}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})}\right)\right\Vert_r\lesssim \norm{b}_{BMO^d}\prod_{j=1}^m\norm{f_j}_{p_j}.$$ We also have \begin{eqnarray*} \left\Vert \pi_b^*(f_1)\; T_\epsilon^{(\alpha_2, \ldots,\alpha_m)}(f_2, \ldots,f_m) \right\Vert_r &\leq& \left\Vert\pi_b^*(f_1)\right\Vert_{p_1} \left\Vert T_\epsilon^{(\alpha_2, \ldots,\alpha_m)}(f_2, \ldots,f_m)\right\Vert_q \\ &\lesssim& \norm{b}_{BMO^d}\prod_{j=1}^m\norm{f_j}_{p_j} \end{eqnarray*} and $$ \left\Vert\sum_{J \in \mathcal{D}}\epsilon_J \widehat{b}(J) \widehat{f_1}(J) \prod_{j=2}^m f_j(J,\alpha_j) h_J^{2+ \sigma(\vec{\alpha})}\right\Vert_r \lesssim \norm{b}_{BMO^d}\prod_{j=1}^m\norm{f_j}_{p_j}.$$
\noindent So we conclude that $$ \left\Vert\sum_{J \in \mathcal{D}}\widehat{b}(J) \widehat{f_1}(J) h_J^2 \left(\sum_{I\subsetneq J}\epsilon_I \prod_{j=2}^m f_j(I,\alpha_j) h_I^{\sigma(\vec{\alpha})} \right)\right\Vert_r\lesssim\norm{b}_{BMO^d}\prod_{j=1}^m\norm{f_j}_{p_j}.$$ Thus we have strong type boundedness of $$[b,T_\epsilon^{\vec{\alpha}}]_1 : L^{p_1}\times L^{p_2} \times \cdots\times L^{p_m}\rightarrow L^r$$ for all $1<p_1,p_2, \ldots,p_m, r < \infty$ with $$\displaystyle \sum_{j=1}^m \frac{1}{p_j} = \frac{1}{r}.$$ \end{proof}
\noindent In the next theorem, we show that the BMO condition is necessary for the boundedness of the commutators.\\
\noindent \begin{thm}\label{bmonecessity} Let $\vec{\alpha} = (\alpha_1,\alpha_2,\ldots,\alpha_m) \in U_m,$ and $1<p_1,p_2, \ldots,p_m, r < \infty$ with $$\sum_{j=1}^m \frac{1}{p_j} = \frac{1}{r}.$$ Assume that for given $b$ and $i$, \begin{equation} \norm{[b,T_\epsilon^{\vec{\alpha}}]_i(f_1,f_2,\ldots,f_m)}_r \leq C_\epsilon \prod_{j=1}^m\norm{f_j}_{p_j}, \label{eq:bd} \end{equation} for every bounded sequence $\epsilon = \{\epsilon_I\}_{I\in \mathcal{D}},$ and for all $f_i \in L^{p_i}.$ Then $b\in BMO^d.$ \end{thm}
\noindent \begin{proof} Without loss of generality we may assume that $i=1.$ Fix $I_0 \in \mathcal{D}$ and let $\epsilon = \{\epsilon_I\}_{I\in \mathcal{D}}$ with $\epsilon_I =1$ for all $I\in \mathcal{D}.$ \\ \noindent \textbf{Case I:} $\alpha_1 = 0, \sigma(\vec{\alpha}) = 1.$\\ \noindent Take $f_1 = \mathsf{1}_{I_0}$ and $f_i = h_{I_0^{(1)}}$ for $i>1$, where $I_0^{(1)}$ is the parent of $I_0.$ Then, $$T_\epsilon^{\vec{\alpha}}(f_1,f_2,\ldots,f_m)) = \sum_{I\in \mathcal{D}} \langle \mathsf{1}_{I_0}, h_I\rangle \langle h_{I_0^{(1)}}\rangle _I^{m-1} h_I=0,$$ and, \begin{eqnarray*} T_\epsilon^{\vec{\alpha}}(bf_1, f_2, \ldots, \ldots,f_m) &=& \sum_{I\in \mathcal{D}} \langle b\mathsf{1}_{I_0}, h_I\rangle \langle h_{I_0^{(1)}}\rangle _I^{m-1} h_I\\ &=& \sum_{I\subseteq I_0} \langle b\mathsf{1}_{I_0}, h_I\rangle \left( \frac{K(I_0,I_0^{(1)})}{\sqrt{\left\vert{I_0^{(1)}}\right\vert}}\right)^{m-1}h_I\\ &=& \left( \frac{K(I_0,I_0^{(1)})}{\sqrt{\left\vert{I_0^{(1)}}\right\vert}}\right)^{m-1}\sum_{I\subseteq I_0} \langle b, h_I\rangle h_I, \end{eqnarray*} where $ K(I_0,I_0^{(1)})$ is either $1$ or $-1$ depending on whether $I_0$ is the right or left half of $I_0^{(1)}.$ \\ \noindent For the second to last equality we observe that, if $I$ is not a proper subset of $I_0^{(1)},$ $ \langle h_{I_0^{(1)}}\rangle _I = 0,$ and that if $I$ is a proper subset of $I_0^{(1)}$ but is not a subset of $I_0$, then $\langle b\mathsf{1}_{I_0}, h_I\rangle =0.$ Moreover, for $I \subseteq I_0,$ $\langle b\mathsf{1}_{I_0}, h_I\rangle = \int_\mathbb{R}{b\mathsf{1}_{I_0} h_I} = \int_\mathbb{R}{b h_I} = \langle b, h_I\rangle .$\\
\noindent Now from inequality \eqref{eq:bd}, we get $$ \left\Vert \left( \frac{K(I_0,I_0^{(1)})}{\sqrt{\left\vert{I_0^{(1)}}\right\vert}}\right)^{m-1}\sum_{I\subseteq I_0} \langle b, h_I\rangle h_I \right\Vert_r \leq C_\epsilon \abs{I_0}^{\frac{1}{p_1}} \prod_{i=2}^{m}\frac{\abs{I_0^{(1)}}^{\frac{1}{p_i}}} {\sqrt{\abs{I_0^{(1)}}}}$$ $$i.e. \quad \left\Vert \sum_{I\subseteq I_0} \langle b, h_I\rangle h_I \right\Vert_r \leq 2^{\frac{1}{p_2}+ \cdots+\frac{1}{p_m}} C_\epsilon \abs{I_0}^{\frac{1}{r}}.$$ Thus for every $I_0 \in \mathcal{D},$ $$\frac{1}{\abs{I_0}^{\frac{1}{r}}}\left\Vert \sum_{I\subseteq I_0} \langle b, h_I\rangle h_I \right\Vert_r \leq 2^{\frac{1}{p_2}+ \cdots+\frac{1}{p_m}} C_\epsilon ,$$ and hence $b \in BMO^d.$\\
\noindent \textbf{Case II:} $\alpha_1 \neq 0$ \, or \, $\sigma(\vec{\alpha}) > 1.$\\ \noindent Taking $f_i =
\begin{cases}
h_{I_0}, &\text{if }\alpha_i = 0\\
\mathsf{1}_{I_0}, \;\;\; & \text{if }\alpha_i = 1,\\
\end{cases} $\; we observe that $$T_\epsilon^{\vec{\alpha}}(f_1,f_2,\ldots,f_m) = h_{I_0}^{\sigma(\vec{\alpha})} \;\;\text{ and } \;\;\;T_\epsilon^{\vec{\alpha}}(bf_1, f_2, \ldots,f_m) = (bf_1)(I_0, \alpha_1)h_{I_0}^{\sigma(\vec{\alpha})}. $$ \noindent If $\alpha_1 = 0, $
$$ (bf_1)(I_0, \alpha_1) = {bh_{I_0}}(I_0, 0) = \widehat{bh_{I_0}}(I_0) = \int_\mathbb{R}{ bh_{I_0}h_{I_0}} = \frac{1}{\abs{I_0}}\int_\mathbb{R}{ b \mathsf{1}_{I_0}} = \langle b \rangle _{I_0}.$$ \noindent If $\alpha_1 = 1,$
$$ (bf_1)(I_0, \alpha_1) = {b\mathsf{1}_{I_0}}(I_0, 1) = \langle {b\mathsf{1}_{I_0}}\rangle _{I_0} = \langle {b}\rangle _{I_0}.$$ \noindent So in each case, \begin{eqnarray*} \norm{[b,T_\epsilon^{\vec{\alpha}}]_1(f_1,f_2,\ldots,f_m)}_r &=& \left\Vert{bT_\epsilon^{\vec{\alpha}}(f_1,f_2,\ldots,f_m)) - T_\epsilon^{\vec{\alpha}}(bf_1, f_2, \ldots, \ldots,f_m)}\right\Vert_r\\ &=& \left\Vert{b h_{I_0}^{\sigma(\vec{\alpha})} - \langle b \rangle _{I_0} h_{I_0}^{\sigma(\vec{\alpha})}}\right\Vert_r\\ &=& \left\Vert{(b - \langle b \rangle _{I_0}) h_{I_0}^{\sigma(\vec{\alpha})}}\right\Vert_r\\ &=& \frac{1}{(\sqrt{\abs{I_0}})^{\sigma(\vec{\alpha})}}\norm{(b - \langle b \rangle _{I_0}) \mathsf{1}_{I_0}}_r.\\ \end{eqnarray*} \noindent On the other hand, $$\prod_{j=1}^m\norm{f_j}_{p_j} = \frac{1}{(\sqrt{\abs{I_0}})^{\sigma(\vec{\alpha})}} \abs{I_0}^{\frac{1}{p_1}+ \cdots + \frac{1}{p_m}} = \frac{1}{(\sqrt{\abs{I_0}})^{\sigma(\vec{\alpha})}} \abs{I_0}^{\frac{1}{r}}. $$ Inequality \eqref{eq:bd} then gives $$ \frac{1}{(\sqrt{\abs{I_0}})^{\sigma(\vec{\alpha})}}\norm{(b - \langle b \rangle _{I_0}) \mathsf{1}_{I_0}}_r \leq C_\epsilon \frac{1}{(\sqrt{\abs{I_0}})^{\sigma(\vec{\alpha})}} \abs{I_0}^{\frac{1}{r}}$$ $$ \text{i.e. } \quad \frac{1}{\abs{I_0}^{\frac{1}{r}}}\norm{(b - \langle b \rangle _{I_0}) \mathsf{1}_{I_0}}_r \leq C_\epsilon. $$ Since this is true for any $I_0 \in \mathcal{D}$, we have $b\in BMO^d.$ \end{proof}
\noindent Combining the results from Theorems \ref{boc} and \ref{bmonecessity}, we have the following characterization of the dyadic BMO functions. Note that if $\epsilon_I = 1$ for every $I \in \mathcal{D}$, we have $T_\epsilon^{\vec{\alpha}} = P^{\vec{\alpha}},$ and that in the proof of Theorem \ref{bmonecessity}, only the boundedness of $[b, T_\epsilon^{\vec{\alpha}}]_i$ for $\epsilon$ with $\epsilon_I = 1$ for all $I\in \mathcal{D}$ was used to show that $b\in BMO^d.$\\ \begin{thm} Let $\vec{\alpha} = (\alpha_1,\alpha_2,\ldots,\alpha_m) \in U_m,$ $1\leq i \leq m,$ and $1<p_1,p_2, \ldots,p_m, r < \infty$ with $$\sum_{j=1}^m \frac{1}{p_j} = \frac{1}{r}.$$ Suppose $b \in L^p$ for some $p \in (1,\infty).$ Then the following two statements are equivalent. \begin{enumerate}[label = $(\alph*)$] \item $b\in BMO^d.$\\ \item $\displaystyle [b,T_\epsilon^{\vec{\alpha}}]_i:L^{p_1}\times L^{p_2} \times \cdots\times L^{p_m}\rightarrow L^r $ is bounded for every bounded sequence $\epsilon = \{\epsilon_I\}_{I\in \mathcal{D}}.$ \end{enumerate}
\noindent In particular, $b\in BMO^d$ if and only if $[b,P^{\vec{\alpha}}]_i:L^{p_1}\times L^{p_2} \times \cdots\times L^{p_m}\rightarrow L^r$ is bounded.\\ \end{thm}
\begin{bibdiv} \begin{biblist}
\normalsize \bib{BMNT}{article}{ title={Bilinear paraproducts revisited},
author={B{\'e}nyi, {\'A}.},
author={Maldonado, D.},
author={Nahmod, A. R.},
author={Torres, R. H.},
journal={Mathematische Nachrichten},
volume={283},
number={9},
pages={1257--1276},
year={2010},
publisher={Wiley Online Library}}
\bib{Bla}{article}{ author={Blasco, O.}, title={Dyadic BMO, paraproducts and Haar multipliers}, journal={Contemp. Math., Vol 445, Amer. Math. Soc., Providence, RI,}, pages={11-18, MR 2381883}}
\bib{CRW}{article}{ title={Factorization theorems for Hardy spaces in several variables},
author={Coifman, R.R.},
author={Rochberg, R.},
author={Weiss, G.},
journal={Ann. of Math.},
volume={103},
pages={611-635},
year={1976}}
\bib{GLLZ}{article}{ author={Grafakos,L.}, author={Liu, L.}, author={Lu, S}, author={Zhao,F.}, title={The multilinear Marcinkiewicz interpolation theorem revisited: The behavior of the constant}, journal={J. Funct. Anal.}, volume={262}, year={2012}, pages={2289-2313}}
\bib{GT}{article}{ author={Grafakos,L.}, author={Torres, R.H.}, title={Multilinear Calder$\acute{\text{o}}$n-Zygmund theory}, journal={Adv. Math.}, volume={165}, year={2002}, number={1}, pages={124-164.}}
\bib{HR}{article}{title={Interpolation by the real method between $BMO$, $L^\alpha (0 < \alpha < \infty)$ and $H^\alpha (0 < \alpha < \infty)$}, author = {Hanks, R.}, journal={Indiana Univ. Math. J.}, volume ={26}, number ={4}, pages={679-689}, year = {1977}}
\bib{Hyt}{article}{ author={Hyt$\ddot{\text{o}}$nen ,Tuomas P.}, title={Representation of singular integrals by dyadic operators, and the $A_2$ theorem}, journal={arXiv:1108.5119}, year={2011}}
\bib{Jan}{article}{ author={Janson, S.}, title={BMO and commutators of martingale transforms}, journal={Ann. Inst. Fourier}, volume={31}, number = {1}, year={1981}, pages={265-270}}
\bib{JN}{article}{title={On functions of bounded mean oscillation}, author = {John, F.},author = {Nirenberg, L.}, journal={Comm. Pure Appl. Math.}, volume = {14}, year = {1961}, pages={415--426}}
\bib{LOPTT}{article}{ title={New maximal functions and multiple weights for the multilinear Calder$\acute{\text{o}}$n-Zygmund theory},
author={Lerner, A.K.},
author={Ombrosi, S.},
author={P$\acute{\text{e}}$rez, C.},
author={Torres, R. H.},
author={Trujillo-Gonz$\acute{\text{a}}$lez, R.},
journal={Adv. in Math.},
volume={220},
number={4},
pages={1222--1264},
year={2009},
publisher={Wiley Online Library}}
\bib{Per}{article}{ author={Pereyra, M.C.}, title={Lecture notes on dyadic harmonic analysis}, journal={Contemporary Mathematics}, volume={289}, date={2001}, pages={1-60}}
\bib{SE}{book}{title={Harmonic Analysis: Real Variable Methods, Orthogonality, and Oscillatory Integrals}, author = {Stein, E. M.}, publisher ={Princeton Univ. Press, Princeton}, year = {1993}}
\bib{Treil}{article}{ author={Treil, S.}, title={Commutators, paraproducts and BMO in non-homogeneous martingale settings}, journal={http://arxiv.org/pdf/1007.1210v1.pdf}}
\end{biblist} \end{bibdiv}
\end{document} |
\begin{document}
\newtheorem{corollary}{Corollary} \newtheorem{definition}{Definition} \newtheorem{example}{Example} \newtheorem{lemma}{Lemma} \newtheorem{proposition}{Proposition} \newtheorem{theorem}{Theorem} \newtheorem{fact}{Fact} \newtheorem{property}{Property}
\newcommand{\bra}[1]{\langle #1|}
\newcommand{\ket}[1]{|#1\rangle}
\newcommand{\braket}[3]{\langle #1|#2|#3\rangle}
\newcommand{\ip}[2]{\langle #1|#2\rangle}
\newcommand{\op}[2]{|#1\rangle \langle #2|}
\newcommand{\tr}{{\rm tr}} % NOTE(review): command name was garbled in the source (\newcommand requires a single control sequence); restored as \tr
\newcommand {\E } {{\mathcal{E}}} \newcommand {\F } {{\mathcal{F}}} \newcommand {\diag } {{\rm diag}}
\title{\Large {\bf Separability of Bosonic Systems}} \author{Nengkun Yu$^{1,2}$} \email{nengkunyu@gmail.com} \affiliation{$^1$Institute for Quantum Computing, University of Waterloo, Waterloo, Ontario, Canada\protect\\ $^2$Department of Mathematics $\&$ Statistics, University of Guelph, Guelph, Ontario, Canada}
\begin{abstract} In this paper, we study the separability of quantum states in bosonic systems. Our main tool here is the \lq \lq separability witnesses", and a connection between \lq \lq separability witnesses" and a new kind of positivity of matrices---\lq \lq Power Positive Matrices"---is drawn. This connection is employed to demonstrate that a multi-qubit quantum state with Dicke states as its eigenvectors is separable if and only if two related Hankel matrices are positive semidefinite. By employing this criterion, we are able to show that such a state is separable if and only if its partial transpose is non-negative, which confirms the conjecture in [Wolfe, Yelin, Phys. Rev. Lett. (2014)]. Then, we present a class of bosonic states in a $d\otimes d$ system such that, for general $d$, determining their separability is NP-hard, although verifiable conditions for separability are easily derived in the cases $d=3,4$. \end{abstract}
\pacs{03.65.Ud, 03.67.Hk}
\maketitle \textit{Introduction---}Entanglement, first recognized as a “spooky” feature of quantum machinery by Einstein, Podolsky, and Rosen \cite{EPR35}, lies at the heart of quantum mechanics. It has been discovered that entanglement plays an essential role in various fundamental applications and protocols in quantum information science, such as quantum teleportation, superdense coding and cryptography \cite{BW92,BBCJ+93,BB84}. Moreover, high order of multipartite entanglement has been shown to be requisite to reach the maximal sensitivity in metrological tasks \cite{GZN+10}.
In multipartite systems, a quantum state is called separable if it can be written as a statistical mixture of product states; otherwise, it is entangled. Research on separability criteria, that is, on computational methods to determine whether a given state is separable or entangled, turns out to be a cumbersome problem and an essential subject in quantum information theory. Starting from the famous PPT (Positive Partial Transpose) criterion \cite{PER96}, a considerable number of different separability criteria have been discovered (see the references in \cite{HHHH09,IOA07}). One fundamental tool for detecting entanglement is entanglement witnesses \cite{HHH96,TER00}, which is equivalent to the method of positive, but not completely positive, maps. Entanglement witnesses are observables that completely characterize separable states and allow one to detect entanglement physically. Their origin stems from the hyperplane separation theorem of geometry: convex sets can be described by hyperplanes. In particular, a witness is an observable which is non-negative for separable states, but can have a negative expectation value for entangled states.
Despite great efforts and considerable progress have been made, the physical understanding and mathematical description of its essential characteristics remain however highly nontrivial tasks, especially when many-particle systems are analyzed. Moreover, it was shown by Gurvits \cite{GUR04} that this problem is NP-hard. However, it is still possible to have complete criterion for the separability of some interesting certain situations. A problem of great interest is to study the entanglement of bosonic system \cite{ESBL02,TG09,TAH+12,CBF+13,WY14}. For $N$-qubit bosonic system, a natural basis is $N$-qubit Dicke states(unormalized) which are defined as, \begin{equation*}
\ket{D_{N,n}} \! := P_{\textrm{sym}}\bigl( \ket{0}^{\otimes n} \otimes \ket{1}^{\otimes N-n} \bigr), \end{equation*} with $P_{\textrm{sym}}$ being the projection onto the Bosonic (fully symmetric) subspace, $i.e.$, $P_{\textrm{sym}} = \frac{1}{N!} \sum_{\pi\in S_N} U_\pi$, the sum extending over all permutation operators $U_\pi$ of the $N$-qubit systems. It is worth to note that the entanglement of pure Dicke state has been widely studied recently \cite{YU13,DVC00,YCGD10,YGD14,HKWG+09,BKMG+09,BTZLS+09,WKSW09}.
In this Letter, we focus on the problem of the separability criterion for quantum states in bosonic system by considering separable witnesses. We first draw a connection between the separable witnesses of general multi-qubit bosonic states and a new type of positivity of matrices---what we called \lq \lq Power Positivity". This connection is employed to study the separability of $N$-qubit quantum states which being the mixture of Dicke states. In particular, the separable witnesses of such states corresponds to diagonal \lq \lq Power Positive" matrices, that are just polynomials whose value of is always non-negative for non-negative variable. By employing the characterization of non-negative polynomials, an easily evaluated \textit{complete} criterion for the separability of mixture of Dicke states is demonstrated. Moreover, we show that any such separable state can be written as the mixture of $(N+1)(N+2)$ product states. We then study the separability of a class of states whose eigenvectors are generalized $d\otimes d$ Dicke states. It is proved that the separability problem of such states is NP-complete for general $d$, although very simple criterion is demonstrated for $d=3,4$.
\textit{Main Results---} In the $N$-qudit system $\mathcal{H}_1\otimes\mathcal{H}_2\otimes\cdots\otimes\mathcal{H}_N$ with $d$ being the dimension of each Hilbert space $\mathcal{H}_i$, the bosonic space is a subspace that spanned by pure quantum states which are invariant under the swap of any two subsystems among all $N$ subsystems, $i.e.$, for the swap operator exchanging the two qudits system $F_{i,j}$, \begin{equation*} S:\equiv\{\ket{\psi}:\ket{\psi}=F_{i,j}\ket{\psi},\ \mathrm{for\ all}\ i,\ j\ \mathrm{and\ Swap}\ F\}. \end{equation*} A mixed state $\rho$ is called bosonic if its support is a subspace of bosonic space where the support of $\rho$, $Supp(\rho)$, is the subspace spanned by the eigenvectors corresponding to its non-zero eigenvalues. In other words, $\rho=F_{i,j}\rho=\rho F_{i,j}$ holds for any $1\leq i,j\leq N$.
One very simple observation is that, if a bosonic state $\rho$ is separable, $i.e.$, there exist product states (unnormalized) $\otimes_{k=1}^N\op{\alpha_{j_k}}{\alpha_{j_k}}$ such that \begin{small} $$\rho=\sum_{j}\bigotimes_{k=1}^N\op{\alpha_{j_k}}{\alpha_{j_k}},$$ \end{small} then we can choose product states $\ket{\alpha_{j}}^{\otimes N}$, that is \begin{small} $$\rho=\sum_{j}\bigotimes_{k=1}^N\op{\alpha_{j}}{\alpha_{j}}.$$ \end{small} To see this, one only need to observe that \begin{small} \begin{equation*} \bigotimes_{k=1}^N\ket{\alpha_{j_k}}\in S\Rightarrow \exists\ket{\alpha_{j}},\bigotimes_{k=1}^N\ket{\alpha_{j_k}}=\ket{\alpha_{j}}^{\otimes N}. \end{equation*} \end{small} Now, we introduce the separable witnesses for \textit{bosonic} system as a useful tool: For $N$-qudit system, a Hermitian operator $W$ is called a separable witness of bosonic system if $W=P_SWP_S$ with $P_S$ being the projection of the bosonic space $S$ and it satisfies that \begin{eqnarray*}
{\rm tr}(W\alpha^{\otimes N})\geq 0, \mathrm{for~all}~\alpha=\op{\alpha}{\alpha}. \end{eqnarray*} The importance of separability witness is due to the following proposition. \begin{proposition} A bosonic state $\rho$ is separable if and only if ${\rm tr}(W\rho)\geq 0$ holds for all separability witness $W$ of bosonic system. \end{proposition} \textit{Remark}: This proposition can be generalized to the separability of quantum states lying in fixed subspace.
\textit{Proof:}---The only if part simply follows from the above observation about the structure of separable states of bosonic system. To show the validity of the if part, we assume the existence of entangled bosonic state $\rho$ such that ${\rm tr}(W\rho)\geq 0$ holds for all separability witness $W$ of bosonic system. Notice that the set of separable states of bosonic system is convex and compact. Entangled $\rho$ does not lie in this set, by hyperplane separation theorem, one can conclude that there exists a $H$ such that ${\rm tr}(H\rho)<0$ and ${\rm tr}(H\alpha^{\otimes N})\geq 0$ holds for all $\alpha$. Therefore, ${\rm tr}(W\alpha^{\otimes N})={\rm tr}(P_SHP_S\alpha^{\otimes N})={\rm tr}(HP_S\alpha^{\otimes N} P_S)={\rm tr}(H\alpha^{\otimes N})\geq 0$ for $W=P_SHP_S$, then $W$ is a separability witness. On the other hand, ${\rm tr}(W\rho)={\rm tr}(P_SHP_S\rho)={\rm tr}(HP_S\rho P_S)={\rm tr}(H\rho)<0$, which contradicts to the assumption.
$\blacksquare$
Notice that the set of separable witnesses forms a convex compact set. In order to check the separability of bosonic states, one way is to parameterize the set of separable witnesses, at least the set of extreme points of separable witnesses.
For simplicity, we mainly focus on the separable witnesses of $N$-qubit bosonic system.
Notice that any Hermitian $W=P_SHP_S$ corresponds to a Hermitian matrix $M:=(m_{i,j})_{(N+1)\times(N+1)}$ as follows \begin{eqnarray*} W:=\sum_{i,j=0}^N m_{i,j}\op{\widetilde{D_{N,i}}}{\widetilde{D_{N,j}}} \end{eqnarray*} where we employe the dual basis of Dicke states as \begin{equation*}
\ket{\widetilde{D_{N,n}}}:= {N \choose n}^{-1}P_{\textrm{sym}}\bigl( \ket{0}^{\otimes n} \otimes \ket{1}^{\otimes N-n} \bigr), \end{equation*} that is, $\ip{D_{N,m}}{\widetilde{D_{N,n}}}=\delta_{m,n}$.
Now we can derive the condition for $W$ being separable witness: ${\rm tr}(W\alpha^{\otimes N})\geq 0$ holds for all one-qubit $\ket{\alpha}$ is equivalent to \begin{eqnarray*} &&{\rm tr}(W\op{0}{0}^{\otimes N})\geq 0\Leftrightarrow m_{N,N}\geq 0, \\
&&{\rm tr}\{W[(\ket{1}+z\ket{0})(\langle{1}|+z^*\langle{0}|)]^{\otimes N}\}\geq 0, \end{eqnarray*}
for all $z\in \mathbb{C}$. One can observe that the second condition implies the first one as $|z|\rightarrow \infty$.
Observing that $(\ket{1}+z\ket{0})^{\otimes N}=\sum_{j=0}^N z^j\ket{D_{N,j}}$, we see that the second condition given above is just \begin{eqnarray*} \vec{z}^{\dag} M \vec{z} \geq 0~\mathrm{for~all}~\vec{z}=(1,z,z^2,\cdots,z^{N})^{T}\in\mathbb{C}^{N+1}. \end{eqnarray*} This is what we call a \lq \lq Power Positive Matrix", which is far different from \lq \lq Semi-definite" or \lq \lq Completely Positive". Unfortunately, we are not able to give a complete description of the set of \lq \lq Power Positive Matrices", even though one can easily conclude that it is a superset of the \lq \lq Semi-definite Matrices".
Although it is difficult to check the separability of general $N$-qubit states, we demonstrate an easily verified analytical condition for the separability of the following general diagonal symmetric states which is necessary and sufficient.
In $N$-qubit bosonic system, one can naturally define the following class of quantum states, so called the general diagonal symmetric states, GDS \cite{WY14}, \begin{equation*} \rho=\sum_{n=0}^N\chi_n \op{D_{N,n}}{D_{N,n}}, \end{equation*} where $\chi_n$ represent the eigenvalues in the eigen-decomposition of $\rho$.
Notice that any GDS state $\rho$ enjoys the symmetry that for all diagonal qubit unitary $U_{\theta}=diag\{1,e^{i\theta}\}$, \begin{equation*} \rho=U_{\theta}^{\otimes N}\rho~U_{\theta}^{\dag \otimes N}. \end{equation*} Thus, for any separability witness $W$, we have \begin{equation*} {\rm tr}(W\rho)={\rm tr}(WU_{\theta}^{\otimes N}\rho~U_{\theta}^{\dag \otimes N})={\rm tr}(U_{\theta}^{\dag \otimes N}WU_{\theta}^{\otimes N}\rho), \end{equation*} $W_0$ is a \lq \lq diagonal" separable witness and ${\rm tr}(W_0\rho)={\rm tr}(W\rho)$ with \begin{equation*} W_0=\frac{1}{2\pi}\int_{0}^{2\pi}U_{\theta}^{\dag \otimes N}WU_{\theta}^{\otimes N}d\theta=\sum_{k=0}^N m_{k,k}\op{\widetilde{D_{N,k}}}{\widetilde{D_{N,k}}}. \end{equation*} Here, a separable witness $W=\sum_{i,j=0}^N m_{i,j}\op{\widetilde{D_{N,i}}}{\widetilde{D_{N,j}}}$ is called diagonal if $m_{i,j}=0$ for $i\neq j$. According to Proposition 1, we know that: \begin{proposition} A general diagonal symmetric state $\rho$ is separable if and only if ${\rm tr}(W_0\rho)\geq 0$ for all diagonal separable witness $W_0$. \end{proposition}
Recall the concept of \lq \lq Power Positive Matrix", $W_0$ is a separable witness if and only if $\sum_{k=0}^N m_{k,k}|z|^{2k}$ is always non-negative for all $z\in\mathbb{C}$. This is equivalent to \begin{equation*} g(r)\geq 0~\mathrm{for~all}~r\geq 0, \end{equation*} for real coefficient polynomial $g(x):=\sum_{k=0}^N m_{k,k}x^{k}$, whose value $g(x)$ is always non-negative for non-negative $x$. The characterization of such polynomials is accomplished by the following proposition. \begin{proposition} A real coefficient polynomial $g(x)$ satisfies that $g(r)\geq 0$ for all $r\geq 0$ if and only if there exist real coefficient polynomial $P_i(x),Q_i(x)$ such that \begin{equation*} g(x)=\sum_i xP_i^2(x)+\sum_iQ_i^2(x). \end{equation*} \end{proposition} \textit{Proof:}---The if part is simple. To show the validity of the only if part, we use the fundamental theorem of algebra, \begin{equation*} g(x)=a_0\prod (x-z_k)^{l_k}. \end{equation*} For non real root $z_k$, we know that for all real $r$, \begin{equation*} (r-z_k)(r-\bar{z_k})=(r-Re(z_k))^2+Im^2(z_k)\geq 0. \end{equation*} For non-positive $z_k$, we know that for all $r\geq 0$, \begin{equation*} r-z_k=r+(-z_k)\geq 0. \end{equation*} For positive $z_k$, its power $l_k$ must be even.
Thus, expanding $g(x)=a_0\prod(x-z_k)^{l_k}$ confirms the only if part.
$\blacksquare$
Invoking the relation between the diagonal separable witness $W_0$ and the polynomial $g(x)$, one can deduce the following, \begin{proposition} Extreme point of the diagonal separable witnesses for GDS has one of the following forms \begin{eqnarray*} S&=&\sum_{0\leq i,j\leq \frac{N}{2}}a_ia_j\op{\widetilde{D_{N,i+j}}}{\widetilde{D_{N,i+j}}},\\ T&=&\sum_{0\leq i,j\leq \frac{N-1}{2}}b_ib_j\op{\widetilde{D_{N,i+j+1}}}{\widetilde{D_{N,i+j+1}}}, \end{eqnarray*} with $a_k,b_k\in\mathbb{R}$. \end{proposition} Now we are ready to show our main result, \begin{theorem} The GDS state $\rho=\sum_{n=0}^N\chi_n \op{D_{N,n}}{D_{N,n}}$ is separable if and only if the following two Hankel Matrices \cite{PAR88} $M_0,M_1$ are positive semi-definite, $i.e.$, \begin{eqnarray} M_0:=\left(\begin{array}{cccc} \chi_0 & \chi_1 & \cdots & \chi_{m_0}\\ \chi_1 & \chi_2 & \cdots & \chi_{m_0+1}\\ \cdots & \cdots & \cdots & \cdots\\ \chi_{m_0} & \chi_{m_0+1} & \cdots & \chi_{2m_0} \end{array}\right)\geq 0,\\ M_1:=\left(\begin{array}{cccc} \chi_1 & \chi_2 & \cdots & \chi_{m_1}\\ \chi_2 & \chi_3 & \cdots & \chi_{m_1+1}\\ \cdots & \cdots & \cdots & \cdots\\ \chi_{m_1} & \chi_{m_1+1} & \cdots & \chi_{2m_1-1} \end{array}\right)\geq 0, \end{eqnarray} where $m_0:=[\frac{N}{2}]$ and $m_1:=[\frac{N+1}{2}]$. \end{theorem} \textit{Proof:}---According to Proposition 2 and Proposition 4, $\rho$ is separable if and only if ${\rm tr}(W_0\rho)\geq 0$ holds for any extreme point $W_0$ of the diagonal separable witnesses for GDS, that is, for all $\vec{a}=(a_0,\cdots,a_{m_0})^T\in\mathbb{R}^{m_0+1}$, $\vec{b}=(b_1,\cdots,b_{m_1})^T\in\mathbb{R}^{m_1}$the following quadratic forms are non-negative, \begin{eqnarray*} {\rm tr}(S\rho)=\sum_{0\leq i,j\leq m_0}\chi_{i+j}a_ia_j=\vec{a}^TM_0\vec{a}\geq 0,\\ {\rm tr}(T\rho)=\sum_{1\leq i,j\leq m_1}\chi_{i+j-1}b_ib_j=\vec{b}^TM_1\vec{b}\geq 0. \end{eqnarray*} It is equivalent to the non-negativity of real Hankel Matrices $M_0,M_1$.
$\blacksquare$
Now we are going to present the rigorous proof of the conjecture from \cite{WY14}. \begin{theorem} The GDS state $\rho=\sum_{n=0}^N\chi_n \op{D_{N,n}}{D_{N,n}}$ is separable if and only if it is PPT. More precisely, if and only if it is PPT under the partial transpose of $m_0=[\frac{N}{2}]$ subsystems. \end{theorem} \textit{Proof:}---First, it is sufficient to consider the partial transpose of the first $m_0$ subsystems by noticing the symmetric in the bosonic system. Assume $\rho$ is positive under the partial transpose of $m_0=[\frac{N}{2}]$ subsystems, according to Theorem 1, we only need to show $M_0,M_1\geq 0$ of Eq.(1,2).
One can write $\rho^{\Gamma}$ in basis $\ket{D_{m_0,j}}\ket{D_{m_1,k}}$ with $0\leq j\leq m_0,0\leq k\leq m_1$ by verifying the following equations, \begin{eqnarray*} \ket{D_{N,n}}&=&\sum_{j=0}^n\ket{D_{m_0,j}}\ket{D_{m_1,n-j}},~\mathrm{for}~n\leq m_0,\\ \ket{D_{N,n}}&=&\sum_{j=n-m_1}^{m_0}\ket{D_{m_0,j}}\ket{D_{m_1,n-j}},~\mathrm{for}~n > m_0, \end{eqnarray*} where $m_1=N-m_0$.
Since $\rho^{\Gamma}\geq 0$, then the restriction of $\rho^{\Gamma}$ on subspace spanned by $\{\ket{D_{m_0,j}}\ket{D_{m_1,j}},0\leq j\leq m_0\}$ is still non-negative, direct calculation leads us to the fact that this is just $M_0\geq 0$.
On the other hand, the restriction of $\rho^{\Gamma}$ on subspace spanned by $\{\ket{D_{m_0,j-1}}\ket{D_{m_1,j}},1\leq j\leq m_1\}$ is still non-negative, direct calculation leads us to the fact that this is just $M_1\geq 0$.
Invoking Theorem 1, we can conclude that $\rho$ is separable.
$\blacksquare$
One can have the following interesting corollary, \begin{corollary} GDS state $\rho=\sum_{n=0}^N\chi_n \op{D_{N,n}}{D_{N,n}}$ is positive under the partial transpose of $m_0=[\frac{N}{2}]$ subsystems, then it is positive under the partial transpose of arbitrary subsystems. \end{corollary}
In the following, we introduce a class of bipartite GDS states, and study the separability of such states: In $d\otimes d$ system, one can define the following general diagonal symmetric states, \begin{eqnarray*} \rho=\sum_{i,j=1}^d\chi_{i,j} \op{\psi_{i,j}}{\psi_{i,j}}, \end{eqnarray*} with $\ket{\psi_{i,j}}:=\begin{cases}\ket{ii} &{\rm if}\ i=j,\\ \ket{ij}+\ket{ji} &{\rm otherwise.}\end{cases}$ being some basis of the bosonic subspace of $d\otimes d$ system, $i.e.$, the symmetric subspace.
Notice that $\rho=(U\otimes U)\rho(U\otimes U)^{\dag}$ holds for all diagonal qudit unitary $U$. Then, $\rho$ is separable if and only if there exist qudit states $\ket{\alpha_k}=\sum_{j=1}^d x_{k,j}\ket{j}$ such that \begin{eqnarray*} \rho&=&\sum_{k} \int(U\otimes U)\alpha_k^{\otimes 2}(U\otimes U)^{\dag} dU\\
&=&\sum_{k} |x_{k,i}|^2|x_{k,j}|^2\op{\psi_{i,j}}{\psi_{i,j}}.\\ \Leftrightarrow \chi:&=&(\chi_{ij})_{d\times d}=\sum_{k} \vec{x_k}\vec{x_k}^{T}. \end{eqnarray*}
where $dU$ ranging over all diagonal unitaries, and $\vec{x_k}=(|x_{k,1}|^2,\cdots,|x_{k,d}|^2)^{T}\in\mathbb{R}^d$.
Recall that the cone of completely positive matrices \cite{DIA61,GW80} is defined as \begin{eqnarray*} \mathcal{C}=\{\sum_{k}\vec{y_k}\vec{y_k}^{T}:\vec{y_k}\in\mathbb{R}^d_{+}\}, \end{eqnarray*} where $\mathbb{R}^d_{+}$ stands for the set of $d$-dimensional vectors whose entries are all non-negative.
It is widely known that the decision problem on checking the completely positivity of given matrix is NP-Hard for general $d$ while for $d=3,4$, checking that the matrix is positive semidefinite and has all entries $\geq 0$ is both necessary and sufficient. Formally, \begin{theorem} It is NP-Hard to decide whether a given $d\otimes d$ GDS state is separable. On the other hand, $\rho=\sum_{i,j=1}^d\chi_{i,j} \op{\psi_{i,j}}{\psi_{i,j}}$ is separable if and only if $\chi=(\chi_{ij})_{d\times d}$ is semi-definite positive. \end{theorem} In other words, we have the following: PPT criterion is not sufficient for detect the entanglement for bosonic states, even for GDS states unless at least $P=NP$, which is highly impossible.
\textit{Conclusion---}In this paper, we study the separability problem of bosonic system. An analytical condition for the separability of $n$-qubit states whose eigenstates are Dicke states is demonstrated. For bipartite qudit system, we present a class of standard bosonic states, for general $d$, its separability is NP-hard, while for $d=3,4$, the condition of separability is provided.
It is still not clear whether there exist easily verified analytical conditions for the separability of general $n$-qubit bosonic states.
We thank Prof. John Watrous, Prof. Debbie Leung and Prof. Bei Zeng for their comments. NY is supported by NSERC, NSERC DAS, CRC, and CIFAR.
\end{document} |
\begin{document}
\def\rangle{\rangle} \def\langle{\langle} \def\hat{a}{\hat{a}} \def\hat{a}^2{\hat{a}^2} \def\hat{a}^\dagger{\hat{a}^\dagger} \def\hat{a}^{\dagger 2}{\hat{a}^{\dagger 2}} \def\aco\aao{\hat{a}^\dagger\hat{a}} \def\hat{b}{\hat{b}} \def\hat{b}^2{\hat{b}^2} \def\hat{b}^\dagger{\hat{b}^\dagger} \def\hat{b}^{\dagger 2}{\hat{b}^{\dagger 2}} \def\bco\bao{\hat{b}^\dagger\hat{b}} \def\begin{equation}{\begin{equation}} \def\end{equation}{\end{equation}} \def\begin{eqnarray}{\begin{eqnarray}} \def\end{eqnarray}{\end{eqnarray}} \def\cdot\cdot\cdot{\cdot\cdot\cdot} \def\begin{center}{\begin{center}} \def\end{center}{\end{center}} \title{Photon-added Coherent States in Parametric Down-conversion} \author{ S. Sivakumar\\Materials Physics Division\\ Indira Gandhi Centre for Atomic Research\\ Kalpakkam 603 102 INDIA\\ Email: siva@igcar.gov.in\\ Phone: 91-044-27480500-(Extension)22503} \maketitle \begin{abstract} Photon-added coherent states have been realized in optical parametric down-conversion by Zavatta {\em et al} [Science 306 (2004) 660-662]. In this report, it is established that the states generated in the process are {\em ideal} photon-added coherent states. It is shown that the scheme can generate higher order photon-added coherent states. A comparative study of the down-conversion process and atom-cavity interaction in generating the photon-added coherent states is presented.
\end{abstract} PACS: 42.50.Pq, 03.67.Bg, 03.67.Mn\\ Keywords: photon-added coherent states, parametric downconversion, cavity QED
\section{Introduction}\label{secI}
The quantum-classical divide continues to be enigmatic. Good experiments are necessary to improve our understanding of the issue. One way is to generate and study states that interpolate between the classical and quantum domains. A coherent state is considered to be a classical state in the sense that the Glauber-Sudarshan function is an admissible phase-space probability density distribution, {\em i.e.}, non-negative on the entire phase space. This feature is retained by coherent states of arbitrary amplitude. Additionally, coherent states exhibit minimum fluctuations (uncertainties) in their amplitudes and phases. States obtained by the action of the harmonic oscillator creation operator on the coherent states do not admit non-negative phase-space distributions. Such states are classified as nonclassical. A photon-added coherent state (PACS) is defined as $\vert\alpha,m\rangle=\hat{a}^{\dagger m}\vert\alpha\rangle$ (unnormalized), where $\vert\alpha\rangle$ is a coherent state of amplitude $\alpha$, $\hat{a}^\dagger$ is the creation operator for the field and $m$ is a nonnegative integer\cite{gsatara}. The state $\vert\alpha,m\rangle$ is said to be a PACS of order $m$ and amplitude $\alpha$. Unlike the coherent states $\vert\alpha\rangle$, the PACS $\vert\alpha,m\rangle$ exhibits nonclassical features such as squeezing, sub-Poissonian statistics, etc.\ \cite{gsatara}. Hence, the PACS is considered as a state that interpolates between the classical and nonclassical states. While the action of the creation operator on a coherent state leads to a nonclassical state, the action of the annihilation operator $\hat{a}$ on a coherent state does not change the state. Thus, experimentally realizing the action of the creation and annihilation operators on a state is a way of testing the fundamental commutation relation $\left[\hat{a},\hat{a}^\dagger\right]=I$ \cite{zavatta, parigi,dodonov}. 
Moreover, photon addition to any state of light, not necessarily a coherent state, is viewed as a way of introducing nonclassicality\cite{parigi}.\\
Generation of the PACS is possible in the cavity-atom interaction\cite{gsatara}, conditional measurements of beam-splitter output\cite{dakna}, etc. Single-photon-added coherent states (SPACSs) have been experimentally realized in an all-optical scheme employing a nonlinear medium\cite{zavatta}. It has been shown that the higher order PACS $\vert\alpha,m\rangle$, corresponding to $m>1$, can be realized in this scheme\cite{gerry}. It may be added that the recent suggestion for tailoring the interaction in optomechanical systems (micro-resonator interacting with laser light) by proper detuning can be used to generate PACS in the optomechanical domain\cite{optojosa}. A common feature of the aforementioned systems is that they are all interacting, bipartite systems. During evolution, the two subsystems are entangled. This makes it possible to perform suitable conditional measurements on one of the subsystems so that the other subsystem is prepared in a PACS. Except for the cavity-atom scheme, the other proposals are based on bipartite, coupled oscillators. For instance, in the optomechanical scheme, the resonator mode is an oscillator and the laser field is another oscillator. Interaction between the oscillators can be tailored by detuning. In particular, the following forms of interactions are possible \cite{LYS}: \begin{eqnarray} H_{n}&\propto&\hat{a}^\dagger\hat{b}^\dagger+\hat{a}\hat{b},\\ H_{p}&\propto&\hat{a}^\dagger\hat{b}+\hat{a}\hat{b}^\dagger. \end{eqnarray} In the optomechanical case, the interactions
$H_{n}$ and $H_{p}$ correspond respectively to negative detuning when the resonator frequency is smaller than the laser frequency and positive detuning wherein the laser frequency is smaller than the resonator frequency. The operators $\hat{a}^\dagger$ and $\hat{b}^\dagger$ are the creation operators of the quantized optical field and the resonator mode respectively. The corresponding annihilation operators are $\hat{a}$ and $\hat{b}$ respectively. The Hamiltonian $H_n$ describes the opto-mechanical equivalent of the all-optical system in the experimental scheme of Zavatta {\it et al}. Similar interaction Hamiltonians arise in the context of optically coupled nano-resonators \cite{zhang} and optical parametric amplifiers\cite{LYS}.
\\
In this report, a comparative study of two processes, namely, parametric downconversion and atom-cavity interaction, that can generate PACS is presented. By expressing the time-evolved states in a suitable non-orthogonal basis, it is established that the former method generates {\em ideal} PACS, a feature that is not present in the atom-cavity scheme. Further, the parametric downconversion itself is shown to be capable of generating {\em ideal} $m$-photon-added coherent states (MPACS), without requiring higher order processes.
\section{Cavity-atom scheme}
In the cavity-atom scheme, a two-level atom interacts with the electromagnetic field in a single mode cavity. The two levels of the atom are $\vert g\rangle$ and $\vert e\rangle$ respectively. Interaction between the two-level atom and the field mode of the cavity is described by the Jaynes-Cummings Hamiltonian\cite{berman} \begin{equation} \hat{H}_{JC}=\hbar\beta\left[\hat{a}^\dagger\vert g\rangle\langle e\vert+\hat{a}\vert e\rangle\langle g\vert\right]. \end{equation} Here $\hat{a}^\dagger$ and $\hat{a}$ are respectively the creation and annihilation operators for the
quantized field in the cavity. The transition operators $\vert g\rangle\langle e\vert$ and $\vert e\rangle\langle g\vert$ are respectively the lowering and raising operators for the atom. The coupling strength between the atom and the cavity field is characterized by the coupling constant $\beta$. The initial state of the system is $\vert\alpha\rangle\vert e\rangle$, {\em i.e.}, the state of the cavity field is the coherent state $\vert\alpha\rangle$ and the atom is in the excited state $\vert e\rangle$. For short times, the evolution operator $\exp\left[-it\hat{H}_{JC}/\hbar\right]$ can be truncated to first order in $\beta t$. In this approximation, the state of the system at time $t$ is \begin{equation} \vert\psi(t)\rangle_{\hbox{app}}\approx\vert\alpha\rangle\vert e\rangle-i\beta t\hat{a}^\dagger\vert\alpha\rangle\vert g\rangle. \end{equation} The approximate final state $\vert\psi(t)\rangle_{\hbox{app}}$ is an entangled state of the cavity field and the atom. If the atom is detected in its ground state $\vert g\rangle$, the cavity field is the SPACS $\hat{a}^\dagger\vert\alpha\rangle$ (not normalized), whose amplitude is the same as that of the initial coherent state $\vert\alpha\rangle$. The suitability of this approximation depends on the interaction duration $t$ and the coupling constant $\beta$. To assess the nature of the approximation in a better way, the complete time-evolved state is required. To this end, the evolution operator is expanded as a power series in $\hat{H}_{JC}$. Using the series expression for the evolution operator, the time-evolved state is expressed as \begin{equation} \vert\psi(t)\rangle=\sum_{n=0}^{\infty} \frac{\tau^{2n}}{(2n)!}(\hat{a}\hat{a}^\dagger)^{n} \vert\alpha\rangle\vert e\rangle+\sum_{n=0}^\infty\frac{\tau^{2n+1}}{(2n+1)!} (\hat{a}\hat{a}^\dagger)^{n}\hat{a}^\dagger\vert\alpha\rangle\vert g\rangle, \end{equation} where $\tau=-i\beta t$. The states $\vert e\rangle$ and $\vert g\rangle$ are orthogonal to each other. 
Therefore, if the atom is in the ground state $\vert g\rangle$ on exit from the cavity, the state of the field in the cavity is \begin{equation} \vert\alpha,\tau\rangle\rangle=\sum_{n=0}^\infty\frac{\tau^{2n+1}}{(2n+1)!}(\hat{a}\hat{a}^\dagger)^{n} \hat{a}^\dagger\vert\alpha\rangle. \end{equation} The symbol $\vert\dots\rangle\rangle$ denotes the state of the cavity field. Using the identities\cite{gsawolf}, \begin{eqnarray} (\hat{a}^\dagger\hat{a})^n&=&\sum_{k=1}^n S(n,k)\hat{a}^{\dagger k}\hat{a}^k, \\ \hat{a}^{\dagger k}\hat{a}^k\hat{a}^\dagger&=&k\hat{a}^{\dagger k}\hat{a}^{k-1}+\hat{a}^{\dagger k+1}\hat{a}^k, \end{eqnarray} where $S(n,k)$ are the Stirling numbers of the second kind\cite{grad}, the expression for $\vert\alpha,\tau\rangle\rangle$ is recast as \begin{equation}\label{cavity} \vert\alpha,\tau\rangle\rangle=\sum_{m=1}^\infty\sum_{n=1}^\infty \frac{\tau^{2n+1}}{(2n+1)!}B(n,m,\alpha)\vert\alpha,m\rangle. \end{equation} Here, $$B(n,m,\alpha)=\alpha^{m-1}[mS(n,m)+S(n,m-1)m!\alpha]\sqrt{m! L_m(-\vert\alpha\vert^2)}$$ and $\vert\alpha,m\rangle=\hat{a}^{\dagger m}\vert\alpha\rangle$, are the MPACS (unnormalized)of amplitude $\alpha$. The function $L_m(-\vert\alpha\vert^2)$ is the Laguerre function of order $m$\cite{grad}. The above result implies that the cavity field is a superposition of various MPACS. The MPACS of different orders but of same amplitude are linearly independent and non-orthogonal. Hence, the superposition coefficients in Eq. \ref{cavity} cannot be interpreted as probability amplitudes.
On detecting the atom in its ground state, the cavity field is expected to be in the SPACS $\vert\alpha,1\rangle$. If it is indeed the case, it is not enough that the overlap between the states $\vert\alpha,\tau\rangle\rangle$ and $\vert\alpha,1\rangle$ is nearly unity. It is required that $\vert\langle\langle\alpha,\tau\vert\alpha,m\rangle\vert\approx \vert\langle\alpha,1\vert\alpha,m\rangle\vert$ for all $m$ to ensure that the states generated are indeed the PACS of amplitude $\alpha$. In Fig. \ref{fig:one}, the variation of overlap as the interaction duration increases is shown. The coupling constant $\beta$ is chosen to be $2\pi$ MHz. The respective overlap functions of the cavity state $\vert\alpha,\tau\rangle\rangle$ with the MPACS of order $m=1,2$ and 3 are shown. The initial coherent state is of amplitude $\alpha=0.8$ and it is expected that the scheme generates the SPACS of amplitude $\alpha=0.8$. As expected, for short interaction times the overlap between the cavity state $\vert\alpha,\tau\rangle\rangle$ and the SPACS $\vert\alpha,1\rangle$ remains close to unity and continues to be substantial $(>0.9)$ even when the interaction duration extends to 30$\mu$s. However, the overlap of the cavity state with higher order PACS are much smaller than the required values. For instance, the overlap with $\vert\alpha,2\rangle$ (dashed line in Fig. \ref{fig:one}) falls to 0.3 from the short time value of 0.74 if the interaction duration extends to 30 $\mu$s. Similarly, overlap with $\vert\alpha,3\rangle$ (dotted curve in Fig. \ref{fig:one}) decreases rapidly with the increase of interaction time. In short, as the interaction time becomes longer, the state of the cavity field differs significantly from the expected SPACS $\vert\alpha,1\rangle$.
\section{Coupled oscillators scheme}
In the coupled oscillators scheme, the bipartite system is composed of two oscillators which interact. The two oscillator systems could be the two modes of the electromagnetic field or the field mode of a microcavity and a laser field or two coupled microresonators, etc. In this work, the two oscillators are referred to as $a$-mode and $b$-mode respectively. The creation and annihilation operators for the $a$-mode are $\hat{a}^\dagger$ and $\hat{a}$ respectively. The corresponding operators for the $b$-mode are $\hat{b}^\dagger$ and $\hat{b}$ respectively. The Hamiltonian describing the interaction between the two modes in this bipartite system is \begin{equation} \hat{H}_n=\hbar\lambda\left(\hat{a}^\dagger\hat{b}^\dagger+\hat{a}\hat{b}\right), \end{equation} where $\lambda$ is the coupling constant. For short times, the corresponding evolution operator $\exp(-it\hat{H}_n/\hbar)$ is truncated to $1-it\hat{H}_n/\hbar$. If the initial state of the two modes is $\vert\alpha\rangle\vert 0\rangle$, then the evolved state is a superposition of $\vert\alpha\rangle\vert 0\rangle$ and $\vert\alpha,1\rangle\vert 1\rangle$. On detecting the $b$-mode in the one-photon state $\vert 1\rangle$, the $a$-mode is prepared in the SPACS $\vert\alpha,1\rangle$. \\
In order to know how well the generated state approximates the SPACS, it is necessary to solve for the dynamics without making any approximation. This is facilitated by the fact that the operators $\hat{a}^\dagger\hat{b}^\dagger$, $\hat{a}\hat{b}$ and $\aco\aao +\bco\bao$ provide a realization of the generators of the SU(1,1) algebra. Consequently, the evolution operator $\exp(-it\hat{H}_n/\hbar)$ admits the following factorization\cite{yamamoto}, \begin{equation} \exp(-i\lambda t(\hat{a}^\dagger\hat{b}^\dagger+\hat{a}\hat{b}))=\exp(u\hat{a}^\dagger\hat{b}^\dagger)\exp[v(\aco\aao+\bco\bao+1)] \exp(w\hat{a}\hat{b}). \end{equation} With $\lambda t=r\exp(i\phi)$, the constants $u,v$ and $w$ are $\tanh r$, $-\log\cosh r$ and $-\tanh r$ respectively. This factorized form of the evolution operator is used to obtain the state of the bipartite system at time $t$. If the initial state is $\vert\alpha\rangle\vert 0\rangle$, the state $\vert\chi\rangle$ of the coupled oscillators at time $t$ is \begin{equation} \vert\chi\rangle=\frac{\exp(-\frac{\vert\alpha\vert^2\tanh^2r}{2})}{\cosh r}\sum_{n=0}^\infty\frac{(-i\exp(i\phi)\tanh r)^n}{\sqrt{n!}} \hat{a}^{\dagger n}\vert\tilde\alpha\rangle\vert n\rangle. \end{equation} In the above expression, the state $\vert\tilde\alpha\rangle=\vert\alpha/\cosh r\rangle$
is a coherent state of amplitude $\alpha/\cosh r$. The state $\vert\chi\rangle$ is an entangled state of the two modes. The Fock states of the $b$-mode appearing in the expression for $\vert\chi\rangle$ are orthogonal to each other while the states $\hat{a}^{\dagger n}\vert\tilde\alpha\rangle$ of the $a$-mode are not. The orthogonality of the states of the $b$-mode renders it possible to make conditional measurements so that the state of the other mode is the PACS. In particular, if $b$-mode is in the one-photon state $\vert 1\rangle$, the state of the $a$-mode is the SPACS $\vert\tilde\alpha,1\rangle=\hat{a}^\dagger\vert\tilde\alpha\rangle$. More generally, if $b$-mode is measured to be the number state $\vert m\rangle$, then state of the $a$-mode is $\hat{a}^{\dagger m}\vert\tilde\alpha\rangle$ which is MPACS. If photon losses due to absorption and other dissipative mechanisms are absent or negligible and the interaction duration is sufficiently longer so that $r=\vert\lambda\vert t>>1$, then the amplitude $\tilde\alpha$ of the state of the $a$-mode becomes nearly zero as $\cosh r$ becomes large. In this limit, the state of the $a$-mode is very close to the number state $\vert m\rangle$.\\
A major difference between the states generated in the cavity-atom interaction and those generated in the coupled oscillators is worth mentioning. In the latter scheme, the state of the $a$-mode is precisely the SPACS of amplitude $\alpha/\cosh r$ if the $b$-mode is in its first excited state. In general, if the $b$-mode is detected to be in the Fock state $\vert m\rangle$, the state of the $a$-mode is the MPACS $\vert\tilde\alpha,m\rangle$. In the atom-cavity case, the state of the cavity has contributions from the PACS of all orders of amplitude $\alpha$. This superposition is never an ideal PACS.\\
The overlap between the SPACS $\vert\tilde\alpha,1\rangle$ generated in the process of downconversion and the expected SPACS $\vert\alpha,1\rangle$ is \begin{equation} \vert\langle\alpha,1\vert\tilde\alpha,1\rangle\vert^2=\left[\frac{1+\frac{\vert\alpha\vert^2} {\cosh^2r}}{1+\vert\alpha\vert^2} \right] \exp\left[-\vert\alpha\vert^2(1-\frac{1}{\cosh^2r})\right]. \end{equation} The overlap saturates at $\exp(-\vert\alpha\vert^2)/(1+\vert\alpha\vert^2)$ as $t\rightarrow\infty$. Hence, in this scheme too the overlap of the generated state with the SPACS of amplitude $\alpha$ falls with interaction duration. However, as noted previously, the states generated are indeed PACS of suitably scaled amplitude.\\
The coefficient in the expression for $\vert\chi\rangle$ is the probability amplitude for realizing the state $\vert\tilde\alpha,m\rangle\vert m\rangle$. Hence, the relevant
probability is \begin{equation} P_m=\vert\langle\chi\vert\hat{a}^{\dagger m}\vert\tilde\alpha\rangle\vert m\rangle\vert^2=\frac{\exp(-\vert\alpha\vert^2\tanh^2r)} {\cosh^2r}{\tanh^{2m}r}L_m\left(-\frac{\vert\alpha\vert^2}{\cosh^2 r}\right). \end{equation} Since different Fock states of the $b$-mode are orthogonal to each other, the probability $P_m$ is the probability of realizing the MPACS $\hat{a}^{\dagger m}\vert\tilde\alpha\rangle$. Though this probability decreases with increasing $m$,
there is a finite probability of detecting the states corresponding to higher values of $m$. In practical terms, this would mean that more experimental runs will be required. Nevertheless, the method of Zavatta {\it et al } can generate {\em ideal} MPACS.\\
\section{Summary}
States generated in the parametric downconversion process are {\em ideal} photon-added coherent states. The amplitude of the photon-added coherent state generated in the process is smaller in magnitude compared to the amplitude of the
initial seed coherent state. If the initial coherent state is of amplitude $\cosh(\lambda\tau)\alpha$,
the photon-added coherent state generated is of amplitude $\alpha$. This relation fixes the amplitude of the initial coherent state in terms of the interaction duration $(\tau)$, coupling constant ($\lambda$) and the required amplitude for the photon-added coherent state. The process is capable of generating ideal $m$-photon-added coherent states, though the probability of generation falls with increasing $m$. In contrast, the interaction between a two-level atom and a cavity field in a coherent state does not generate ideal photon-added coherent state of any amplitude. In the atom-cavity scheme, higher order processes are required to generate higher order photon-added coherent states. Typically, $m$-photon processes are necessary for generating $m$-photon-added coherent states.\\
{\bf Acknowledgement}\\
The author is grateful to Prof. G. S. Agarwal for useful discussions.
\begin{figure}\label{fig:one}
\end{figure}
\end{document} |
\begin{document}
\title{Arbitrarily accurate composite pulse sequences}
\author{Kenneth R. Brown$^\dagger$, Aram W. Harrow$^\dagger$,
and Isaac L. Chuang$^{\dagger*}$}
\affiliation{$^\dagger$Center for Bits and Atoms, MIT, Cambridge, MA 02139 \\ $^*$Department of Physics, MIT, Cambridge, MA 02139 }
\date{\today}
\begin{abstract} Systematic errors in quantum operations can be the dominating source of imperfection in achieving control over quantum systems. This problem, which has been well studied in nuclear magnetic resonance, can be addressed by replacing single operations with composite sequences of pulsed operations, which cause errors to cancel by symmetry. Remarkably, this can be achieved without knowledge of the amount of error $\epsilon$. Independent of the initial state of the system, current techniques allow the error to be reduced to $O(\epsilon^3)$. Here, we extend the composite pulse technique to cancel errors to $O(\epsilon^n)$, for arbitrary $n$. \end{abstract}
\pacs{}
\maketitle
Precise and complete control over closed quantum systems is a long-sought goal in atomic physics, molecular chemistry, and condensed matter research, with fundamental implications for metrology\cite{freqmet,Bennett98a} and computation\cite{mikeandike,Steane98a}. Achieving this goal will require careful compensation for errors of both random and systematic nature. And while recent advances in quantum error correction\cite{qec,Steane96a,Knill97a} allow all such errors to be removed in principle, active error correction requires expanding the size of the quantum system, and feedback measurements which may be unavailable. Furthermore, in many systems, errors may be dominated by those of systematic nature, rather than random errors, as when the classical control apparatus is miscalibrated or suffers from inhomogeneities over the spatial extent of the target quantum system.
Of course, systematic errors can be reduced simply by calibration, but that is often impractical, especially when controlling large systems, or when the required control error magnitude is smaller than that easily measurable. Interestingly, however, systematic errors in controlling quantum systems can be compensated without specific knowledge of the magnitude of the error. This fact is lore\cite{Freeman-book} in the art of NMR, and is achieved using the method of {\em composite pulses}, in which a single imperfect pulse with fractional error $\epsilon$ is replaced with a sequence of pulses, which reduces the error to $O(\epsilon^n)$.
Composite pulse sequences have been constructed to correct for a wide variety of systematic errors \cite{Levitt79,Tycko83a,Freeman-book}. These include pulse amplitude, phase, and frequency errors and can be applied to any system with sufficient control. As system control increases, new uses for composite pulses emerge.
A remarkable example is the recent teleportation of an atomic state in ion traps \cite{Reibe04,Barrett04}. Barrett {\it et al.} use a composite pulse for individual addressing, while Reibe {\it et al.} use a composite pulse to perform two-qubit operations.
In the context of spectroscopy, the goal is often to maximize the measurable signal from a system which starts in a specific state. Thus, while composite sequences have been developed\cite{Levitt83} which can reduce errors to $O(\epsilon^n)$ for arbitrary $n$, these sequences are not general and do not apply, for example, to quantum computation, where the initial state is arbitrary, and multiple operations must be cascaded to obtain desired multi-qubit transformations.
Only a few composite pulse sequences are known which are {\em fully compensating},\cite{Levitt86a,Wimperis:94} meaning that they work on any initial state and can replace a single pulse without further modification of other pulses. As has been theoretically discussed\cite{Cummins00a,Jones02,McHugh:04} and experimentally demonstrated in ion traps \cite{Gulde:03,Reibe04,Barrett04} and Josephson junctions \cite{Collin04}, these sequences can be valuable for
precise single and multiple-qubit control using gate voltages or laser excitation.
Previously, the best fully compensating composite pulse sequence known\cite{Wimperis:94,Cummins00a,Jones02,McHugh:04} could only correct errors to $O(\epsilon^3)$\endnote{ In Ref. \cite{Cummins00a,Jones02,McHugh:04}, the distance measure used is one minus the fidelity, $1-\| V^\dagger U\|$ (``the infidelity'')
where $\| A\|$ is the norm of $A$. We use instead the trace distance,
$\| V-U\|$, following the NMR community. Thus, our composite pulses which are $n$th order in trace distance are $2n$th order in infidelity.}. Here, we present a new, and systematic technique for creating composite pulse sequences to correct errors to $O(\epsilon^n)$, for arbitrary $n$. The technique presented is very general and can be used to correct a wide variety of systematic errors. Below, our technique is illustrated for the specific case of systematic amplitude errors, using two approaches. Also discussed is the number of pulses required as a function of $n$.
The problem of systematic amplitude errors is modeled by representing single qubit rotations as
\begin{equation}
\R{\phi}{\theta} = \exp \left[ -i \frac{\theta}{2} \sigma_\phi \right] \,, \end{equation}
where $\theta$ is the desired rotation angle about the axis
that makes the angle $\phi$ with the $\hat{x}$-axis and lies in the
$\hat{x}-\hat{y}$ plane, $\sigma_\phi=\cos (\phi) X + \sin (\phi) Y$, and $X$ and $Y$ are Pauli operators. $\R{\phi}{\theta}$ is the ideal operation, and due to errors, the actual operation is, instead, $\M{\phi}{\theta} = \R{\phi}{\theta(1+\epsilon)}$, where the angle of rotation differs from the desired $\theta$ by the factor $1+\epsilon$. Note that $\phi$ and $\theta$ may be specified arbitrarily, but the error $\epsilon$ is fixed for all operations, and unknown.
\noindent {\bf Two methods for constructing composite pulses.} A composite pulse sequence $\Rn{n}{\phi}{\theta}$ is a sequence of operations $\{M_\phi(\theta)\}$ such that $\Rn{n}{\phi}{\theta} = \R{\phi}{\theta} + O(\epsilon^{n+1})$, for unknown error $\epsilon$. To construct $\Rn{n}{\phi}{\theta}$, we begin with two simple observations: first, $\R{\phi}{-\theta\epsilon}\M{\phi}{\theta}=\R{\phi}{\theta}$ and second, $\M{\phi}{2k\pi}=\pm\R{\phi}{2k\pi\epsilon}$ when $k$ is an integer. A composite pulse sequence can thus be obtained by finding ways to approximate $\R{\phi}{-\theta\epsilon}$ by a product of operators $\R{\phi_l}{2k_l\pi\epsilon}$. We obtain this using two approaches.
The first approach we call the Trotter-Suzuki (TS) method. Suzuki has developed a set of Trotter formulas showing that, given a Hamiltonian $B$ and a series of Hamiltonians $\{A_l\}$ such that $B=\sum{A_l}$, there exists a set of real numbers $\{p_{jn}\}$ such that \begin{equation}
\exp \left(-i B t\right)=
\prod_{j,l} \exp \left(-i p_{jn }A_l t \right)+O(t^{n+1}) \,, \label{TS} \end{equation} and $\sum_j p_{jn}=1$ \cite{Suzuki:92}. Without loss of generality, we may limit ourselves to expansions where the $p_{jn}$ are rational numbers, and assume the goal is to approximate $\R{0}{-\theta\epsilon}$. Using Eq.~(\ref{TS}), we set $t=\epsilon$ and $B=-(\theta/2) X$. Then we choose $A_1=A_3= m \pi (X \cos \phi +Y\sin \phi)$ and $A_2=2 m \pi (X \cos \phi - Y \sin \phi)$ where $\phi$ and $m$ fulfill the conditions that $ 4 m \pi \cos \phi=\theta/2$ ({\em i.e.,} $A_1+A_2+A_3=B$) and $q_{jn} = p_{jn}m$ is an integer. This yields an $n$th order correction sequence
\begin{eqnarray} \nonumber F_n&=&\prod_{j} \M{\phi}{2\pi q_{jn}}\M{-\phi}{4\pi
q_{jn}}\M{\phi}{2\pi q_{jn}} \\
&=& \R{0}{-\theta\epsilon}+O(\epsilon^{n+1}) = \Rn{n}{0}{-\theta\epsilon} \end{eqnarray}
and the associated $n$th order composite pulse sequence $F_n\M{0}{\theta} = \Rn{n}{0}{-\theta\epsilon} \R{0}{\theta\epsilon}\R{0}{\theta} = \Rn{n}{0}{\theta}$, thus giving a composite pulse sequence of arbitrary accuracy.
The second approach we refer to as the Solovay-Kitaev (SK) method, as it uses elements of the proof of the Solovay-Kitaev theorem \cite{KSV}.
First, note that rotations $U_{k}(A) = I+A\epsilon^k+O(\epsilon^{k+1})$ can be constructed for arbitrary $2 \times 2$ Hermitian matrices $A$, and $k\geq 1$, recursively. This is done using an observation (from \cite{KSV}) relating the commutator $[A,B] = AB-BA$ to a sequence of operations, $\exp (-iA \epsilon^l) \exp (-iB \epsilon^m) \exp (iA \epsilon^l) \exp (iB \epsilon^m)=\exp ([A,B] \epsilon^{l+m})+O(\epsilon^{l+m+1})$. Thus to generate $U_{k}(A)$ it suffices to generate $U_{\lceil{k/2}\rceil}(B)$ and $U_{\lfloor{k/2}\rfloor}(C)$ such that $[B,C]=A$ (choices of integers other than $\ceil{k/2}$ and $\floor{k/2}$ which sum to $k$ are also fine, but less optimal).
Next, we inductively construct a composite pulse sequence $F_n$ for $\R{0}{\theta}$. Note that the first order correction sequence can be written as $F_1=\M{\phi}{2\pi}\M{-\phi}{2\pi} = \R{0}{-\theta\epsilon}+O(\epsilon^2)$ by selecting $4\pi\cos(\phi)=\theta$. Assume we have $F_n=\R{0}{-\theta\epsilon}-i A_{n+1}\epsilon^{n+1}+O(e^{n+2})$. We can then construct a sequence to correct for the next order, using $F_{n+1}=U_{n+1}(A_{n+1}) F_n$, where $U_{n+1}(A_{n+1})$ is constructed as above. Iteratively applying this method for $k=1,\ldots,n$ yields an $n$th order composite pulse sequence, $F_n\M{0}{\theta} = \Rn{n}{0}{\theta}$, for any $n$. This method, which appears to be unrelated to previous composite pulse techniques\cite{Levitt83,Freeman-book}, gives an efficient algorithm to calculate sequences for specific $\theta$ and $\phi$ but not necessarily a short analytical description of the sequence. Furthermore, the Solovay-Kitaev technique relies on general properties of Hamiltonians and can be applied without modification to other systematic error models, {\it e.g.}, frequency errors.
\noindent{\bf Examples.} The TS and SK techniques described above are general and apply to a wide variety of errors; explicit application of the techniques to generate $\Rn{n}{0}{\theta}$ sequences for specific $n$ can take advantage of symmetry arguments, composition of techniques, and relax some of our assumptions to minimize both the residual error and the sequence length.
First, we explicitly write out the TS composite pulses and connect them to the well-known pulse sequences of Wimperis \cite{Wimperis:94}. We choose to use the TS formulas that are symmetric under reversal of pulses, {\em i.e.}, a palindrome. These formulas remove all even-ordered errors by symmetry, and thus yield only even-order composite pulse sequences. For convenience, we introduce the notation $S_1(\phi_1,\phi_2,m)=\M{\phi_1}{m\pi}\M{\phi_2}{2m\pi}\M{\phi_1}{m\pi} $ and $S_n(\phi_1,\phi_2,m)=S_{n-1}(\phi_1,\phi_2,m)^{4^{n-1}} \times S_{n-1}(\phi_1,\phi_2,-2m)S_{n-1}(\phi_1,\phi_2,m)^{4^{n-1}}$. We can now define a series of $n$th order composite pulses P$n$ as
\begin{eqnarray} \mbox{P}0&=&\M{0}{\theta}\\ \mbox{P}2&=&\M{\phi_1}{2\pi}\M{-\phi_1}{4\pi}\M{\phi_1}{2\pi}\mbox{P}0\\ \mbox{P}{2j}&=&S_j(\phi_j,-\phi_j,2)\mbox{P}0 \end{eqnarray}
where $\phi_j=\cos^{-1}\left(-\frac{\theta}{8\pi f_j}\right)$ and $f_j=(2^{(2j-1)}-2)f_{j-1}$ with $f_1=1$. $\mbox{P}2$ is exactly the passband sequence PB1 described by Wimperis \cite{Wimperis:94}. Fig.~\ref{fig:signal} compares the performance of these high-order passband pulse sequences.
Wimperis also proposes a similar broadband sequence, BB1=$S_1(\phi_{B1},3\phi_{B1},1)\mbox{P}0$ where $\phi_{B1}=\cos^{-1} \left(-\frac{\theta}{4\pi}\right)$. The broadband sequence corrects over a wider range of $\epsilon$ by minimizing the first order commutator and thus the leading order errors. Furthermore, although BB1 and PB1 appear different when written as imperfect rotations, a transformation to true rotations shows that they have the same form,
\begin{eqnarray}
\mbox{PB}1&=&\M{\phi_1}{2\pi}\M{-\phi_1}{4\pi}\M{\phi_1}{2\pi}P0\nonumber \\
&=&\R{\phi_1}{2\pi\epsilon}\R{-\phi_1}{4\pi\epsilon}\R{\phi_1}{2\pi\epsilon}\mbox{P}0 \nonumber\\
\mbox{BB}1&=&\M{\phi_{B1}}{\pi}\M{3\phi_{B1}}{2\pi}\M{\phi_{B1}}{\pi}\mbox{P}0 \\
&=&\R{\phi_{B1}}{\pi\epsilon}\R{-\phi_{B1}}{2\pi\epsilon}\R{\phi_{B1}}{\pi\epsilon}\mbox{P}0. \end{eqnarray}
This ``toggled'' frame suggests a way to create higher-order broadband pulses. One simply takes a higher-order passband sequence and replaces each element $S_1(\phi_j,-\phi_j,m)$ with $S_1(\phi_{Bj},-\phi_{Bj}+4\phi_{Bj}(m/2\mod 2),m/2)$ where $\phi_{Bj}$ satisfies the condition $\cos (\phi_{Bj})=2\cos (\phi_j)$. Applying this to P$n$ creates a family of broadband composite pulses, B$n$.
Similar extensions allow creation of another kind of composite pulse (useful, for example, in magnetic resonance imaging), which {\em increase} error so as to perform the desired operation for only a small window of error. Such ``narrowband'' pulse sequences N$n$ may be obtained starting with a passband sequence, P$n$, and dividing the angles of the corrective pulses by $2$. These higher-order narrowband pulses may be compared with the Wimperis sequence NB$1$ \cite{Wimperis:94}, as shown in \fig{signal}.
\begin{figure}
\caption{ Comparison of the narrowband and broadband composite pulse sequences generated by the TS method. The Wimperis BB1, PB1, and NB1 sequences are included in this family, and are equivalent to B2, P2, and N2. }
\label{fig:signal}
\end{figure}
The SK method yields a third set of $n$th order composite pulses, SK$n$, and for concreteness, we present an explicit formulation of this method. It is convenient to let $U_{nX}(a)=I-ia^n\frac{X}{2}\epsilon^n +O(\epsilon^{n+1})$, such that one can then generate $U_{nZ}(a)=\M{90}{-\pi/2}U_{nX}(a)\M{90}{\pi/2}$ and $U_{nY}(a)=\M{45}{\pi/2}U_{nX}(a)\M{45}{-\pi/2}$ \endnote{The optimal way to generate $U_{nY}$ is to shift the phases, $\phi$, of the underlying $\M{\phi}{\theta}$ that generate $U_{nx}$ by 90 degrees.}. Using the first-order rotations \begin{equation}
U_{1X}(a)=\M{\phi}{2\pi\ceil{\frac{a}{4\pi}}}
\M{-\phi}{2\pi \ceil{\frac{a}{4\pi}}} \,, \end{equation} where $\phi=\cos^{-1} (a/(4\pi\ceil{\frac{a}{4\pi}}))$, as described above, we may recursively construct $U_{nX}(a) = U_{\floor{n/2}Y}(a)U_{\ceil{n/2}Z}(a)U_{\floor{n/2}Y}(-a)U_{\ceil{n/2}Z}(-a)$, for any $n>1$ and any $a$.
With these definitions, the first order SK composite pulse for $\Rn{n}{0}{\theta}$ is simply \begin{eqnarray}
\mbox{SK}1=U_{1X}(\theta)\M{0}{\theta}
=\R{0}{\theta}-i\frac{A_2}{2}\epsilon^2 +O(\epsilon^3) \,. \end{eqnarray} From the $2\times 2$ matrix $A_2$, we can then calculate the norm
$\|A_2\|$ and the planar rotation $R_{A_2}$ that performs $R_{A_2}
(-A_2) R_{A_2}^{-1}=\|A_2\| X$. The second order SK composite pulse is then \begin{eqnarray}
\mbox{SK}2 &=& M_{A_2}^{-1} U_{2X}(\|A_2\|^{1/2}) M_{A_2} {\rm SK}1 \\
&=& \R{0}{\theta}-i\frac{A_3}{2}\epsilon^3+O(\epsilon^4) \end{eqnarray} where $M_{A_2}$ is the imperfect rotation corresponding to the perfect rotation $R_{A_2}$.
The $n$th order SK composite pulse family is thus \begin{eqnarray}
\mbox{SK}n &=& M_{A_n}^{-1} U_{nX}(\|A_n\|^{1/n}) M_{A_n}
\, {\rm SK}(n-1) \\
&=&\R{0}{\theta}-i\frac{A_{n+1}}{2}\epsilon^{n+1}+O(\epsilon^{n+2}) \,. \end{eqnarray}
A nice feature of the SK method is that when given a composite pulse of order $n$ described by any method, one can compose a pulse of order $n+1$. The ``pure'' SK method SK$n$ is outperformed in terms of both error reduction and pulse number by the TS method B$n$ for $n\leq 4$. Therefore, we apply the SK method for orders $n>4$ using B$4$ as our base composite pulse. We label these pulses SB$n$.
\noindent {\bf Performance and efficiency.} Two important issues with composite pulses are the actual amount of error reduction as a function of pulse error, and the time required to achieve a desired amount of error reduction. These performance metrics are shown in \fig{effective-error}, comparing the SK, broadband, and passband composite pulses for varying error $\epsilon$, and $\phi=0$, and using as the composite pulse error
$E=\|\R{\phi}{\theta}-\Rn{n}{\phi}{\theta}\|$. We find that for practical values of error reduction, $n<30$, the number of $\pi$ pulses required to reduce error to $O(\epsilon^n)$ grows as $\sim n^{3.09}$, which is close to the analytically derived lower bound of $\sim n^3$~\cite{long-paper}. In contrast, the TS sequence B$n$ requires $O(\exp (n^2))$ pulses.
\begin{figure}
\caption{ Composite pulse error, $E$, as a function of base error, $\epsilon$ for a variety of composite pulse sequences. P$n$, B$n$, SK$n$ and SB$n$ are the $n$th order passband, broadband, SK, and combined B4-SK sequences, respectively. The number in the brackets refers to the number of imperfect $2\pi$ rotations in the correction sequence. Note how pulses of the same order (such as P6, B6, SK6, SB6) have the same slope (asymptotic scaling) for low values of $\epsilon$, but can have widely varying performance when $\epsilon$ is large.
The inset plots the scaling of this sequence length with order $n$ for SK$n$ (SB$n$ is very similar) for $n\leq 30$ and compares it with the upper bound obtained with numerical methods.}
\label{fig:effective-error}
\label{fig:logperf}
\end{figure}
For a wide range of base errors $\epsilon$, the TS formulation outperforms the SK method in achieving a low composite pulse error, $E$. The recursive nature of the TS methods builds off elements that remove lower order errors, resulting in a rapid increase of pulse number and a monotonic decrease in effective error at every order for any value of the base error. However, the SK approach is superior to the TS method for applications requiring extremely high precision, $E\leq 10^{-12}$, from relatively precise controls, $\epsilon<10^{-2}$.
The SK and TS pulse sequences presented here are conceptually simple but may not be optimal. Integrating ideas from both methods, we can develop new families of composite pulses. As an example, the SK method relies on cancellation of error order by order by building up sequences of $2\pi$ pulses. However, there is no reason that the basic unit should be a single pulse. Instead, one can build a sequence from TS (B2) style pulse triplets, $G(\phi_1)=S_1(\phi_1,3\phi_1,1)$. By using an additional symmetry, namely that $\tr\left( YG(-\phi_1)G(\phi_1)\right)=0$, the leading order error is guaranteed to be proportional to $X$ at the cost of doubling the pulse sequence. The resulting pulses are of length $\exp(n)$ (compared to $\exp(n^2)$ for TS), broadband compared to SK sequences, and described in detail in \cite{long-paper}.
\noindent{\bf Conclusions.} We have presented a set of tools that allows one to generate arbitrarily accurate composite pulse sequences for systematic, but unknown, error. As an example, we have constructed explicit composite pulse sequences for errors in rotation angle. These can be constructed with $O(n^3)$ pulses, for $n\lesssim 30$. For high-precision applications such as quantum computation, these pulses allow one to perform accurate operations even with large errors. Practically, the B4 and B2=BB1 pulse sequences seem most useful, depending on the magnitude of error.
While we have focused on composite pulse sequences for rotation errors, we emphasize that these methods also apply to correcting systematic errors in control phase and frequency\cite{long-paper}. For example, a frequency error can be represented for an expected rotation $R_0(\theta)$ as $M^\prime_0(\theta)=\exp\left(-i(\theta/2 X +|\theta/2|\delta Z)\right).$ Note that $M^\prime_0(\theta/2)M^\prime_0(-\theta)M^\prime_0(\theta/2)$ yields to first order in $\delta$ the phase shift, $U_{1Z}(2\theta\delta)$. Starting with any fully compensating composite pulse sequence that corrects frequency errors to order $\delta^2$, e.g. CORPSE\cite{Cummins00a}, and the basic operation $U_{1Z}(2\theta\delta)$, one can then apply the SK technique to create a pulse sequence of $O(\delta^n)$\cite{long-paper}.
Furthermore, the TS and SK approaches can be extended to any set of operations that has a subgroup isomorphic to rotations of a spin. For example, Jones has used this isomorphism to create reliable two qubit gates based on an Ising interaction to accuracy $O(\epsilon^3)$ \cite{Jones02}. Similarly, the techniques outlined here can immediately be applied to gain arbitrary accuracy multi-qubit gates. Interestingly, the TS formula can be directly applied to any set of operations, if the operations suffer from proportional systematic timing errors. Therefore, this control method could also be applied to classical systems.
\begin{acknowledgments}
We are grateful to A. Childs and R. Cleve for stimulating discussions. AWH was supported by the NSA and ARDA under ARO contract number DAAD19-01-1-06.
\end{acknowledgments}
\end{document} |
\begin{document}
\title{A Proof Checking View of Parameterized Complexity} \author{Luke Mathieson} \date{} \maketitle
\begin{abstract} The PCP Theorem is one of the most stunning results in computational complexity theory. A culmination of a series of results regarding proof checking, it exposes some deep structure of computational problems. As a surprising side-effect, it also gives strong non-approximability results. In this paper we initiate the study of proof checking within the scope of Parameterized Complexity. In particular we adapt and extend the \pcp{n\log\log n}{n\log\log n} result of Feige \emph{et al.} to several parameterized classes, and discuss some corollaries. \end{abstract}
\section{Introduction}
The straightforward view of most computational complexity classes is one of what problems can be solved given certain computing power and resource restrictions. Alongside this is the \emph{verification} view of complexity, where we ask not what can be computed within a given set of restrictions, but whether a given solution can be verified under certain restrictions. The most famous of these is of course the equivalent definitions of ${\normalfont NP}{}$ as the class of all problems that can be solved in nondeterministic polynomial time or verified in deterministic polynomial time. This definition may be thought of as a \emph{proof system}, where a Turing Machine (the verifier) has access to the input and a proof, and in polynomial time checks that the proof is correct.
With access to a random bit string, it is possible to reduce the number of bits that the verifier reads from the proof. In fact, in the case of {\normalfont NP}{}, this is quite a surprising reduction; with only a logarithmic number of random bits, we need only a \emph{constant} number of bits from the proof to verify the proof. The trade-off being that if the proof is false, we may incorrectly accept it, but with probability at most one half.
Such proof systems have been well studied for traditional complexity classes such as {\normalfont NP}{}, {\normalfont PSPACE}{} and {\normalfont NEXP}{}. In this paper we begin to look at parameterized complexity through the same lens. In particular we demonstrate a relatively simple but non-trivial proof system for $\W[1]$. We also extend this to $\W[2]$, $\M[1]$, the bounded classes $\EW[1]$, $\EXPW[1]$ \& $\SW[1]$ and the classes of the $A$-hierarchy up to $\AW[*]$.
\subsection{Useful History}
This idea of classifying languages by membership proofs began to attract serious attention in the early to mid eighties, with Goldwasser, Micali \& Rackoff's~\cite{GoldwasserMR85} introduction of the idea of \emph{interactive proofs} (later published in a more complete form~\cite{GoldwasserMR89}) and Babai's~\cite{Babai85, BabaiM88} \emph{Arthur-Merlin games}. Both are probabilistic approaches to proof verification.
Over time these classes were linked back to traditionally defined complexity classes. The class of problems with interactive proofs is precisely {\normalfont PSPACE}{}~\cite{Shamir90}. The class of problems with Arthur-Merlin style verifiers that use a polynomial number of rounds turns out to be the same as the class of problems with interactive proofs~\cite{GoldwasserS86}. If multiple, non-communicating provers (defined in~\cite{Ben-OrGKW88}) are allowed we obtain {\normalfont NEXP}{}~\cite{BabaiFL91, BabaiFL92} (Ben-Or \emph{et al.}~\cite{Ben-OrGKW88} also showed that for any number of provers, there was an equivalent protocol with at most two provers).
This work culminated in the development of \emph{probabilistically checkable proofs}~\cite{AroraS92} and what is now known as the PCP Theorem:
\begin{theorem}[The PCP Theorem~\cite{AroraLMSS98, AroraS98}]\label{thm:pcp} ${\normalfont NP}{}$ is the class of all languages that can be verified by a polynomial-time probabilistic Turing Machine (the verifier) that can access at most $O(\log n)$ random bits and at most $O(1)$ bits of an oracle string (the proof) such that any input that is in the language is accepted with probability $1$ and any input that is not in the language is accepted with probability at most $\frac{1}{2}$. \end{theorem}
Dinur~\cite{Dinur07} gives a more accessible proof, via constraint satisfaction.
Far from being a theoretical curiosity, PCPs have a number of applications across computer assisted mathematics~\cite{BabaiFLS91} and cryptology~\cite{GoldwasserMR89} but possibly most interestingly PCP results have implications for approximation algorithms. It is PCP results that led to inapproximability results for \textsc{Max-Word}~\cite{Condon91}, \textsc{Max-3SAT}~\cite{AroraLMSS98}, \textsc{Max-Clique}~\cite{FeigeGLSS96} and in general that if $\text{{\normalfont P}{}} \neq \text{{\normalfont NP}{}}$ then no MAXSNP-hard problem is in PTAS.
\section{Parameterized Complexity Theory}
A \emph{parameterized problem} is a decision problem augmented with a special input, the \emph{parameter}. This may be more formally viewed as a language over some alphabet with a \emph{parameterization} that provides a positive integer parameter for each instance.
\begin{definition}[Parameterized Problem] A parameterized problem over alphabet $\Sigma$ is a pair $(\Pi,\kappa)$ where $\Pi \subseteq \Sigma^{*}$ and $\kappa:\Sigma^{*}\rightarrow\mathbb{N}$ is a parameterization. \end{definition}
Typically given an instance, the parameterization (as a function) is implied and we treat inputs as being accompanied by an integer, usually denoted $k$.
Parameterization allows a more relaxed notion of tractability:
\begin{definition}[Fixed-parameter Tractability] A parameterized problem $(\Pi,\kappa)$ is fixed-parameter tractable if there is an algorithm $\mathcal{A}$ and a computable function $f$ such that for all inputs $(x,\kappa(x))$ the algorithm $\mathcal{A}$ decides if $x\in\Pi$ in time bounded by $f(\kappa(x))\cdot{}\Card{x}^{O(1)}$. The class of all fixed-parameter tractable problems is {\normalfont FPT}{}. \end{definition}
This then gives a natural reduction schema:
\begin{definition}[FPT Reductions] Given two parameterized problems $(\Pi_{1},\kappa_{1})$ over $\Sigma_{1}$ and $(\Pi_{2},\kappa_{2})$ over $\Sigma_{2}$, an fpt reduction from $(\Pi_{1},\kappa_{1})$ to $(\Pi_{2},\kappa_{2})$ is a mapping $R:\Sigma_{1}^{*}\rightarrow\Sigma_{2}^{*}$ such that for all $x\in\Sigma_{1}^{*}$: \begin{enumerate} \item $x \in \Pi_{1} \Leftrightarrow R(x) \in \Pi_{2}$. \item $R$ can be computed in time bounded by $f(\kappa(x))\cdot{}\Card{x}^{O(1)}$. \item There is a computable function $g$ such that $\kappa_{2}(R(x)) \leq g(\kappa_{1}(x))$. \end{enumerate} \end{definition}
The last condition results in a very rich intractability theory for parameterized complexity. We will give details of the classes relevant for this paper, but a much fuller treatment can be found in the monographs of Downey \& Fellows~\cite{DowneyFellows99} and Flum \& Grohe~\cite{FlumGrohe06}.
We first define a hierarchy of propositional logic formul\ae{}. Let $\{a_{i}\}$ be a set of boolean literals, then we define the following formula classes: \begin{eqnarray*} \Gamma_{0,d} := \SB a_{1} \wedge \ldots \wedge a_{c} \;{|}\; c \leq d \SE\\ \Delta_{0,d} := \SB a_{1} \vee \ldots \vee a_{c} \;{|}\; c \leq d \SE \end{eqnarray*}
These can then be recursively stacked to give the classes $\Gamma_{t,d}$ and $\Delta_{t,d}$:
\begin{eqnarray*} \Gamma_{t,d} := \SB \bigwedge_{i \in I} \phi_{i} \;{|}\; \phi_{i} \in \Delta_{t-1,d} \SE\\ \Delta_{t,d} := \SB \bigvee_{i \in I} \phi_{i} \;{|}\; \phi_{i} \in \Gamma_{t-1,d} \SE \end{eqnarray*}
In addition we denote by $\Phi^{+}$ the subclass of a class of propositional formul\ae{} $\Phi$ where no literals are negated and by $\Phi^{-}$ the subclass of $\Phi$ where all literals are negated. Given a propositional formula over a variable set $X$ a truth assignment that sets $k$ variables of $X$ to \textsc{TRUE} is called a \emph{weight $k$ assignment}\footnote{This use of ``weight'' is standard in the parameterized complexity literature, but may conflict with definitions from other areas. In this paper, when we refer to the weight of an assignment, this is the meaning we intend.} or an assignment of weight $k$.
The fundamental problem for many parameterized intractability classes is the \textsc{Weighted Satisfiability} problem:
\pcproblem{WSAT($\Phi$)}{A boolean formula $\phi \in \Phi$ and a positive integer $k$.}{$k$.}{Is there a satisfying assignment for $\phi$ of weight $k$?}
We can then define the $W$-hierarchy:
\[\W[t] = \left[\WSAT{$\Gamma_{t,d}$}\right]^{{\normalfont FPT}{}}\]
where $t+d > 2$ and $[X]^{{\normalfont FPT}{}}$ denotes the closure of a parameterized problem $X$ under fpt reductions.
Even though we do not have quite the latitude to reduce the structure of the formula as in classical complexity (where everything in {\normalfont NP}{} can be reduced to a formula in 3-CNF), we can impose slightly more restriction on the formul\ae{}. In particular:
\[\W[1] = \left[\WSAT{$\Gamma_{1,2}^{-}$}\right]^{{\normalfont FPT}{}}\]
and
\[\W[2] = \left[\WSAT{$\Gamma_{2,1}^{+}$}\right]^{{\normalfont FPT}{}}\]
So for every problem in $\W[1]$ we can convert any instance into an instance of the \textsc{Weighted Satisfiability} problem where the formula is in 2-CNF and all literals are negated and for every problem in $\W[2]$ we can convert any instance into a CNF formula (of unbounded clause length) where all literals are positive (similar statements can be made for the other classes in the $W$-hierarchy, \emph{q.v.}~\cite{FlumGrohe06}).
At the other end of the parameterized intractability scale is the direct definitional analog of {\normalfont NP}{}:
\begin{definition}[{\normalfont\emph{para}-{\normalfont NP}{}}{}] A parameterized problem $(\Pi, \kappa)$ is in {\normalfont\emph{para}-{\normalfont NP}{}}{} if there is a computable function $f$ and nondeterministic Turing Machine that on input $(x,\kappa(x))$ decides $x\in\Pi$ in time bounded by $f(\kappa(x))\cdot\Card{x}^{O(1)}$. \end{definition}
It turns out however that {\normalfont\emph{para}-{\normalfont NP}{}}{}-complete problems seem much harder than $\W[1]$-complete problems and that $\W[1]$ provides a more natural analog of {\normalfont NP}{}\footnote{Very loosely speaking, barring a collapse, {\normalfont\emph{para}-{\normalfont NP}{}}{}-complete problems correspond to problems with time complexity $(\kappa(x))^{\Card{x}}$ or worse, whereas $\W[1]$-complete problems have complexity $\Card{x}^{\kappa(x)}$ (this bound is more formal than the given {\normalfont\emph{para}-{\normalfont NP}{}}{} one as the $W$-hierarchy is contained in {\normalfont XP}{}~\cite{FlumGrohe06}.)}.
The class {\normalfont XP}{} provides an alternate perspective on parameterized intractability:
\begin{definition} A parameterized problem $(\Pi, \kappa)$ is in {\normalfont XP}{} if there exists a computable function $f$ such that every instance $(x,\kappa(x))$ is decidable in time \[ \Card{x}^{f(\kappa(x))}+f(\kappa(x)) \] \end{definition}
The entirety of the $W$-hierarchy is contained in {\normalfont\emph{para}-{\normalfont NP}{}}{} $\cap$ {\normalfont XP}{}.
{\normalfont XP}{} in a certain sense plays a role similar to a parameterized version of EXPTIME, and as such contains a hierarchy that bears a relationship to the polynomial hierarchy and {\normalfont PSPACE}{}, the $A$-hierarchy.
Similar to the polynomial hierarchy, the $A$-hierarchy can be characterized by alternating quantified satisfiability problems. In this case of course, there is a parameterized flavour:
\pcproblem{AWSAT$_{l}$($\Phi$)}{A boolean propositional formula $\phi \in \Phi$, with the variable set $X$ partitioned into $l$ sets $X_{1}, \ldots, X_{l}$ and positive integers $k_{1}, \ldots, k_{l}$.}{$k = \sum_{i\in [l]}k_{i}$.}{Is there a $k_{1}$-sized subset of $X_{1}$ such that for all $k_{2}$-sized sets of $X_{2}$ there exists a $k_{3}$-sized subset of $X_{3}$... (\&c. for $l$ alternations) such that setting those variables to true satisfies $\phi$?}
If we employ the notation $\forall_{k}$ and $\exists_{k}$ to denote ``for all $k$-sized subsets'' and ``there exists a $k$-sized subset'' respectively, we can reframe the slightly awkward definition of \textsc{AWSAT$_{l}$} by asking if \[ \exists_{k_{1}}X_{1}\forall_{k_{2}}X_{2}\ldots Q_{k_{l}}X_{l}\phi \] is true, where $Q\in\{\forall,\exists\}$. If we remove the bound on $l$, then we obtain the \textsc{AWSAT} problem, which has the same essential structure. When talking about this family of problems informally, we will omit the subscript and refer to them generally as \textsc{AWSAT} problems. These classes then provide the basis for the $A$-hierarchy: \[ \A[l] = \left\{\begin{array}{ll} [\AWSATL{l}{$\Gamma_{1,2}^{-}$}]^{{\normalfont FPT}{}} & \text{for $l$ odd}\\ {}[\AWSATL{l}{$\Delta_{1,2}^{+}$}]^{{\normalfont FPT}{}}& \text{for $l$ even}\\ \end{array}\right. \]
One interesting superclass of the $A$-hierarchy is $\AW[*]$: \[ \AW[*] = [\AWSAT{$\Gamma_{1,2}^{-}$}]^{{\normalfont FPT}{}} \] Thus $\AW[*]$ is not entirely dissimilar to {\normalfont PSPACE}{}\footnote{Or something between {\normalfont PSPACE}{} and PH, though this is also imprecise as natural parameterized versions of some {\normalfont PSPACE}{}-complete problems are $\AW[*]$-complete. Conversely \AWSAT{PROP}, the parameterized alternating satisfiability problem for the class of all propositional formul\ae{}, is $\AW[SAT]$-complete and $\AW[*] \subseteq \AW[SAT]$.}, however in the parameterized setting, there is no single analog of {\normalfont PSPACE}{}, with its role being spread between $\AW[*]$, $\AW[SAT]$, $\AW[P]$, XL and para-{\normalfont PSPACE}{}~\cite{FlumGrohe06}.
\subsection{Bounded Parameterized Complexity Classes}
In the definition of {\normalfont FPT}{} the function $f$ that gives the dependence on the parameter is only restricted to being computable. We can define analogs of {\normalfont FPT}{} and its intractability hierarchies with stronger restrictions on $f$ that still retain very similar structures.
\begin{definition}[{\normalfont EXPT}{}] A parameterized problem $(\Pi,\kappa)$ is in {\normalfont EXPT}{} if there is an algorithm $\mathcal{A}$ such that for all inputs $(x,\kappa(x))$ the algorithm $\mathcal{A}$ decides if $x\in\Pi$ in time bounded by $2^{\kappa(x)^{O(1)}}\cdot{}\Card{x}^{O(1)}$. \end{definition}
\begin{definition}[{\normalfont EPT}{}] A parameterized problem $(\Pi,\kappa)$ is in {\normalfont EPT}{} if there is an algorithm $\mathcal{A}$ such that for all inputs $(x,\kappa(x))$ the algorithm $\mathcal{A}$ decides if $x\in\Pi$ in time bounded by $2^{O(\kappa(x))}\cdot{}\Card{x}^{O(1)}$. \end{definition}
\begin{definition}[{\normalfont SUBEPT}{}] A parameterized problem $(\Pi,\kappa)$ is in {\normalfont SUBEPT}{} if there is an algorithm $\mathcal{A}$ such that for all inputs $(x,\kappa(x))$ the algorithm $\mathcal{A}$ decides if $x\in\Pi$ in time bounded by\footnotemark\ $2^{o^{eff}(\kappa(x))}\cdot{}\Card{x}^{O(1)}$. \end{definition}
\footnotetext{$f\in o^{eff}(g)$ if there exists a computable, nondecreasing, unbounded function $h:\mathbb{N}\rightarrow\mathbb{N}$ such that $f(k) \leq \frac{g(k)}{h(k)}$.}
Typically the parameterizations of problems in {\normalfont SUBEPT}{} are of a different character to normal parameterizations. In the subexponential theory the parameterizations play the role of ``size measures'' for the problem, rather than being independent of the size of the problem. Such measures may be for example the number of variables in a logic sentence or the number of edges and vertices in a graph (this is also in contrast to the length of the \emph{encoding} of the problem).
These classes are accompanied by analogs of fpt reductions. These reduction schemes have slight technical differences to fpt reductions (\emph{q.v.} \cite{Weyer04}, \cite{FlumGroheWeyer06} and~\cite{ImpagliazzoPaturiZane01}, or \cite{FlumGrohe06} for a collected survey of these and other related work), however they still produce hierarchies akin to the $W$-hierarchy, for $t \geq 2$:
\[\EXPW[t] = \left[\WSAT{$\Gamma_{t,1}$}\right]^{{\normalfont EXPT}{}}\]
\[\EW[t] = \left[\WSAT{$\Gamma_{t,1}$}\right]^{{\normalfont EPT}{}}\]
Although the first levels of these hierarchies are more technically delicate than the $W$-hierarchy, we still have the following key identities:
\[\EXPW[1] = \left[\WSAT{$\Gamma_{1,2}^{-}$}\right]^{{\normalfont EXPT}{}}\]
and
\[\EW[1] = \left[\WSAT{$\Gamma_{1,2}^{-}$}\right]^{{\normalfont EPT}{}}\]
The hierarchy corresponding to {\normalfont SUBEPT}{} is mildly different\footnote{Incidentally {\normalfont SUBEPT}{} and the $S$-hierarchy correspond to parameterizations of the Exponential Time Hypothesis, making them particularly interesting parameterized classes. In fact, the entire $S$-hierarchy is contained in {\normalfont EPT}{}, with {\normalfont EPT}{} and {\normalfont SUBEPT}{} bearing a similar relationship as {\normalfont XP}{} and {\normalfont FPT}{}.}:
\[\SW[t] = \bigcup_{d\geq 1}\left[\text{\textsc{SAT($\Gamma_{t,d}$)}}\right]^{serf}\]
However, fortunately we also have that:
\[\SW[1] = \left[\text{$s$-$var$-}\WSAT{$\Gamma_{1,2}$}\right]^{serf}\]
Where \textsc{$s$-$var$-WSAT} is a different parameterization of the weighted satisfiability problem:
\pcproblem{$s$-$var$-WSAT($\Phi$)}{A formula $\phi \in \Phi$, an integer $k$.}{$\var(\phi)$ (the number of variables in $\phi$).}{Does $\phi$ have a satisfying assignment where $k$ variables are set to \textsc{True}?}
\subsection{The Miniaturization Isomorphism and the $M$-Hierarchy}
The $S$-hierarchy, despite being a bounded hierarchy of parameterized classes, reflects structure in the unbounded theory. This structure can be elucidated via the \emph{miniaturization isomorphism}. Given a parameterized problem $(\Pi,\kappa)$ over $\Sigma^{*}$ the \emph{miniaturization} of the problem is
\pcproblem{Mini-$(\Pi,\kappa)$}{$x\in\Sigma^{*}$, and $m\in\mathbb{N}$ in unary such that $\Card{x}\leq m$.}{$\lceil\frac{\kappa(x)}{\log m}\rceil$.}{Decide whether $x \in \Pi$.}
Under this mapping we have the following: \[ (\Pi,\kappa) \in \text{{\normalfont SUBEPT}{}} \Leftrightarrow \text{\textsc{Mini-}}(\Pi,\kappa) \in \text{{\normalfont FPT}{}} \] Consequently we can define an intractability hierarchy via this relationship, the $M$-hierarchy. For the purposes of this paper we need only the following: \[ (\Pi,\kappa) \text{ is } \SW[t]\text{-complete} \Leftrightarrow \text{\textsc{Mini-}}(\Pi,\kappa) \text{ is } \M[t]\text{-complete} \] However the $M$-hierarchy is closed under normal fpt reductions. The proofs of these results, and much more technical detail can be found in \cite{FlumGrohe06}, or the original papers~\cite{AbrahamsonDowneyFellows95, ChenCFHJKX04, ChenF04, ChenG07, ChenHKX04, DowneyEFPR03, FlumG04}, for context however, it is known that for all $t \geq 1$ we have $\M[t] \subseteq \W[t] \subseteq \M[t+1]$.
\section{Proof Checking, Interactive Proofs and PCPs}
\subsection{Notation and Notes}
For convenience we denote by $\mathbb{B}$ the set $\{0,1\}$.
The proof systems will often be phrased somewhat like interactive proofs, as this often seems an intuitive, natural presentation, however the proof string is in effect a table of polynomial coefficients indexed by length $m$ vectors over a field $\mathcal{F}$, along with the values of a truth assignment at points over this space.
\subsection{Basic Definitions}
\begin{definition}[PCP] A \emph{Probabilistically Checkable Proof System} (a PCP) for a problem $\Pi$ over alphabet $\Sigma$ is a probabilistic polynomial-time Turing Machine $V$ that given input $x$ and access to a proof string $\sigma \in \Sigma^{*}$ satisfies the following conditions: \begin{enumerate} \item If $x$ is a \textsc{Yes}{}-instance of $\Pi$, there is a $\sigma$ such that $V^{\sigma}$ accepts $x$ with probability $1$. \item If $x$ is a \textsc{No}{}-instance of $\Pi$, for every $\sigma$ the probability that $V^{\sigma}$ accepts $x$ is at most $\frac{1}{2}$. \end{enumerate} \end{definition}
The choice of $1$ and $\frac{1}{2}$ as the probabilities for the completeness and soundness of the verifier are in a sense somewhat arbitrary, for example, Babai, Fortnow \& Lund~\cite{BabaiFL91} use probabilities that vary with the length of the input, however the majority of results are stated directly with these probabilities, or are otherwise compatible.
\begin{definition}[Restricted PCP] Given two functions $r, p: \mathbb{N} \rightarrow \mathbb{N}$, a PCP is \emph{$(r,p)$-restricted} if for every input $x$, $V$ uses at most $O(r(\Card{x}))$ random bits and $O(p(\Card{x}))$ bits of the proof string $\sigma$. \end{definition}
The set of all problems with a $(r,p)$-restricted PCP is typically denoted \pcp{r}{p}. With this notation we can thus succinctly restate Theorem~\ref{thm:pcp}:
\begin{theorem}[PCP Theorem~\cite{AroraLMSS98, AroraS98}] {\normalfont NP}{} $=$ \pcp{\log n}{1}. \end{theorem}
\subsection{Arithmetization Protocols}
Lund~\emph{et al.}~\cite{LundFKN92} introduced a protocol for demonstrating PCP and interactive proof results which they used to show that every problem in {\normalfont P}{}$^{\#\text{{\normalfont P}{}}}$ has an interactive proof (a key step in motivating Shamir's~\cite{Shamir90} result).
This protocol has proven to be extremely useful and has been used in whole or part for many of the PCP related results~\cite{AroraLMSS98, AroraS98, BabaiFL91, FeigeGLSS96, Shamir90}. It is worthwhile to sketch an outline of this protocol to give an intuition for the working of the main result of this paper.
Given a complexity class $\mathcal{C}$ we select a suitable $\mathcal{C}$-complete problem $\Pi$ and produce a verifier that completes the following tasks: \begin{enumerate} \item For input $x$, the verifier constructs an arithmetical representation $\phi$ of $x$ such that the value of $\phi$ is dependent on whether $x$ is a \textsc{Yes}{}-instance of $\Pi$ or not. For example we may construct an arithmetic formula from a boolean formula such that the arithmetic formula is non-zero if and only if the boolean formula is satisfiable. \item A sufficiently large field over which to do the arithmetic is chosen. Typically this will be $\mathbb{Z}_{p}$ for some sufficiently large prime $p$. \item The verifier then checks the arithmetical representation a variable at a time by instantiating a single variable and obtaining a simplified representation in one variable from the proof which it can use to compare against the expected value. If the simplified representation is satisfactory, the verifier picks a random value from the field, permanently sets the variable to this value and replaces the expected value by the evaluation of the simplified expression with that random value. \item Step 3 is repeated until some value does not match expectation, at which point the proof is rejected, or until all variables have been instantiated at which point the expression is checked explicitly using elements of the solution obtained from the proof (\emph{e.g.} values from a truth assignment). \end{enumerate}
The key to the effectiveness of this protocol is in the restriction on the arithmetic representation and the size of the field. For clarity of discussion we will assume the representation to be a multinomial and the field to be $\mathbb{Z}_{p}$ for a sufficiently large prime $p$.
If the multinomial is of constant degree $d$, and the polynomial simplification over one variable obtained from the proof is false, it can agree with the true polynomial in at most $d$ places~\cite{Schwartz80}. So if the proof is false, it can ``look true'' for only a small number of values ($d$), and eventually some iteration of checking will observe an erroneous value with high probability ($1-\frac{dr}{p}$ where $r$ is the number of iterations).
\section{Parameterized PCPs}
Clearly we can adapt PCP notions to parameterized complexity.
\begin{definition}[Parameterized PCP] A \emph{Parameterized Probabilistically Checkable Proof System} (parameterized PCP, or \emph{p}-PCP) for parameterized problem $\Pi$ over alphabet $\Sigma$ is a probabilistic {\normalfont FPT}{}-time Turing Machine $V$ that given input $(x,k)$, an instance of $\Pi$, and access to an proof string $\sigma \in \Sigma^{*}$ satisfies the following conditions: \begin{enumerate} \item If $(x,k)$ is a \textsc{Yes}{}-instance of $\Pi$, there is a $\sigma$ such that $V^{\sigma}$ accepts $(x,k)$ with probability $1$. \item If $(x,k)$ is a \textsc{No}{}-instance of $\Pi$, for any choice of $\sigma$ the probability that $V^{\sigma}$ accepts $(x,k)$ is no greater than $\frac{1}{2}$. \end{enumerate} \end{definition}
As with non-parameterized PCPs, the completeness and soundness probabilities need not be $1$ and $\frac{1}{2}$, however these values are sufficient for our purposes and confusing the notation thus serves no purpose.
\begin{definition}[Restricted \emph{p}-PCP] Given two functions $r,p: \mathbb{N}\times\mathbb{N} \rightarrow \mathbb{N}$ a \emph{p}-PCP is \emph{$(r,p)$-restricted} if for every input $(x,k)$ it uses $O(r(\Card{x}, k))$ random bits and at most $O(p(\Card{x}, k))$ bits of the proof string $\sigma$. \end{definition}
We denote the set of all problems with an $(r,p)$-restricted \emph{p}-PCP by \ppcp{r}{p}.
For certain extreme values of the parameters, we can use the \ppcp{r}{p} notation to express some of the parameterized classes.
\begin{itemize} \item {\normalfont FPT}{} $=$ \ppcp{0}{0}, by definition problems in ${\normalfont FPT}{}$ have no access to a proof and need no randomness. \item {\normalfont FPT}{} $=$ \ppcp{f(k)+\log n}{0}. An ${\normalfont FPT}{}$-time algorithm can try all possible random strings of length $f(k)+\log n$. \item {\normalfont FPT}{} $=$ \ppcp{0}{f(k)+\log n}. An ${\normalfont FPT}{}$-time algorithm can generate all proofs of length $f(k)+\log n$. \item {\normalfont\emph{para}-{\normalfont NP}{}}{} $=$ \ppcp{0}{f(k)n^{O(1)}}. By definition. \end{itemize}
\subsection{A Non-trivial Parameterized PCP for W[1]}
\begin{theorem}\label{thm:wsat-pcp} \sloppypar Let $(\phi, k)$ be an instance of \WSAT{2-CNF$^{-}$}{} where $\max\{\var(\phi),\cl(\phi)\} \leq 2^{m}$. There is an $(m\log m, m\log m)$-restricted probabilistic ${\normalfont FPT}{}$-time Turing Machine that rejects $(\phi, k)$ with high probability if $(\phi, k)$ is a \textsc{No}{}-instance of \WSAT{2-CNF$^{-}$}{}. That is, \WSAT{2-CNF$^{-}$}{} $\in$ \ppcp{m\log m}{m \log m}. \end{theorem}
\begin{proof} The protocol will follow the same general format as those of Lund \emph{et al.}~\cite{LundFKN92}, Babai, Fortnow \& Lund~\cite{BabaiFL91} and particularly Feige \emph{et al.}~\cite{FeigeGLSS96} in that we will construct an arithmetic representation of $\phi$ and use the proof to evaluate this function pointwise.
Let $\phi$ be a \textsc{2-CNF$^{-}$} with smallest $m$ such that $2^{m} \geq \max\{\var(\phi), \cl(\phi)\}$. Denote each clause and variable by a binary string over $m$ bits.
For $v \in \mathbb{B}^{m}$ and $i \in \{1,2\}$ define a set of functions $C_{c,i}: \mathbb{B}^{m} \rightarrow \mathbb{B}$ as \[ C_{c,i}(v) = \left\{ \begin{array}{cl} 1 & \text{if $v$ is the $i^{th}$ variable of clause $c$} \\ 0 & \text{otherwise}\end{array}\right. \] This can be done in such a fashion that each $C_{c,i}$ is multilinear in $m$ variables. We sketch an example; say that $v = v_{1}v_{2}v_{3} = 101$ is the $1^{st}$ variable of clause $c$, then $C_{c,1} = v_{1}(1-v_{2})v_{3}$. Then the only place (over $\mathbb{B}^{3}$) where this is $1$ is at $101$.
Let $A:\mathbb{B}^{m} \rightarrow \mathbb{B}$ be a truth assignment to the variables of $\phi$.
We then define the following function over some sufficiently large field. \[ SC(A,y) = \sum_{x_{1},x_{2}\in \mathbb{B}^{m}} \prod_{i \in \{1,2\}} C_{y,i}(x_{i})A(x_{i}) \] This evaluates to $0$ if and only if $A$ is a satisfying assignment for clause $y$. Then $\phi$ in its entirety, can be expressed as: \[ S(A) = \sum_{z\in\mathbb{B}^{m}} SC(A,z)\cdot\prod_{i \in [1,m]} r_{i}^{z_{i}} \] Where $z_{i}$ is the $i^{th}$ bit of the binary representation of $z$ and $(r_{1},\ldots,r_{m})$ is a set of independently chosen random numbers from $\mathcal{F}$. This additional term is included to ensure with high probability that in the extended function the sum is zero only when all clauses evaluate to zero under $A$ (again, Feige \emph{et al.}~\cite{FeigeGLSS96} demonstrate the correctness of this method). However we must also verify that: \[ \sum_{z \in \mathbb{B}^{m}} A(z) = k \] The first function now evaluates to zero if and only if all the clauses are satisfied and the second evaluates to $k$ if and only if the weight of the truth assignment is $k$.
We now employ the following proposition: \begin{proposition}[\cite{BabaiFL91}, \cite{FeigeGLSS96}] Given a field $\mathcal{F}$, every boolean function $f$ has a unique multilinear extension over $\mathcal{F}$. Moreover the value of the extension at any point can be computed in time $2^{\arity(f)}$. \end{proposition}
In particular we can compute the multilinear extension of $C$ in any field of our choosing. Then assuming that $A$ is close to multilinear, $S$ is a multinomial of constant degree. Of course we cannot simply compute $A$ in ${\normalfont FPT}{}$-time, otherwise we'd have no reason for a $p$-PCP! However Babai, Fortnow \& Lund~\cite{BabaiFL91} demonstrate a procedure for testing multilinearity of a function that fails with high probability if the function is not multilinear and succeeds otherwise. Feige \emph{et al.}~\cite{FeigeGLSS96} improve this test, reducing the number of random and proof bits required to $O(m\log m)$.
We may now apply a protocol in the style of Lund \emph{et al.}~\cite{LundFKN92}, though Feige \emph{et al.}'s~\cite{FeigeGLSS96} version of the protocol is the direct inspiration.
Given a multinomial $h$ of constant degree $d$ over $q$ variables the function $g_{i}(x_{i})$ where the first $i-1$ variables are randomly instantiated \[ g_{i}(x_{i}) = \sum_{x_{i+1}, \ldots, x_{q} \in \mathbb{B}} h(r_{1}, \ldots, r_{i-1}, x_{i}, \dots, x_{q}) \] is a polynomial of degree $d$.
Assuming $A$ is multilinear with high probability (to ensure the degree bound of the multinomial), given an expected value $a_{i-1}$ we perform the $i^{th}$ iteration of the proof check as follows:
\begin{enumerate} \item Obtain from the proof the $d$ coefficients of the polynomial $g_{i}'$ that is purported to be $g_{i}$. \item Check that $g_{i}'(0)+g_{i}'(1) = a_{i-1}$, if not, then reject. \item If the first check passes, we may still have $g_{i} \neq g_{i}'$. However they can agree at at most $d$ points in $\mathcal{F}$. We can check this with high probability ($1-\frac{d}{\Card{\mathcal{F}}}$) by randomly picking a value $r_{i}$, setting $a_{i} := g_{i}'(r_{i})$ and verifying the formula recursively. \end{enumerate}
Initially we have $a_{0} = 0$. The process continues until all variables have been randomly instantiated, at which point we can check the final function directly by obtaining the two values of $A$ at the randomly generated points described by the instantiated variables and computing the value. By choosing $\mathcal{F}$ such that $\Card{\mathcal{F}}> \frac{md}{\varepsilon}$, the probability of incorrectly accepting a false proof at some point over the $m$ rounds is at most $\varepsilon$.
The function checking the weight of the satisfying assignment can be checked using the same protocol.
As $\log\Card{\mathcal{F}} \in O(\log m)$, this protocol uses $O(m\log m)$ proof bits to obtain the polynomial coefficients and $O(m\log m)$ random bits in instantiating the function.
\end{proof}
\begin{corollary}\label{cor:w[1]-pcp} For every parameterized problem $\Pi \in \W[1]$ there exists a function $f: \mathbb{N} \rightarrow \mathbb{N}$ such that $\Pi \in$ \ppcp{(f(k)+\log n) \log(f(k)+\log n)}{(f(k)+\log n) \log(f(k)+\log n)} and hence $\W[1] \subseteq$ \ppcp{(f(k)+\log n) \log(f(k)+\log n)}{(f(k)+\log n) \log(f(k)+\log n)} where $n$ is the size of the instance and $k$ is the parameter. \end{corollary}
\begin{proof} As \WSAT{2-CNF$^{-}$}{} is $\W[1]$-complete, every problem in $\W[1]$ can be reduced to an instance of \WSAT{2-CNF$^{-}$}{} in time bounded by $f(k)n^{O(1)}$ for some computable function $f$. Hence the instance of \WSAT{2-CNF$^{-}$}{} produced by the reduction has at most $f(k)n^{O(1)}$ variables and $f(k)n^{O(1)}$ clauses. \end{proof}
\subsection{Unbounded Clauses and $\W[2]$}
The class $\Gamma^{+}_{2,1}$ of propositional formul\ae{} can be more naturally thought of as the class of all propositional CNF formul\ae{}. The protocol given for $\W[1]$ in the previous section, although defined for $\Gamma^{-}_{1,2}$, does not depend on the clause length --- the bounds on the number of bits used may change, but the clause length is not fundamental to the structure, unlike say, that the formula is in CNF as this restriction ensures that the arithmetization is multilinear.
\begin{theorem} \sloppypar Let $(\phi, k)$ be an instance of \WSAT{$\Gamma^{+}_{2,1}$} where $\max\{\var(\phi),\cl(\phi)\} \leq 2^{m}$ and $p$ is the length of the longest clause. There is a $(p\cdot m\log m, p\cdot m\log m)$-restricted probabilistic ${\normalfont FPT}{}$-time Turing Machine that rejects $(\phi, k)$ with high probability if $(\phi, k)$ is a \textsc{No}{}-instance of \WSAT{$\Gamma^{+}_{2,1}$}. That is, \WSAT{$\Gamma^{+}_{2,1}$} $\in$ \ppcp{p \cdot m\log m}{p \cdot m \log m}. \end{theorem}
\begin{proof} We can modify the $SC$ function to cope with greater clause length and positive rather than negative literals: \[ SC(A,y) = \sum_{x_{1},\ldots,x_{p}\in \mathbb{B}^{m}} \prod_{i \in [1,p]} C_{y,i}(x_{i})(1-A(x_{i})) \] The family of functions $C_{c,i}$ is also extended in the obvious way.
Then the protocol continues for $p\cdot m$ rounds rather than the $2\cdot m$ as for the $\Gamma^{-}_{1,2}$ case. We then need a factor of $p$ extra random bits, and we require $p$ values of the satisfying assignment $A$ for the final evaluation. \end{proof}
\begin{corollary} For every parameterized problem $\Pi \in \W[2]$ there exists a function $f: \mathbb{N} \rightarrow \mathbb{N}$ such that $\Pi \in$ \ppcp{p\cdot(f(k)+\log n) \log(f(k)+\log n)}{p\cdot(f(k)+\log n) \log(f(k)+\log n)} and hence $\W[2] \subseteq$ \ppcp{p\cdot(f(k)+\log n) \log(f(k)+\log n)}{p\cdot(f(k)+\log n) \log(f(k)+\log n)} where $n$ is the size of the instance and $k$ is the parameter and $p$ is the length of the longest clause in the equivalent \WSAT{$\Gamma^{+}_{2,1}$} instance. \end{corollary}
The catch with this of course is that $p$ may, in principle, be as long as the formula and hence $O(f(k)n^{O(1)})$, in which case we do no better (actually, clearly worse) than the trivial \emph{p}-PCP guaranteed by the fact that $\W[2]\subseteq \text{{\normalfont\emph{para}-{\normalfont NP}{}}{}}$.
\subsection{Extension to Bounded Parameterized Classes}
As \WSAT{2-CNF$^{-}$}{} is complete for both $\EXPW[1]$~\cite{Weyer04} and $\EW[1]$~\cite{FlumGroheWeyer06}, we can easily adapt the $\W[1]$ result. We omit the formal particulars of the restriction on the running time and reduction structures denoting them simply by prepending the bound to the nomenclature.
\begin{corollary} $\EXPW[1] \subseteq$ $2^{k^{O(1)}}$-\ppcp{(2^{k^{O(1)}}+\log n) \log(2^{k^{O(1)}}+\log n)}{(2^{k^{O(1)}}+\log n) \log(2^{k^{O(1)}}+\log n)} where $n$ is the size of the instance and $k$ is the parameter. \end{corollary}
\begin{corollary} $\EW[1] \subseteq$ $2^{O(k)}$-\ppcp{(2^{O(k)}+\log n) \log(2^{O(k)}+\log n)}{(2^{O(k)}+\log n) \log(2^{O(k)}+\log n)} where $n$ is the size of the instance and $k$ is the parameter. \end{corollary}
As \WSAT{2-CNF$^{-}$}{} is not $\SW[1]$-complete, we need to adjust the formula $SC$ used in Theorem~\ref{thm:wsat-pcp} as we can no longer assume that all variables are negated. Fortunately we can simply use the formula of Feige \emph{et al.}~\cite{FeigeGLSS96} more directly (adjusted for 2-CNF, rather than 3-CNF). Recall that $2^{m} \geq \max\{\var(\phi),\cl(\phi)\}$ (of course we are really just interested in taking a power of two so that the logarithms work neatly). As $\var(\phi)=k'$ is the parameter we know that $m \leq \log(4k'^{2})$. Given that $k$ is the parameter of the initial problem and $n$ is the size, the reduction scheme that closes the ${\normalfont S}$-hierarchy gives $k' = g(l)(k+\log n)$ for some ${\normalfont SUBEPT}{}$-time computable function $g$ over $\mathbb{N}$.
\begin{corollary}\sloppypar $\SW[1] \subseteq$ $2^{o^{eff}(k)}$-\ppcp{\log (g'(l)(k + \log n)^{2}) \log \log (g'(l)(k + \log n)^{2})}{\log (g'(l)(k + \log n)^{2}) \log \log( g'(l)(k + \log n)^{2})} where $n$ is the size of the instance, $k$ is the parameter and $g'$ is a ${\normalfont SUBEPT}{}$-time computable function over $\mathbb{N}$. \end{corollary}
Then from the miniaturization isomorphism we get:
\begin{corollary}\sloppypar $\M[1] \subseteq$ \ppcp{\log (f(\frac{k}{\log n})n^{O(1)}) \log \log (f(\frac{k}{\log n})n^{O(1)})}{\log (f(\frac{k}{\log n})n^{O(1)}) \log \log (f(\frac{k}{\log n})n^{O(1)})} where $n$ is the size of the instance and $\frac{k}{\log n}$ is the parameter. \end{corollary}
\section{Proof Checking for the $A$-Hierarchy}
Looking at the classes of the $A$-hierarchy, one may be put in mind of Shamir's~\cite{Shamir90} proof that IP={\normalfont PSPACE}{} via a Lund \emph{et al.}~\cite{LundFKN92} style protocol over instances of the \textsc{Quantified Boolean Satisfiability} problem. However, the restriction of the weight of the solution poses some interesting problems. While in Shamir's case, the universal quantification is truly universal, in ours it is universal only in the ``for all subsets of size $k$'' sense, hence it is difficult to translate an instance of \textsc{AWSAT} in the same fashion --- dealing with each universally quantified variable individually becomes complicated by the fact that its possible values depend on how many of the previous variables have been set to \textsc{TRUE}, which is further complicated by the assignment of a random value out of a much larger field.
From the parameterized perspective, it is also perhaps not sensible that we ask to verify a membership proof of an \textsc{AWSAT} problem in {\normalfont FPT}{}-time. If we consider a certificate for such an instance (with the technical consideration that $l\geq 2$) then we must verify not only a single weight $k$ satisfying assignment, but for those variables that are universally quantified, we must verify that \emph{all} weight $k_{i}$ assignments have accompanying assignments from the existentially quantified variables following them. That is, we are in effect expected to check on the order of $n^{k}$ assignments. This is reflected in the structure of the parameterized classes --- while the $W$-hierarchy is contained in {\normalfont\emph{para}-{\normalfont NP}{}}{}, apart from $\A[1]$, there is no evidence that the $A$-hierarchy is. However the $A$-hierarchy is contained in {\normalfont XP}{}, hence we can solve these problems in time $f(k)+n^{f(k)}$ and naturally can thus check solutions within that bound.
With this in mind we suggest a slightly relaxed version of a parameterized PCP, where we make the obvious changes from {\normalfont FPT}{}-time to $f(k)+n^{f(k)}$. For simplicity we will denote this as a $n^{k}$-$p$-PCP.
\begin{theorem} \sloppypar Let $(\phi, X_{1},\ldots, X_{l}, k=k_{1}+\ldots+k_{l})$ be an instance of \AWSATL{l}{$\Gamma^{-}_{1,2}$} where $\max\{\var(\phi),\cl(\phi)\} \leq 2^{m}$ with $l$ odd and $k' = k_{2}+k_{4}+\ldots+k_{l-1}$. There is an $(n^{k'}\cdot m\log m, n^{k'}\cdot m\log m)$-restricted probabilistic $(f(k)+n^{f(k)})$-time Turing Machine that rejects $(\phi, k)$ with high probability if $(\phi, X_{1},\ldots, X_{l}, k=k_{1}+\ldots+k_{l})$ is a \textsc{No}{}-instance of \AWSATL{l}{$\Gamma^{-}_{1,2}$}. That is, \AWSATL{l}{$\Gamma^{-}_{1,2}$} $\in$ \ppcp{n^{k'}\cdot m\log m}{n^{k'}\cdot m\log m}. \end{theorem}
\begin{proof} The verifying TM $V$ begins by generating the $O(n^{k'})$ assignments to the variables of $X_{even} = X_{2} \cup X_{4} \cup \ldots X_{l-1}$. In effect we can treat this as a simple string $s$ over $\{0,1\}^{\Card{X_{even}}}$, which we will use to index elements of the truth assignment given in the proof string (which again we can treat as a table). For each assignment to $X_{even}$ we can reduce the input formula $\phi$ appropriately in polynomial time, substituting in the values of the literals and simplifying the formula to $\phi'$.
We then have a series of $\Gamma_{1,2}^{-}$ formul\ae{} with only \emph{existential} qualification, but this is equivalent to an instance of \WSAT{$\Gamma_{1,2}^{-}$}, only with the slight constraint that the truth assignment is required to consist of $\frac{l+1}{2}$ parts, corresponding to the odd indexed variable sets $X_{1},\ldots,X_{l}$.
Thus we can apply the protocol used for $\W[1]$, with the slight change that instead of checking simply that $\sum_{x_{i}}A(x_{i})=k$, we check the sequence of truth assignments $A^{s}_{j}$ where $j \in \SB 2h-1 \;{|}\; h \in \mathbb{N}^{+} \SE$, ensuring that for each the weight is $k_{j}$. \end{proof}
\begin{corollary} $\A[l] \subseteq$ \ppcp{f(k)n^{g(k)}\cdot \log (f(k)n^{O(1)})\log\log (f(k)n^{O(1)})}{f(k)n^{g(k)}\cdot \log (f(k)n^{O(1)})\log\log (f(k)n^{O(1)})} for all $l \geq 1$, where $n$ is the size of the input, $k$ is the parameter and $g$ and $f$ are computable functions. \end{corollary}
\begin{proof} As $\A[l]$ is closed under fpt-reductions, if $l$ is odd, we can reduce the input instance to an instance of \AWSATL{l}{$\Gamma^{-}_{1,2}$} with at most $f(k)n^{O(1)}$ clauses and variables, with parameter $g(k)$.
By containment, if $l$ is even, we can reduce the input to an instance of \AWSATL{l+1}{$\Gamma^{-}_{1,2}$}. \end{proof}
We note particularly that this $p$-PCP has the nice property of reducing to the $\W[1]$ $p$-PCP in the case where $l=1$. This is a generally desirable property as $\A[1] = \W[1]$ (though in general we only expect that $\W[t] \subseteq \A[t]$).
\begin{corollary} $\AW[*] \subseteq$ \ppcp{f(k)n^{g(k)\cdot \lfloor\frac{l}{2}\rfloor}\cdot \log (f(k)n^{O(1)})\log\log (f(k)n^{O(1)})}{f(k)n^{g(k)\cdot \lfloor\frac{l}{2}\rfloor}\cdot \log (f(k)n^{O(1)})\log\log (f(k)n^{O(1)})} \end{corollary}
\begin{proof} Any problem in $\AW[*]$ can be reduced to an instance of \AWSAT{$\Gamma^{-}_{1,2}$}. In this case $l$ is not fixed, but part of the input. However for a given instance, the number of even-index variable sets is at most $\lfloor\frac{l}{2}\rfloor$. \end{proof}
\section{Conclusion}
The development of parameterized PCPs, of which this is simply a first step, may have interesting results, particularly for parameterized approximation theory. Currently non-trivial parameterized approximations are few, and the status of key problems such as \textsc{Clique} and \textsc{Dominating Set} are essentially unknown. For parameterized PCPs to have an impact on this however, results need to be improved and extended. By employing directly the construction of Feige~\emph{et al.}~\cite{FeigeGLSS96} for \textsc{Max-Clique} we could obtain results if we can reduce the number of random bits of a $p$-PCP containing $\W[1]$ to a function of $k$ alone. This seems possible for the main part of the checking protocol --- we can simply randomly generate only $k$ of the values, and take all others as constant (say 0), with a corresponding alteration in the size of the field over which the values are generated, the probability of incorrectly accepting is in essence no different. A similar alteration to the multilinearity testing however is much more difficult. Another possible approach would be to explore the intersection of Dinur's~\cite{Dinur07} proof of the PCP theorem which employs certain constraint satisfaction problems and recent hardness results for parameterized versions of constraint satisfaction~\cite{BulatovMarx11}.
Extending the result of this paper to cover other classes also seems to be non-trivial, the alternation of boolean operators of unbounded arity in propositional classes that define the classes $\W[t]$ seems to preclude retaining the constant degree property essential to the protocol presented here (this is not a problem for ${\normalfont NP}{}$ as we do not need to keep track of the weight of the satisfying assignment, so the polynomial expansion experienced in reducing a formula to \textsc{3-CNF} creates no problem). However it seems likely that a tight $p$-PCP for $\W[1]$ would be part of a broader $p$-PCP that generalizes to $\W[t]$ for all $t$, implying that $t$ will play an important role in the final complexity description.
In the other direction it would be interesting to obtain a more general $p$-PCP for the other {\normalfont PSPACE}{} related parameterized classes, particularly $\AW[SAT]$ and $\AW[P]$.
\end{document} |
\begin{document}
\title{Characterization of polyconvex isotropic functions}
\begin{abstract} We present a singular value polyconvex conjugation. Employing this conjugation, we derive a necessary and sufficient criterion for polyconvexity of isotropic functions by means of the convexity of a function with respect to the signed singular values. Moreover, we present a new criterion for polyconvexity of isotropic functions by means of matrix invariants. \end{abstract}
\section{Introduction}\label{sec:Intro} Many applications in elasticity aim to find global minimizers for functionals of the form \begin{align*} I : W^{1,p}(\Omega) \to \mathbb{R}_\infty \coloneqq \mathbb{R} \cup \{ \infty \}; \quad I(u) = \int\limits_\Omega W(Du(x)) \, \textrm{d}x \end{align*} for $\Omega \subset \mathbb{R}^d$ with $d \in \{2,3\}$. Thereby, the lower semicontinuity of $I$ plays an important role in the direct method of calculus of variations and is strongly connected to the quasiconvexity of $W$. For finite energy densities, the notion of polyconvexity implies quasiconvexity. However, in contrast to quasiconvexity, polyconvexity allows the consideration of energy densities $W$ with $W(F)= \infty$. This becomes important for finite strain theory and enables in particular energies with $W(F)= \infty$ for $F$ with $\det(F) \leq 0$ together with $W(F) \to \infty$ for $\det(F) \to 0$, which was the original motivation for the introduction of the stronger notion of polyconvexity in \cite{Bal77}, \cite{Bal77b}, also see \cite{Bal02} for a survey.
If $W$ is isotropic, it can be reformulated in terms of the signed singular values. That means, for every isotropic function $W$, there exists a unique function ${\Phi : \mathbb{R}^{d} \to \mathbb{R}_\infty}$ that characterises $W$ in terms of the signed singular values of $F$. The function $\Phi$ is said to be singular value polyconvex if the corresponding $W$ is polyconvex.
It is well known that the singular value polyconvexity of $\Phi$ implies the existence of some convex function $g$ such that $\Phi(\nu) = g(\dutchcal{m}_d(\nu))$, where $\dutchcal{m}_d(\nu)$ are the elementary polynomials of $\nu$. For $d=3$, this means there exists a convex function $g$ such that \begin{align}\label{eq:Phi=g} \Phi(\nu_1, \nu_2, \nu_3) = g(\nu_1, \nu_2, \nu_3, \nu_2 \nu_3, \nu_1 \nu_3, \nu_1 \nu_2, \nu_1 \nu_2 \nu_3). \end{align} The condition \eqref{eq:Phi=g} was often formulated in terms of the singular values and not in terms of the signed singular values. Thus, $\Phi$ and $g$ were only defined on $[0,\infty)^d$ and $[0,\infty)^{2^d-1}$, respectively. This is only possible for energies which attain $\infty$ for $F$ with $\det(F) \leq 0$. However, even for this case, it was not proven that this condition is also sufficient. By assuming additionally that $g$ is non-decreasing with respect to the first $2^d-2$ arguments, a sufficient criterion for the singular value polyconvexity of $\Phi$ was derived in \cite{Bal77, Bal77b, Bal84}.
For $d=2$, it was shown that this non-decreasing property can be reduced to non-decreasing along several lines \cite{Sil97, Ros97, Sil02} and, thus, a necessary and sufficient characterization of polyconvexity was achieved. In \cite{Mie05}, sufficient and necessary conditions for polyconvexity were shown for $d \in \{2,3\}$ and it was shown that the monotonicity condition of \cite{Bal77} is in fact a restriction. However, the associated criterion is difficult to verify in practice and it is unclear how to use it to compute polyconvex hulls efficiently.
For the case $d=2$ and the case of finite energies, the non-decreasing property was removed in \cite{DK93, DM06} by employing signed singular values and, thus, defining $\Phi$ on $\mathbb{R}^2$ and $g$ on $\mathbb{R}^3$.
In this work, we introduce a polyconvex conjugation with respect to the signed singular values. After lifting the space of the signed singular values, we can identify this conjugation with the classical Legendre--Fenchel conjugation. Thus, we can show in Theorem \ref{thm:Phi=g}, that the criterion \eqref{eq:Phi=g} (formulated with the signed singular values) is not only necessary for the polyconvexity but already sufficient even without additional non-decreasing constraints on $g$. \begin{theorem}\label{thm:Phi=g} Let $d \in \{2,3\}$ and $\Phi : \mathbb{R}^{d} \to \mathbb{R}_\infty$ be $\Pi_d$-invariant. Then, $\Phi$ is lower semicontinuous singular value polyconvex if and only if there exists a lower semicontinuous and convex function $g : \mathbb{R}^{k_d} \to \mathbb{R}_\infty$ such that \begin{align*} &\Phi(\nu_1, \nu_2) = g(\nu_1, \nu_2, \nu_1 \nu_2) &&\textrm{ for } d = 2, \\ &\Phi(\nu_1, \nu_2, \nu_3) = g(\nu_1, \nu_2, \nu_3, \nu_2 \nu_3, \nu_1 \nu_3, \nu_1 \nu_2, \nu_1 \nu_2 \nu_3) &&\textrm{ for } d = 3. \end{align*} \end{theorem}
This criterion provides a powerful tool for the determination of isotropic polyconvex energies. Moreover, due to its if and only if character it can be employed for the determination of the polyconvex envelope of isotropic functions.
What is more, isotropic energy densities $W$ are often formulated in terms of the principal matrix invariants $I(U)$ of $U=\sqrt{F^\top F}$, i.e.~$W(F)= \psi(I(U))$. In \cite{Ste03}, the polyconvexity criterion of \cite{Bal77} was used for the derivation of a sufficient criterion for the polyconvexity of $W$ by means of the convexity of $\psi$. We transfer this argumentation to our stronger polyconvexity criterion and, thus, obtain a new sufficient criterion for the polyconvexity for energies which are formulated in the elementary symmetric polynomials of the signed singular values of $F$ or the principal invariants of $U$.
The paper is organized as follows. In Section \ref{sec:BasicDef}, we recap some general notions about polyconvexity and state the singular values polyconvex conjugation. In Section~\ref{sec:Duality}, we show that the application of this conjugation yields the lower semicontinuous polyconvex envelope. By lifting the space of the signed singular values, we can identify this conjugation with the classical Legendre--Fenchel conjugation and obtain the main results. In Section~\ref{sec:Invariants}, we use this characterization of polyconvexity in order to state some remark on polyconvexity with respect to matrix invariants.
\section{Basic definitions}\label{sec:BasicDef} Let $d \in \mathbb{N}_{>0}$. A function $W : \mathbb{R}^{d \times d} \to \mathbb{R}_\infty$ is called polyconvex if there exists a convex function $G : \mathbb{R}^{K_d}\to \mathbb{R}_\infty$ such that \begin{align}\label{eq:def:Polyconvexity} W(F) = G(\mathcal{M}(F)) \end{align} for every $F \in \mathbb{R}^{d \times d}$, where $\mathcal{M}(F) \in \mathbb{R}^{K_d}$ denotes the minors of $F$ and $K_d = \sum\limits_{i = 1}^d \binom{d}{i} ^2$.
The application of polyconvexity in the calculus of variations goes hand in hand with the lower-semicontinuity of $G$ in \eqref{eq:def:Polyconvexity}. Thus, we say that $W$ is lower semicontinuous polyconvex if $G$ is additionally lower semicontinuous.
A function $W : \mathbb{R}^{d \times d} \to \mathbb{R}_\infty$ is called isotropic if it is $\mathrm{SO}(d)$-invariant, i.e.~$W(F) = W(R_1 FR_2)$ for all $F \in \mathbb{R}^{d \times d}$ and $R_1, R_2 \in \mathrm{SO}(d)$. Thus, $W$ can be reformulated in terms of the signed singular values, i.e.~there exists a unique $\Phi : \mathbb{R}^{d} \to \mathbb{R}_\infty$, such that $W(F) = \Phi(\nu)$ for all $F \in \mathbb{R}^{d\times d}$ and all $\nu \in \mathbb{S}(F)$, where \begin{align*}
\mathbb{S}(F) \coloneqq \{ \nu \in \mathbb{R}^d \mid |\nu_1|, \dots, |\nu_d| \textrm{ are the singular values of } F, \ \overset{\bullet}{\nu} = \det(F) \} \end{align*} is the set of all signed singular values of $F$ and $\overset{\bullet}{\nu} \coloneqq \prod_{i =1}^{d} \nu_i$. For instance $\Phi$ can be obtained from $W$ by $\Phi(\nu) \coloneqq W(\operatorname{diag}(\nu))$. Note that the $\mathrm{SO}(d)$-invariance of $W$ implies that $\Phi$ is $\Pi_d$-invariant, where \begin{align*}
\Pi_d \coloneqq \{P \operatorname{diag}(\epsilon) \mid P \in \operatorname{Perm}(d), \epsilon \in \{-1,1\}^d, \overset{\bullet}{\epsilon} = 1 \}, \end{align*} $\overset{\bullet}{\epsilon} \coloneqq \prod_{i =1}^{d} \epsilon_i$ and $\operatorname{Perm}(d)$ denotes the set of permutation matrices, i.e.~${\Phi(\nu) = \Phi(S \nu)}$ for all $\nu \in \mathbb{R}^d$ and all $S \in \Pi_d$. The condition $\overset{\bullet}{\epsilon} = 1$ in the definition of $\Pi_d$ comes from the fact that $W$ is only $\mathrm{SO}(d)$- and not $\mathrm{O}(d)$-invariant. Namely, $W(F_1) = W(F_2)$ if the singular values of $F_1$ are equal to the singular values of $F_2$ as well as $\det(F_1) = \det(F_2)$.
Employing the one-to-one correspondence between $W$ and $\Phi$, we say $\Phi$ is singular value polyconvex if the corresponding $W$ is polyconvex. Moreover, we say $\Phi$ is lower semicontinuous singular value polyconvex (lsc svpc) if $W$ is (lsc pc).
In order to formulate the polyconvexity condition in terms of the signed singular values, we lift the space of the signed singular values in an analogous way as the set of matrices $F \in \mathbb{R}^{d \times d}$ is lifted via $\mathcal{M}(F) \in \mathbb{R}^{K_d}$. Therefore, we define ${\dutchcal{m}_d : \mathbb{R}^d \to \mathbb{R}^{k_d}}$, where $k_d =2^d-1$, for $d \in \{2,3\}$ by \begin{align*} &\dutchcal{m}_2(\nu) = (\nu_1, \nu_2, \nu_1 \nu_2) &&\textrm{ for } d = 2, \\ &\dutchcal{m}_3(\nu) = (\nu_1, \nu_2, \nu_3, \nu_2 \nu_3, \nu_1 \nu_3, \nu_1 \nu_2, \nu_1 \nu_2 \nu_3) &&\textrm{ for } d = 3. \end{align*} Note that $\dutchcal{m}_d$ can be defined analogously by means of the elementary polynomials for arbitrary $d \in \mathbb{N}$.
With this notation at hand, we introduce the singular value polyconvex conjugation. \begin{definition} Let $\Phi : \mathbb{R}^{d} \to \mathbb{R}_\infty$, then its singular value polyconvex conjugation ${\Phi^\wedge : \mathbb{R}^{k_d} \to \overline{\mathbb{R}} \coloneqq \mathbb{R} \cup \{-\infty,\infty\}}$ is defined by \begin{align*} &\Phi^{\wedge}(\beta) \coloneqq \sup\limits_{\nu \in \mathbb{R}^d } \langle\beta, \dutchcal{m}_d(\nu)\rangle - \Phi(\nu) \end{align*} where $\langle \cdot, \cdot \rangle$ denotes the Euclidean scalar product. Moreover, for $\Theta : \mathbb{R}^{k_d} \to \overline{\mathbb{R}}$, we define the dual singular value polyconvex conjugation by \begin{align*} &\Theta^{\vee} (\nu) \coloneqq \sup\limits_{\beta \in \mathbb{R}^{k_d} } \langle\beta, \dutchcal{m}_d(\nu) \rangle - \Theta(\beta). \end{align*} \end{definition}
For sake of clarity, we note that \begin{align*} &\langle\beta, \dutchcal{m}_2(\nu) \rangle = \beta_1 \nu_1 + \beta_2 \nu_2 + \beta_3 \nu_1 \nu_2 &&\textrm{ for } d = 2, \\ &\langle\beta, \dutchcal{m}_3(\nu) \rangle = \beta_1 \nu_1 + \beta_2 \nu_2 + \beta_3 \nu_3 + \beta_4 \nu_2 \nu_3 + \beta_5 \nu_1 \nu_3 + \beta_6 \nu_1 \nu_2+ \beta_7 \nu_1\nu_2\nu_3 &&\textrm{ for } d = 3. \end{align*}
We show in Theorem \ref{thm:main} that $\Phi^{\wedge \vee} =(\Phi^{\wedge})^{\vee}$ is the lower semicontinuous singular value polyconvex envelope of $\Phi$, i.e.~$\Phi^{\wedge \vee}$ is (lsc svpc), $\Phi^{\wedge \vee} \leq \Phi$ and $\Phi^{\wedge \vee} = \Phi$ if $\Phi$ is (lsc svpc). Thereby, the set of diagonal matrices plays an important role, as we will see. We note that for diagonal matrices $\operatorname{diag}(\nu)$, the minors $\mathcal{M}(\operatorname{diag}(\nu))$ contain several zero entries which are independent of $\nu$. Namely, every entry of $\mathcal{M}(\operatorname{diag}(\nu))$ that does not correspond to the determinant of a submatrix which is symmetric with respect to the diagonal is zero. In order to simplify notation, we identify \begin{align}\notag \mathbb{R}^{K_2}& \cong \mathbb{R}^{2 \times 2} \times \mathbb{R}, \\\label{eq:id2} (A_{11}, A_{12}, A_{21}, A_{22}, c) &\mapsto (A, c), \\\notag \mathbb{R}^{K_3} &\cong \mathbb{R}^{3 \times 3} \times\mathbb{R}^{3 \times 3} \times \mathbb{R}, \\\label{eq:id3} (A_{11}, \dots, A_{33}, B_{11}, \dots, B_{33}, c) &\mapsto (A,B,c). \end{align} Having this identification, we define ${\mathcal{M}(F) = (F, \det(F))}$ for $F\in \mathbb{R}^{2 \times 2}$ and
\noindent ${\mathcal{M}(F) = (F, \operatorname{adj}(F)^\top, \det(F))}$ for $F\in \mathbb{R}^{3 \times 3}$, where $\operatorname{adj}(F) = \det(F)F^{-1}$ for invertible $F$.
With these identifications, we obtain \begin{align*} &\mathcal{M}(\operatorname{diag}(\nu)) = \left(\left(\begin{array}{cc} \nu_1 & 0\\ 0 &\nu_2 \end{array}\right) , \nu_1 \nu_2 \right) &&\textrm{ for } d=2, \\ &\mathcal{M}(\operatorname{diag}(\nu)) = \left(\left(\begin{array}{ccc} \nu_1 & 0& 0\\ 0 &\nu_2 & 0 \\ 0 & 0 & \nu_3 \end{array}\right) ,\left(\begin{array}{ccc} \nu_2 \nu_3 & 0& 0\\ 0 &\nu_1 \nu_3 & 0 \\ 0 & 0 & \nu_1 \nu_2 \end{array}\right), \nu_1 \nu_2 \nu_3 \right) &&\textrm{ for } d=3. \end{align*} Having only $k_d$ many non-zero entries, we define the projection $\operatorname{P} : \mathbb{R}^{K_d} \to \mathbb{R}^{k_d}$ via the identifications $\mathbb{R}^{5} \cong \mathbb{R}^{2 \times 2} \times \mathbb{R}$, $\mathbb{R}^{19} \cong \mathbb{R}^{3 \times 3} \times \mathbb{R}^{3 \times 3}\times \mathbb{R}$ by \begin{align*} &\operatorname{P}(A,c) = (a_{11}, a_{22}, c), \\ &\operatorname{P}(A,B,c) = (a_{11}, a_{22}, a_{33}, b_{11}, b_{22}, b_{33}, c) \end{align*} and see that \begin{align}\label{eq:M=PM}
\dutchcal{m}_d(\nu) = \operatorname{P}( \mathcal{M}(\operatorname{diag}(\nu))) \end{align} for $\nu \in \mathbb{R}^d$.
Moreover, we show a supporting singular value polyconvex hyperplane property. For this purpose, we employ the classical Legendre--Fenchel conjugation: let ${f : \mathbb{R}^n \to \overline{\mathbb{R}}}$ for $n \in \mathbb{N}$. Then, the Legendre--Fenchel conjugation $f^* : \mathbb{R}^n \to \overline{\mathbb{R}}$ of $f$ is given by \begin{align*} f^*(y) = \sup\limits_{x \in \mathbb{R}^n} \langle y,x\rangle - f(x). \end{align*} One of the main well-known properties of this conjugation is that $f^{**} \coloneqq (f^*)^* $ is the lower semicontinuous convex envelope of $f$ for $f : \mathbb{R}^n \to \mathbb{R}_\infty$, i.e.~$f^{**}$ is lower semicontinuous and convex, $f^{**} \leq f$ and $f^{**} = f$ if $f : \mathbb{R}^n \to \mathbb{R}_\infty$ is lower semicontinuous and convex. From this conjugation, we obtain the supporting hyperplane property of lower semicontinuous and convex functions. \begin{lemma}\label{lem:SupportingHyperplaneEps} Let $n \in \mathbb{N}$ and $f : \mathbb{R}^{n} \to \mathbb{R}_\infty$ be lower semicontinuous and convex. Then, for every $x \in \mathbb{R}^n$ with $f(x) < \infty$ and every $\varepsilon >0$, there exists $x_\varepsilon \in \mathbb{R}^n$ such that \begin{align}\label{eq:SupportingHyperplaneEps} f(y) \geq f(x)- \varepsilon + \langle x_\varepsilon, y-x\rangle \end{align} for every $y \in \mathbb{R}^n$. Moreover, for every $x \in \mathbb{R}^n$ with $f(x) = \infty$ and every $\varepsilon >0$, there exists $x_\varepsilon \in \mathbb{R}^n$ such that \begin{align}\label{eq:SupportingHyperplaneEps2} f(y) \geq \frac{1}{\varepsilon} + \langle x_\varepsilon, y-x\rangle \end{align} for every $y\in \mathbb{R}^n$. \end{lemma} \begin{proof} Since $f$ is lower semicontinuous and convex, we obtain \begin{align}\label{eq:001} f(x) = f^{**}(x) = \sup\limits_{x^* \in \mathbb{R}^n} \langle x^*, x\rangle -f^*(x^*). 
\end{align} $\bullet$ Case $f(x) < \infty$: due to \eqref{eq:001}, for every $\varepsilon >0$, there exists $x^*_\varepsilon \in \mathbb{R}^n$ such that \begin{align*} f(x) &= f^{**}(x) \leq \varepsilon + \langle x_\varepsilon^*, x\rangle -f^*(x_\varepsilon^*) = \varepsilon + \langle x_\varepsilon^*, x\rangle - \sup\limits_{y \in \mathbb{R}^n} \langle x_\varepsilon^*, y\rangle -f(y) \\ &\leq \varepsilon + \inf\limits_{y \in \mathbb{R}^n} \langle x_\varepsilon^*, x-y\rangle +f(y). \end{align*} Thus, \begin{align*} f(x) \leq \varepsilon + \langle x_\varepsilon^*, x-y\rangle +f(y) \end{align*} for every $y \in \mathbb{R}^n$, which implies \eqref{eq:SupportingHyperplaneEps}.
$\bullet$ Case $f(x) = \infty$: due to \eqref{eq:001}, for every $\varepsilon >0$, there exists $x^*_\varepsilon \in \mathbb{R}^n$ such that \begin{align*} \tfrac{1}{\varepsilon} \leq \langle x_\varepsilon^*, x\rangle -f^*(x_\varepsilon^*) = \langle x_\varepsilon^*, x\rangle - \sup\limits_{y \in \mathbb{R}^n} \langle x_\varepsilon^*, y\rangle -f(y) \leq \inf\limits_{y \in \mathbb{R}^n} \langle x_\varepsilon^*, x-y\rangle +f(y). \end{align*} Thus, \eqref{eq:SupportingHyperplaneEps2} holds for every $x \in \mathbb{R}^n$. \end{proof} Furthermore, we note that for $f : \mathbb{R}^n \to \bar{\mathbb{R}}$, it holds that \begin{align}\label{eq:f**<f} f^{**} \leq f \end{align} and for $f,g : \mathbb{R}^n \to \bar{\mathbb{R}}$ with $f\leq g$ it holds \begin{align} \begin{aligned}\label{eq:f**<g**} g^* &\leq f^*, \\ f^{**} &\leq g^{**}. \end{aligned} \end{align}
Moreover, for $\Phi : \mathbb{R}^d \to \mathbb{R}_\infty$, we denote the singular value polyconvex envelope of $\Phi$ by \begin{align*} &\operatorname{SvPc} \Phi(\nu) \coloneqq \sup \{\Psi(\nu) \mid \Psi \leq \Phi \textrm{ and } \Psi \textrm{ singular value polyconvex} \}. \end{align*}
\section{Duality of the singular value polyconvex conjugation}\label{sec:Duality}
\begin{lemma}\label{eq:Polyconvexity_Invariance} Let $d \in \{2,3\}$. A function $W$ is polyconvex if and only if $W(R_1 \cdot R_2)$ is polyconvex for all $R_1, R_2 \in SO(d)$. \end{lemma} \begin{proof} We assume that $W$ is polyconvex, i.e.~there exists a convex $G$ such that \begin{align*} G(\mathcal{M}(F))= W(F). \end{align*} We identify $M = (A,c)$ in the case of $d =2$ as in \eqref{eq:id2} and $M = (A,B,c)$ in the case of $d = 3$ as in \eqref{eq:id3}. Then, we obtain \begin{align*} &\tilde{G}(M) = \tilde{G}((A,c)) = G(R_1 A R_2, c) &&\textrm{ for } d=2 \\ &\tilde{G}(M) = \tilde{G}((A,B,c)) = G(R_1 A R_2, R_1 B R_2, c) &&\textrm{ for } d=3. \end{align*} Note that the convexity of $G$ implies the convexity of $\tilde{G}$ (and vice verse). Moreover, we note that $\det(R_1FR_2) = \det(F)$ and \begin{align*} \operatorname{adj}(R_1FR_2)^\top &= (\operatorname{adj}(R_2) \operatorname{adj}(F) \operatorname{adj}(R_1))^\top = (\det(R_1)R_1^{-1})^\top \operatorname{adj}(F) (\det(R_2)R_2^{-1})^\top \\ &= (R_1^{-1})^\top \operatorname{adj}(F)(R_2^{-1})^\top = R_1 \operatorname{adj}(F)^\top R_2. \end{align*} This enables the identification \begin{align*} \tilde{G}(\mathcal{M}(F)) &= G((R_1F R_2, \det(F))) = G(\mathcal{M}(R_1 F R_2)) = W(R_1 F R_2) \end{align*} for $d=2$ and \begin{align*} \tilde{G}(\mathcal{M}(F)) &= G((R_1F R_2, R_1 \operatorname{adj}(F)^\top R_2,\det(F))) \\ &= G(( R_1 F R_2, \operatorname{adj}(R_1 F R_2)^\top ,\det(R_1 F R_2)) \\ &= G(\mathcal{M}(R_1 F R_2)) = W(R_1 F R_2), \end{align*} for $d =3$, which shows that $W(R_1 \cdot R_2)$ is polyconvex as well.
The equivalence follows by the symmetry of the statement. \end{proof}
We translate the supporting hyperplane property, which is given by Lemma~\ref{lem:SupportingHyperplaneEps}, into the setting of the singular value polyconvex conjugation. This yields a necessary condition for singular value polyconvexity. We adapt the proof of \cite[Proposition 3.1]{Mie05}, presenting it in more detail as well. \begin{lemma}\label{lem:SupportingPolyHyperplane} Let $\Phi$ be (lsc svpc). Then, for every $\nu \in \mathbb{R}^d$ with $\Phi(\nu) < \infty$ and $\varepsilon >0$, there exists $\beta_\varepsilon \in \mathbb{R}^{k_d}$ such that \begin{align*} \Phi(\gamma) \geq \Phi(\nu) - \varepsilon + \max\limits_{S \in \Pi_d}\langle\beta_\varepsilon, \dutchcal{m}_d(S\gamma)\rangle - \max\limits_{\tilde{S} \in \Pi_d}\langle \beta_\varepsilon, \dutchcal{m}_d(\tilde{S}\nu)\rangle \end{align*} for every $\gamma \in \mathbb{R}^d$. Moreover, for every $ \nu \in \mathbb{R}^d$ with $\Phi(\nu) =\infty$ and every $\varepsilon >0$, there exists $\beta_\varepsilon \in \mathbb{R}^{k_d}$ such that \begin{align*} \Phi(\gamma) \geq \tfrac{1}{\varepsilon} + \max\limits_{S \in \Pi_d}\langle\beta_\varepsilon, \dutchcal{m}_d(S\gamma)\rangle - \max\limits_{\tilde{S} \in \Pi_d}\langle \beta_\varepsilon, \dutchcal{m}_d(\tilde{S}\nu)\rangle. \end{align*} \end{lemma} \begin{proof} Since $\Phi$ is (lsc svpc), there exists $W$, which is polyconvex and (lsc) such that \begin{align*} \Phi(\sigma(F)) = W(F). \end{align*} If $\Phi(\nu) < \infty$, Lemma \ref{lem:SupportingHyperplaneEps} gives $B_\varepsilon \in \mathbb{R}^{K_d}$ such that \begin{align*} W(G) \geq W(\operatorname{diag}(\nu)) - \varepsilon + \langle B_\varepsilon, \mathcal{M}(G) - \mathcal{M}(\operatorname{diag}(\nu))\rangle \end{align*} for all $G\in \mathbb{R}^{d \times d}$. 
Taking the maximum over $G \in \{ \operatorname{diag}(S \gamma) \mid S \in \Pi_d \}$ for fixed $\gamma \in \mathbb{R}^d$ yields \begin{align*} \Phi(\gamma) &\geq \Phi(\nu) - \varepsilon + \max\limits_{S \in \Pi_d} \langle B_\varepsilon, \mathcal{M}(\operatorname{diag}(S\gamma))\rangle - \langle B_\varepsilon, \mathcal{M}(\operatorname{diag}(\nu))\rangle \\ & \geq\Phi(\nu) - \varepsilon + \max\limits_{S \in \Pi_d}\langle B_\varepsilon, \mathcal{M}(\operatorname{diag}(S\gamma))\rangle - \max\limits_{\tilde{S} \in \Pi_d}\langle B_\varepsilon, \mathcal{M}(\operatorname{diag}(\tilde{S}\nu))\rangle \\ &= \Phi(\nu) - \varepsilon + \max\limits_{S \in \Pi_d}\langle \beta_\varepsilon, \operatorname{P}( \mathcal{M}(\operatorname{diag}(S\gamma)))\rangle - \max\limits_{\tilde{S} \in \Pi_d}\langle \beta_\varepsilon, \operatorname{P}(\mathcal{M}(\operatorname{diag}(\tilde{S}\nu)))\rangle \\ &= \Phi(\nu) - \varepsilon + \max\limits_{S \in \Pi_d}\langle\beta_\varepsilon, \dutchcal{m}_d(S\gamma)\rangle - \max\limits_{\tilde{S} \in \Pi_d}\langle \beta_\varepsilon, \dutchcal{m}_d(\tilde{S}\nu)\rangle, \end{align*} where we could project $\beta_\varepsilon \coloneqq \operatorname{P}(B_\varepsilon)$ since only diagonal matrices are involved, and in the last step we use \eqref{eq:M=PM}.
If $\Phi(\nu) = \infty$, Lemma \ref{lem:SupportingHyperplaneEps} gives $B_\varepsilon \in \mathbb{R}^{K_d}$ such that \begin{align*} W(G) \geq \tfrac{1}{\varepsilon} + \langle B_\varepsilon, \mathcal{M}(G) - \mathcal{M}(\operatorname{diag}(\nu))\rangle \end{align*} for all $G\in \mathbb{R}^{d \times d}$. Taking again the maximum over $G \in \{ \operatorname{diag}(S \gamma) \mid S \in \Pi_d \}$ yields analogously as above \begin{align*} \Phi(\gamma) &\geq \tfrac{1}{\varepsilon} + \max\limits_{S \in \Pi_d} \langle B_\varepsilon, \mathcal{M}(\operatorname{diag}(S\gamma))\rangle - \langle B_\varepsilon, \mathcal{M}(\operatorname{diag}(\nu))\rangle \\ & \geq \tfrac{1}{\varepsilon} + \max\limits_{S \in \Pi_d}\langle\beta_\varepsilon, \dutchcal{m}_d(S\gamma)\rangle - \max\limits_{\tilde{S} \in \Pi_d}\langle \beta_\varepsilon, \dutchcal{m}_d(\tilde{S}\nu)\rangle, \end{align*} where $\beta_\varepsilon = \operatorname{P}(B_\varepsilon)$. \end{proof}
In order to be able to show that $\Phi^{\wedge \vee}$ is lower semicontinuous singular value polyconvex (lsc svpc) in Theorem \ref{thm:main} below, we show in Proposition \ref{lem:Lambda_lsc-svpc} that ${\nu \mapsto \max\limits_{S \in \Pi_d} \langle \beta, \dutchcal{m}_d(S \nu) \rangle}$ is (lsc svpc). The proof of Proposition \ref{lem:Lambda_lsc-svpc} is based on Lemma \ref{lem:B=D}, which extends \cite[Proposition 3.5]{Mie05} to signed singular values. In order to convince ourselves that this extension is valid and to give deeper insights, we recap the intermediate results of \cite{Mie05} (see Lemma \ref{lem:Schur}) and adapt the proof of \cite[Proposition 3.5]{Mie05}.
Let $\odot : \mathbb{R}^{d \times d} \times\mathbb{R}^{d \times d} \to \mathbb{R}^{d \times d}$ be the Schur product for matrices and be defined by the pointwise multiplication which is given by $A \odot B = (A_{ij} B_{ij})_{i,j =1, \dots, d}$. Moreover, we write the scalar product of matrices by $A : B = \sum\limits_{i,j =1, \dots, d} A_{ij} B_{ij}$. Employing the Schur product, we obtain analogously to \cite[Lemma 3.3]{Mie05} the following result. \begin{lemma}\label{lem:Schur} Let $d \in \{2,3\}$, $\beta \in \mathbb{R}^{k_d}$ and $R_1, R_2 \in \mathrm{SO}(d)$. Then, it holds \begin{align*} \langle \beta, \operatorname{P}( \mathcal{M}(R_1\operatorname{diag}(\nu) R_2) )\rangle = (R_1 \odot R_2^\top) : N + \beta_{k_d} \overset{\bullet}{\nu} \end{align*} with \begin{align}\label{eq:def:N} N = \begin{cases} (\beta_1, \beta_2)^\top \nu^\top &\textrm{ for } d= 2, \\ (\beta_1, \beta_2, \beta_3)^\top \nu^\top + (\beta_4, \beta_5, \beta_6)^\top \tilde{\nu}^\top &\textrm{ for } d= 3 \end{cases} \end{align} with $\tilde{\nu} = (\nu_2 \nu_3, \nu_1 \nu_3, \nu_1 \nu_2)^\top$. \end{lemma} In Lemma \ref{lem:B=D}, we will consider the supremum of $A \mapsto A : N + c\overset{\bullet}{\nu}$ over \begin{align*} \mathcal{T}_d \coloneqq \{A = R_1 \odot R_2 \mid R_1, R_2 \in \mathrm{SO}(d) \}. \end{align*} A linear function over a compact set $T$ always attains its maximum on $\operatorname{ex}(\operatorname{conv}(T))$, the extremal points of the convex hull of $T$. For $C,T \in \mathbb{R}^{d \times d}$ with $C$ convex, we define \begin{align*} &\operatorname{conv}(T) \coloneqq \left\{\sum_{j=1}^{d^2+1} \lambda_j A_j\mid \lambda_j \geq 0, \sum_{j=1}^{d^2+1} \lambda_j =1, A_j \in T \right\},\\ &\operatorname{ex}(C) \coloneqq \{ A \in C \mid C\setminus \{A\} \textrm{ is convex} \}. \end{align*} For $d \in \{2,3\}$, the set $\operatorname{ex}(\operatorname{conv}(\mathcal{T}_d))$ was explicitly computed to be $\Pi_d$ in \cite[Proposition 3.4]{Mie05}. 
Employing this together with Lemma \ref{lem:Schur}, we can conclude the following lemma analogously to \cite{Mie05}. \begin{lemma}\label{lem:B=D} Let $d \in \{2,3\}$, $\beta\in \mathbb{R}^{k_d}$ and $\nu \in \mathbb{R}^d$. Then, \begin{align*} \sup\limits_{R_1, R_2 \in \mathrm{SO}(d)} \langle\beta, \operatorname{P} \left(\mathcal{M}( R_1 \operatorname{diag}(\nu) R_2) \right) \rangle = \max\limits_{S \in \Pi_d} \langle\beta, \dutchcal{m}_d(S\nu)\rangle. \end{align*} \end{lemma} \begin{proof} By the Krein--Milman theorem, linear functionals attain their extrema over a compact set on extremal points and, thus, we obtain with Lemma \ref{lem:Schur}: \begin{align*} \sup\limits_{R_1, R_2 \in \mathrm{SO}(d)} \langle \beta, \operatorname{P} \left(\mathcal{M}( R_1 \operatorname{diag}(\nu) R_2) \right) \rangle = \sup\limits_{R_1, R_2 \in \mathrm{SO}(d)} R_1 \odot R_2^\top : N + \beta_{k_d} \overset{\bullet}{\nu} \\ = \sup\limits_{A \in \mathcal{T}_d} A : N + \beta_{k_d} \overset{\bullet}{\nu} = \max\limits_{S \in \Pi_d} S :N + \beta_{k_d} \overset{\bullet}{\nu} = \max\limits_{S \in \Pi_d} \langle\beta, \dutchcal{m}_d(S\nu)\rangle, \end{align*} where $N$ is given by \eqref{eq:def:N}. The last step follows for $d=2$ by \begin{align*} S : N = S : ((\beta_1, \beta_2)^\top \nu^\top) = \langle (\beta_1, \beta_2)^\top, S \nu\rangle \end{align*} and for $d =3$ with $S\widetilde{\nu} = \widetilde{S \nu}$ by \begin{align*} S : N = S : ((\beta_1, \beta_2, \beta_3)^\top \nu^\top+ ((\beta_4, \beta_5, \beta_6)^\top \tilde{\nu}^\top)) \\ = \langle(\beta_1, \beta_2, \beta_3)^\top, S\nu\rangle+ \langle(\beta_4, \beta_5, \beta_6)^\top, \widetilde{S\nu} \rangle. \end{align*} \end{proof}
\begin{lemma}\label{lem:Lambda_lsc-svpc} Let $d \in \{2,3\}$. For every $\beta \in \mathbb{R}^{k_d}$, the mapping \begin{align*} \nu \mapsto \max\limits_{S \in \Pi_d} \langle \beta, \dutchcal{m}_d(S\nu) \rangle \end{align*} is (lsc svpc). \end{lemma} \begin{proof} We show Lemma \ref{lem:Lambda_lsc-svpc} for $d =3$, the case $d=2$ follows similarly. We identify again $\mathbb{R}^{19} \cong \mathbb{R}^{3 \times 3} \times \mathbb{R}^{3 \times 3} \times \mathbb{R}$. Then, we note that for every $\beta \in \mathbb{R}^{k_d}$ and ${R_1, R_2 \in \mathrm{SO}(3)}$, the mapping \begin{align*} (A,B,c) \mapsto \langle \beta, \operatorname{P}(R_1 A R_2,R_1 B R_2, c) \rangle \end{align*} is linear, thus convex and continuous. Since convexity and lower semicontinuity is preserved under the supremum, the mapping \begin{align*} G_\beta(A,B,c) \coloneqq \sup\limits_{R_1, R_2 \in \mathrm{SO}(3)} \langle\beta, \operatorname{P} (R_1 A R_2,R_1 B R_2, c) \rangle \end{align*} is convex and lower semicontinuous. Thus, $W_\beta$ defined by \begin{align*} W_{\beta}(F) \coloneqq G_{\beta}(F, \operatorname{adj}(F)^\top, \det(F)) = G_{\beta}(\mathcal{M}(F)) \end{align*} is lower semicontinuous polyconvex. Moreover, \begin{align*} W_\beta(F) &= \sup\limits_{R_1, R_2 \in \mathrm{SO}(3)} \langle\beta, \operatorname{P} (R_1 F R_2,R_1 \operatorname{adj}(F)^\top R_2, \det(F)) \rangle \\ &= \sup\limits_{R_1, R_2 \in \mathrm{SO}(3)} \langle\beta, \operatorname{P} (R_1 \tilde{R_1}F \tilde{R_2}R_2,R_1\tilde{R_1} \operatorname{adj}(F)^\top\tilde{R_2}R_2, \det(F)) \rangle \\ &=\sup\limits_{R_1, R_2 \in \mathrm{SO}(3)} \langle\beta, \operatorname{P} (R_1 \tilde{R_1}F \tilde{R_2}R_2,R_1 \operatorname{adj}(\tilde{R_1}F\tilde{R_2})^\top R_2, \det(F)) \rangle =W_\beta(\tilde{R_1}F\tilde{R_2}) \end{align*} for every $\tilde{R}_1, \tilde{R}_2 \in \mathrm{SO}(d)$ and, thus, $W_\beta$ is isotropic. 
Then, the corresponding $\Phi_\beta$ which is given by \begin{align*} \Phi_\beta(\nu) = W_\beta(\operatorname{diag}(\nu)) \end{align*} is (lsc svpc).
With Lemma \ref{lem:B=D}, we can reduce the supremum over $R_1,R_2 \in \mathrm{SO}(3)$ to the following maximum \begin{align*} \Phi_\beta(\nu) &= W_\beta(\operatorname{diag}(\nu)) \\ &= \sup\limits_{R_1, R_2 \in \mathrm{SO}(3)} \langle\beta, \operatorname{P} (R_1 \operatorname{diag}(\nu) R_2,R_1 \operatorname{adj}( \operatorname{diag}(\nu))^\top R_2, \det(\operatorname{diag}(\nu))) \rangle \\ &= \max\limits_{S \in \Pi_d} \langle\beta, \dutchcal{m}_d(S\nu)\rangle. \end{align*} Hence, $\nu \mapsto \max\limits_{S \in \Pi_d} \langle\beta, \dutchcal{m}_d(S\nu)\rangle $ is (lsc svpc). \end{proof}
\begin{theorem}\label{thm:main} Let $d \in \{2,3\}$ and $\Phi : \mathbb{R}^d \to \mathbb{R}_\infty$ and $\Psi : \mathbb{R}^d \to \overline{\mathbb{R}}$. Then, the following statements hold: \begin{itemize} \item[(i)]\label{thm:main:1} $\Phi^{\wedge \vee}$ is lower semicontinuous singular value polyconvex (lsc svpc). \item[(ii)]\label{thm:main:2} $\Phi^{\wedge \vee} \leq \operatorname{SvPc} \Phi \leq \Phi$. \item[(iii)]\label{thm:main:3} If $\Phi \leq \Psi$, then $\Phi^{\wedge} \geq \Psi^{\wedge}$ and $\Phi^{\wedge \vee} \leq \Psi^{\wedge \vee}$. \item[(iv)]\label{thm:main:4} $\Phi^{\wedge} = (\operatorname{SvPc}\Phi)^{\wedge}$. \item[(v)]\label{thm:main:5} if $\Phi$ is (lsc svpc), then $\Phi = \Phi^{\wedge \vee}$. \end{itemize} \end{theorem} \begin{proof} \begin{itemize} \item[(i)] We note that $\nu \mapsto \langle\beta, \dutchcal{m}_d(\nu)\rangle$ is (lsc svpc) (cf. Lemma \ref{lem:Lambda_lsc-svpc}) for every $\beta \in \mathbb{R}^{k_d}$ and, hence, $\nu \mapsto \langle\beta, \dutchcal{m}_d(\nu)\rangle -\Phi^{\wedge}(\beta)$ is (lsc svpc). This can be carried over to the supremum over $\beta \in \mathbb{R}^{k_d}$ and, thus, $\Phi^{\wedge \vee}$ is (lsc svpc).
\item[(ii)] The definition of $\Phi^{\wedge}$ implies $\Phi(\nu) \geq \langle\beta, \dutchcal{m}_d(\nu)\rangle - \Phi^{\wedge}( \beta)$ for every ${\nu \in \mathbb{R}^d}$ and every $\beta \in \mathbb{R}^{k_d}$. Then, from the definition of $\Phi^{\wedge \vee}$, it follows immediately $\Phi^{\wedge \vee} \leq \Phi$. We observe that $\Phi^{\wedge \vee}$ is (lsc svpc) (see Theorem \ref{thm:main}(i)) and, thus, \begin{align*} \Phi^{\wedge \vee} \leq \operatorname{SvPc} \Phi \leq \Phi. \end{align*} \item[(iii)] This is a direct consequence of the definition of the singular value polyconvex conjugation.
\item[(iv)] Since $\Phi \geq \operatorname{SvPc} \Phi$ it follows $\Phi^{\wedge} \leq (\operatorname{SvPc} \Phi)^{\wedge}$. Now, we assume that there exists $\beta_0 \in \mathbb{R}^{k_d}$ such that $\Phi^{\wedge}(\beta_0) < (\operatorname{SvPc} \Phi)^{\wedge}(\beta_0)$. We obtain from the definition of $\Phi^{\wedge}$ that \begin{align*} \Phi^{\wedge}(\beta_0) \geq \langle\beta_0, \dutchcal{m}_d(\nu)\rangle - \Phi(\nu) \end{align*} for every $\nu \in \mathbb{R}^d$ and thus \begin{align}\label{eq:14} \Phi(\nu) \geq \langle\beta_0, \dutchcal{m}_d(\nu)\rangle - \Phi^{\wedge}(\beta_0). \end{align} Since $\nu \mapsto \langle\beta_0, \dutchcal{m}_d(\nu)\rangle$ is also (svpc), we can conclude with \eqref{eq:14} \begin{align*} \langle\beta_0, \dutchcal{m}_d(\nu)\rangle - \Phi^{\wedge}(\beta_0) \leq \operatorname{SvPc} \Phi (\nu) \end{align*} for every $\nu \in \mathbb{R}^d$. However, \begin{align*} (\operatorname{SvPc} \Phi)^{\wedge}(\beta_0) &= \sup\limits_{\nu \in \mathbb{R}^d} \langle\beta_0, \dutchcal{m}_d(\nu)\rangle- \operatorname{SvPc} \Phi(\nu) \\ &\leq \sup\limits_{\nu \in \mathbb{R}^d}\langle\beta_0, \dutchcal{m}_d(\nu)\rangle - \langle\beta_0, \dutchcal{m}_d(\nu)\rangle + \Phi^{\wedge}(\beta_0) = \Phi^{\wedge}(\beta_0), \end{align*} which yields a contradiction. Thus, we have $(\operatorname{SvPc} \Phi)^{\wedge} = \Phi^{\wedge}$.
\item[(v)] By Theorem \ref{thm:main}(ii), it is sufficient to show that $\Phi \leq \Phi^{\wedge \vee}$.
If $\Phi(\nu) <\infty$ for $\nu \in \mathbb{R}^d$, Lemma \ref{lem:SupportingPolyHyperplane} gives for every $\varepsilon >0$ a $\beta_\varepsilon \in \mathbb{R}^{k_d}$ such that \begin{align*} \Phi(\gamma) \geq \Phi(\nu) - \varepsilon + \langle\beta_\varepsilon, \dutchcal{m}_d(\gamma)\rangle - \langle\beta_\varepsilon, \dutchcal{m}_d(\nu)\rangle \end{align*} for all $\gamma \in \mathbb{R}^d$, which implies \begin{align*} - \Phi^\wedge(\beta_\varepsilon) &= -\sup\limits_{\gamma \in \mathbb{R}^d} \langle\beta_\varepsilon, \dutchcal{m}_d(\gamma)\rangle - \Phi(\gamma) \geq \Phi(\nu) - \varepsilon - \langle\beta_\varepsilon, \dutchcal{m}_d(\nu)\rangle. \end{align*} Then, we obtain \begin{align*} \Phi^{\wedge \vee}(\nu) &= \sup\limits_{\beta \in \mathbb{R}^{k_d} } \langle\beta, \dutchcal{m}_d(\nu)\rangle - \Phi^\wedge(\beta) \geq \langle\beta_\varepsilon, \dutchcal{m}_d(\nu)\rangle - \Phi^\wedge(\beta_\varepsilon) \\ & \geq \langle\beta_\varepsilon, \dutchcal{m}_d(\nu)\rangle +\Phi(\nu) - \varepsilon - \langle\beta_\varepsilon, \dutchcal{m}_d(\nu)\rangle = \Phi(\nu) - \varepsilon. \end{align*} Sending $\varepsilon \to 0$ yields $\Phi^{\wedge \vee}(\nu) \geq\Phi(\nu)$.
If $\Phi(\nu) = \infty$ for $\nu \in \mathbb{R}^d$, Lemma \ref{lem:SupportingPolyHyperplane} gives for every $\varepsilon >0$ a $\beta_\varepsilon \in \mathbb{R}^{k_d}$ such that \begin{align*} \Phi(\gamma) \geq \tfrac{1}{\varepsilon}+ \langle\beta_\varepsilon, \dutchcal{m}_d(\gamma)\rangle - \langle\beta_\varepsilon, \dutchcal{m}_d(\nu)\rangle , \end{align*} which implies \begin{align*} - \Phi^\wedge(\beta_\varepsilon) &= -\sup\limits_{\gamma \in \mathbb{R}^d} \langle\beta_\varepsilon, \dutchcal{m}_d(\gamma)\rangle - \Phi(\gamma) \geq \tfrac{1}{\varepsilon}- \langle\beta_\varepsilon, \dutchcal{m}_d(\nu)\rangle . \end{align*} Then, we obtain \begin{align*} \Phi^{\wedge \vee}(\nu) &= \sup\limits_{\beta \in \mathbb{R}^{k_d} } \langle\beta, \dutchcal{m}_d(\nu)\rangle - \Phi^\wedge(\beta) \geq \langle\beta_\varepsilon, \dutchcal{m}_d(\nu)\rangle - \Phi^\wedge(\beta_\varepsilon) \\ & \geq \langle\beta_\varepsilon, \dutchcal{m}_d(\nu)\rangle + \tfrac{1}{\varepsilon} - \langle\beta_\varepsilon, \dutchcal{m}_d(\nu)\rangle = \tfrac{1}{\varepsilon}. \end{align*} Sending $\varepsilon \to 0$ yields $\Phi^{\wedge \vee}(\nu) = \infty$. \end{itemize} \end{proof} Employing Theorem \ref{thm:main}, we can show that $\Phi^{\wedge \vee}$ provides the lower semicontinuous singular value polyconvex envelope of $\Phi$. \begin{corollary}\label{cor:main} Let $ d \in \{2,3\}$ and $\Phi : \mathbb{R}^d \to \mathbb{R}_\infty$. Then, \begin{align*} \Phi^{\wedge \vee}(\nu) = \sup\{\Psi(\nu) \mid \Psi \leq \Phi, \Psi \textrm{ is (lsc svpc)} \}. \end{align*} \end{corollary} \begin{proof}We show Corollary \ref{cor:main} by contradiction. Assume there exists $\Psi$, which is (lsc svpc), and $\nu_0 \in \mathbb{R}^d$ such that $\Psi \leq \Phi$ and $\Phi^{\wedge \vee}(\nu_0) < \Psi(\nu_0)$. Then, the pointwise maximum $\max\{\Psi, \Phi^{\wedge \vee} \}$ is (lsc svpc) and, thus, we can assume without loss of generality that $\Phi^{\wedge \vee} \leq \Psi$ holds as well.
Theorem \ref{thm:main} (v) implies that $\Psi = \Psi^{\wedge \vee}$. Moreover, Theorem \ref{thm:main} (iii) yields $\Psi^{\wedge \vee} \leq \Phi^{\wedge \vee}$ and thus $\Psi = \Psi^{\wedge \vee}\leq \Phi^{\wedge \vee}$, which stands in a contradiction to $\Phi^{\wedge \vee}(\nu_0) < \Psi(\nu_0)$. \end{proof}
The singular value polyconvex conjugation can be identified with classical Legendre--Fenchel conjugation. \begin{lemma}\label{lem:Equivalence:Conj} Let $\Phi : \mathbb{R}^d \to \mathbb{R}_\infty$ and $h : \mathbb{R}^{k_d} \to \mathbb{R}_\infty$ such that \begin{align*} h(x) = \begin{cases} \Phi(\nu) &\textrm{ if } x = \dutchcal{m}_d(\nu), \\ \infty &\textrm{ else}. \end{cases} \end{align*} Then, $\Phi^{\wedge\vee}(\nu) = h^{**}(\dutchcal{m}_d(\nu))$ for all $\nu \in \mathbb{R}^d$. \end{lemma} \begin{proof} First, we note that $\langle \beta , x \rangle - h(x) = - \infty$ if $x \not \in \operatorname{Im}(\dutchcal{m}_d)$. Thus, we obtain \begin{align*} \Phi^\wedge(\beta) &= \sup\limits_{\nu \in \mathbb{R}^{d}} \langle \beta , \dutchcal{m}_d(\nu) \rangle - \Phi(\nu) = \sup\limits_{x \in \mathbb{R}^{k_d}} \langle \beta , x \rangle - h(x) = h^*(\beta) \end{align*} for every $\beta \in \mathbb{R}^{k_d}$. Inserting this in the dual conjugations yields \begin{align*} \Phi^{\wedge\vee}(\nu) &= \sup\limits_{\beta \in \mathbb{R}^{k_d}} \langle \beta , \dutchcal{m}_d(\nu) \rangle - \Phi^\wedge(\beta) = \sup\limits_{\beta \in \mathbb{R}^{k_d}} \langle \beta , \dutchcal{m}_d(\nu) \rangle - h^*(\beta) = h^{**}(\dutchcal{m}_d(\nu)). \end{align*} \end{proof}
With Lemma \ref{lem:Equivalence:Conj}, we can prove our main Theorem \ref{thm:Phi=g}. \begin{proof}[Proof of Theorem \ref{thm:Phi=g}]
Assume $\Phi$ is (lsc svpc). Theorem~\ref{thm:main} yields $\Phi^{\wedge\vee} = \Phi$ and, thus, $h^{**}|_{\operatorname{Im}(\dutchcal{m}_d)} = h|_{\operatorname{Im}(\dutchcal{m}_d)}$ for $h$ chosen as in Lemma \ref{lem:Equivalence:Conj}. Therefore, we can choose $g = h^{**}$ which is lsc convex and obtain $g(\dutchcal{m}_d(\nu)) = h^{**}(\dutchcal{m}_d(\nu))= \Phi^{\wedge\vee}(\nu) = \Phi(\nu)$ for all $\nu \in \mathbb{R}^d$.
On the other hand, if $g$ is lsc convex, we obtain $g^{**} =g$. Then, we choose \begin{align*}
h(x)\coloneqq \begin{cases} g(x) &\textrm{ for } x \in\operatorname{Im}(\dutchcal{m}_d), \\ \infty &\textrm{ else}. \end{cases} \end{align*} and obtain with \eqref{eq:f**<g**} and \eqref{eq:f**<f} \begin{align*}
g|_{\operatorname{Im}(\dutchcal{m}_d)} = g^{**}|_{\operatorname{Im}(\dutchcal{m}_d)}\leq h^{**}|_{\operatorname{Im}(\dutchcal{m}_d)} \leq h|_{\operatorname{Im}(\dutchcal{m}_d)} = g|_{\operatorname{Im}(\dutchcal{m}_d)}.
\end{align*} Thus, $h^{**}|_{\operatorname{Im}(\dutchcal{m}_d)} = h|_{\operatorname{Im}(\dutchcal{m}_d)}$ and Lemma \ref{lem:Equivalence:Conj} yields $\Phi^{\wedge\vee} = \Phi$. Then, Theorem \ref{thm:main} implies that $\Phi$ is (lsc svpc). \end{proof}
Having proven our main result Theorem \ref{thm:Phi=g}, we compare it with the polyconvexity criterion for isotropic functions that was presented in \cite{Bal77}. \begin{remark} For isotropic functions $W$ with $W(F) = \infty$ for $\det(F)< 0$, $W$ can be formulated in terms of the singular values and, thus, $\Phi$ is only defined on the positive orthant $[0,\infty)^d$. In \cite{Bal77}, it was shown for such $W$ and $d \in \{2,3\}$ that the corresponding $\Phi$ is singularvalue polyconvex if there exists a convex ${g :[0,\infty)^{k_d} \to \bar{\mathbb{R}}}$ that is non-decreasing with respect to all but the last argument such that $\Phi(\nu) = g(\dutchcal{m}_d(\nu))$ for all $\nu \in [0,\infty)^d$.
Comparing with Theorem \ref{thm:Phi=g}, we see that using the signed singular values and the larger domain $\mathbb{R}^{k_d}$ yields some of these non-decreasing properties on $[0,\infty)^{k_d}$ by the convexity and the symmetry. For $d=2$ it was established in \cite{Sil97, Ros97, Sil02} that the non-decreasing property of \cite{Bal77} can be weakened. Namely, ${\Phi : [0,\infty)^2 \to \mathbb{R}}$ is singular value polyconvex if and only if ${g : [0,\infty)^3 \to \mathbb{R}}$ is convex and for every $\alpha, \delta >0$ the functions $[0,\infty) \ni t \mapsto g(\alpha + t,t,\delta)$ and $[0,\alpha] \ni t \mapsto g(\alpha + t, \alpha-t,\delta)$ are non-decreasing. Thus, we see that the criterion of \cite{Bal77} is not necessary and is in fact more restrictive than the criterion of Theorem \ref{thm:Phi=g}. \end{remark}
\section{Some remarks on polyconvexity with respect to matrix invariants}\label{sec:Invariants} Isotropic functions $W$ with $W(F)= \infty$ for $\det(F)\leq 0$ can be formulated in terms of the matrix invariants $I(U)$ of $U=\sqrt{F^\top F}$, i.e.~there exists $\psi : [0,\infty)^d \to \mathbb{R}$ such that $W(F)= \psi(I(U))$ if $\det(F)> 0$. For $d=3$, the invariants of $U$ are ${I(U) = (\operatorname{tr}(U), \operatorname{tr}(\operatorname{adj}(U)), \det(U))}$). By employing the polyconvexity criterion of \cite{Bal77} and choosing $g: [0,\infty)^{k_d} \to \mathbb{R}$ constant along certain hyperplanes, it was shown in \cite{Ste03} that, for $d \in \{2,3\}$, $W$ is polyconvex if $\psi: [0,\infty)^d \to \mathbb{R}$ is convex and non-decreasing in all except the last argument. Indeed, this particular choice of $g$ weakens the criterion but simplifies its applications and allows the direct consideration of functions that are formulated in terms of matrix invariants. We adapt the approach of \cite{Ste03} on Theorem~\ref{thm:Phi=g}. Hence, we obtain a new sufficient criterion for polyconvexity for functions which are given in terms of the elementary symmetric polynomials of the signed singular values. This new criterion gets along without the restriction that $\psi$ is non-decreasing. However, we obtain the symmetry assumptions \eqref{eq:Sym:psi} since we have to use the elementary symmetric polynomials of the signed singular values instead of the invariants of $U$.
We denote by $e=e(x)$ the elementary symmetrical polynomials, i.e.~ \begin{align*} &e: \mathbb{R}^2 \to \mathbb{R}^2, \ x \mapsto (x_1 + x_2, x_1x_2), \\ &e: \mathbb{R}^3 \to \mathbb{R}^3, \ x \mapsto (x_1 + x_2 + x_3, x_2 x_3 +x_1 x_3 +x_1x_2, x_1 x_2 x_3). \end{align*} We note that $e$ is injective up to permutations, i.e.~$e(x) = e(\tilde{x})$ if and only if $\tilde{x} = S x$ for $S \in \operatorname{Perm}(d)$. Thus, for every $\operatorname{Perm}(d)$-invariant $\Phi : \mathbb{R}^d \to \mathbb{R}_\infty$, there exists ${\psi : \mathbb{R}^d \to \mathbb{R}_\infty}$ such that \begin{align*} \Phi(\nu) = \psi(e(\nu)), \end{align*} where $\psi$ is uniquely defined on $ \operatorname{Im}(e)$. On the other hand, every ${\psi : \mathbb{R}^d \to \mathbb{R}_\infty}$ defines a $\operatorname{Perm}(d)$-invariant function $\Phi$. However, in our application of isotropic functions, $\Phi$ is not only $\operatorname{Perm}(d)$ but also $\Pi_d$-invariant. This yields \begin{align}\label{eq:Sym:psi} \psi( e(\nu)) = \psi(e(S \nu)) \end{align} for all $\nu \in \mathbb{R}^d$ and $S \in \{ \operatorname{diag}(\epsilon) \mid \epsilon \in \{-1,1\}^d, \overset{\bullet}{\epsilon}= 1\}$.
Thus, we can formulate: \begin{proposition}\label{prop:Steigman:criterion} Let $d \in \{2,3\}$, $\Phi: \mathbb{R}^d \to \mathbb{R}_\infty$ be $\Pi_d$-invariant and given by \begin{align*} \Phi(\nu) = \psi(e(\nu)) \textrm{ for } \nu \in \mathbb{R}^d \end{align*} for $\psi :\mathbb{R}^d \to \mathbb{R}_\infty$ convex and lower semicontinuous. Then, $\Phi$ is (lsc svpc). \end{proposition} \begin{proof} We prove the result for $d =3$; it follows in the same way for $d=2$.
We choose $g(\nu_1,\nu_2,\nu_3, \mu_1, \mu_2, \mu_3, \delta) \coloneqq \psi (\nu_1 + \nu_2 +\nu_3, \mu_1 +\mu_2+\mu_3,\delta)$. Then, it can be easily seen that $g : \mathbb{R}^{k_3} \to \mathbb{R}_\infty$ is convex if and only if $\psi$ is convex. Moreover, $g$ is lower semicontinuous if and only if $\psi$ is lower semicontinuous. Thus, the convexity and lower semicontinuity of $\psi$ can be transferred to $g$ and Theorem~\ref{thm:Phi=g} implies that $\Phi$ is (lsc svpc). \end{proof}
We note that Proposition \ref{prop:Steigman:criterion} is only a sufficient and not a necessary condition for the (lsc svpc) of $\Phi$. The reason is the specific choice of $g$ in the proof of Proposition \ref{prop:Steigman:criterion}. Thus, the non-convexity or non-lower semicontinuity of $\psi$ would only show that this particular choice for $g$ does not fulfil the assumptions of Theorem \ref{thm:Phi=g}, but another $g$ may be suitable.
Moreover, it is not clear that this new criterion is stronger than the criterion of \cite{Ste03}. In fact, we could remove the highly restricting non-decreasing property for $\psi$, but we have to define $\psi$ on the whole of $\mathbb{R}^d$ and the $\Pi_d$-invariance of $\Phi$ implies the symmetry \eqref{eq:Sym:psi}.
\section{Conclusion} We derived a polyconvex singular value conjugation for isotropic functions in the space of the singular values. We proved that this conjugation together with its dual yields the lower semicontinuous singular value polyconvex envelope. By identifying this conjugation with the classical Legendre--Fenchel conjugation, we derived a necessary and sufficient criterion for polyconvexity in terms of the singular values. This criterion is useful for the verification of the polyconvexity for isotropic functions. Moreover, this criterion as well as the singular value polyconvex conjugation provide a new powerful tool for the explicit computation of the lower semicontinuous polyconvex envelope for isotropic functions.
Furthermore, we derived a new criterion for the polyconvexity of isotropic functions which are given with respect to the matrix invariants of $\sqrt{F^\top F}$ or, more generally, the elementary symmetric polynomials of the singular values of $F$.
\end{document} |
\begin{document}
\begin{abstract}
Two commonly studied compactifications of Teichmüller spaces of finite type surfaces with respect to the Teichmüller metric are the horofunction and visual compactifications. We show that these two compactifications are related, by proving that the horofunction compactification is finer than the visual compactification. This allows us to use the simplicity of the visual compactification to obtain topological properties of the horofunction compactification. Among other things, we show that the horoboundary of Teichmüller space is path connected and that its Busemann points are not dense, we determine for which surfaces the horofunction compactification is isomorphic to the visual one, and we show that some horocycles diverge in the visual compactification based at some point.
As an ingredient in one of the proofs we show that the extremal length is not $C^{2+\varepsilon}$ for any $\varepsilon>0$ along some paths that are smooth with respect to the piecewise linear structure on measured foliations.
\end{abstract}
\title{A qualitative description of the horoboundary of the Teichmüller metric}
\section{Introduction}
The horofunction compactification of a metric space is defined in terms of the metric, so its properties are well aligned for studying the metric properties of the space. For example, all geodesic rays converge to points and isometries of the space can be extended to homeomorphisms of the compactification. This compactification was first introduced by Gromov \cite{Gromov} as a natural, general compactification, based on previous ideas of Busemann. The horofunction compactification has since found applications, such as obtaining asymptotic properties of random walks on weakly hyperbolic spaces by Maher--Tiozzo \cite{Tiozzo}, determining the isometry group of some Hilbert geometries by Lemmens--Walsh \cite{Walsh4} and obtaining properties of quantum metric spaces by Rieffel \cite{Rieffel}. The compactification is obtained by embedding the metric space $X$ into the space $C(X)$ of continuous functions on $X$ via the map $h:X\hookrightarrow C(X)$ defined by
\[
h(p)(\cdot)=d(p,\cdot)-d(p,b),
\]
where $b\in X$ is an arbitrarily chosen basepoint. As explained, for example, by Walsh \cite[Section 2]{Walsh3}, if the space $X$ is proper then $h$ is an embedding, the closure of $h(X)$ is compact and the \emph{horofunction compactification} of $X$ is defined as the pair $(h,\overline{h(X)})$. By considering two functions equivalent if they differ by a constant one can show that the compactification does not depend on the basepoint $b$. While this compactification has been rather useful, it is sometimes hard to visualize, and there are not that many examples where the horofunction boundary is explicitly known. Some cases where the horofunction compactification is understood include Hadamard manifolds and some of their quotients, by Dal'bo--Peigné--Sambusetti \cite{Dal'bo}, as well as the Heisenberg group with the Carnot--Carath\'{e}odory metric, by Klein--Nicas \cite{Klein}, and Hilbert geometries, by Walsh \cite{Walsh2}.
On the other hand, for a proper, uniquely geodesic, straight metric space $X$ (see \cref{se:metricdefinitions} for definitions) the visual compactification based at some point $b\in X$ is defined by pasting the set of geodesic rays exiting $b$, denoted $D_b$, to the space $X$ in such a way that a sequence $(x_n)\subset X$ converges to some ray $\gamma\in D_b$ if the distance $d(b,x_n)$ goes to infinity as $n\to\infty$, and the geodesic ray between $b$ and $x_n$ converges uniformly on compacts to $\gamma$. See \cref{se:metricdefinitions} for details on the topology of $X\cup D_b$. This compactification may depend on the basepoint $b$, which restricts its usefulness. It can even happen that isometries of $X$ that move the basepoint can not be extended continuously to the compactification, as Kerckhoff showed for Teichmüller spaces \cite{Kerckhoff}. However, the visual compactification usually has a simple geometric interpretation. For example, for a Hadamard manifold, as well as for a Teichmüller space with the Teichmüller metric, this compactification is homeomorphic to a closed ball of the same dimension as the space, where the boundary of that ball is the space of geodesic rays based at $b$. In the context of Teichmüller spaces with the Teichmüller metric, the visual compactification is often called the Teichmüller compactification.
\subsection{Horoboundary of proper, uniquely geodesic, straight metric spaces}
To make this work as general as possible, we begin our analysis by using the aforementioned metric properties of the Teichmüller metric. The relationship between the horofunction compactification and the visual compactification is established by observing that, for such a metric space, a sequence converging to a point in the horofunction compactification also converges in the visual compactification. This allows us to build a continuous map $\Pi$ from the horofunction compactification $\overline{h(X)}$ to the visual compactification $X\cup D_b$, showing that the former is finer than the latter.
Given a geodesic $\gamma$, the path $\gamma(t)$ converges, as $t\to\infty$, to the \emph{Busemann point} associated to $\gamma$ in the horofunction compactification, which we denote $B_\gamma$. As the map $\Pi$ is defined in terms of sequences it follows that $\Pi(B_\gamma)=\gamma$. The existence of the map $\Pi$ shows a strong relation between the horofunction and the visual compactification, which we state in the following result.
\begin{theorem}\label{th:projectionfunction}
Let $(X,d)$ be a proper, uniquely geodesic, straight metric space. For any basepoint $b\in X$, there is a continuous surjection $\Pi$ from the horofunction compactification to the visual compactification based at $b$ such that $\Pi(B_\gamma)=\gamma$ for every ray $\gamma$ starting at $b$ and $\Pi(h(p))=p$ for every $p\in X$.
In particular, the horofunction compactification of $X$ is finer than the visual compactification of $X$ based at any point.
\end{theorem}
Most of the subsequent results in the paper follow as applications of this theorem.
It is not the first time that a map such as $\Pi$ appears in the literature. Similar maps have been found for $\delta$-hyperbolic spaces by Webster--Winchester \cite{Webster}. Walsh defined such a map for Hilbert geometries \cite{Walsh2}, which satisfy the hypothesis of the theorem whenever there are no coplanar noncollinear segments in the boundary of the convex set, as shown by de la Harpe \cite[Proposition 2]{Harpe}.
The map $\Pi$ does not induce a fibration, as its fibers $\Pi^{-1}(\gamma)$ vary from points to higher dimensional sets (see Theorem \ref{th:dimensionfiberslowerbound}). Still, \cref{th:projectionfunction} characterizes the horoboundary as the disjoint union of all the fibers $\Pi^{-1}(\gamma)$. Furthermore, our analysis of the topology of these fibers shows that they are path connected (see Proposition \ref{pr:pathconnected}), which gives the following characterization of the connectivity of the horoboundary.
\begin{proposition}\label{pr:finslerconnected}
The horoboundary of a proper, uniquely geodesic, straight metric space is connected if and only if its visual boundary based at some point (and hence, any) is connected.
\end{proposition}
The \emph{Busemann map} $B$ from the visual compactification $X\cup D_b$ to the horofunction compactification is defined by setting $B(\gamma)=B_\gamma$ for each geodesic ray $\gamma\in D_b$ and $B(p)=h(p)$ for each $p\in X$. With this definition, the map satisfies $\Pi \circ B =\operatorname{id}$. As the next result shows, the continuity of this map is related with the topology of the horofunction compactification.
\begin{proposition}\label{pr:horobocompfiner}
The visual compactification of a proper, uniquely geodesic, straight metric space based at some point is isomorphic to its horofunction compactification if and only if the Busemann map is continuous.
\end{proposition}
The Busemann map is essentially the identity inside $X$, so the only possible points of discontinuity are at the boundary. It is therefore of interest to find a criterion for the continuity of $B$ at the boundary, which turns out to give a criterion for when the fibers $\Pi^{-1}(\gamma)$ are singletons.
\begin{restatable}{proposition}{continuityatqintro}\label{pr:continuityatqintro}
Let $X$ be a proper, uniquely geodesic, straight metric space, $b\in X$ a basepoint and $B$ the corresponding Busemann map. Furthermore, let $\gamma$ be a geodesic ray based at $b$. Then the following three statements are equivalent:
\begin{enumerate}
\item The Busemann map $B$ restricted to the boundary is continuous at $\gamma$.
\item The fiber $\Pi^{-1}(\gamma)$ is a singleton.
\item The Busemann map $B$ is continuous at $\gamma$.
\end{enumerate}
\end{restatable}
In other words, we have reduced the continuity of $B$ to the continuity restricted to the boundary. This result can then be applied to different settings to obtain a more precise characterization. In the case of Teichmüller spaces \cref{pr:continuityatqintro} can be used to get an explicit criterion for the continuity of the Busemann map in terms of the quadratic differentials associated to the geodesic rays, giving us a characterization of the fibers that are singletons.
\subsection{Horoboundary of the Teichmüller metric}
Let $S$ be a compact surface with (possibly empty) boundary and finitely many marked points, where we allow marked points to be on the boundary. Denote by $\mathcal{T}(S)$ its Teichmüller space equipped with the Teichmüller metric. Furthermore, for any quadratic differential $q$ based at some basepoint $b\in \mathcal{T}(S)$, denote by $\tray{q}{\cdot}$ the geodesic ray in $\mathcal{T}(S)$ starting at $b$ in the direction $q$, and $V(q)$ the vertical foliation associated to $q$, see \cref{se:backgroundteichmuller} for a quick introduction or the book by Farb--Margalit \cite{primer} for a more in-depth explanation of these concepts. Recall that a measured foliation is \emph{indecomposable} if it is either a thickened curve, or a component with a transverse measure that cannot be expressed as the sum of two projectively distinct non zero transverse measures. Furthermore, each measured foliation can be decomposed uniquely into finitely many indecomposable components (see \cref{se:measuredfoliations} for detailed definitions). Walsh has shown the following characterization of the convergence of Busemann points in terms of the convergence of the associated quadratic differentials.
\begin{theorem}[Walsh {\cite[Theorem 10]{Walsh}}]\label{th:infusibledefinitionintro}
Let $(q_n)$ be a sequence of unit area quadratic differentials based at $b\in \mathcal{T}(S).$ Then, $B_{\tray{q_n}{\cdot}}$ converges to $B_{\tray{q}{\cdot}}$ if and only if both of the following hold:
\begin{enumerate}
\item $(q_n)$ converges to $q$ with respect to the $L^1$ norm on $T^*_b\mathcal{T}(S)$;
\item for every subsequence $(G^n)_n$ of indecomposable measured foliations such that, for each $n\in \mathbb{N}$, $G^n$ is a component of $V(q_n)$, we have that every limit point of $G^n$ is indecomposable.
\end{enumerate}
\end{theorem}
While Walsh's proof is done in the context of surfaces without boundary, it can be easily extended to our setting. In view of this theorem, we say that a sequence of quadratic differentials $(q_n)$ \emph{converges strongly} to $q$ if it satisfies the two conditions of \cref{th:infusibledefinitionintro}. Furthermore, we say that $q$ is \emph{infusible} if every sequence of quadratic differentials converging to $q$ converges strongly. By \cref{pr:continuityatqintro}, a quadratic differential $q$ is infusible if and only if the Busemann map is continuous at $\tray{q}{\cdot}$. In \cref{th:maxcondition}, we derive a topological characterization of the vertical foliations of infusible quadratic differentials. This allows us to determine precisely which surfaces only admit infusible quadratic differentials, yielding the following result.
\begin{theorem}\label{th:homeomorphictovisualcomp}
Let $S$ be a compact surface of genus $g$ with $b_m$ and $b_u$ boundary components with and without marked points respectively and $p$ interior marked points. Then the horofunction compactification of $\mathcal{T}(S)$ is isomorphic to the visual compactification if and only if $3g+2b_m+b_u+p\le 4$.
\end{theorem}
This result had been previously proven by Miyachi \cite{Miyachi3} for surfaces without boundary, that is, when $b_m=b_u=0$. For the cases where we do not have an isomorphism, Miyachi found non-Busemann points in the boundary. These points are in the closure of Busemann points, which prompted Liu--Su to ask the following question:
\begin{question}[Liu--Su {\cite[Question 1.4.2]{LiuSu}}]
Is the set of Busemann points dense in the horofunction boundary?
\end{question}
We give a negative answer to this question, summed up in the following result.
\begin{theorem}\label{th:busemannnotdense}
Let $S$ be a closed surface of genus $g$ with $p$ marked points. Then the Busemann points are not dense in the horoboundary of $\mathcal{T}(S)$ whenever $3g+p\ge5$.
\end{theorem}
To achieve this result we use Liu--Su's \cite{LiuSu} and Walsh's \cite{Walsh} characterization of the horofunction compactification as the Gardiner--Masur compactification. The latter compactification consists of certain real-valued functions on the space of measured foliations. Note that we use a slightly different definition than usual, as the definition we use is more well suited for our computations, and more easily extendable to surfaces with boundary (see \cref{se:GMboundaryWalshpaper} for the precise definition). For each point in the horofunction compactification there is an associated real-valued function on the set of measured foliations. We show that the functions associated to elements in the closure of Busemann points are polynomials of degree 2 with respect to some variables (see Proposition \ref{pr:busemanclosureshape} for the precise statement). We then show that the elements of the Gardiner--Masur boundary found by Fortier Bourque in \cite{Fortier} do not satisfy that condition. The main ingredient for this last part of the reasoning is the following result, which shows that extremal length is not $C^{2+\epsilon}$ along certain smooth paths.
\begin{theorem}\label{th:extremallengthnotsmoothintro}
Let $S$ be a closed surface of genus $g$ with $p$ marked points and empty boundary satisfying $3g+p\ge5$. Then there is a point $X\in\mathcal{T}(S)$ and a path $G_t$, $t\in[0,t_0]$, in the space of measured foliations on $X$, smooth with respect to the canonical piecewise linear structure of the space of measured foliations, such that $\operatorname{Ext}(G_t)$ is not $2+\varepsilon$ Hölder continuous for any $\varepsilon>0$.
\end{theorem}
The canonical piecewise linear structure of the space of measured foliations was developed by Bonahon \cite{Bonahon}, \cite{Bonahon2} and \cite{Bonahon3}. The first derivative of the extremal length along such a path was determined by Miyachi \cite{Miyachi2}, so our proof is based on finding cases where Miyachi's expression is not $C^{1+\varepsilon}$. This follows from an explicit computation, whose complication is greatly reduced by using previous estimates established by Markovic \cite{Markovic}. It is not known whether the extremal length is twice differentiable along such paths. Rees proves that Teichmüller distance is $C^{2}$ \cite{Rees}, but not $C^{2+\varepsilon}$ \cite{Rees2}, so if the extremal length is $C^2$ we would have an analogous phenomenon.
Another well-studied compactification of Teichmüller space was given by Thurston \cite{Thurston}. While neither Thurston's nor the horofunction compactification is finer than the other, these two compactifications are related. Indeed, as shown by Miyachi \cite{Miyachi}, there is a bicontinuous map from the union of $\mathcal{T}(S)$ and uniquely ergodic foliations in Thurston's boundary to a subset of the horofunction compactification. Masur showed \cite{Masur2} that this result can be interpreted to say that these two compactifications are the same almost everywhere according to the Lebesgue measure on Thurston's boundary. The image of uniquely ergodic foliations by the bicontinuous map is the set of Busemann points associated to uniquely ergodic foliations. As we show in \cref{th:nowheredense}, this set is nowhere dense within the horoboundary. Hence these two compactifications are not the same almost everywhere according to any strictly positive measure on the horoboundary (see \cref{co:nofullmeasure}).
Under some smoothness properties of the Teichmüller metric we are able to use the maps $\Pi_b$ to give an alternative definition of the horofunction compactification based on geometric notions. This definition characterizes the horofunction compactification as the reachable subset of the product of all visual compactifications obtained by choosing different basepoints (see \cref{se:alternativehorofunctiondefinition} for details). Hence, the horofunction compactification can be interpreted as a collection of the asymptotic information provided by all visual compactifications. As a straightforward result of this alternative definition we get the following characterization of converging sequences in the horofunction compactification.
\begin{corollary}\label{co:convergingtohoriffconvergingeveryvisintro}
A sequence $(x_n)\subset \mathcal{T}(S)$ converges in the horofunction compactification if and only if the sequence converges in all the visual compactifications.
\end{corollary}
Considering the horocycles diverging in the horofunction compactification found by Fortier Bourque \cite{Fortier} we get that there is some visual compactification in which these horocycles do not converge.
\begin{corollary}\label{co:divergencehorocycles}
Let $S$ be a closed surface of genus $g$ with $p$ marked points, such that $3g+p\ge5$. There is a basepoint such that a horocycle diverges in the visual compactification based at that point.
\end{corollary}
This contrasts with the behavior of Teichmüller rays, which converge in all visual compactifications \cite[Theorem 7]{Walsh}.
The structure of the horoboundary provided by \cref{th:projectionfunction}, as well as the path-connectivity of the fibers, allows us to prove the following connectivity result.
\begin{theorem}\label{th:teichconnected}
The horoboundary of any Teichmüller space of real dimension at least 2 is path connected.
\end{theorem}
Furthermore, we also prove that whenever the surface has empty boundary the map $\Pi$ restricted to the horoboundary admits a section, while it only admits a section for the simpler cases if the boundary is nonempty (see \cref{th:globalsection} for details).
\begin{figure}
\caption{Sketch of the shape of the horoboundary of the Teichmüller metric for surfaces without boundary.}
\label{fi:cartoonfinal}
\end{figure}
\cref{fi:cartoonfinal} shows a sketch of what we think the horoboundary looks like based on the results of this paper. The outer circle represents the section given by Theorem \ref{th:globalsection}. Each line perpendicular to the sphere represents one of the fibers induced by the map $\Pi$, so it is associated with a unique Teichmüller ray starting at $b$. Note that while by Proposition \ref{pr:pathconnected} the fibers are path connected, by Theorem \ref{th:dimensionfiberslowerbound} they are bigger than segments in some cases. Furthermore, a priori they might not be contractible.
The nearest point to the basepoint $b$ of each fiber represents the Busemann point associated to the geodesic joining $b$ to the fiber. This point could indeed be considered the nearest point to $b$ from the fiber, as one can access it in a straight way, through a geodesic exiting $b$. On the other hand, the points in the outer circle represent the points associated to the section alluded to earlier. These can be accessed through a sequence of Busemann points whose associated fiber is a point, which can be considered as the most tangential way possible to reach points in the boundary.
Following a result by Masur \cite{Masur2}, with respect to the measure on the fibers induced by the Lebesgue measure on the set of Teichmüller rays exiting $b$, almost all the fibers are actually points. As we shall see in \cref{th:nowheredense} these points are nowhere dense in the boundary.
Note that there exist paths within the horoboundary connecting the fibers without passing through the section, and a priori there may be paths not represented in the sketch along which the fibers vary continuously.
For surfaces for which the map $\Pi$ does not admit a global section, a similar sketch could be drawn, although there would be no continuous global section in some cases. Hence, the outer circle would be broken at some places.
Finally, Liu--Su's and Walsh's characterization of the horofunction compactification as the Gardiner--Masur compactification can be used to translate some of these findings to results regarding the asymptotic value of extremal length functions. For example, we get the following estimate.
\begin{theorem}\label{th:limits}
Let $(q_n)$ be a sequence of unit quadratic differentials converging strongly to a unit quadratic differential $q$. Denote $G_j$ the components of the vertical foliation associated to $q$, and $H(q)$ the horizontal foliation. Then, for any $F\in \mathcal{MF}$ and sequence $(t_n)$ of real values converging to positive infinity we have
\[\lim_{n\to\infty} e^{-2t_n}\operatorname{Ext}_\tray{q_n}{t_n}(F)=\sum_j\frac{i(G_j,F)^2}{i(G_j,H(q))}.\]
\end{theorem}
This generalizes a previous result proven by Walsh in \cite[Theorem 1]{Walsh}, where the same is shown for $q_n$ constant.
\subsection{Outline of the paper and a note for the reader interested in surfaces without boundary}
The paper is structured as follows. In \cref{se:metricdefinitions} we introduce the necessary metric notions used in the paper. We follow in \cref{se:horofunctionmetric} by proving the results related to the more general metric setting, such as showing that the horofunction compactification is finer than the visual one. In \cref{se:backgroundteichmuller} we give a short review of the necessary background on Teichmüller spaces. In \cref{se:continuitybusemanteich} we determine which quadratic differentials are infusible, and find which surfaces admit infusible quadratic differentials, getting a proof of \cref{th:homeomorphictovisualcomp}. In \cref{se:shapeoffibers} we characterize the points in the closure of Busemann points, and get some bounds on the dimension of the fibers of the map $\Pi$. In \cref{se:nondensity} we show that Busemann points are not dense. In \cref{se:globaltopology} we determine which surfaces result in the map $\Pi$ having a section, and prove that the horoboundary is path connected. Finally, in \cref{se:formulas} we use the previous results to obtain estimates regarding asymptotic values of extremal lengths.
Some of the most dense parts of this paper are due to the added complexity of considering surfaces with boundary. As such, the reader focused in surfaces with empty boundary might want to omit the corresponding sections on a first reading. One of the largest related parts starts after the remark following \cref{th:maxcondition} and ends before the start of \cref{se:horocycles}. The other sizable part starts with \cref{pr:continuoussection2} and ends at the start of the proof of \cref{th:teichconnected}, where we note that the proof is significantly simpler in the case of surfaces without boundary.
\subsection*{Acknowledgments}
The author would like to thank Maxime Fortier Bourque and Vaibhav Gadre for many helpful discussions and corrections.
\section{Metric definitions}\label{se:metricdefinitions}
\subsection{Compactifications}
A compactification of a space serves, among other things, as a way of characterizing convergence to infinity. Formally, a \emph{compactification} of a topological space $X$ is a pair $(f,\overline{X})$, where $\overline{X}$ is a compact topological space and $f:X\to \overline{X}$ is an embedding with $f(X)$ dense in $\overline{X}$. The boundary of a compactification $\partial \overline{X}=\overline{X}-X$ then describes the different ways of converging to infinity provided by that compactification. We shall usually identify the points in $X$ with the ones in $\overline{X}$ via the map $f$, and say that a sequence $(x_n)\subset X$ converges in $\overline{X}$ if $f(x_n)$ converges.
A compactification $(f_1,X_1)$ is \emph{finer} than another one $(f_2,X_2)$ if there exists a continuous extension $\overline{f}_2:X_1\to X_2$ of $f_2$ such that $\overline{f}_2\circ f_1 =f_2$. Since $f_2(X)$ is dense in $X_2$, the continuous extension $\overline{f}_2$ is surjective. Furthermore, we can restrict the map $\overline{f}_2$ to the boundary to get a surjective map $\left.\overline{f}_2\right\vert_{\partial X_1}:\partial X_1\to \partial X_2$, which can be seen as a projection. Having a compactification finer than another one means, from an intuitive point of view, that the finer compactification catalogs more ways of converging to infinity than the other one. Namely, any sequence in $X$ converging in the finer compactification converges also in the coarser one, while the opposite may not be true.
We say that two compactifications are isomorphic if each one is finer than the other one. The following lemma, found in \cite[Lemma 17]{Walsh}, reflects the intuitive notion of finer compactifications.
\begin{lemma}\label{le:walshcompact}
Let $(f_1,X_1)$ and $(f_2,X_2)$ be two compactifications of $X$ such that $f_2$ extends continuously to an injective map $\overline{f}_2:X_1\to X_2$. Then the two compactifications are isomorphic.
\end{lemma}
We will usually refer to the space $\overline{X}$ as the compactification when the embedding is clear from the context.
Since the images of $X$ by the embedding are dense, the extensions we get to compare the compactifications are unique. That is, we have the following result.
\begin{lemma}
Let $(f_1,X_1)$ and $(f_2,X_2)$ be two compactifications of $X$ such that $X_1$ is finer than $X_2$. Then the extension $\overline{f_2}:X_1\to X_2$ is unique.
\end{lemma}
\begin{proof}
For any $x\in X$ we have $\overline{f_2}(f_1(x))=f_2(x)$. Hence, the image of $\overline{f_2}$ is determined on a dense subset of $X_1$, so by continuity it is determined on $X_1$.
\end{proof}
\subsection{Visual compactification of proper, uniquely geodesic, straight spaces.} \label{se:visualdefinition}
Let $(X,d)$ be a metric space. We shall say that a map $\gamma$ from an interval $I\subset \mathbb{R}$ to $X$ is a \emph{geodesic} if it is an isometric embedding, that is, if $d(\gamma(t),\gamma(s))=|t-s|$. We shall consider two geodesics to be equal if their image is equal and have the same orientation. A space is \emph{uniquely geodesic} if for any two distinct points $a,b\in X$ there is a unique geodesic starting at $a$ and ending at $b$.
Furthermore, we say that the space is \emph{proper} if the closed balls $D(x,r)=\{p\in X\mid d(p,x)\le r\}$ are compact.
If geodesic segments can be extended uniquely, that is, if for any geodesic segment $\gamma_1$ there is a unique bi-infinite geodesic $\gamma_2$ such that $\gamma_1\cap \gamma_2=\gamma_1$, we say that the space is \emph{straight}.
Let then $X$ be a proper, uniquely geodesic, straight space and let $D_b$ be the set of infinite geodesic rays starting at $b$, with the topology given by uniform convergence on compact sets. Furthermore, denote $S_b^1=\{x\in X \mid d(x,b)=1\}$ the sphere of radius 1 around $b$.
\begin{lemma}\label{le:geodesicsequalsphere}
The map from $D_b$ to $S_b^1$ defined by sending $\gamma\in D_b$ to $\gamma(1)$ is a homeomorphism.
\end{lemma}
\begin{proof}
Since the topology on $D_b$ is given by uniform convergence on compact sets, the point $\gamma(1)$ varies continuously with respect to $\gamma$.
On the other hand, since the space is straight and has unique geodesics, given any point $a\in S_b^1$ there is a unique geodesic ray starting at $b$ and passing through $a$. This is the inverse to the map obtained by evaluating the geodesics. To see that the relation is continuous we consider a sequence $(a_n)\subset S_b^1$ converging to some $a$, and denote $(\gamma_n)$ and $\gamma$ the associated geodesics. Assume $\gamma_n$ does not converge to $\gamma$. Then we have a subsequence without $\gamma$ as an accumulation point. For any $t>0$, the geodesic segments $\left.\gamma\right\vert_{[0,t]}$ are contained in the ball of radius $t$, which is compact, as $X$ is proper. As these are geodesics we have equicontinuity, so by Arzelà--Ascoli we can take a subsequence converging uniformly to some path $\gamma'$. Since the distance function is continuous, $\gamma'$ is a geodesic. Furthermore, $\gamma'(1)=\lim_{n\to\infty} \gamma_n(1)=\lim_{n\to\infty} a_n= a$. By uniqueness of geodesics, $\gamma'$ and $\gamma$ are equal when restricted to $[0,1]$, which by straightness implies they are equal. Hence, $\gamma_n$ converges to $\gamma$ uniformly on the compact $[0,t]$.
\end{proof}
Following a similar reasoning it is possible to show the following, still under the same hypotheses on $X$.
\begin{lemma}\label{le:straightspacesareballs}
The space $X$ is homeomorphic to $D_b\times [0,\infty)/D_b\times\{0\}$.
\end{lemma}
\begin{proof}
We define the map $C:D_b\times [0,\infty)/D_b\times\{0\}\to X$ given by $C(\theta,r)=\theta(r)$. This is well defined, as $C(\theta,0)=b$ for any $\theta\in D_b$. Furthermore, this is a bijection, since for every $x\in X-\{b\}$ there is a unique geodesic ray from $b$ to $x$. The map is continuous, as the topology on $D_b$ is given by uniform convergence on compact sets. To see that the inverse is continuous consider a sequence $a_n\in X$ converging to some $a\in X$. If $a=b$, then $d(a_n,b)\to 0$, so we have continuity. Otherwise we denote $r_n=d(a_n,b)$ and $r=d(a,b)$. We have $r_n\to r$, so denoting $(\gamma_n)$ and $\gamma$ the unique geodesic in $D_b$ such that $\gamma_n(r_n)=a_n$ and $\gamma(r)=a$ and applying Arzelà--Ascoli's theorem in the same way as in \cref{le:geodesicsequalsphere}, we have that $\gamma_n$ converges to $\gamma$.
\end{proof}
The space $D_b\times [0,\infty)/D_b\times\{0\}$ can be included into the compact space $D_b\times [0,\infty]/D_b\times\{0\}$, which can be written as $\left(D_b\times [0,\infty)/D_b\times\{0\}\right)\cup D_b\times\{\infty\}$. Using the homeomorphism from \cref{le:straightspacesareballs}, we can use this inclusion to give a compact topology on the space $X\cup D_b$. The \emph{visual compactification} is defined as the pair $(i,X\cup D_b)$, where $i$ is the inclusion $i:X\to X\cup D_b$ and the topology on the space $X\cup D_b$ is the one we just defined. We shall denote $X\cup D_b$ as $\vbc{X}_b$, or $\vbc{X}$ when the basepoint is not relevant to the discussion.
\subsection{Horofunction compactification}\label{se:horodefinition}
The second compactification that will play a part in this paper is slightly more involved and difficult to visualize.
Let $X$ be a proper, uniquely geodesic, straight metric space. Given a basepoint $b\in X$, one can embed $X$ into the space of continuous functions from $X$ to $\mathbb{R}$ via the map $h:X\to C(X)$ defined by
\[h(x)(\cdot ):=d(x,\cdot)-d(x,b).\]
The topology given to $C(X)$ is that of uniform convergence on compact sets. The map $h$ is indeed continuous, as the distance function is continuous. Furthermore, $h$ is injective, as $h(x)$ has a strict global minimum at $x$. It can also be proven that since $X$ is proper, $h$ is an embedding. For more details about this construction see \cite[Section 2]{Walsh3}. Furthermore, the properness of $X$ implies it is second countable, so the closure of $h(X)$ is compact, Hausdorff and second countable. We shall denote the closure of $h(X)$ on $C(X)$ as $\hbc{X}$. The \emph{horofunction compactification} is defined as the pair $(h,\hbc{X})$. We call the set $\hbd{X}=\hbc{X}-X$ the \emph{horofunction boundary} or \emph{horoboundary}, and we call its members \emph{horofunctions}. If we want to specify the chosen basepoint we write $\hbc{X}_b$. However, it is possible to see that quotienting the compactification by letting $f\sim g$ whenever the difference is constant we get an isomorphic compactification, showing that the horofunction compactification does not depend on the basepoint.
Usually the easier points to identify in the horoboundary are the Busemann points. These are the ones that can be reached as a limit along almost geodesics, which is a slight weakening of the notion of geodesic by allowing an additive constant approaching $0$. That is, a path $\gamma:[0,\infty)\to X$ is an \emph{almost geodesic} if for each $\varepsilon>0$,
\[\left|d(\gamma(0),\gamma(s))+d(\gamma(s),\gamma(t))-t\right|<\varepsilon\]
for all $s$ and $t$ large enough, with $s\le t$.
Rieffel \cite{Rieffel} proved that every almost geodesic converges to a limit in $\hbd{X}$. A horofunction is called a \emph{Busemann point} if there exists an almost geodesic converging to it. We shall denote the Busemann point associated in this way to the almost geodesic $\gamma$ by $B_\gamma$.
\section{Horofunction compactification of proper, uniquely geodesic, straight metric spaces.}\label{se:horofunctionmetric}
Fix a uniquely geodesic, proper and straight metric space $(X,d)$ and a basepoint $b\in X$. We will assume $X$ satisfies these hypotheses through this section. For each geodesic ray $\gamma\in \vbd{X}$ starting at $b$ there is an associated Busemann point $B_\gamma\in \hbd{X}$. We can extend this map to all the visual compactification by setting it as the identification with the map $h$ on $X$ given by the horofunction compactification. That is, we define the \emph{Busemann map} $B:\vbc{X}\to \hbc{X}$ by setting $B(\gamma)=B_\gamma$ for $\gamma\in \vbd{X}$ and $B(x)=h(x)$ for $x\in X$. The relevance of this map can be seen with the following result.
\begin{lemma}\label{le:visualfineriffBcontinuous}
The visual compactification $(i, \vbc{X})$ is finer than the horofunction compactification $(h,\hbc{X})$ if and only if the map $B$ is continuous. \end{lemma} \begin{proof}
We have that $B(i(x))=h(x)$, so $B$ is an extension of $h$ to $\vbc{X}$. Hence, if $B$ is continuous, then the visual compactification is finer than the horofunction compactification.
On the other hand, if the visual compactification is finer than the horofunction compactification, then we have a continuous map $f:\vbc{X}\to \hbc{X}$. For every $x\in X$, we have $f(i(x))=h(x)=B(i(x))$. Furthermore, for any ray $\gamma$ starting at the basepoint we have $f(\gamma)=\lim_{t\to\infty} f(i(\gamma(t)))=\lim_{t\to\infty} h(\gamma(t))=B(\gamma)$. Hence, $B=f$, and $B$ is continuous. \end{proof}
In general, the Busemann map may not be surjective nor continuous. However, we have the following.
\begin{proposition}\label{pr:bussmapinjective}
For a proper, uniquely geodesic, straight metric space $(X,d)$ the Busemann map is injective. \end{proposition} \begin{proof}
For each $x\in X$, the associated function $h(x)$ has a global minimum at $x$, while $B_\gamma$ is unbounded below for every $\gamma\in \vbd{X}$. Hence, in the interior of $\vbc{X}$ the map is injective and $B(X)\cap B(\vbd{X})=\emptyset$. Assume we have $\gamma, \gamma'\in \vbd{X}$ such that $\gamma\neq \gamma'$ and $B(\gamma)=B(\gamma')=\xi$. Then, for a given sequence $t_n\to\infty$ we have $\lim_{n\to\infty}h(\gamma(t_n))=\lim_{n\to\infty}h(\gamma'(t_n))=\xi$. For any $t\in\mathbb{R}$ and any $n$ such that $t_n>t$ we have
\[h(\gamma(t_n))(\gamma(t))=d(\gamma(t_n),\gamma(t))-d(\gamma(t_n),\gamma(0))=t_n-t-t_n=-t,\]
and similarly for $\gamma'$.
Hence $\xi(\gamma(t))=\xi(\gamma'(t))=-t$ for all $t$.
Fix now a $t>0$. We have
\begin{align*}
-t =\xi(\gamma'(t))=&\lim_{n\to\infty}(d(\gamma'(t),\gamma(t_n))-d(b,\gamma(t_n)))\\
=&\lim_{n\to\infty}(d(\gamma'(t),\gamma(t_n))-t_n).
\end{align*}
That is, there is a sequence $\varepsilon_n$ with $\varepsilon_n\to 0$ such that
\[t_n-t+\varepsilon_n \ge d(\gamma'(t),\gamma(t_n))\ge t_n-t-\varepsilon_n\]
for every $n$.
By straightness we can extend $\gamma$ in the negative direction towards $\gamma(-s)$ for some $s>0$. We shall now show that the geodesic $\gamma$ does not minimize the distance between $\gamma(-s)$ and $\gamma(t_n)$ for $n$ big enough. Since the space is straight, the geodesic segment between $\gamma(-s)$ and $b$ can be extended uniquely, so concatenating it with the segment between $b$ and $\gamma'(t)$ does not result in a geodesic. Hence, the distance between $\gamma'(t)$ and $\gamma(-s)$ is strictly smaller than $s+t$. That is, there is some $\delta>0$ such that $d(\gamma(-s),\gamma'(t))<t+s-\delta$. As shown in \cref{fi:badtriangles} we get a path going from $\gamma(-s)$, to $\gamma(t_n)$, passing through $\gamma'(t)$ that has length $t+s-\delta+t_n-t+\varepsilon_n=t_n+s-\delta+\varepsilon_n$. Hence, taking $n$ big enough so that $\varepsilon_n<\delta$ we get that the geodesic segment between $\gamma(-s)$ and $\gamma(t_n)$ is not minimizing. This is a contradiction, from which we conclude that $\gamma=\gamma'$. Therefore, $B$ is injective.
\begin{figure}
\caption{The triangles involved in the proof of Proposition \ref{pr:bussmapinjective}.}
\label{fi:badtriangles}
\end{figure} \end{proof}
Hence, given a Busemann point $\xi$ in $B(\vbd{X})$ we have a unique associated geodesic ray $\gamma\in\vbd{X}$ such that $\xi(\gamma(t))=-t$ for all $t$. Our next aim is to build a similar relation for all other horofunctions. Our approach is similar to the one used by Walsh in \cite[Section 7]{Walsh}.
We say that a geodesic $\gamma$ is an \emph{optimal geodesic} for a certain horofunction $\xi\in \hbc{X}$ if $\xi(\gamma(t))-\xi(\gamma(0))=-t$ for all $t\in \mathbb{R}$. We shall now see that each function in the horoboundary has at least one optimal geodesic.
\begin{lemma}\label{le:fixingvaluealongpath}
Let $X$ be a proper, uniquely geodesic, straight metric space and let $\xi\in \hbd{X}$ be a horofunction. Suppose that
$(x_n)\subset X$ converges to $\xi$, with $x_n=\gamma_n(t_n)$, $\gamma_n\in\vbd{X}$ and $(\gamma_n)$ converging to $\gamma$ as $n\to \infty$. Then $\xi(\gamma(t))=-t$ for every $t\in \mathbb{R}$. That is, $\gamma$ is an optimal geodesic for $\xi$. \end{lemma} \begin{proof}
Fix $t$. We have that \[\xi(\gamma(t))=\lim_{n\to\infty}(d(\gamma(t),\gamma_n(t_n))-d(b,\gamma_n(t_n)))=\lim_{n\to\infty}(d(\gamma(t),\gamma_n(t_n))-t_n).\]
As $n$ goes to infinity, $\gamma_n$ converges to $\gamma$. Hence by the given topology on the visual boundary, the maps $\gamma_n(\cdot)$ converge uniformly on compact sets to the geodesic $\gamma(\cdot)$. In particular, denoting $d(\gamma(t),\gamma_n(t))=\varepsilon_n$ we have $\varepsilon_n\to 0$.
We get then \cref{fi:squeezedtriangle}, so by the triangle inequality, \[|d(\gamma(t),\gamma_n(t_n))-(t_n-t)|=|d(\gamma(t),\gamma_n(t_n))-d(\gamma_n(t),\gamma_n(t_n))|\le\varepsilon_n,\]
and so $\xi(\gamma(t))=-t$.
\begin{figure}
\caption{
In the proof of Lemma \ref{le:fixingvaluealongpath}, $\gamma_n$ converges to $\gamma$, so $\gamma_n(t)$ converges to $\gamma(t)$, and hence the distance between $\gamma_n(t_n)$ and $\gamma_n(t)$ gets arbitrarily close to the distance between $\gamma_n(t_n)$ and $\gamma(t)$.
}
\label{fi:squeezedtriangle}
\end{figure} \end{proof}
Since $\vbd{X}$ is compact, for any horofunction $\xi\in\hbd{X}$ and sequence $(x_n)\subset X$ converging to $\xi$ we can take a subsequence such that the hypotheses of \cref{le:fixingvaluealongpath} are satisfied, so each $\xi\in\hbd{X}$ does have at least one optimal geodesic.
If $\xi$ has another optimal geodesic $\gamma'$ with $\gamma'(0)=\gamma(0)$ we have at least two geodesics along which $\xi(\gamma(t))=\xi(\gamma'(t))=-t$ for all $t$. Following a reasoning similar to the one in the proof of \cref{pr:bussmapinjective}, we get a contradiction. This time, however, we have to be a bit more careful about the distances, as instead of two fixed rays we have a fixed ray and a sequence converging to a distinct fixed ray.
\begin{proposition}\label{pr:optimalpath}
Let $\xi\in \hbd{X}$ and $b\in X$. Then there is a unique optimal geodesic for $\xi$ passing through $b$. \end{proposition} \begin{proof}
Let $(x_n)=(\gamma_n(t_n))$ be a sequence converging to $\xi$, with $(\gamma_n) \subset \vbd{X}$, and take a subsequence such that $\gamma_n$ converges to some geodesic $\gamma$. By \cref{le:fixingvaluealongpath}, $\gamma$ is an optimal geodesic. Assume that we have a different optimal geodesic $\gamma'$ passing through $b$.
Using that $h(\gamma_n(t_n))$ converges pointwise to $\xi$ we have
\begin{align*}
-t =\xi(\gamma'(t))=&\lim_{n\to\infty}(d(\gamma'(t),\gamma_n(t_n))-d(b,\gamma_n(t_n)))\\
=&\lim_{n\to\infty}(d(\gamma'(t),\gamma_n(t_n))-t_n).
\end{align*}
Hence, there is a sequence $\varepsilon_n$ with $\varepsilon_n\to 0$ such that
\[t_n-t+\varepsilon_n \ge d(\gamma'(t),\gamma_n(t_n)) \ge t_n-t-\varepsilon_n.\]
We proceed by showing that for $n$ big enough there is some $s>0$ such that the geodesic $\gamma_n$ does not minimize the distance between $\gamma_n(-s)$ and $\gamma_n(t_n)$. As in the proof of Proposition \ref{pr:bussmapinjective} we have $d(\gamma'(t),\gamma(-s))<s+t$. Fix $s>0$ and pick $\delta>0$ such that $d(\gamma'(t),\gamma(-s))< t+s-\delta$. Since $\gamma_n$ converges to $\gamma$ uniformly on compact sets, $\gamma_n(-s)$ converges to $\gamma(-s)$.
Hence, $d(\gamma'(t),\gamma_n(-s))$ converges to $d(\gamma'(t),\gamma(-s))$.
Then for $n$ big enough we have $d(\gamma'(t),\gamma_n(-s)) < t+s-\delta$. Consider then $n$ big enough so that $\varepsilon_n\le \delta/2$ as well. As in the proof of Proposition \ref{pr:bussmapinjective}, the triangle between $\gamma'(t),\gamma_n(-s)$ and $\gamma_n(t_n)$ gives $d(\gamma_n(-s),\gamma_n(t_n))<t_n+s$. This is a contradiction, which proves the uniqueness of $\gamma$. \end{proof}
Given a basepoint $b\in X$ we can now define a map $\Pi_b:\hbc{X}\to\vbc{X}_b$ by sending any $\xi\in\hbd{X}$ to the unique ray obtained by considering the positive subset of the optimal geodesic $\gamma$ of $\xi$ with $\gamma(0)=b$, and by sending $h(x)$ to $x$ for any $x\in X$. This map is indeed an extension of the relation we had established for Busemann points in $B(\vbd{X})$, since if $\xi=B(\gamma)$ for $\gamma\in D_b$ then $\gamma$ is an optimal geodesic of $\xi$, giving us $\Pi_b(B(\gamma))=\gamma$.
We will often write $\Pi$ instead of $\Pi_b$ whenever the basepoint is not relevant to the discussion. To prove that $\Pi$ is continuous, we first have to see the following result. \begin{proposition}\label{pr:welldefined}
Let $(x_n)\subset X$ be a sequence converging to $\xi\in\hbd{X}$. Then, $(x_n)$ has a unique accumulation point in the visual compactification. Further, this accumulation point depends only on $\xi$. \end{proposition} \begin{proof}
Since $\vbc{X}$ is compact, $(x_n)$ has accumulation points. If $(x_n)$ has two accumulation points we can take two subsequences converging to two different geodesics, which by Lemma \ref{le:fixingvaluealongpath} are optimal geodesics, contradicting Proposition \ref{pr:optimalpath}.
If there is another sequence $(y_n)$ converging to $\xi$ with a different accumulation point the result follows by merging both sequences and repeating the reasoning. \end{proof}
Hence, $\Pi$ can be alternatively defined by sending any $\xi\in \hbd{X}$ to the unique accumulation point in $\vbc{X}$ of the sequences converging to $\xi$ in $\hbc{X}$, and by sending $h(x)$ to $x$ for any $x\in X$. By Lemma \ref{le:fixingvaluealongpath}, this definition is equivalent to the previous one.
By this second definition of the map $\Pi$, we see how it is mostly related to the convergence of sequences, so using a diagonal sequence argument we can prove its continuity. \begin{proposition}\label{pr:fibermapcontinuous}
The map $\Pi$ is continuous. \end{proposition} \begin{proof}
Take a sequence $(\xi_n)\subset \hbc{X}$ converging to $\xi$. If $\xi\in h(X)$ we have that, as $h(X)$ is open, $\xi_n\in h(X)$ for $n$ big enough. Hence, $\Pi(\xi_n)=h^{-1}(\xi_n)$, which converges to $h^{-1}(\xi)$, as $h$ is a homeomorphism with its image.
If $\xi\in \hbd{X}$ we split the sequence into two subsequences, one contained in $h(X)$ and one contained in $\hbd{X}$. The one contained in $h(X)$ converges to $\xi$, so by definition of $\Pi$ we have $\Pi(\xi)=\lim_{n\to\infty} h^{-1}(\xi_n)$.
Assume then that $(\xi_n)\subset \hbd{X}$ converges to $\xi$. We want to see that $\gamma_n=\Pi(\xi_n)$ converges to $\gamma=\Pi(\xi)$. For each $\xi_n$ we can take a sequence $\left(h(\gamma_n^m(t_n^m))\right)_m$ converging, as $m\to\infty$ to $\xi_n$. By Proposition \ref{pr:welldefined} the sequence $\gamma_n^m(t_n^m)$ converges to $\gamma_n$. Let $\gamma'$ be an accumulation point of $\gamma_n$. Take a convergent subsequence of $\gamma_n$ converging to $\gamma'$, and relabel it as $\gamma_n$. Let $(V_n)$ be a nested sequence of open neighborhoods of $\xi$ in $\hbc{X}$ such that $\xi_n\in V_n$ and $\bigcap_n V_n = \{\xi\}$ and let $(W_n)$ be a nested sequence of open neighborhoods of $\gamma'$ in $\vbc{X}$ such that $\gamma_n\in W_n$ and $\bigcap_n W_n= \{\gamma'\}$. We can take such sequences of sets, as both spaces are metrizable.
For each $n$, there exists $m(n)$ big enough so that $\gamma_n^{m(n)}\in W_n$ and $h(\gamma_n^{m(n)}(t_n^{m(n)}))\in V_n$. By the first condition on $m(n)$, we have that $\gamma_n^{m(n)}$ converges to $\gamma'$. By the second condition, $h(\gamma_n^{m(n)}(t_n^{m(n)}))$ converges to $\xi$, so by the definition of $\Pi$ and Proposition \ref{pr:welldefined} the sequence $\gamma_n^{m(n)}$ converges to $\Pi(\xi)=\gamma$. Hence, $\gamma=\gamma'$, so the only accumulation point of $(\gamma_n)$ is $\gamma$ and by compactness of $\vbd{X}$ the sequence $(\gamma_n)$ converges to $\gamma$. \end{proof}
By combining Propositions \ref{pr:welldefined} and \ref{pr:fibermapcontinuous} we get that $\Pi$ is the map announced at the introduction, giving us a proof of Theorem \ref{th:projectionfunction}. As mentioned in the introduction, this map shows that the horofunction compactification is finer than the visual compactification. By using the Busemann map to insert the visual boundary inside the horoboundary, we can consider the map $\Pi$ as a projection.
One straightforward consequence of the continuity of $\Pi_b$ is as follows. \begin{corollary}\label{co:convergencegeodesics}
Let $\gamma$ be a geodesic ray, not necessarily starting at the basepoint $b\in X$. Then, $\gamma$ converges in the visual compactification of $X$ based at $b$. \end{corollary} \begin{proof}
The ray $\gamma$ converges in the horofunction compactification to $B_\gamma$. Since $\Pi_b$ is continuous, the ray also converges in the visual compactification based at $b$ to $\Pi_b(B_\gamma)$. \end{proof} For Teichmüller spaces with the Teichmüller metric this result was first proved by Walsh \cite[Theorem 7]{Walsh}.
By Lemma \ref{le:visualfineriffBcontinuous}, the visual compactification is finer than the horofunction compactification if and only if the Busemann map is continuous. Hence, since the horofunction compactification is always finer than the visual compactification, we obtain an isomorphism whenever this is the case, resulting in \cref{pr:horobocompfiner}.
To get a better picture of the shape of the horoboundary, we shall study the shape of the preimages of the projection $\Pi$ restricted to the boundary. That is, for a given point $\gamma$ in the visual boundary we are interested in finding out information about the fiber $\Pi^{-1}(\gamma)$. We first prove the following lemma, which we will use to get bounds on the values of $\Pi^{-1}(\gamma)$.
\begin{lemma}\label{le:strictinequality}
Fix a geodesic $\gamma \in \vbd{X}$ and $p\in X$ not in the bi-infinite extension of the geodesic $\gamma$. Then, the function $h(\gamma(\cdot))(p)$, with domain $(-\infty,\infty)$, is strictly decreasing. \end{lemma} \begin{proof}
Take $t,s>0$ with $s<t$. By the triangle inequality we have
\[d(\gamma(t),p)\le d(\gamma(s),p)+ d(\gamma(t),\gamma(s))=d(\gamma(s),p)+t-s.\]
Further, we have strict inequality, as equality would give us two different paths with the same length between $\gamma(t)$ and $p$, with one of them being geodesic. Hence,
\begin{align*}
h(\gamma(t))(p)&=d(\gamma(t),p)-d(\gamma(t),b)\\
&<d(\gamma(s),p)+t-s-t\\
&=h(\gamma(s))(p).
\end{align*} \end{proof}
The set $C(X)$ can be partially ordered by saying that $f\ge g$ whenever $f(x)\ge g(x)$ for all $x\in X$. If $p=\gamma(r)$ for some $r$ and $s>t$ we have $h(\gamma(s))(p)=h(\gamma(t))(p)$ for $r\le t$ and $h(\gamma(s))(p)<h(\gamma(t))(p)$ otherwise. Hence, adding the previous lemma we have $h(\gamma(s))<h(\gamma(t))$ whenever $s>t$. By attempting to extend this relation to the horofunction boundary we get that Busemann points are maximal in their fibers.
\begin{proposition}\label{pr:upperbound}
Let $\gamma\in \vbd{X}$ and $\xi\in \Pi^{-1}(\gamma)$. Then, $\xi\le B(\gamma)$. \end{proposition}
\begin{proof}
Choose any sequence $(x_n)\subset X$ such that $h(x_n)$ converges to $\xi$. Since $\xi\in\Pi^{-1}(\gamma)$ the sequence $x_n$ converges to $\gamma$ in $\vbc{X}$, so we can write $x_n=\gamma_n(t_n)$ with $t_n$ converging to infinity and $\gamma_n$ converging to $\gamma$.
Fix $p\in X$ and let $\varepsilon>0$. Denote $s_n=\sup\{t: d(\gamma(t),\gamma_n(t))<\varepsilon \text{ and } t<t_n\}$. The geodesics $\gamma_n$ converge to $\gamma$ uniformly on compact sets, so $s_n\to\infty$ as $n\to\infty$. Hence, by definition of the Busemann point and since $d(\gamma_n(s_n),\gamma(s_n))<\varepsilon$,
\[B_\gamma(p)=
\lim_{n\to\infty}h(\gamma(s_n))(p)\ge\limsup_{n\to\infty}h(\gamma_n(s_n))(p)-2\varepsilon.\]
Hence, by Lemma \ref{le:strictinequality},
\begin{align*}
\xi(p)=
\lim_{n\to\infty}h(\gamma_n(t_n))(p)\le\limsup_{n\to\infty}h(\gamma_n(s_n))(p)\le B_\gamma(p)+2\varepsilon.
\end{align*}
Since $\varepsilon$ can be arbitrarily small, we get the proposition. \end{proof}
While it might not be possible to get a similar unique minimum in each fiber, we can get the following result.
\begin{proposition}\label{pr:lowerbound}
Let $\gamma\in \vbd{X}$ and $\xi\in \Pi^{-1}(\gamma)$. Furthermore, let $(x_n)\subset X$ be a sequence converging to $\xi$ with $x_n=\gamma_n(t_n)$. For any $p$, define $\eta(p)=\liminf_{n\to\infty} B(\gamma_n)(p)$. Then, $\xi\ge \eta$. \end{proposition}
\begin{proof}
The proof follows a similar reasoning as the last one.
Fix $p\in X$, choose a subsequence so $B(\gamma_n)(p)$ converges to $\eta(p)$ and let $(\varepsilon_m)$ be a sequence of positive numbers converging to $0$. For each $\varepsilon_m$, take $n(m)$ big enough so that $B(\gamma_{n(m)})(p)\ge \eta(p)-\varepsilon_m$. Further, take $s_{m}$ bigger than $t_{n(m)}$, and big enough so that
\[h(\gamma_{n(m)}(s_m))(p)\ge B(\gamma_{n(m)})(p)-\varepsilon_m.\]
Such an $s_m$ always exists by the definition of $B(\gamma_{n(m)})$.
In particular, we have that
\[\liminf_{m\to\infty}h(\gamma_{n(m)}(s_m))(p)\ge \eta(p).\]
By Lemma \ref{le:strictinequality} we have
\begin{align*}
\xi(p)=\lim_{m\to\infty}h(\gamma_{n(m)}(t_{n(m)}))(p)\ge\liminf_{m\to\infty} h(\gamma_{n(m)}(s_m))(p)\ge \eta(p).
\end{align*} \end{proof}
The intuition one might get from these propositions is that approaching $\gamma$ ``through the boundary'', that is, through the furthest way possible from the interior of $X$, gives a lower bound on the possible values of approaching through other angles, and approaching it in a straight way, that is, through the geodesic, gives an upper bound. Hence, when these two ways of approaching it are the same, every other possible angle of approach should also yield the same limit. Following this reasoning we get our next result, announced in the introduction.
\continuityatqintro*
\begin{proof}
$(1)\implies(2)$:
Take $\xi\in \Pi^{-1}(\gamma)$. By Proposition \ref{pr:upperbound} we have $\xi\le B(\gamma)$. Since $B$ is continuous at $\gamma$ when restricted to the boundary we have that for any $\gamma_n\to \gamma$ the horofunctions $B(\gamma_n)$ converge to $B(\gamma)$. Hence, by Proposition \ref{pr:lowerbound}, $\xi\ge B(\gamma)$, so $\xi=B(\gamma)$ and we have (2).
$(2)\implies (3)$: Take then any $(x_n)\subset \vbc{X}$ converging to $\gamma$, consider the sequence $(B(x_n))\subset \hbc{X}$ and let $\eta$ be an accumulation point. By the definition of $\Pi$ we have $\eta\in \Pi^{-1}(\gamma)$, so $\eta=B(\gamma)$ since we assumed that $\Pi^{-1}(\gamma)$ is a singleton. This shows that $B$ is continuous at $\gamma$.
Finally, it is clear that $(3) \implies (1)$. \end{proof}
The relation obtained in Lemma \ref{le:strictinequality} can be exploited further. Indeed, trying to carry it to the boundary in a more delicate manner we can see that the fibers are path connected.
\begin{proposition}\label{pr:pathconnected}
Let $\gamma \in \vbd{X}$. For any $\xi\in \Pi^{-1}(\gamma)$ there exists a continuous path from $B(\gamma)$ to $\xi$ contained in $\Pi^{-1}(\gamma)$. \end{proposition} \begin{proof}
Take a sequence $(x_n)\subset X$ converging to $\xi$ in the horofunction compactification, and write $x_n=\gamma_n(u_n)$.
As we have seen in the proof of Proposition \ref{pr:lowerbound}, we can take a sequence $(l_n)\subset \mathbb{R}$ with $\gamma_n(l_n)$ converging to $B_\gamma$ such that $l_n<u_n$ for all $n$.
For each $n$ we have a path $\tilde{\alpha}^n(t)$ connecting $\gamma_n(l_n)$ and $\gamma_n(u_n)$ by setting $\tilde{\alpha}^n(t)=\gamma_n(tu_n+(1-t)l_n)$ for $t\in[0,1]$.
We would like to carry this path to the limit, getting a path between $\xi$ and $B(\gamma)$. However, directly taking such a limit might result in some discontinuities, so we have to choose a parametrization carefully.
To find a good parametrization we shall use a certain functional as a control. We want the functional to carry discontinuities and strict increases in the path of functions to discontinuities and strict increases in the value of the functional. Since $X$ is proper, it is separable, so let $(p_i)_{i\in \mathbb{N}}$ be a countable dense set in $X$. We define the functional $I:\hbc{X}\to \mathbb{R}$ given by
\[I(f)=\sum_{i\in\mathbb{N}} \frac{f(p_i)}{2^i d(b,p_i)}.\]
Since $|f(x)|\le d(b,x)$ for all $f\in\hbc{X}$, the summation in the definition of $I(f)$ is absolutely convergent, so $I(f)$ is defined, finite, continuous with respect to $f$, and for any two $f,g\in \hbc{X}$ we have $I(f+g)=I(f)+I(g)$. Furthermore, since $(p_n)$ is dense and we are taking continuous functions, we have that the functional translates strict inequalities. That is, $f>g$ implies $I(f)>I(g)$. Hence, if $I(f)=0$ and $f\ge 0$ we have $f=0$.
We define then the function $F_n(t)=I(h(\gamma_n(t)))$. By continuity of $I$ this function is continuous, and by Lemma \ref{le:strictinequality} it is strictly decreasing with respect to $t$. That is, we have continuous strictly decreasing functions $F_n:[l_n,u_n]\to [F_n(u_n),F_n(l_n)]$. Hence, we can define implicitly the continuous parametrizations $s_n:[0,1]\to [l_n,u_n]$ by taking the unique value $s_n(t)$ such that
\[F_n(s_n(t))=(1-t)F_n(l_n) + t F_n(u_n).\]
Denote the $F_n(s_n(t))$ as $E_n(t)$. By the continuity of $I$ we have that $E_n(t)$ converges to $(1-t)I(B_\gamma) + t I(\xi)$ as $n\to\infty$, which we denote $E(t)$.
Take now a countable dense set $(t^k)_{k\in \mathbb{N}}\subset [0,1]$ containing $0$ and $1$. We are now ready to start defining the path $\alpha:[0,1]\to \Pi^{-1}(\gamma)$, and we begin defining it for the dense set $(t^k)$.
For $k=1$ we define $\alpha(t^1)$ as an accumulation point of $h(\gamma_n(s_n(t^1)))$.
Denote $(\gamma_{m^1(n)})$ the subsequence of $\gamma_n$ such that $h(\gamma_{m^1(n)}(s_{m^1(n)}(t^1)))$ converges to $\alpha(t^1)$.
Define inductively $\alpha(t^k)$ and $(\gamma_{m^k(n)})$ by taking an accumulation point and a corresponding converging subsequence of $h(\gamma_{m^{k-1}(n)}(s_{m^{k-1}(n)}(t^k)))$. By the continuity of $I$ we have \[I(\alpha(t^k))=\lim_{n\to\infty} (F_{m^k(n)}(s_{m^k(n)}(t^k)))=E(t^k).\]
For each pair $i>j$ we have that $m^i(n)$ is a subsequence of $m^j(n)$, so $h(\gamma_{m^i(n)}(s_{m^i(n)}(t^j)))$ converges to $\alpha(t^j)$.
Assume $t^i>t^j$.
By Lemma \ref{le:strictinequality} we have that $h(\gamma_{m^i(n)}(s_{m^i(n)}(t^i)))<h(\gamma_{m^i(n)}(s_{m^i(n)}(t^j)))$, so $\alpha(t^i)\le \alpha(t^j)$.
We now have to prove that the definition we have given for $\alpha$ on $(t^k)$ can be extended continuously to $[0,1]$. Fix any $t\notin (t^k)$ and take a subsequence of $t^k$, labeled $t^{k_n}$, such that $t^{k_n}\to t$.
We shall now see that $\alpha(t^{k_n})$ converges to a function which does not depend on the chosen subsequence, and define $\alpha(t)$ as that limit. We can split and reorder the sequence $(t^{k_n})$ into $(t^+_n)$ and $(t^-_n)$ satisfying $t^+_n>t^+_{n+1}>t>t^-_{n+1}>t^-_n$.
The associated $\alpha(t^\pm_n)$ are ordered, so for any $p\in X$ the sequence $\alpha(t^\pm_n)(p)$ is an increasing (or decreasing) sequence of values in $\mathbb{R}$, bounded above (or below) by $\alpha(0)(p)$ (or $\alpha(1)(p)$). Hence, both sequences converge pointwise, which implies uniform convergence on compact sets, as these functions are 1-Lipschitz. Furthermore, these limits do not depend on the chosen sequence, since if we had any other we could intercalate them and the sequences would still converge. Denote then $\alpha^+$ the limit associated to $t^+_n$, and $\alpha^-$ the limit associated to $t^-_n$. Since $\alpha(t^+_n)<\alpha(t^-_m)$ for all $n,m$ we have $\alpha^+\le \alpha^-$. For each $\alpha(t^k)$ we have $I(\alpha(t^k)) = E(t^k)$. Hence by the continuity of $I$ we have that
\[ I(\alpha^+)= E(t)=I(\alpha^-).\]
That is, we have
\[I(\alpha^--\alpha^+)=0.\]
Since $\alpha^-$ and $\alpha^+$ are continuous and $\alpha^--\alpha^+\ge 0$ we have $\alpha^-=\alpha^+.$ We thus define $\alpha(t)$ to be either one. The same reasoning shows that $\alpha$ is continuous. \end{proof}
We would like to remark that several choices were made in the proof of the previous proposition, and the obtained path may not be unique.
We can use the previous result to observe that the horoboundary is connected if and only if the visual boundary is connected. \begin{proof}[Proof of Proposition \ref{pr:finslerconnected}]
Assume that the visual boundary is not connected. Then we have $U, V\subset \vbd{X}$ nonempty and open such that $U\cap V=\emptyset$ and $U\cup V=\vbd{X}$. As $\Pi$ is continuous, the sets $\Pi^{-1}(U)$ and $\Pi^{-1}(V)$ are open, so the horoboundary is not connected.
For the other implication, assume that the visual boundary is connected while the horoboundary is not connected. Then we have $U, V \subset \hbd{X}$ nonempty and open such that $U\cap V=\emptyset$ and $U\cup V=\hbd{X}$. Since fibers are path connected, each of them is contained in only one of $U$ or $V$, so $\Pi(U)$ and $\Pi(V)$ are disjoint. Since $U\cup V=\hbd{X}$ we have $\Pi(U)\cup \Pi(V)=\vbd{X}$, and since both $U$ and $V$ are nonempty, so are the images. Hence, both images cannot be open at the same time, as $\vbd{X}$ is connected. Therefore, these sets cannot be both closed. Assume $\Pi(U)$ is not closed. We then have a sequence $(\gamma_n)\subset \Pi(U)$ converging to a point in $\Pi(V)$. Again, since $U\cup V=\hbd{X}$, we have that $U=\Pi^{-1}\Pi(U)$ and $V=\Pi^{-1}\Pi(V)$. Hence, any lift of the sequence $(\gamma_n)$ to $\Pi^{-1}\Pi(U)$ is contained in $U$ and, since $\hbd{X}$ is compact, has accumulation points which, by the continuity of the projection map, are contained in $\Pi^{-1}\Pi(V)=V$. Hence, $U$ is not closed and we get a contradiction. \end{proof}
\subsection{An alternative definition of the horofunction compactification} \label{se:alternativehorofunctiondefinition} Under what a priori seem to be more restrictive hypotheses on the space $X$ it is possible to characterize the horofunction compactification as a subset of the product of all of its visual compactifications. We detail the construction in this section.
The new extra hypotheses are both related to the differentiability of the distance function. We say that a uniquely geodesic metric space $X$ is \emph{$C^1$ along geodesics} if given a point $p\in X$ and a geodesic segment $\gamma$ that does not intersect $p$, the distance function $d(\gamma(t),p)$ is differentiable and the value of the derivative depends continuously on both $t$ and $p$. Furthermore, the space $X$ has \emph{constant distance variation} if for any two distinct geodesics $\gamma,\eta$ with $\gamma(0)=\eta(0)$ we have either
\[
\left.\frac{d}{dt}d(\gamma(t),\eta(s))\right\vert_{t=0}=\left.\frac{d}{dt}d(\gamma(t),\eta(1))\right\vert_{t=0}
\] for all $s>0$, or $\left.\frac{d}{dt}d(\gamma(t),\eta(s))\right\vert_{t=0}$ does not exist for any $s>0$.
Many commonly studied metric spaces have constant distance variation. For example, spaces with bounded curvature, either above or below, have constant distance variation, as explained in the book by Burago--Burago--Ivanov \cite[Section 4]{CourseMetricGeometryBook}. Importantly to our case, complete, uniquely geodesic Finsler manifolds satisfy both hypotheses, as explained in the book by Bao--Chern--Shen \cite[Section 5]{IntroductionFinslerGeometry}, as well as straightness and properness. Therefore, the results from this section can be applied to Teichmüller spaces with the Teichmüller distance, as these are complete, uniquely geodesic Finsler manifolds, see for example the work of Royden \cite{Royden}.
Consider the product of all the possible visual compactifications obtained by changing the basepoint, \[
E=\prod_{b\in X} \vbc{X}_b, \] with the usual product topology. See the book by Munkres \cite[Chapters 2.19 and 5.37]{Munkres} for some background on infinite products of topological spaces. Denote $\pi_b$ the projection from $E$ to $\vbc{X}_b$. By definition of the product topology, the diagonal inclusion $i:X\hookrightarrow E$ such that $\pi_b(i(x))=x$ for every $x,b\in X$ is continuous, and has continuous inverse restricted to $i(X)$ given by $\pi_b$. Hence, $i(X)$ is homeomorphic to $X$. That is, $i$ is an embedding. Furthermore, by Tychonoff's theorem the product is compact, as each factor of the product is compact. Hence the closure $\overline{i(X)}$, which we shall denote $\Vbc{X}$, is compact. The pair $(i,\Vbc{X})$ is then a compactification of $X$, which tracks the information given by the visual boundary at each point. That is, a sequence in $X$ converges in the topology of $\Vbc{X}$ if and only if it converges for every possible visual compactification $\vbc{X}_b$. The main interest of this compactification comes from the following result.
\begin{theorem}\label{th:horocompisvisualfromeverypoint}
Let $X$ be a proper, uniquely geodesic, straight metric space which is $C^1$ along geodesics and has constant distance variation. Then $(i,\Vbc{X})$ is isomorphic to $(h,\hbc{X})$. \end{theorem}
Denote $\Pi_b$ the continuous map from $\hbc{X}$ to $\vbc{X}_b$ given by \cref{th:projectionfunction}. The isomorphism between $\hbc{X}$ and $\Vbc{X}$ is defined by recording the value of each possible $\Pi_b$ within $\Vbc{X}$. That is, we define $\widetilde{\Pi}:\hbc{X}\to\Vbc{X}$ in such a way that $\pi_b\circ\widetilde{\Pi}:=\Pi_b$ for each $b\in X$. The only property required to prove that $\widetilde{\Pi}$ is an isomorphism not following directly from previous results is the injectivity. By Proposition \ref{pr:optimalpath} we know that if $f\in\Pi_b^{-1}(\gamma)$ then $\gamma$ is an optimal geodesic of $f$. That is, $f(\gamma(t))-f(\gamma(s))=-(t-s)$. Hence, if $f,g\in\Pi_b^{-1}(\gamma)$, then they differ by a constant along the geodesic $\gamma$. If $f$ and $g$ are horofunctions in the preimage of a point by $\widetilde{\Pi}$, then they differ by a constant along infinitely many geodesics, which cover $X$. However, the constant might depend on the geodesic, so we need a way to connect these constants. We proceed by strengthening Proposition \ref{pr:optimalpath} to show that any two functions in $\Pi_b^{-1}(\gamma)$ also have the same directional derivatives at points in $\gamma$, which allows us to connect the geodesics. Precisely, we prove the following. \begin{proposition}\label{pr:firstderivativehorofunction}
Let $X$ be a proper, uniquely geodesic, straight metric space which is $C^1$ along geodesics and has constant distance variation. Furthermore, let $\gamma$ be a geodesic ray starting at $b$, and let $\alpha$ be a geodesic starting at some point on $\gamma$. Then, $\left.\frac{d}{dt} f\circ\alpha (t) \right\vert_{t=0}$ exists and its value is the same for all $f\in \Pi^{-1}_b(\gamma)$. \end{proposition} \begin{proof}
For any $b'\in\gamma$ we have that $\gamma$ is an optimal geodesic of $f$ passing through $b'$. Denoting $\gamma_{b'}$ the geodesic ray starting at $b'$ with the same bi-infinite extension as $\gamma$ we have that $f\in\Pi_{b'}^{-1}(\gamma_{b'})$, by Proposition \ref{pr:optimalpath}. Hence, we can assume that $\alpha(0)=b$ by changing the basepoint if necessary. Let $x_n$ be a sequence converging to $f$. Furthermore, let $\eta^n_t$ be the geodesic from $\alpha(t)$ to $x_n$ and $g_n(t)$ be the value of $\left.\frac{d}{ds}h(x_n)\circ \alpha(s)\right\vert_{s=t}$. By the definition of the map $h$ we have $g_n(t)=\left.\frac{d}{ds}d(\alpha(s),x_n)\right\vert_{s=t}$. By the constant distance variation we have $g_n(t)=\left.\frac{d}{ds}d(\alpha(s),\eta^n_t(1))\right\vert_{s=t}$, which since $X$ is $C^1$ along geodesics depends continuously on $\eta^n_t(1)$ and $t$.
By Proposition \ref{pr:welldefined} each geodesic $\eta^n_t$ converges as $n\to\infty$ to some geodesic $\eta_t$, so $\eta^n_t(1)$ converges to $\eta_t(1)$, and so $g_n$ converges pointwise to $g(t)=\left.\frac{d}{ds}d(\alpha(s),\eta_t(1))\right\vert_{s=t}$.
Take some $\delta>0$ and assume the convergence is not uniform on $[-\delta,\delta]$. Then there is some $\varepsilon>0$ such that for each $n$ there is at least one $t_n\in [-\delta,\delta]$ such that $|g_n(t_n)-g(t_n)|>\varepsilon$. Since $[-\delta,\delta]$ is compact we can take a converging subsequence such that $t_n$ converges to some $T\in[-\delta,\delta]$. Hence, the point $\eta_{t_n}^n(1)$ does not converge to $\eta_T(1)$, so by properness of $X$ we can take a subsequence such that $\eta_{t_n}^n(1)$ converges to some $p\in X$ different from $\eta_T(1)$. Let $\beta$ be the geodesic starting at $\alpha(T)$ passing through $p$. The geodesics $\eta^n_{t_n}$ converge uniformly to $\beta$, and $\beta\neq\eta_T$. For any fixed $t>0$ we have, following the same reasoning as in the proof of Proposition \ref{pr:welldefined},
\[
f(\beta(t))-f(\beta(0))=\lim_{n\to\infty}d(x_n,\beta(t))-d(x_n,\beta(0))=-t.
\]
Hence, $\beta$ is an optimal geodesic of $f$ passing through $\alpha(T)$. However, $f\in \Pi^{-1}_{\alpha(T)}(\eta_T)$, so $\eta_T$ is also an optimal geodesic passing through $\alpha(T)$, contradicting Proposition \ref{pr:optimalpath}.
Hence, the convergence of $(h(x_n)\circ\alpha)'=g_n$ to $g$ is uniform on $[-\delta,\delta]$. Therefore, $f\circ\alpha$ is differentiable and $(f\circ\alpha)'(0)=g(0)=\left.\frac{d}{ds}d(\alpha(s),\gamma(1))\right\vert_{s=0}$, which is the same for all $f\in\Pi^{-1}_b(\gamma)$. \end{proof}
\begin{proof}[Proof of Theorem \ref{th:horocompisvisualfromeverypoint}]
Each $\Pi_b$ is continuous, so by the definition of the product topology the map $\widetilde{\Pi}$ is continuous. Hence, by \cref{le:walshcompact} to see that $\widetilde{\Pi}$ is an isomorphism it is enough to show that $\widetilde{\Pi}$ is injective.
Let $f,g\in \hbc{X}$ be such that $\widetilde{\Pi}(f)=\widetilde{\Pi}(g)$. If there is some $b\in X$ such that $\pi_b\circ \widetilde{\Pi}(f)\in X$ then $f=h(\pi_b\circ \widetilde{\Pi}(f))=g$. Assume then $\pi_b\circ \widetilde{\Pi}(f)\in\vbd{X}_b$ for all $b\in X$. By Proposition \ref{pr:firstderivativehorofunction} they have the same directional derivatives at every point. Let $\alpha$ be a geodesic from a fixed basepoint $b$ to any other point. We have $(f\circ \alpha)'=(g\circ\alpha)'$, so $f-g$ is constant along $\alpha$, and hence everywhere, since any point can be connected to $b$ by a geodesic. Hence, $f$ and $g$ are the same horofunction. \end{proof}
By the previous observation about the convergence in $\Vbc{X}$, this characterization gives us the following equivalence for the convergence to points in the horoboundary. \begin{corollary}\label{co:convergingtohoriffconvergingeveryvis}
Let $X$ be a proper, uniquely geodesic, straight metric space, $C^1$ along geodesics and with constant distance variation.
A sequence $(x_n)\subset X$ converges in the horofunction compactification if and only if the sequence converges in all the visual compactifications. \end{corollary} Restricting the result to the Teichmüller metric we get \cref{co:convergingtohoriffconvergingeveryvisintro} announced in the introduction.
\section{Background on Teichmüller spaces}\label{se:backgroundteichmuller}
A \emph{surface with marked points} $S$ is a pair $(\Sigma,P)$, where $\Sigma$ is a compact, orientable surface with possibly empty boundary, and $P\subset \Sigma$ is a finite, possibly empty, set of points, where we allow points to be on the boundary. The \emph{Teichmüller space} $\mathcal{T}(S)$ is the set of equivalence classes of pairs $(X,f)$ where $X$ is a Riemann surface and $f:\Sigma \to X$ is an orientation-preserving homeomorphism. Two pairs $(X,f)$ and $(Y,g)$ are equivalent if there is a conformal diffeomorphism $h:X\to Y$ such that $g^{-1}\circ h \circ f$ is isotopic to identity rel $P$.
The \emph{Teichmüller distance} between two points $[(X,f)],[(Y,g)]\in \mathcal{T}(S)$ is defined as the value $\frac{1}{2} \log \inf K$, where the infimum is taken over all $K\ge 1$ such that there exists a $K$-quasiconformal homeomorphism $h:X\to Y$ with
$g^{-1}\circ h \circ f$ isotopic to identity rel $P$. Together with the smooth structure provided by the Fenchel--Nielsen coordinates $\mathcal{T}(S)$ satisfies all the metric properties discussed in the previous section. That is, $\mathcal{T}(S)$ with the Teichmüller distance is a proper, uniquely geodesic and straight metric space which is $C^1$ along geodesics and has constant distance variation. See \cite[Part 2]{primer} for some background on the Teichmüller metric and the Fenchel--Nielsen coordinates.
A \emph{quadratic differential} on a Riemann surface $X$ is a map $q:TX\to \mathbb{C}$ such that $q(\lambda v)=\lambda^{2}q(v)$ for every $\lambda \in \mathbb{C}$ and $v\in TX$. Considering only holomorphic quadratic differentials with finite area $\int_X |q|$ we get a characterization of the cotangent space to the Teichmüller space based at $[(X,f)]$.
Given a point $p\in \mathcal{T}(S)$ and a quadratic differential $q\in T_p^*\mathcal{T}(S)$ there is a unique geodesic $\gamma$ such that $\gamma(0)=p$ and $\gamma'(0)=|q|/q$. We shall denote such a geodesic as $\tray{q}{\cdot}$ and denote the associated Busemann points as $B(q)$ or $B_q$.
\subsection{Measured foliations}\label{se:measuredfoliations}
A \emph{multicurve} on $S$ is an embedded $1$-dimensional submanifold of $\Sigma \backslash P$ with boundary in $\partial \Sigma \backslash P$ such that
\begin{itemize}
\item no circle component bounds a disk with at most 1 marked point;
\item no arc component bounds a disk with no interior marked points and at most 1 marked point on $\partial \Sigma$ and
\item no two components are isotopic to each other in $\Sigma$ rel $P$.
\end{itemize}
Each of the components is called a \emph{curve}. A \emph{weighted multicurve} is a multicurve together with a positive weight associated to each curve. We shall consider (weighted) multicurves up to isotopy rel $P$. If a simple curve is a circle we shall call it a \emph{closed curve}, and a \emph{proper arc} otherwise.
A \emph{measured foliation} on $S$ is a foliation with isolated prong singularities, where we allow 1-prong singularities at marked points, equipped with an invariant transverse measure $\mu_F$ \cite[Exposé 5]{FLP}. Denoting $\alpha_i$ and $w_i$ the components and the weights of $\alpha$ respectively, the intersection number $i(\alpha,F)$ is defined as $\inf\sum_i w_i \int_{\alpha_i} |\mu_F|d\alpha_i$, where the infimum is taken over all representatives of $\alpha$. Two measured foliations $F$ and $G$ are \emph{equivalent} if $i(\alpha,F)=i(\alpha,G)$ for every multicurve $\alpha$. We shall always consider measured foliations up to this equivalence relation. The set of measured foliations is usually denoted as $\mathcal{MF}$, and its topology is defined in such a way that a sequence $(F_n)\subset \mathcal{MF}$ converges to $F$ if and only if $i(\alpha,F_n)$ converges to $i(\alpha,F)$ for every multicurve $\alpha$.
Given a quadratic differential one can define the \emph{vertical foliation} as the union of \emph{vertical trajectories}, that is, maximal smooth paths $\gamma$ such that $q(\gamma'(t))<0$ for every $t$ in the interior of the domain.
This foliation can be equipped with the transverse measure given by $|\operatorname{Re} \sqrt{q}|$.
This measured foliation is called the \emph{vertical measured foliation} of $q$, and shall be denoted as $V(q)$.
This map is actually a homeomorphism.
As such, given a measured foliation $F$ and a complex structure $X$ there is a unique quadratic differential $q_{F,X}$ on $X$ such that $V(q_{F,X})=F$. We call this quadratic differential the \emph{Hubbard--Masur} differential associated to $F$ on $X$ \cite{Hubbard}.
Furthermore, for each $\lambda>0$ we have $q_{\lambda F,X}=\lambda q_{F,X}$. Similarly, the \emph{horizontal foliation} $H(q)$ can be defined as the union of maximal smooth paths $\gamma$ such that $q(\gamma'(t))>0$, with the transverse measure $|\operatorname{Im}\sqrt{q}|$.
It is possible to associate a measured foliation to each weighted multicurve by thickening each proper arc and closed curve to a rectangle or cylinder respectively with width equal to the weight of the curve, and then collapsing the rest of the surface.
The intersection numbers are maintained by this construction.
This association is injective, and hence we shall consider the set of weighted multicurves as a subset of the measured foliations, and use both expressions of weighted multicurve indistinctly.
By removing the critical graph, a measured foliation is decomposed into a finite number of connected components, each of which is either a thickened curve, or a minimal component which does not intersect the boundary, in which every leaf is dense \cite[Chapter 24.3]{Strebel}. Each transverse measure within the minimal components can be further decomposed into a sum of finitely many projectively distinct ergodic measures. A foliation $F'$ is an indecomposable component of $F$ if it is either a thickened curve or a minimal component with a transverse measure that cannot be decomposed as a sum of more than one projectively distinct ergodic measure. Every foliation can be decomposed uniquely into a union of indecomposable foliations. For a surface of genus $g$ with no boundaries nor marked points Papadopoulos shows \cite{Papadopoulos} that the maximum number of indecomposable components for any foliation is $3g-3$. It is possible to get an upper bound for foliations on surfaces with boundary and marked points by swapping the marked points for boundaries and using the doubling trick we will explain in \cref{se:doublingtrick}.
It was shown by Thurston that for surfaces without boundary it is possible to achieve a dense subset by restricting to simple closed curves, see Fathi--Laudenbach--Poénaru \cite{FLP} for a reference. When there are boundaries the picture gets slightly more complicated, but it has been shown by Kahn, Pilgrim and Thurston in \cite[Proposition 2.12]{kahn} that multicurves can be seen as a dense subset. More precisely, they show the following.
\begin{proposition}[Kahn--Pilgrim--Thurston]\label{pr:curvesdense}
Let $F$ be a measured foliation in $S$ not containing proper arcs. Then there exists a sequence of multicurves composed solely of closed curves approaching $F$.
\end{proposition}
The result can be extended to any foliation by cutting along the proper arcs and approaching the foliation in the resulting surfaces by multicurves. Then, joining the multicurves from the proposition with the proper arcs and the adequate weights we get a sequence of multicurves converging to our original foliation.
\subsection{Extremal length}
Given a marked conformal structure on $S$, that is, a point $X\in \mathcal{T}$, the \emph{extremal length} of $F$ on $X$ is defined as
\[\operatorname{Ext}_X(F):=\int_X |q_{F,X}|.\]
The map $\operatorname{Ext}:\mathcal{MF}(S)\times \mathcal{T}(S)\to \mathbb{R}$ is continuous and homogeneous of degree 2 in the first variable.
Given two points $x,y\in\mathcal{T}(S)$ we can define the function
\[
K_{x,y}:=\sup_{F\in P_b} \frac{\operatorname{Ext}_x(F)}{\operatorname{Ext}_y(F)},
\]
where $P_b$ are the measured foliations $F$ satisfying $\operatorname{Ext}_b(F)=1$. As revealed by Kerckhoff's formula \cite{Kerckhoff}, the value $(1/2)\log K_{x,y}$ coincides with the usual definition of the Teichmüller distance $d(x,y)$.
\subsection{The doubling trick}\label{se:doublingtrick}
Let $X$ be a Riemann surface with nonempty boundary. Denote by $\overline{X}$ the mirror surface, obtained by composing each atlas of $X$ with the complex conjugation. Gluing $X$ to $\overline{X}$ along the corresponding boundary components we obtain the \emph{conformal double} $X^d=X\cup \overline{X}/\sim$ of $X$. Note that $X^d$ has empty boundary. Given a foliation $F$ or a quadratic differential $q$ on $X$, we can repeat the same process, obtaining the corresponding conformal doubles $F^d$ and $q^d$ on $X^d$. For a more detailed treatment of this argument see \cite[Section II.1.5]{Abikoff}.
The main interest of the conformal doubles is that these are surfaces without boundary, so most of the results relating to Teichmüller theory of surfaces without boundary can be translated to surfaces with boundary. We have the following.
\begin{proposition}
Let $X$ be a Riemann surface with boundary, and $F$ be a foliation on $X$. Then,
\[
\operatorname{Ext}_{X^d}(F^d)=2 \operatorname{Ext}_X(F).
\]
\end{proposition}
\begin{proof}
We have $q_{F^d,X^d}=q_{F,X}^d$, so the result follows, as $\int_{X^d}|q_{F,X}^d|=2\int_X|q_{F,X}|$.
\end{proof}
\begin{figure}
\caption{Visual representation of the doubling trick.}
\label{fi:doublingtrick}
\end{figure}
\subsection{The Gardiner--Masur compactification}\label{se:GMboundaryWalshpaper}
For a surface $S$ with marked points and empty boundary we can embed $\mathcal{T}(S)$ into the space of continuous functions from the set $\mathcal{S}$ of simple closed curves on $S$ to $\mathbb{R}$ via the map $\phi:\mathcal{T}(S)\to P(\mathbb{R}^\mathcal{S})$ defined by
\[
\phi(X)=\left[\operatorname{Ext}_X(\alpha)^{1/2}\right]_{\alpha\in\mathcal{S}},
\]
where the square brackets indicate a projective vector.
Gardiner and Masur show \cite{Gardiner} that this map is indeed an embedding, and that $\phi(\mathcal{T}(S))$ is precompact. The Gardiner--Masur compactification of a surface without boundary is then defined as the pair $(\phi,\overline{\phi(\mathcal{T}(S))})$.
Alternatively, after choosing a basepoint $b\in \mathcal{T}(S)$, it is also possible to consider the map $\mathcal{E}:\mathcal{T}(S)\to C(\mathcal{MF})$ defined by
\[
\mathcal{E}(X)(\cdot):=\left(\frac{\operatorname{Ext}_X(\cdot)}{K_{b,X}}\right)^{1/2}.
\]
This map is quite similar to the original map $\phi$, the differences being that $\mathcal{E}$ considers all measured foliations instead of just the closed curves, and normalizes instead of projectivizing. Walsh proves \cite{Walsh} that, for surfaces without boundary, the map $\mathcal{E}$ defines a compactification in the same way that $\phi$ does, and in fact this compactification is isomorphic to the one defined by $\phi$.
The compactification defined by $\mathcal{E}$ fits better our goal, so we shall define the Gardiner--Masur compactification of surfaces with boundary as the one obtained by using $\mathcal{E}$. With this in mind, we first need the following result.
\begin{proposition}
Let $S$ be a compact surface with possibly boundary and marked points. Then the map $\mathcal{E}:\mathcal{T}(S)\to C(\mathcal{MF})$ is injective.
\end{proposition}
\begin{proof}
Assume we have $x,y\in \mathcal{T}(S)$ with $\mathcal{E}(x)(F)=\mathcal{E}(y)(F)$ for all $F\in \mathcal{MF}$. Then,
\[
K_{x,y}=\sup_{F\in P_b}\frac{\operatorname{Ext}_x(F)}{\operatorname{Ext}_y(F)}=\frac{K_{b,x}}{K_{b,y}}
\]
and
\[
K_{y,x}=\sup_{F\in P_b}\frac{\operatorname{Ext}_y(F)}{\operatorname{Ext}_x(F)}=\frac{K_{b,y}}{K_{b,x}}=K_{x,y}^{-1}.
\]
However, $K_{y,x}=K_{x,y}$, since the Teichmüller distance is symmetric. Hence, $K_{x,y}=1$ and, by Kerckhoff's formula, $d(x,y)=1/2 \log K_{x,y}=0$.
\end{proof}
Miyachi shows \cite{Miyachi3} that the set $E(S):=\{\mathcal{E}(X)\mid X\in\mathcal{T}(S)\}$ is precompact when $S$ is a surface without boundary. Given a surface with boundary $S$, denote $\mathcal{MF}^d(S)$ the set of measured foliations on $S^d$ obtained by doubling the foliations $\mathcal{MF}(S)$.
The set $E(S^d)|_{\mathcal{MF}^d(S)}=\{\mathcal{E}(X)|_{\mathcal{MF}^d(S)}\mid X\in\mathcal{T}(S^d)\}$, obtained by restricting the functions in $E(S^d)$ to $\mathcal{MF}^d$, is precompact. Furthermore, we can embed $E(S)$ into $E(S^d)|_{\mathcal{MF}^d(S)}$ by sending $f\in E(S)$ to $f^d\in E(S^d)|_{\mathcal{MF}^d(S)}$ defined by $f^d(F^d)=f(F)$. Hence, $E(S)$ is precompact.
We define the \emph{Gardiner--Masur compactification} for a surface with boundary as the closure $\overline{E}$ of $E(S)$, together with the map $\mathcal{E}$. We shall be using the same characterization for surfaces without boundary.
One of the relevant features of the Gardiner--Masur compactification is that it coincides with the horofunction compactification. Indeed, Liu--Su \cite{LiuSu} and Walsh \cite{Walsh} prove that for surfaces without boundary these two compactifications are isomorphic. In the following, we shall extend the relevant results to surfaces with boundary. We begin with the driving theorem from Walsh's paper.
\begin{theorem}[Extension of {\cite[Theorem 1]{Walsh}} to surfaces with boundary]
Let $\tray{q}{\cdot}:\mathbb{R}_+\to\mathcal{T}(S)$ be the Teichmüller ray with initial unit-area quadratic differential $q$, and let $F$ be a measured foliation. Then,
\[
\lim_{t\to\infty} e^{-2t}\operatorname{Ext}_{\tray{q}{t}}(F)=\sum_j\frac{i(G_j,F)^2}{i(G_j,H(q))},
\]
where the $\{G_j\}$ are the indecomposable components of the vertical foliation $V(q)$, and $H(q)$ is the horizontal foliation.
\end{theorem}
\begin{proof}
If $S$ does not have boundary the result follows from Walsh's paper. Assume then that $S$ has boundary. Let $p$ be the number of proper arcs of $V(q)$, and reorder the components so $G_j$ is a proper arc for $j\le p$. The conformal double $G_j^d$ is indecomposable whenever $G_j$ is a proper arc, and decomposes into two components otherwise, as it is not incident to the boundary of $S$. Denote $G^1_j$ and $G^2_j$ the two components of $G_j^d$ for $j>p$. We have
\begin{equation*}
2\lim_{t\to\infty} e^{-2t}\operatorname{Ext}_{\tray{q}{t}}(F)=\lim_{t\to\infty} e^{-2t}\operatorname{Ext}_{\tray{q^d}{t}}(F^d)=
\sum_{j\le p}\frac{i(G^d_j,F^d)^2}{i(G^d_j,H(q)^d)}
+\sum_{i\in \{1,2\}}\sum_{j> p}\frac{i(G^i_j,F^d)^2}{i(G^i_j,H(q)^d)}.
\end{equation*}
For foliations $G,F\in \mathcal{MF}(S)$ we have $i(G^d,F^d)=2i(G,F)$. Hence, $i(G^d_j,F^d)=2i(G_j,F)$. Using the symmetry, $i(G^1_j,F^d)=i(G^2_j,F^d)$, so for $j>p$ we have $i(G^1_j,F^d)=i(G_j,F)$. Using these identities we get the result.
\end{proof}
Following the same reasoning we can extend as well the next result.
\begin{lemma}[Extension of {\cite[Lemma 3]{Walsh}} to surfaces with boundary]\label{le:walshlowerbound}
Let $q$ be a unit area quadratic differential. Then,
\[
e^{-2t}\operatorname{Ext}_{\tray{q}{t}}(F)\ge \sum_j\frac{i(G_j,F)^2}{i(G_j,H(q))},
\]
where $t\in \mathbb{R}_+$ and $\{G_j\}$ are the indecomposable components of the vertical foliation $V(q)$.
\end{lemma}
Most of the results in Walsh's paper use the previous theorem. In particular, we have the following.
\begin{corollary}[Extension of {\cite[Corollary 1]{Walsh}} to surfaces with boundary]\label{co:walshbusemanshape}
Let $q$ be a quadratic differential and denote by $G_j$ the components of its vertical foliation. Then, the Teichmüller ray $\tray{q}{\cdot}$ converges in the Gardiner--Masur compactification to
\[\left(\sum_j \frac{i(G_j,\cdot)^2}{i(G_j,H(q))}\right)^{1/2}.\]
\end{corollary}
The relation between the Gardiner--Masur compactification and the horofunction compactification is given by the map $\Xi:\overline{E}\to \hbc{\mathcal{T}(S)}$ defined by
\[
\Xi(f)(x):=\frac{1}{2}\log\sup_{F\in \mathcal{P}}\frac{f(F)^2}{\operatorname{Ext}_x(F)}.
\]
The following result can be extended to surfaces with boundary by repeating the proof found in Walsh's paper in this context.
\begin{theorem}[Extension of {\cite[Lemma 21]{Walsh}} to surfaces with boundary]
The map $\Xi$ is an isomorphism between the compactifications $(\mathcal{E},\overline{E})$ and $(h,\hbc{\mathcal{T}(S)})$.
\end{theorem}
Directly from the definition of $\Xi$ we have the following.
\begin{corollary}\label{co:orderpreserved}
Let $f,g\in \overline{E}$. If $f\ge g$ then $\Xi(f)\ge \Xi(g)$.
\end{corollary}
We shall denote the representation of the Busemann point $B(q)$ in the Gardiner--Masur compactification as $\mathcal{E}(q)$. By \cref{co:walshbusemanshape} we have an explicit representation of $\mathcal{E}(q)$.
As we have seen in \cref{pr:horobocompfiner,pr:continuityatqintro}, the continuity of the Busemann map has some interesting implications, and it is enough to look for continuity of the map restricted to the boundary. Related to this question we have the following result, which can also be derived by the same proof found in Walsh's paper, applied to this context.
\begin{theorem}[Extension of {\cite[Theorem 10]{Walsh}} to surfaces with boundary]\label{th:Busemanncontinuityifstrong}
Let $(q_n)$ be a sequence of quadratic differentials based at $b\in \mathcal{T}(S).$ Then $B(q_n)$ converges to $B(q)$ if and only if both of the following hold:
\begin{enumerate}
\item $(q_n)$ converges to $q$;
\item for every subsequence $(G^n)_n$ of indecomposable elements of $\mathcal{MF}$ such that, for each $n\in \mathbb{N}$, $G^n$ is a component of $V(q_n)$, we have that every limit point of $G^n$ is indecomposable.
\end{enumerate}
\end{theorem}
In view of this theorem, we say that a sequence of quadratic differentials $(q_n)$ converges \emph{strongly} to $q$ if it does so in the sense described by the theorem.
Finally, while the following result may be extendable to surfaces with boundary, we only use it in the context of surfaces without boundary, so we shall not be working on finding an extension.
\begin{theorem}[{\cite[Theorem 3]{Walsh}}]
For the Teichmüller space of a surface without boundary with the Teichmüller metric,
for any basepoint $X\in \mathcal{T}(S)$, all Busemann points can be expressed as $B(q)$ for some quadratic differential $q$ based at $X$.
\end{theorem}
\section{Continuity of the Busemann map for Teichmüller spaces}\label{se:continuitybusemanteich}
We begin by using \cref{pr:continuityatqintro} to determine when the Busemann map is continuous. Recall that a sequence $(q_n)$ converges to $q$ strongly if and only if the sequence satisfies the conditions of Theorem \ref{th:Busemanncontinuityifstrong}. That is, a sequence $(q_n)$ converges to $q$ strongly if and only if the associated Busemann points $B(q_n)$ converge to $B(q)$. With this in mind we introduce the following notion.
\begin{definition}
Let $q$ be a quadratic differential. We say that $q$ is \emph{infusible} if any sequence of quadratic differentials converging to $q$ converges strongly. We say that $q$ is \emph{fusible} if it is not infusible.
\end{definition}
In other words, we say that $q$ is fusible when it can be approached by a sequence of quadratic differentials $(q_n)$ such that there is some sequence $(G^n)$ of measured foliations with each $G^n$ being an indecomposable component of $V(q_n)$, with $(G^n)$ having at least one decomposable accumulation point. The following statement follows directly from this definition, \cref{pr:continuityatqintro} and Walsh's result.
\begin{proposition}
Let $q$ be a unit area quadratic differential. The Busemann map $B$ is continuous at $q$ if and only if $q$ is infusible.
\end{proposition}
\begin{proof}
If $q$ is fusible then we have a sequence converging to $q$ but not strongly. Hence, by Theorem \ref{th:Busemanncontinuityifstrong} the sequence $(B(q_n))$ does not converge to $B(q)$, and so the Busemann map is not continuous at $q$.
If $q$ is infusible we have that any sequence $(q_n)$ converging to $q$ does so strongly, and so we have that $B(q_n)$ converges to $B(q)$, so $B$ is continuous at $q$ when restricted to the boundary. By \cref{pr:continuityatqintro} this implies that $B$ is continuous at $q$.
\end{proof}
We shall now find a criterion on the vertical foliation to determine when a unit area quadratic differential is infusible.
\begin{definition}
Let $F$ be a measured foliation on a surface $S$ and let $G$ be one of its indecomposable components. We say that $G$ is a \emph{boundary annulus} if it is an annulus parallel to a boundary with no marked points, and a \emph{boundary component} if it is a boundary annulus or a proper arc. If $G$ is not a boundary component, we shall call it an \emph{interior component}.
Each of the connected components of the surface obtained after removing the proper arcs shall be called \emph{interior part}. If each of these interior parts has at most one interior component, then we say that $F$ is \emph{internally indecomposable}.
If $F$ is not internally indecomposable we say that it is \emph{internally decomposable}.
\end{definition}
For surfaces without boundary, a foliation $F$ is internally indecomposable if and only if it is indecomposable, as we do not have boundary components. Given these definitions we can state our main result of this section
\begin{theorem}\label{th:maxcondition}
Let $q$ be a quadratic differential. Then $q$ is infusible if and only if its vertical foliation $V(q)$ is internally indecomposable.
\end{theorem}
This result is somewhat straightforward whenever $S$ does not have boundary, as in order to have a sequence $(q_n)$ that converges to $q$ but not strongly we need a sequence of components of $V(q_n)$ converging to a decomposable component of $V(q)$, but if $S$ is closed and $V(q)$ is internally indecomposable, then $V(q)$ only has one indecomposable component. Conversely, if $V(q)$ has more than one indecomposable component, as $S$ does not have boundary $V(q)$ can be approached by a sequence of simple closed curves, so the associated sequence of quadratic differentials converges to $q$ but not strongly.
For surfaces with boundary the proof is more involved, as simple closed curves are no longer dense. However, the density of multicurves from Proposition \ref{pr:curvesdense} allows us to follow a slightly similar reasoning. We begin by proving some results regarding the shape that foliations have to take when approaching a foliation with boundary components, namely, boundary components have to be eventually included in the approaching foliations.
\begin{proposition}\label{pr:notsplittingboundary}
Let $(F_n)$ be a sequence of measured foliations converging to a measured foliation $F$, let $G$ be the union of the boundary components of $F$ and let $H$ be such that $F=H+G$. Then, for $n$ big enough, $F_n=H_n+a_n G$, with $a_n$ converging to $1$ and $H_n$ converging to $H$.
\end{proposition}
In particular, the proper arcs of the limiting foliation have to be included in the approaching foliations. Hence, we will be able to separate the surface along these proper arcs into the interior parts of the limiting foliation, and study the convergence in each of these parts.
We say that a subset of a boundary component is a \emph{boundary arc} if it is homeomorphic to an open interval or a circle, does not contain marked points and, if it is homeomorphic to an open interval, it is delimited by marked points.
Repeating the argument by Chen--Chernov--Flores--Fortier Bourque--Lee--Yang \cite{Fortier2} in a more general setting we get the following characterization of foliations on simple surfaces, which we shall use to solve the simpler cases.
\begin{lemma}\label{le:polygonsfinitelymanyarcs}
Let $S$ be a sphere with one boundary component possibly containing boundary marked points and one interior marked point. Then every indecomposable foliation on $S$ is a proper arc and there are finitely many distinct proper arcs.
\end{lemma}
\begin{proof}
Assuming that there is some foliation $F$ with a recurrent leaf to some part of $S$ we get a contradiction, as explained in the proof of \cite[Lemma 4.1]{Fortier2}. Hence, each indecomposable foliation is a proper arc.
Let $b_1$ and $b_2$ be two boundary arcs. Fix three proper arcs with endpoints on $b_1$ and $b_2$. Any intersection between these arcs can be removed by doing isotopies moving the endpoints along the arcs $b_1$ or $b_2$. Hence, these arcs can be isotoped to not intersect each other. Since there is only one interior marked point, two of these arcs delimit a rectangle with no interior marked points, so are isotopic. Hence, there are at most two different proper arcs between $b_1$ and $b_2$. Since there are finitely many boundary arcs there are finitely many pairs, and so there are finitely many different proper arcs.
\end{proof}
We shall first see the proposition for the case where $G$ contains a proper arc and we are approaching with a sequence of indecomposable foliations.
\begin{lemma}\label{le:notsplittingboundary4}
Let $S$ be a surface and let $(F_n)$ be a sequence of indecomposable foliations on $S$ converging to a measured foliation $G$. Then $G$ is either a multiple of a proper arc $\gamma$, in which case $F_n$ is also a multiple of $\gamma$ for $n$ big enough, or $G$ does not contain a proper arc.
\end{lemma}
\begin{proof}
Assume $G$ contains a proper arc $\gamma$ with weight $w>0$ and denote $b$ one of the boundary arcs where $\gamma$ is incident.
Our first step is seeing that, for $n$ big enough, $F_n$ intersects $b$. We shall do this by finding different test curves $\beta$ depending on the shape of $b$.
If the boundary component containing $b$ has at most one marked point, we consider $\beta$ to be a curve parallel to that boundary component as in \cref{fi:samplecurves1}.
Otherwise we consider $\beta$ to be the curve defined by taking a small arc starting at the boundary arc next to $b$, concatenating with a curve parallel to $b$, and concatenating another segment with endpoint in the boundary arc after $b$, as shown in \cref{fi:samplecurves2}.
\begin{figure}
\caption{Sample curves used in the proof of \cref{le:notsplittingboundary4}}
\label{fi:samplecurvesboth}
\end{figure}
If the curve $\beta$ is contractible then $S$ is a sphere with one boundary component, so by \cref{le:polygonsfinitelymanyarcs} the result follows. Assume then that $\beta$ is not contractible. We have $i(\gamma,\beta)>0$, so $i(G,\beta)>0$ and hence $i(F_n,\beta)>0$ for $n$ big enough, which implies that $F_n$ intersects $b$. Hence, since $F_n$ is indecomposable, it is a weighted proper arc, which we denote $w_n\gamma_n$, where $w_n>0$ is the weight and $\gamma_n$ is a proper arc.
Denote $b_1$ and $b_2$ the boundary arcs where $\gamma$ has its endpoints, and denote $\beta_1$ and $\beta_2$ the associated test curves shown in \cref{fi:samplecurvesboth}. If both endpoints are in the same boundary arc we set $b_2$ and $\beta_2$ as null curves. We shall now find a multicurve $A$ surrounding $\gamma$, $b_1$ and $b_2$ such that any leaf of $G$ intersecting $A$ but not $\gamma$ has an endpoint in either $b_1$ or $b_2$. The multicurve $A$ is chosen so that, together with the boundaries where $\gamma$ has its endpoints, delimits the smallest surface containing $\gamma$. The precise shape of $A$ depends on whether the endpoints of $\gamma$ are in the same boundary component or not, and the distribution of marked points in these boundaries.
If both endpoints of $\gamma$ are in different boundary components we proceed differently according to the distribution of marked points at these boundaries. If each of the boundaries contains at most one marked point then we define $A$ as the curve shown in \cref{fi:boundarycurves1}. If one of the boundary components has two or more marked points, but the other has at most one marked point we define $A$ as the arc shown in \cref{fi:boundarycurves2}. Finally, if each of the boundaries contains at least two marked points we define $A$ as the multicurve formed by the curves $A_1$ and $A_2$ as shown in \cref{fi:boundarycurves3}.
If both endpoints of $\gamma$ are in the same boundary we also proceed differently according to the distribution of marked points. In all cases $A$ is defined as a multicurve formed by two curves. If each possible segment within the boundary component joining the two endpoints has at most one marked point we proceed as in \cref{fi:boundarycurves4}. If one of these segments has two or more marked points, while the other has at most one we proceed as in \cref{fi:boundarycurves5}. Finally, if both of these segments have two or more marked points we proceed as in \cref{fi:boundarycurves6}.
\begin{figure}
\caption{Construction of the curves $A_1$ and $A_2$ whenever $\gamma$ has endpoints in different boundary components in the proof of \cref{le:notsplittingboundary4}}
\label{fi:boundarycurves1}
\label{fi:boundarycurves2}
\label{fi:boundarycurves3}
\end{figure}
\begin{figure}
\caption{Construction of the curves $A_1$ and $A_2$ whenever $\gamma$ has endpoints in the same boundary component in the proof of \cref{le:notsplittingboundary4}}
\label{fi:boundarycurves4}
\label{fi:boundarycurves5}
\label{fi:boundarycurves6}
\end{figure}
In any of the cases above if a component of $A$ is non essential we remove it from $A$. The following argument also applies whenever $A$ is a null curve. Put $A$ and $G$ in minimal position and denote $P$ the surface containing $\gamma$, delimited by $A$ and the boundary components where $\gamma$ has its endpoints. Let $\alpha$ be a connected component of a non critical leaf of $G$ restricted to $P$ intersecting $A$. Since $G$ contains $\gamma$, the proper arc $\alpha$ cannot intersect $\gamma$. Furthermore, by observing the possible configurations, if $\alpha$ has one endpoint in $A_1$, the other one cannot be in $A_2$, as whenever we have both $A_1$ and $A_2$, these are separated within $P$ by the proper arc $\gamma$. Furthermore, if both endpoints are in $A_1$ then $\alpha$ can be isotoped to not intersect $A$. Therefore, the other endpoint of $\alpha$ is in either $b_1$ or $b_2$. Hence, $i(G,\beta_1)+i(G,\beta_2)\ge i(G,A)+w \, i(\gamma,\beta_1)+w \, i(\gamma,\beta_2)>i(G,A)$. Since $w_n \gamma_n$ converges to $G$, this last inequality implies that for $n$ big enough,
\[i(\gamma_n,\beta_1)+i(\gamma_n,\beta_2)>i(\gamma_n,A).\]
Fix $n$ such that $\gamma_n$ satisfies the previous inequality. Assume $\gamma_n$ has just one endpoint inside $P$. Then, $i(\gamma_n,\beta_1)+i(\gamma_n,\beta_2)=1$, so $i(\gamma_n,A)=0$ and $\gamma_n$ cannot leave $P$. If $\gamma_n$ has both endpoints in $P$ then $i(\gamma_n,\beta_1)+i(\gamma_n,\beta_2)=2.$ Furthermore, if $\gamma_n$ leaves $P$, then it has to reenter at some point, resulting in $i(\gamma_n,A_1+A_2)=2$. Hence, $\gamma_n$ stays inside $P$.
The weights $w_n$ do not converge to $0$, as $w_ni(\gamma_n,\beta)$ converges to $i(G,\beta)$, but $i(\gamma_n,\beta)\le 2$. Since $\gamma$ is contained in $G$ we have $i(G,\gamma)=0$. Therefore, for any $\epsilon>0$ and $n$ big enough we have $w_ni(\gamma_n,\gamma)<\epsilon$, so for $n$ big enough $i(\gamma_n,\gamma)=0$. Since $\gamma_n$ does not intersect $\gamma$ and stays inside $P$, $\gamma_n$ can be isotoped to stay inside one of the components obtained after removing $\gamma$ from $P$. Denote $C$ such component. The component $C$ has either one or two boundary components and no interior marked points or one boundary component and one interior marked point. By \cref{le:polygonsfinitelymanyarcs} the only case where we do not have finitely many different proper arcs is when $C$ has two boundary components. However, in that case one of the boundary components is associated to a curve in $A$, so $\gamma_n$ does not intersect it and that boundary can be treated as a marked point. Hence, in all cases there are finitely many possible proper arcs, and so $\gamma_n$ is a multiple of $\gamma$ for $n$ big enough.
\end{proof}
When the boundary component is an annulus we have to be a bit more careful, so we start by proving it for approaching curves.
\begin{lemma}\label{le:notsplittingboundary5}
Let $S$ be a surface and let $(w_n\gamma_n)$ be a sequence of weighted curves on $S$ converging to a foliation $G$, where $(w_n)$ are the weights and $(\gamma_n)$ the curves. Then $G$ is either a multiple of a boundary annulus $\gamma$, in which case $\gamma_n$ is $\gamma$ for $n$ big enough, or $G$ does not contain a boundary annulus.
\end{lemma}
\begin{proof}
If $S$ is a polygon with at most one interior marked point, then $G$ cannot contain a boundary annulus. If $S$ is a cylinder then at least one of the boundaries must not contain marked points. Hence, the number of curves is finite, as there is only one possible closed curve, and for counting the proper arcs we can consider the boundary without marked points as a marked point and apply \cref{le:polygonsfinitelymanyarcs}. In that case, the conclusion follows.
Assume then that $S$ is neither a disk with at most one interior marked point nor a cylinder with no interior marked points. Then there is a pair of pants $P$ in $S$ containing $\gamma$ where each boundary component of $P$ is either non contractible or contractible to a marked point. Denote $B_1$ the boundary component parallel to $\gamma$ and $B_2$ and $B_3$ the other two boundary components of $P$. Furthermore, assume that $G$ contains $\gamma$ with weight $w$.
Begin by assuming that $B_2$ and $B_3$ are not contractible to marked points. Let $C$ be the proper arc contained in $P$ with both endpoints in $B_1$. Put $B_2$, $B_3$ and $C$ in a minimal position with respect to $G$, and consider a connected component of a noncritical leaf of $G$ intersecting $C$ restricted to $P$. This noncritical leaf either is isotopic to $\gamma$, or to the curves $F$, $E$ and $D$ shown in \cref{fi:curvelabeling}. Since the leaves of $G$ do not intersect, there cannot be leaves isotopic to $E$ and leaves isotopic to $D$ at the same time. Breaking symmetry, assume there are no leaves isotopic to $D$. Then, $i(C,G)=i(C,\gamma)+i(B_3,G)>i(B_3,G)\ge i(B_2,G)$. Doing the same reasoning assuming that there are no leaves isotopic to $E$ we get $i(C,G)>\max(i(B_2,G),i(B_3,G))$. Hence, since $w_n\gamma_n$ converges to $G$, $\gamma_n$ has to satisfy
\[
i(C,\gamma_n)>\max(i(B_2,\gamma_n),i(B_3,\gamma_n))
\]
for $n$ big enough.
For each $n$ put $B_3$, $B_2$ and $C$ in a minimal position with respect to $\gamma_n$, and consider the restriction of $\gamma_n$ to $P$. Assume $\gamma_n$ is not $\gamma$. Then, the curves on the restriction of $\gamma_n$ to $P$ intersecting $C$ are isotopic to either $E,F$ and $D$, but not $\gamma$. As before, this restriction cannot contain curves isotopic to $E$ and curves isotopic to $D$ for the same $n$, so assuming there are no curves isotopic to $D$ we have $i(C,\gamma_n)=i(B_3,\gamma_n)$ which is a contradiction. Doing the same reasoning assuming that there are no curves isotopic to $E$ also gives a contradiction. Hence, $\gamma_n$ is $\gamma$ for $n$ big enough.
If $B_2$ or $B_3$ are contractible to marked points we have $i(G,B_2)$ or $i(G,B_3)$ is $0$, and a similar reasoning yields the same result.
\begin{figure}
\caption{Curve labeling for the proof of Lemma \ref{le:notsplittingboundary5}}
\label{fi:curvelabeling}
\end{figure}
\end{proof}
\begin{proof}[Proof of Proposition \ref{pr:notsplittingboundary}]
Let $(F_n)$ be a sequence of measured foliations converging to $F$. As pointed out before, Proposition \ref{pr:curvesdense} can be extended to get sequences of weighted multicurves $(\gamma_n^m)_m$ converging to each $F_n$. Denote $\gamma_{n,1}^m,\gamma_{n,2}^m,\ldots,\gamma_{n,k(n,m)}^m$ the weighted curves of $\gamma_n^m$. For each $n$ we take a subsequence such that $k(n,m)$ is constant with respect to $m$, and $\gamma^{m}_{n,i}$ converges for each $i$ as $m\to\infty$. Denoting $F_{n,i}$ the limit of $\gamma^m_{n,i}$ as $m\to\infty$, we can write $F_n=\sum F_{n,i}$.
Denote $\beta_j$ the boundary components of $F$. That is, $\sum \beta_j=G$. Furthermore, denote $b_{n,j}$ and $b_{n,j}^m$ the weights of $\beta_j$ on $F_n$ and $\gamma_n^m$, where we set the weight to be $0$ if $\beta_j$ is not contained in the foliation. It is clear that if $b_{n,j}=0$ then $b_{n,j}^m\to 0$, as we must have $b_{n,j}\ge \liminf_{m\to\infty} b_{n,j}^m$. If $b_{n,j}>0$ for some $n$, then $F_{n,i}$ contains $\beta_j$ for some $i$. Hence, by Lemmas \ref{le:notsplittingboundary4} and \ref{le:notsplittingboundary5} we have $F_{n,i}$ and $\gamma_{n,i}^m$ are both multiples of $\beta_j$ for $m$ big enough. Then, since each of the multicurves in $\gamma_n^m$ has to be different, $\beta_j$ is not contained in any other foliation $F_{n,i}$ for that given $n$, so $F_{n,i}=b_{n,j}\beta_j$ and $\gamma_{n,i}^m$ can be written as $b_{n,j}^m\beta_j$ for $m$ big enough, with $b_{n,j}^m$ converging to $b_{n,j}$ as $m\to\infty$.
Assume for some $j$ we have $b_{n,j}$ not converging to $1$. We can then take a subsequence such that $b_{n,j}$ converges to some $\lambda\neq 1$. Denote $\delta=|1-\lambda|/2$. For each $n$, there exists some $m_0(n)$ big enough so that $|1-b_{n,j}^m|>\delta$ for all $m\ge m_0(n)$. We can then take a diagonal sequence $\gamma_n^{m(n)}$ converging to $F$ with $m(n)\ge m_0(n)$. However, following the previous reasoning we get that $\gamma_n^{m(n)}$ should contain $\beta_{j}$ for $n$ big enough, and the weight should converge to the weight in $G$, that is, to $1$. However, $|1-b_{n,j}^{m(n)}|>\delta$, giving us a contradiction. Hence, $b_{n,j}$ converges to $1$ for all $j$. Let then $a_n=\min_j(b_{n,j})$. Since $b_{n,j}\ge a_n$ we can define $H_n=F_n-a_n G$ and we have $F_n=H_n+a_nG$. Finally, $a_n\to 1$ as $n\to\infty$, so the proposition is proved.
\end{proof}
\begin{proposition}\label{pr:ifpart}
Let $q$ be a unit area quadratic differential such that $V(q)$ is internally indecomposable. Then $q$ is infusible.
\end{proposition}
\begin{proof}
Assume $q$ is fusible, that is, we have a sequence of quadratic differentials $(q_n)$ converging to $q$ but not strongly. Let $F_i^n$ be the indecomposable components of $V(q_n)$. To have non-strong convergence we must have at least one sequence of indecomposable components converging to a decomposable component $G$, which we assume is $(F_1^n)_n$. Let $\beta$ be a boundary component of $V(q)$. By Proposition \ref{pr:notsplittingboundary} for $n$ big enough a multiple of $\beta$ must be contained in $V(q_n)$. Furthermore, $\beta$ cannot be contained in $G$. Since $G$ cannot contain boundary components, it must contain at least two interior components. On the other hand, since $V(q)$ is internally indecomposable, each interior part obtained by removing the proper arcs contains at most one interior component. Hence, for $n$ big enough $F_1^n$ must intersect at least two interior parts, that is, $F_1^n$ must cross at least one proper arc. However, for each proper arc $\gamma$ there is some $n$ big enough such that $\gamma$ is contained in the foliation $V(q_n)$, so $F_1^n$, a component of $V(q_n)$, intersects the foliation $V(q_n)$, giving us a contradiction.
\end{proof}
To prove the other direction we shall first see the following lemma.
\begin{lemma}\label{le:interesctingcurves}
Let $S$ be a compact surface with possibly nonempty boundary and finitely many marked points, let $k\ge 2$ and let $\alpha=\{\alpha_1,\alpha_2,\ldots ,\alpha_k\}$ be a collection of non intersecting closed curves on $S$. Furthermore, let $p$ be the number of curves in $\alpha$ parallel to a boundary. Then there exists a collection of $\max(\lceil p/2\rceil,1)$ non intersecting curves intersecting each $\alpha_i$.
\end{lemma}
Our main interest in the lemma is that the amount of curves needed is strictly smaller than the amount of closed curves in $\alpha$. This will allow us, by doing Dehn twists along the closed curves in $\alpha$, to create a sequence of foliations converging to a foliation with strictly more components, which can be translated to a sequence of quadratic differentials that converge but not strongly. The proof of this lemma is based on an argument found in \cite[Proposition 3.5]{primer}.
\begin{proof}
We start by replacing all boundaries of $S$ without parallel curves in $\alpha$ by marked points. Let then $\alpha'$ be a completion of $\alpha$ to a pair of pants decomposition. Glue the remaining boundaries pairwise until we have at most one left. After cutting the surface along the closed curves that were not parallel to boundaries we get a collection of $\lceil p/2 \rceil$ tori with one boundary component and some spheres with $b$ boundary components and $n$ marked points, with $b+n=3$ and $b\ge 1$. If $p$ is odd, one of these spheres has a boundary of $S$ as a boundary. We join the boundaries of each of these surfaces with non intersecting arcs, as shown in \cref{fi:pathslaying}, that is, in such a way that each boundary component has two arcs incident to it. We can then paste these surfaces back together in order to obtain a collection $\beta_1,\beta_2,\ldots,\beta_l$ of pairwise disjoint curves in $S$. If $p$ is odd this collection contains precisely one proper arc, as we only have two endpoints coming from the boundary we did not paste. If $p$ is even the collection does not contain any proper arc. By the bigon criterion each $\beta_j$ is in minimal position with respect to each $\alpha_i$, and each $\alpha_i$ intersects either one or two of the $\beta_j$. Furthermore, since we did not cut along the original boundaries we pasted from $S$, each $\alpha_i$ parallel to a boundary of $S$ intersects precisely one of the $\beta_j$. Suppose we have $\beta_j$ and $\beta_{j'}$ intersecting a curve $\kappa\in \alpha'$ and that $\beta_j$ and $\beta_{j'}$ are distinct. Since we have at most one proper arc, at least one of $\beta_j$ and $\beta_{j'}$ is a closed curve. Hence, doing a half twist about $\kappa$, $\beta_j$ and $\beta_{j'}$ become a single curve. Since this process does not create any bigons, the resulting collection is still in minimal position with $\alpha$. Continuing this way we obtain a single curve $\gamma$ intersecting each curve in $\alpha$. 
Furthermore, $\gamma$ intersects each pasted boundary once. Cutting along the pasted boundaries, we get the curves from the lemma. If $p$ is odd, $\gamma$ is a proper arc, so each cut along a pasted boundary increases the curve count by one, totalling $(p+1)/2$ curves. If $p$ is even, $\gamma$ is a closed curve, so the first cut transforms it into a proper arc, and the following ones increase the curve count by one, giving a total of $\max(p/2,1)$ curves.
\begin{figure}
\caption{Laying out of curve segments for the proof of Lemma \ref{le:interesctingcurves}}
\label{fi:pathslaying}
\end{figure}
\end{proof}
\begin{proposition}\label{pr:onlyifpart}
Let $F$ be an internally decomposable measured foliation. Then, $F$ can be approached by a sequence of weighted multicurves with fewer components than $F$.
\end{proposition}
\begin{proof}
By the extension to Proposition \ref{pr:curvesdense}, we have a sequence of weighted multicurves $\gamma^n$ converging to $F$, with the only proper arcs being the ones contained in $F$. Cutting the surface along the proper arcs of $\gamma^n$ and quotienting these proper arcs to points we get $k$ many surfaces $Z_1,Z_2,\ldots ,Z_k$ with boundary. Let $\gamma^n_i$ be the restriction of $\gamma^n$ to $Z_i$, and let $F_i$ be the limit of $\gamma^n_i$. The foliation $F$ is the union of the foliations $F_i$ and the proper arcs.
Fix some $i$ such that $F_i$ is nonempty, and let $\alpha_1,\ldots, \alpha_b$ be the closed curves parallel to the boundaries of $Z_i$. Let $a^n_1,\ldots, a^n_b$ be the weights of $\alpha_1,\ldots,\alpha_b$ in $\gamma^n_i$. We can take a subsequence such that $a^n_j$ converges for each $j$ to some $a_j$. If $a_j>0$, the closed curve $\alpha_j$ is contained in $F_i$. If $a_j=0$, then the weights $a^n_j$ can be set to $0$ on the multicurves $\gamma^n_i$ while leaving the limit intact. Hence, we can assume that $a^n_j=0$ for all $j$ such that $a_j=0$. Let $p$ and $u$ be the number of closed curves with $a_j>0$ parallel to boundaries with or without marked points respectively. Since we have removed all the closed curves with $a_j=0$, the multicurve $\gamma^n_i$ contains precisely $p$ and $u$ closed curves parallel to boundaries with or without marked points for $n$ big enough. Denote by $B$ the set of closed curves parallel to boundary components without marked points. Applying Lemma \ref{le:interesctingcurves} to the multicurve $\gamma^n_i$ minus $B$ we get $\max(\lceil p/2\rceil,1)$ curves $\beta^n_i$ intersecting all closed curves in $\gamma^n_i$ except the ones parallel to boundaries without marked points. Doing the appropriate Dehn twists along the closed curves of $\gamma^n_i$ and rescaling to the curves $\beta^n_i$, and adding with the corresponding weights the curves in $B$, we get a sequence converging to $\gamma^n_i$ with $\max(\lceil p/2\rceil,1)+u$ many components. As such, taking a diagonal sequence we can get a sequence of multicurves converging to $F_i$ with each multicurve containing $\max(\lceil p/2\rceil,1)+u$ components.
Finally, since $F$ is internally decomposable, there is at least one $F_i$ with at least 2 interior components, so one of these multicurves has strictly fewer components than the limiting foliation, and we have non-strong convergence.
\end{proof}
Theorem \ref{th:maxcondition} follows by combining Propositions \ref{pr:ifpart} and \ref{pr:onlyifpart}.
We do not need $S$ to have a lot of topology to find internally decomposable foliations. In fact, determining which surfaces do not support internally decomposable foliations we get the following result.
\begin{proposition}\label{pr:continuousBusemann}
Let $S_{g,b_m,b_u,p}$ be a surface of genus $g$ with $b_m$ and $b_u$ boundaries with and without marked points respectively and $p$ interior marked points. Then the Busemann map is continuous if and only if $3g+2b_m+b_u+p\le 4$.
\end{proposition}
We shall split the proof into the following two lemmas.
\begin{lemma}\label{le:nonexceptionalimpliesfusible}
Let $S_{g,b_m,b_u,p}$ be a surface with $3g+2b_m+b_u+p > 4$. Then it admits an internally decomposable foliation.
\end{lemma}
\begin{proof}
A multicurve consisting of two interior closed curves generates an internally decomposable foliation, so we just have to find such a pair for each possible surface satisfying the hypothesis. If $S$ has genus at least 2 we can take a multicurve consisting of $2$ non separating closed curves. If $S$ is a torus with at least 2 boundaries or marked points, or a boundary with marked points, we can take a non separating closed curve and a separating closed curve around 2 boundaries or marked points, or around a boundary with marked points. If $S$ is a sphere with at least 5 marked points or boundaries, we can take a closed curve around two interior points or boundaries, and a closed curve around two different interior points or boundaries. If $S$ is a sphere with 1 boundary with marked points and at least 3 other boundaries or interior points we can take a closed curve around the boundary with marked points, and a closed curve around two other interior points or boundaries. Lastly, if $S$ is a sphere with 2 boundaries with marked points and another interior marked point or boundary we take a closed curve around each boundary with marked points.
\end{proof}
\begin{lemma}\label{le:exceptionalimpliesnonfusible}
Let $S_{g,b_m,b_u,p}$ be a surface with $3g+2b_m+b_u+p\le 4$. Then every foliation on $S$ is internally indecomposable.
\end{lemma}
\begin{proof}
Assume we have an internally decomposable foliation on $S_{g,b_m,b_u,p}$. Then we can get an internally decomposable foliation on $S_{g,0,0,b_u+p+2b_m}$ by removing the boundary components, replacing the boundaries without marked points with marked points and each boundary with marked points with 2 marked points. Furthermore, if we have at least one marked point, we can get an internally decomposable foliation in $S_{g,0,0,b_u+p+2b_m+k}$, $k\in \mathbb{N}$, by replacing a marked point with $k+1$ marked points.
Hence, we only need to prove that a torus with one marked point and a sphere with 4 marked points do not admit internally decomposable foliations. However, since these do not have boundaries, a foliation being internally decomposable translates to a foliation having at least two indecomposable components.
Assume the torus with one marked point admits a foliation with two indecomposable components. We can replace the marked point with a boundary, and add to the foliation a boundary component parallel to that boundary. Considering the doubled surface explained in \cref{se:doublingtrick} we get a closed surface of genus $2$ without boundaries nor marked points, with at least 5 indecomposable components. Recall that the maximum number of indecomposable components for a foliation on a surface of genus $g$ is $3g-3$, so for genus $2$ the maximum is $3$, giving us a contradiction. A similar process applies for the sphere with 4 marked points.
\end{proof}
\begin{proof}[Proof of Proposition \ref{pr:continuousBusemann}]
The Busemann map is continuous at every point in the interior of Teichmüller space, as it is the identity when restricted in there and $\vbd{X}$ is closed. Hence, we only need to prove continuity or discontinuity at the points on the boundary. By Lemma \ref{le:nonexceptionalimpliesfusible} if $3g+2b_m+b_u+p>4$ then $S$ admits an internally decomposable foliation $F$, so by \cref{th:maxcondition} the Hubbard--Masur quadratic differential associated to $F$ at the basepoint $X$ is fusible and hence the Busemann map is not continuous at that point. On the other hand, if $3g+2b_m+b_u+p\le 4$ then by \cref{le:exceptionalimpliesnonfusible} for any quadratic differential $q$, the vertical foliation $V(q)$ is internally indecomposable, so again by \cref{th:maxcondition} every quadratic differential is infusible and $B$ is continuous at every boundary point.
\end{proof}
By combining Proposition \ref{pr:continuousBusemann} with Proposition \ref{pr:horobocompfiner}, we get the precise classification of surfaces with horofunction compactification isomorphic to visual compactification announced in Theorem \ref{th:homeomorphictovisualcomp} from the introduction.
\begin{proof}[Proof of Theorem \ref{th:homeomorphictovisualcomp}]
As shown in Proposition \ref{pr:horobocompfiner}, the visual compactification and the horofunction compactification are isomorphic if and only if the Busemann map is continuous, so the theorem follows by applying Proposition \ref{pr:continuousBusemann}.
\end{proof}
\subsection{Criteria for convergence}\label{se:horocycles}
One straightforward consequence of the horofunction compactification being finer than the visual compactification is the following criterion regarding the convergence of sequences in the horofunction compactification.
\begin{corollary}\label{co:convergencepaths}
Let $(x_n)\subset \mathcal{T}(S)$ be a sequence. If $(x_n)$ converges to a quadratic differential $q$ in the visual compactification, then all accumulation points of $(x_n)$ in the horofunction compactification are contained in $\Pi^{-1}(q)$. In particular, if $V(q)$ is internally indecomposable, then $(x_n)$ converges in the horofunction compactification.
Furthermore, if $(x_n)$ does not converge in the visual compactification, then it does not converge in the horofunction compactification.
\end{corollary}
\begin{proof}
If $x_n$ converges in the visual compactification to a quadratic differential $q$ then by the continuity of $\Pi$ all its accumulation points are in $\Pi^{-1}(q)$. If $V(q)$ is internally indecomposable, then by \cref{th:maxcondition} the quadratic differential $q$ is infusible, so the Busemann map is continuous at $q$ and by \cref{pr:continuityatqintro} the fiber $\Pi^{-1}(q)$ is a singleton. Therefore $x_n$ converges to $\Pi^{-1}(q)$, as that is the only accumulation point of $x_n$ and the horofunction compactification is compact.
On the other hand, if $x_n$ converges to $\xi$ in the horofunction compactification, by continuity of $\Pi$, $x_n$ converges to $\Pi(\xi)$ in the visual compactification.
\end{proof}
A frequent topic in the study of compactifications of Teichmüller spaces is the convergence of certain measure-preserving paths. We shall see now how the previous results can be applied in that study.
Let $X\in\mathcal{T}(S)$ be a point in Teichmüller space and $q$ be a unit quadratic differential based at $X$. It is a well known fact that there exists a unique orientation-preserving isometric embedding $\iota :\mathbb{H}\to \mathcal{T}(S)$ from the hyperbolic plane $\mathbb{H}$ to the Teichmüller space such that $\iota(i)=X$ and $\iota^{*}(q)=i$, see the work of Herrlich--Schmithüsen \cite{Herrlich} for a detailed explanation. The path $\iota(i+t)$ for $t\in \mathbb{R}_+$ is called the \emph{horocycle} generated by $q$. Since $\iota$ is an isometric embedding, $h(X)(p)=d(\iota^{-1}X,\iota^{-1}b)-d(\iota^{-1}X,\iota^{-1}p)$ for $X,b,p\in \iota(\mathbb{H})$. That is, if we restrict the evaluations of horofunctions to the image of the Teichmüller disc, the value coincides with the values in the hyperbolic plane. Hence, since the path $i+t$ is a horosphere of the Busemann point obtained by moving along the geodesic $e^ti$ along the hyperbolic plane, the path $\iota(i+t)$ is also a horosphere of the corresponding Busemann point $B(q)$, obtained by moving along the geodesic $\iota(e^t i)$. That is, it makes sense to call these paths horocycles.
Since $\iota$ is an isometric embedding, the geodesic between $X$ and $\iota(i+t)$ is contained in $\iota(\mathbb{H})$. Furthermore, the pushforward and pullback maps are continuous, so denoting $q_t$ the unit quadratic differential spawning the geodesic between $X$ and $\iota(i+t)$, we have $\lim_{t\to\infty} \iota^* (q_t)=i$, and $\iota_*(i)=q$, so $\lim_{t\to\infty} q_t=q$. The distance between $\iota(i+t)$ and $X$ grows to infinity, so any horocycle path generated by some $q$ based at $X$ converges to $q$ in the visual compactification based at $X$. Hence, horocycles generated by infusible quadratic differentials converge in the horofunction compactification, which had been previously shown by Jiang--Su \cite{jiang} and Alberge \cite{Vincent} in the context of surfaces without boundary.
\begin{corollary}\label{co:convergencehorocycles}
Let $S$ be a compact surface with possibly nonempty boundary and finitely many marked points and let $q$ be an infusible quadratic differential based at any $X\in \mathcal{T}(S)$. Then the horocycle generated by $q$ converges in the horofunction compactification.
\end{corollary}
\begin{proof}
The horocycle path converges to $q$ in the visual compactification based at $X$, so by \cref{co:convergencepaths} all accumulation points in the horofunction compactification are contained in $\Pi^{-1}_X(q)$. Furthermore, since $q$ is infusible, $\Pi^{-1}_X(q)$ is a singleton, so the horocycle path has a unique accumulation point in the horofunction compactification, and hence it converges.
\end{proof}
On the other hand, Fortier Bourque found some diverging horocycles in the horofunction compactification.
\begin{theorem}[Fortier Bourque {\cite[Theorem 1.1]{Fortier}}]
Let $S$ be a closed surface of genus $g$ with $p$ marked points, such that $3g+p\ge5$. Then there is some fusible quadratic differential $q$ based at some basepoint $X\in\mathcal{T}(S)$ such that the associated horocycle path does not converge in the horofunction compactification.
\end{theorem}
\cref{co:convergencepaths} gives an upper limit on the set of accumulation points, as it has to be contained in $\Pi^{-1}_X(q)$.
Furthermore, by \cref{co:convergingtohoriffconvergingeveryvis} we have that a path converges in the horofunction compactification if and only if it converges in each visual compactification. Hence, such a divergent horocycle also diverges in some visual compactification. That is, we get Corollary \ref{co:divergencehorocycles}.
This contrasts with the behavior of Teichmüller rays, which by \cref{co:convergencegeodesics} or \cite[Theorem 7]{Walsh} converge in all visual compactifications.
\section{Dimension of the fibers}\label{se:shapeoffibers}
Our first approach in determining the shape of the fibers is looking at the limits of Busemann points, which by Proposition \ref{pr:lowerbound} give us bounds on the elements of $\Pi^{-1}(q)$. For a given quadratic differential $q$ and a foliation $G$ we define $\mathcal{W}^q(G)$ as the map from measured foliations to $\mathbb{R}$ given by
\[
\mathcal{W}^q(G)=\frac{i(G,\cdot)^2}{i(G,H(q))},
\]
if $i(G,H(q))>0$, and $\mathcal{W}^q(G)=0$ otherwise.
By the extension of Walsh's \cref{co:walshbusemanshape} describing Busemann points in the Gardiner--Masur compactification, we see that the element $\Xi^{-1} B_q$ has the form $\sqrt{\sum_i \mathcal{W}^q(V_i)}$, where $V_i$ are the indecomposable components of $V(q)$. Hence, a reasonable path to follow for understanding the limits of Busemann points is understanding the limits of $\mathcal{W}^q$ as $q$ varies.
\begin{lemma}\label{le:limith}
Let $q_n$ be a sequence of quadratic differentials on $X$ converging to $q$, and let $V_j^n$, $0<j\le c(n)$ be the indecomposable components of $V(q_n)$. Let $G^n$ be a sequence of non zero measured foliations of the form $\sum \alpha_j^n V_j^n$, converging to a measured foliation $G$. Then
\[
\lim_{n\to\infty} \mathcal{W}^{q_n}(G^n) = \mathcal{W}^q (G)
\]
if $G$ is non zero and $\lim_{n\to\infty} \mathcal{W}^{q_n}(G^n)=0$ if $G$ is zero, where the convergence is pointwise in both cases.
\end{lemma}
\begin{proof}
For any measured foliation $F$ we have $\mathcal{W}^{q_n}(G^n)(F)=\frac{i(G^n,F)^2}{i(G^n,H(q_n))}$, so if $G$ is non zero the lemma follows by continuity of the intersection number.
If $G$ is zero the result follows from applying the same proof as in \cite[Lemma 27]{Walsh}.
\end{proof}
Denote $\mathcal{B}$ the set of Busemann points, $\overline{\mathcal{B}}$ its closure and $\overline{\mathcal{B}}(q)$ the intersection $\overline{\mathcal{B}}\cap \Pi^{-1}(q)$. We can use the previous lemma to show that the elements of $\overline{\mathcal{B}}(q)$ satisfy certain properties.
\begin{proposition}\label{pr:busemanclosureshape}
Let $S$ be a closed surface with possibly marked points, $\xi\in \overline{\mathcal{B}}(q)$ and $V_i$, $i\in\{1,\ldots,k\}$ be the indecomposable components of $V(q)$. Denote $x_i=\frac{i(V_i,\cdot)}{i(V_i,H(q))}$. Then, the square of the representation of $\xi$ in the Gardiner--Masur compactification, $(\Xi^{-1} \xi)^2$, is a homogeneous polynomial of degree $2$ in the variables $x_i$, whose coefficients sum to $1$.
\end{proposition}
Recall that we are using a normalized version of the Gardiner--Masur compactification. Under the projectivized version the sum of the coefficients cannot have any fixed value.
\begin{proof}
Since the surface does not have boundary, all Busemann points are of the form $B(q')$ for some quadratic differential of unit area $q'$. Consider a sequence $(q_n)$ such that $B(q_n)$ converges to $\xi$ and $q_n$ converges to $q$. Let $c(n)$ be the number of indecomposable vertical components of $V(q_n)$, and let $V^n_j$, $0<j\le c(n)$ be those components. We know that $c(n)$ is bounded by some number depending on the topology of the surface. Take a subsequence such that $c(n)$ is equal to some constant $c$ and $V^n_j$ converges for each $j$. The sum $\sum_{j=1}^c V^n_j$ converges as $n\to\infty$ to $\sum_{i=1}^k V_i$, so the limit of each $V^n_j$ has to be of the form $\sum_{i=1}^k \alpha_j^i V_i$. Furthermore, $\sum_{j=1}^c \alpha_j^i=1$, since
\[
\sum_{i=1}^k V_i=V(q)=\lim_{n\to\infty} V(q_n)=\lim_{n\to\infty}\sum_{j=1}^cV_{j}^n=\sum_{j=1}^c\sum_{i=1}^k\alpha_j^iV_i=\sum_{i=1}^k\left(\sum_{j=1}^c\alpha_j^i\right)V_i.
\]
The element associated to the Busemann point $B(q_n)$ in the Gardiner--Masur compactification satisfies
\[
\mathcal{E}_{q_n}^2=\sum_{j=1}^c \mathcal{W}^{q_n}(V^n_j).
\]
Hence, applying Lemma \ref{le:limith} we get the following expressions for the square of the limit of Busemann points:
\[
(\Xi^{-1}\xi)^2=\sum_{j=1}^c \mathcal{W}^q\left(\sum_{i=1}^k \alpha_j^i V_i\right)=\sum_{j=1}^c\frac{\left(\sum_{i=1}^k \alpha_j^i i(V_i,H(q)) x_i\right)^2}{\sum_{i=1}^k \alpha_j^i i(V_i,H(q))}.
\]
That is, we get a homogeneous polynomial of degree 2 in the variables $x_i$. Since $q$ has unit area, the sum of the coefficients is
\[
\sum_{j=1}^c \sum_{i=1}^k \alpha_j^i i(V_i,H(q))=\sum_{i=1}^k i(V_i, H(q))=1,
\]
which completes our claim.
\end{proof}
By Proposition \ref{pr:upperbound}, the Busemann point $B(q)$ gives an upper bound on all functions in $\Pi^{-1}(q)$. While Proposition \ref{pr:lowerbound} does not give us a lower bound directly, we can use Lemma \ref{le:walshcompact} to get one. For a unit area quadratic differential $q$, let $Z_j$ be the interior parts of $V(q)$, and denote $G_j$ the union of interior indecomposable components within $Z_j$. Further, let $P_i$ be the boundary components of $V(q)$. We define the \emph{minimal point} at $q$ as
\[
M(q)=\Xi \left(\sum_i \mathcal{W}^q(P_i)+\sum_j \mathcal{W}^q\left(G_j\right)\right)^{1/2}.
\]
\begin{proposition}\label{pr:lowerboundGM}
Let $q$ be a quadratic differential. Then, for any $\xi\in\Pi^{-1}(q)$, we have
\[\Xi^{-1}\xi\ge \Xi^{-1} M(q)\]
in the Gardiner--Masur compactification.
Furthermore, $M(q)\in \Pi^{-1}(q)$ whenever each $G_j$ has at most two annuli parallel to the boundaries of $Z_j$ with marked points.
\end{proposition}
We note that if $S$ does not have boundary we simply have $M(q)=\Xi i(V(q),\cdot)^2$, which by the proposition is always contained in $\Pi^{-1}(q)$.
The minimality is essentially derived from the following well-known inequality.
\begin{lemma}[Titu's lemma]\label{le:elementaryinequality}
For any positive reals $a_1,\ldots, a_n$ and $b_1,\ldots, b_n$ we have
\[
\sum_j\frac{a_j^2}{b_j}\ge \frac{\left(\sum_j a_j\right)^2}{\sum_j b_j}.
\]
\end{lemma}
\begin{proof}
The inequality can be written as
\[
\sum_i b_i\sum_j\frac{a_j^2}{b_j}\ge \left(\sum_j a_j\right)^2,
\]
so the result follows after applying the Cauchy--Schwarz inequality.
\end{proof}
The implication this lemma has for our discussion is that $\mathcal{W}^q(\cdot)$ is convex, in the sense that for any $G=\sum_i G_i$ and any measured foliation $F$ we have
\[
\sum_i \mathcal{W}^q(G_i)(F) \ge \mathcal{W}^q(G)(F).
\]
\begin{proof}[Proof of Proposition \ref{pr:lowerboundGM}]
If $q$ is infusible then each $G_j$ is indecomposable, so $M(q)=B(q)$, the fiber $\Pi^{-1}(q)$ has one point and the proposition is satisfied.
Consider then $q$ fusible and $\xi\in \Pi^{-1}(q)$. Let $(x_n)=(\tray{q_n}{t_n})\subset \mathcal{T}$ be a sequence converging to $\xi$. By Lemma \ref{le:walshlowerbound} we have $\Xi^{-1}(h(x_n))\ge\Xi^{-1} B(q_n)$. Hence, $\Xi^{-1}\xi\ge \liminf_{n\to\infty}\Xi^{-1} B(q_n)$.
Given a measured foliation $F$, take a subsequence so that \[\liminf_{n\to\infty}\Xi^{-1} B(q_n)(F)=\lim_{n\to\infty}\Xi^{-1} B(q_n)(F).\]
The foliations $V(q_n)$ converge to $V(q)$, so by Proposition \ref{pr:notsplittingboundary} for $n$ big enough all boundary components $P_i$ are contained within $V(q_n)$. Hence, for $n$ big enough the foliations $V(q_n)$ can be split to the interior parts $Z_j$ by cutting along the proper arcs. Denote $G_j^n$ the interior components of the foliation $V(q_n)$ restricted to $Z_j$. Let $G_{j,k}^n$ be the indecomposable components of $G_j^n$. The sequence $G_j^n$ converges to $G_j$, so we can take a subsequence such that each $G_{j,k}^n$ converges to some foliation $G_{j,k}$ with $\sum_k G_{j,k}=G_j$. Applying Lemma \ref{le:limith} we have
\[
\lim_{n\to\infty}\Xi^{-1} B(q_n)(F)=\lim_{n\to\infty}\sum_i \mathcal{W}^{q_n}(P_i)+\sum_j \sum_k \mathcal{W}^{q_n}\left(G^n_{j,k}\right)=\sum_i \mathcal{W}^q(P_i)+\sum_j \sum_k \mathcal{W}^q\left(G_{j,k}\right).
\]
Hence, applying Lemma \ref{le:elementaryinequality} to the second sum we get the first part of the proposition.
To observe that the limit is actually reached we can repeat the proof of Proposition \ref{pr:onlyifpart} and observe that a proper arc for each interior part is enough to approach the foliation whenever each interior part of the foliation has at most two annuli parallel to boundaries with marked points.
\end{proof}
By \cref{co:orderpreserved} this lower bound is carried to the horofunction representation and by Proposition \ref{pr:upperbound} we have an upper bound. Hence, we have the chain of inequalities
\[M(q) \le \xi \le B(q),\]
for any $\xi\in \Pi^{-1}(q)$.
As we see in the next proposition, this chain can be translated as well to the Gardiner--Masur compactification.
\begin{proposition}\label{pr:upperboundGM}
Let $\xi\in\Pi^{-1}(q)$. Then,
\[\Xi^{-1}\xi \le \Xi^{-1} B(q).\]
\end{proposition}
\begin{proof}
We have a sequence of points $\tray{q_n}{t_n}$ converging to $\xi$, with $q_n$ converging to $q$.
By Lemma \ref{le:fixingvaluealongpath} we have $\xi(\tray{q}{t})=-t$. Further, $\tray{q_n}{t_n}$ converges in the Gardiner--Masur compactification to the function $f(G)^2=\lim_{n\to\infty} e^{-2t_n} \operatorname{Ext}_\tray{q_n}{t_n}(G)$, and we have $\Xi f (x) = \xi(x)$. Hence,
\[\frac{1}{2}\log\frac{f(F)}{\operatorname{Ext}_\tray{q}{t}(F)}\le \frac{1}{2}\log\sup_{G\in P} \frac{f(G)}{\operatorname{Ext}_\tray{q}{t}(G)}=-t.
\]
Upon exponentiating and reordering the terms, we get
\[\lim_{n\to\infty} e^{-2t_n} \operatorname{Ext}_\tray{q_n}{t_n}(F)=f(F)\le e^{-2t}\operatorname{Ext}_\tray{q}{t}(F)
\]
for all $t$. Letting $t\to\infty$, the right hand side converges to $(\Xi^{-1}B(q)(F))^2,$ so we get the proposition.
\end{proof}
Using these bounds we can further refine the characterization of points in $\Xi^{-1}\Pi^{-1}(q)$.
\begin{proposition}\label{pr:firstderivative}
Let $q$ be a quadratic differential, let $V_i$, $i\in\{1,\ldots,k\}$ be the indecomposable components of $V(q)$ and let $x_i(F)=\frac{i(V_i,F)}{i(V_i,H(q))}$. Given $f\in \Xi^{-1}\Pi^{-1}(q)$ and $c>0$ we have, for all $F\in \mathcal{MF}$,
\[
f(F)^2=c^2+2 c \sum_i i(V_i,H(q))(x_i(F)-c)+\sum_{i,j} O \left((x_i(F)-c)(x_j(F)-c)\right).
\]
In particular, as a function of the values $x_i(F)$ at the point $x_i=c$ for all $i$, $f^2(x_1,\ldots ,x_k)$ takes value $c^2$, is differentiable and satisfies $\frac{\partial}{\partial x_i} f^2(x_1,\ldots,x_k) = 2 c\:i(V_i,H(q)).$
\end{proposition}
\begin{proof}
We have that $\left(\Xi^{-1} M(q)\right)^2\le f^2\le \left(\Xi^{-1} B(q)\right)^2$. Denoting $a_i=i(V_i,H(q))$ and $x_i=x_i(\cdot)$ we have by Lemma \ref{le:elementaryinequality} and Proposition \ref{pr:lowerboundGM} that $\left(\sum a_ix_i\right)^2\le \left(\Xi^{-1} M(q)\right)^2$. Writing the bounds on $f^2$ in terms of the variables $x_i$, we obtain
\[
\left(\sum a_ix_i\right)^2\le f^2\le \sum a_ix^2_i.
\]
Adding that $\sum a_i=1$, we have that $f^2$ is bounded below by the arithmetic mean, and above by the quadratic mean. Rewriting both sides as a polynomial in $x_i-c$, we get
\[
c^2+2c\sum a_i(x_i-c)+\left(\sum a_i (x_i-c)\right)^2\le f^2\le c^2+2c\sum a_i(x_i-c)+\sum a_i (x_i-c)^2,
\]
so the first part of the proposition is satisfied. Substituting the value $x_i(F)=c$ we get the second part.
\end{proof}
By Propositions \ref{pr:optimalpath} and \ref{pr:firstderivativehorofunction} all members of $\Pi^{-1}(q)$ share their values along $\tray{q}{\cdot}$, as well as the directional derivatives at the points of the geodesic. For a given $q$ we have $x_i(\lambda H(q))=\lambda$ for all $i$ and all $\lambda>0$. Hence, Proposition \ref{pr:firstderivative} shows a similar relation for the representations of the elements of $\Pi^{-1}(q)$ in the Gardiner--Masur compactification, as they share their value, as well as some derivatives, at all foliations of the form $\lambda H(q)$.
As shown by Fortier Bourque \cite{Fortier}, the Gardiner--Masur boundary contains extremal length functions, so we can use Proposition \ref{pr:firstderivative} to get some information on the differentials of these functions. Namely, we recover in a more restricted setting the following result, proven in \cite[Theorem 1.1]{Miyachi2}.
\begin{theorem}[Miyachi]\label{th:differentialextremallenght}
Let $G_t$, $t\in[0,t_0]$ be a path in the space of measured foliations on $X$ which admits a tangent vector $\dot{G}_0$ at $t=0$ with respect to the canonical piecewise linear structure. Then, the extremal length $\operatorname{Ext}(G_t,X)$ is right-differentiable at $t=0$ and satisfies
\[
\left.\frac{d}{dt^+} \operatorname{Ext}(G_t,X)\right\vert_{t=0}=2 i(\dot{G}_0, F_{G_0,X}),
\]
where $F_{G_0,X}$ is the horizontal foliation of the Hubbard--Masur differential associated to $G_0$ on $X$.
\end{theorem}
The concrete extremal length functions in the Gardiner--Masur boundary we are going to use are given by the following theorem.
\begin{theorem}[Fortier Bourque]\label{th:maxsresult}
Let $\{w_1,\ldots,w_k\}$ be weights with $w_i>0$, let $\phi_n=\tau_1^{\lfloor n w_1 \rfloor}\circ \dots \circ \tau_k^{\lfloor n w_k \rfloor}$ be a sequence of Dehn multitwists around a multicurve $\{\alpha_1,\ldots ,\alpha_k\}$ in a surface $S$ and let $X\in \mathcal{T}(S)$. Then the sequence $\phi_n(X)$ converges to
\[
\left[\operatorname{Ext}^{1/2}\left(\sum_{i=1}^k w_i i(F,\alpha_i)\alpha_i,X\right)\right]_{F\in \mathcal{MF}(S)}
\]
in the projective Gardiner--Masur compactification as $n\to\infty.$
\end{theorem}
The precise statement of this result is slightly weaker \cite[Corollary 3.4]{Fortier}, but the same proof yields this extension.
Fix a multicurve $\{\alpha_1,\ldots ,\alpha_k\}$, weights $\{w_1,\ldots, w_k\}$ and denote $\alpha=\sum w_i \alpha_i$. Furthermore, normalize the weights $\{w_1,\ldots, w_k\}$ so that there is a unit area quadratic differential $q$ such that $V(q)=\alpha$. Denote $V_i$ the vertical components of $V(q)$. That is, $V_i=w_i\alpha_i$. We are able to recover Miyachi's formula when $i(V_i,H(q))=w_i$ for all $i$. The sequence $\phi_n(X)$ converges in the visual compactification based at $X$ to $q\in T_X\mathcal{T}(S)$. By Theorem \ref{th:maxsresult} the function $f(F)=\lambda^{1/2}\operatorname{Ext}^{1/2}\left(\sum_{i=1}^k w_i i(F,\alpha_i)\alpha_i,X\right)$ is in $\Xi^{-1}\Pi^{-1}(q)$ for some $\lambda>0$. We have $i(F,\alpha_i)=x_i(F)i(V_i,H(q))/w_i$. So, assuming $i(V_i,H(q))=w_i$ we can write
\[
f^2=\lambda\operatorname{Ext}\left(\sum_{i=1}^k x_i(F)V_i,X\right).
\]
We have $x_i(H(q))=1$ for all $i$, so by Proposition \ref{pr:firstderivative} the value of $\lambda$ satisfies
\[
f^2(H(q))=\lambda\operatorname{Ext}\left(V(q),X\right)=1.
\]
Since $q$ has unit area, $\operatorname{Ext}\left(V(q),X\right)=1$, so $\lambda=1$. Let $I$ be any foliation such that $H(q)+I$ is well defined, and let $F_t=H(q)+tI$. We have
\[
f^2(F_t)=\operatorname{Ext}\left(\sum_i V_i + t\sum_i x_i(I) V_i,X\right).
\]
Hence, denoting $J=\sum x_i(I) V_i$ and $G_t=V(q)+tJ$ we can apply Proposition \ref{pr:firstderivative} to get
\[
\frac{d}{dt} \operatorname{Ext}\left(G_t,X\right)=\frac{d}{dt} f^2(F_t)=\sum_i \frac{d x_i}{dt} \cdot \frac{\partial f^2}{\partial x_i} =
\sum_i \frac{i(V_i,I)}{i(V_i,H(q))}\cdot 2i(V_i,H(q))=
2i(V(q),I).
\]
On the other hand, applying Miyachi's Theorem \ref{th:differentialextremallenght} directly we get
\begin{multline*}
\frac{d}{dt}\operatorname{Ext}\left(G_t,X\right)=2i(H(q),J)=2\sum_i i(H(q),V_i)x_i(I)\\
=2\sum_i i(H(q),V_i)\frac{i(V_i,I)}{i(H(q),V_i)}=2i(V(q),I),
\end{multline*}
so both expressions coincide, and we have recovered Theorem \ref{th:differentialextremallenght} in this rather restricted setting. We would like to note that Proposition \ref{pr:firstderivative} also gives some information for finding the second derivatives around the point $H(q)$. Namely, the second derivatives cannot diverge to infinity as we approach $H(q)$.
Combining Proposition \ref{pr:firstderivative} with Proposition \ref{pr:busemanclosureshape} we get fairly restrictive necessary conditions for the points in $\overline{\mathcal{B}}(q)$ for surfaces without boundary. We shall be using these conditions in \cref{se:nondensity} to prove that Busemann points are not dense in the horoboundary. Now we prove a more straightforward consequence. For a topological space $U$, denote $\dim(U)$ its Lebesgue dimension. See the book by Munkres \cite[Chapter 5.80]{Munkres} for some background on basic dimension theory. Given an embedding $U\hookrightarrow V$ we have $\dim(U)\le \dim(V)$, so the conditions for the points on $\overline{\mathcal{B}}(q)$ give us the following result.
\begin{corollary}
Let $S$ be a surface without boundary. Let $q$ be a quadratic differential such that $V(q)$ has $n$ indecomposable components. Then,
\[
\dim(\overline{\mathcal{B}}(q))\le \frac{n(n-1)}{2}.
\]
\end{corollary}
\begin{proof}
By Proposition \ref{pr:busemanclosureshape} we have an embedding of $\overline{\mathcal{B}}(q)$ into the space of homogeneous polynomials of degree $2$. For a given $\xi\in \overline{\mathcal{B}}(q)$, let $b_{i,j}^\xi$ be the coefficient of $x_i x_j$. Adding the restriction $b_{i,j}=b_{j,i}$ we have a coefficient for each possible pair, so the dimension of homogeneous polynomials of degree $2$ is equal to the number of possible pairs, that is, $\frac{n(n+1)}{2}$. Furthermore, by Proposition \ref{pr:firstderivative} we know the value of the first derivatives at $x_i=c$ for all $i$. For each $i$ this gives us the linear equation $\sum_{j\neq i} b_{i,j}^\xi + 2 b_{i,i}^\xi=2 i(V_i,H(q))$. These $n$ equations are linearly independent, as $b_{i,i}^\xi$ is only contained on the equation related to $x_i$. As such, the dimension of the coefficients is at most $\frac{n(n+1)}{2}-n=\frac{n(n-1)}{2}$.
We note that the sum of the coefficients being $1$ is the equation we get when summing the $n$ equations given by the derivatives, so we cannot use that to restrict further the dimension.
\end{proof}
Recall that the number of indecomposable components $n$ is bounded in terms of the topology of the surface. Hence, the previous corollary gives us a uniform upper bound on the dimension of $\overline{\mathcal{B}}(q)$.
More interestingly, we can also get a lower bound for the dimension of $\overline{\mathcal{B}}(q)$. This allows us to get a lower bound on the dimension of $\Pi^{-1}(q)$. Furthermore, as this is a lower bound, we do not need to restrict ourselves to surfaces without boundary, as the set of Busemann points always contains the set of Busemann points of the form $B(q)$. The bound is obtained by finding a dimensionally big set of different ways to approach a certain $q$ along the boundary and showing that each of these different approaches results in different limits for the associated Busemann points.
\begin{theorem}\label{th:dimensionfiberslowerbound}
Let $S$ be a surface of genus $g$ with $b_m$ and $b_u$ boundaries with and without marked points respectively and $p$ interior marked points.
Then there is some unit quadratic differential $q$ such that
\[\dim (\overline{\mathcal{B}}(q))\ge 2\left\lfloor\frac{g+b_m}{2}+\frac{b_u+p}{4}-\sigma(g,b_u+p)\right\rfloor,\]
where $\sigma$ has value
\begin{itemize}
\item 0 if $g\ge 2$,
\item 1/4 if $g=1$ and $b_u+p\ge 1$,
\item 1/2 if $g=1$ and $b_u+p=0$ or $g=0$ and $b_u+p\ge 2$,
\item 3/4 if $g=0$ and $b_u+p=1$ and
\item 1 if $g=0$ and $b_u+p=0$.
\end{itemize}
\end{theorem}
\begin{proof}
For simplicity we shall first do the proof in the case where $b_m=b_u=p=0$, and $g\ge 2$. Let $q$ be the quadratic differential such that $V(q)$ is the union of the closed curves $V_1,\ldots, V_{3C}$ shown in \cref{fi:v}, where $C= \lfloor g/2 \rfloor$. Let $U\subset \mathbb{R}^{3C}$ be the space of vectors $(\alpha_1,\alpha_2,\ldots,\alpha_{3C})$ with positive coefficients and such that
\begin{equation}\label{eq:dimensionproof1}
\alpha_{3k+1}+\alpha_{3k+2}+\alpha_{3k+3}=\frac{1}{C}, \qquad 0\le k\le C-1.
\end{equation}
Each independent linear restriction reduces the dimension of the set $U$ by $1$, so $\dim U = 2C$. Hence, to prove the simplest case of the theorem it suffices to build an injective continuous map from $U$ to $\overline{\mathcal{B}}(q)$.
Choose $\alpha\in U$ and consider the multicurve $\gamma^\alpha=\sum \alpha_i G_i$, where $G_i$ are as in \cref{fi:v}. We will shortly show that by applying Dehn twists about the closed curves $V_i$ to $\gamma^\alpha$ we can get a sequence of multicurves approaching $V(q)$. We can then take the sequences of associated Busemann points, which as we will see converge to distinct points in $\Pi^{-1}(q)$. We will define the injective continuous map from $U$ to $\Pi^{-1}(q)$ by setting it as the limit of the associated sequence of Busemann points, giving us the theorem.
Let $\tau_i$ be the Dehn twist around $V_i$, and let $w^\alpha_{i}$ be such that
\begin{equation}\label{eq:dimensionproof2}
w^\alpha_{3k+1}(\alpha_{3k+2}+\alpha_{3k+3})=
w^\alpha_{3k+2}(\alpha_{3k+3}+\alpha_{3k+1})=
w^\alpha_{3k+3}(\alpha_{3k+1}+\alpha_{3k+2})=\frac{1}{3C}.
\end{equation}
\begin{figure}
\caption{Labeling of the curves when the surface has no boundaries nor marked points. If $g$ is odd then there is an unused handle.}
\label{fi:v}
\end{figure}
Define $\phi^\alpha_n=\tau_1^{\lfloor w^\alpha_1 n \rfloor}\circ\tau_2^{\lfloor w^\alpha_2 n \rfloor} \circ\dots \circ\tau_{3C}^{\lfloor w^\alpha_{3C} n \rfloor}$. For $0\le k \le C-1$ and $j\in\{1,2,3\}$ denote $F_{k,j}^\alpha=\sum_{i\in\{1,2,3\}-j}w_{3k+i}^\alpha V_{3k+i}$. By counting the intersections between the curves $V_i$ and $G_i$ we have that there is some sequence $\lambda_n$ such that $\lambda_n\phi^\alpha_n G_{3k+j}$ converges to $F_{k,j}^\alpha$ for all $k,j$ as $n\to\infty$. By the conditions on the weights, $\lambda_n\phi^\alpha_n\gamma^\alpha$ converges to $V(q)$. Let $q^\alpha_n$ be the quadratic differential associated to $\lambda_n\phi^\alpha_n \gamma^\alpha$. Since $\lambda_n\phi^\alpha_n\gamma^\alpha$ converges to $V(q)$, we have that $q^\alpha_n$ converges to $q$, so all accumulation points of $(B(q^\alpha_n))$ are in $\Pi^{-1}(q)$. We know that $(\Xi^{-1}B(q^\alpha_n))^2=\sum_i \mathcal{W}^{q^\alpha_n}(\alpha_i \lambda_n\phi^\alpha_n G_i)$, so by Lemma \ref{le:limith} we have
\[
(\xi^\alpha)^2=\lim_{n\to\infty} (\Xi^{-1}B(q^\alpha_n))^2=
\sum_{k=0}^{C-1}\sum_{j\in\{1,2,3\}}
\alpha_{3k+j} \mathcal{W}^q(F_{k,j}^\alpha).
\]
Define then the map from $U$ to $\Pi^{-1}(q)$ sending $\alpha\in U$ to $\Xi\xi^\alpha\in \Pi^{-1}(q)$.
As before, we shall denote $x_i:=\frac{i(V_i,\cdot)}{i(V_i,H(q))}=3C i(V_i,\cdot)$. With this notation we have
\[
\mathcal{W}^q(F_{k,j}^\alpha)=\frac{i(F_{k,j}^\alpha,\cdot)^2}{i(F_{k,j}^\alpha,H(q))}=\frac{\left(\sum_{i\in\{1,2,3\}-j}w^\alpha_{3k+i}x_{3k+i}\right)^2}{3C\sum_{i\in\{1,2,3\}-j}w^\alpha_{3k+i}}.
\]
That is, given $\alpha$ we know precisely the shape of the polynomial $\xi^\alpha$. Since $\alpha$ has positive coefficients, each of the $w_i^\alpha$ depends continuously on $\alpha$, so $\xi^\alpha$ depends continuously on $\alpha$.
It remains to show injectivity. Let $\beta\in U$ be such that $\xi^{\alpha}=\xi^{\beta}$. While we have equated two polynomials, we cannot conclude directly that the coefficients are equal, as these cannot be evaluated for arbitrary values. However, we can evaluate at elements of the form $b_1 G_{3k+1}+b_2 G_{3k+2}+b_3 G_{3k+3}$ for $b_1,b_2,b_3\ge 0$, which is enough to prove that $\xi^{\alpha}$ and $\xi^{\beta}$ have the same coefficients.
Equating then the coefficients for $x_{3k+1}x_{3k+2}$, $x_{3k+2}x_{3k+3}$ and $x_{3k+1}x_{3k+3}$ we get
\begin{align*}
\frac{\alpha_{3k+1} w^\alpha_{3k+2}w^\alpha_{3k+3}}{w^\alpha_{3k+2}+w^\alpha_{3k+3}}=&\frac{\beta_{3k+1} w^\beta_{3k+2}w^\beta_{3k+3}}{w^\beta_{3k+2}+w^\beta_{3k+3}},\\
\frac{\alpha_{3k+2} w^\alpha_{3k+1}w^\alpha_{3k+3}}{w^\alpha_{3k+1}+w^\alpha_{3k+3}}=&\frac{\beta_{3k+2} w^\beta_{3k+1}w^\beta_{3k+3}}{w^\beta_{3k+1}+w^\beta_{3k+3}} \quad \text{ and }\\
\frac{\alpha_{3k+3} w^\alpha_{3k+1}w^\alpha_{3k+2}}{w^\alpha_{3k+1}+w^\alpha_{3k+2}}=&\frac{\beta_{3k+3} w^\beta_{3k+1}w^\beta_{3k+2}}{w^\beta_{3k+1}+w^\beta_{3k+2}}.
\end{align*}
Dividing these equalities and using equations \eqref{eq:dimensionproof1} and \eqref{eq:dimensionproof2} we get
\begin{align*}
\frac{\alpha_{3k+1}}{\alpha_{3k+2}}\frac{(1/C+\alpha_{3k+2})}{(1/C+\alpha_{3k+1})}=&
\frac{\beta_{3k+1}}{\beta_{3k+2}}\frac{(1/C+\beta_{3k+2})}{(1/C+\beta_{3k+1})},\\
\frac{\alpha_{3k+2}}{\alpha_{3k+3}}\frac{(1/C+\alpha_{3k+3})}{(1/C+\alpha_{3k+2})}=&
\frac{\beta_{3k+2}}{\beta_{3k+3}}\frac{(1/C+\beta_{3k+3})}{(1/C+\beta_{3k+2})} \quad \text{ and } \\
\frac{\alpha_{3k+3}}{\alpha_{3k+1}}\frac{(1/C+\alpha_{3k+1})}{(1/C+\alpha_{3k+3})}=&
\frac{\beta_{3k+3}}{\beta_{3k+1}}\frac{(1/C+\beta_{3k+1})}{(1/C+\beta_{3k+3})}.
\end{align*}
Rearranging the first equality we have
\begin{equation}\label{eq:increasingfactors}
\frac{\alpha_{3k+1}}{\beta_{3k+1}}
\frac{\beta_{3k+2}}{\alpha_{3k+2}}
=
\frac{(1/C+\alpha_{3k+1})}{(1/C+\beta_{3k+1})}
\frac{(1/C+\beta_{3k+2})}{(1/C+\alpha_{3k+2})}.
\end{equation}
If $\frac{\alpha_{3k+1}}{\beta_{3k+1}}<1$ we have $\frac{(1/C+\alpha_{3k+1})}{(1/C+\beta_{3k+1})}>\frac{\alpha_{3k+1}}{\beta_{3k+1}}$, and if $\frac{\alpha_{3k+2}}{\beta_{3k+2}}>1$ we have $\frac{(1/C+\alpha_{3k+2})}{(1/C+\beta_{3k+2})}<\frac{\alpha_{3k+2}}{\beta_{3k+2}}$. Assume then that $\alpha_{3k+1}<\beta_{3k+1}$. One of the factors of the left hand side of the product in \cref{eq:increasingfactors} is replaced in the right hand side by a larger value. Hence, the other factor has to be replaced by a smaller value. That is, the inequality
$\alpha_{3k+2}<\beta_{3k+2}$ has to be satisfied. Similarly, if $\alpha_{3k+2}<\beta_{3k+2}$ we have $\alpha_{3k+3}<\beta_{3k+3}$. Equation \eqref{eq:dimensionproof1} leads to \[\frac{1}{C}=\alpha_{3k+1}+\alpha_{3k+2}+\alpha_{3k+3}<\beta_{3k+1}+\beta_{3k+2}+\beta_{3k+3}=\frac{1}{C},\]
which is a contradiction. Similarly, $\alpha_{3k+1}>\beta_{3k+1}$ leads to another contradiction, so $\alpha_{3k+1}=\beta_{3k+1}$, which leads to $\alpha=\beta$.
Therefore,
$\dim(\overline{\mathcal{B}}(q))\ge \dim(U)=2\left\lfloor \frac{g}{2}\right\rfloor$.
Assume now that $g\ge 2$ and there are some marked points or boundaries. For each pair of marked points or unmarked boundaries, or for each marked boundary we can repeat the proof with an extra genus, by replacing the curves $G_i$ by the curves shown in \cref{fi:replacement}, and halving the associated weights for $w_i$, as the curves intersect now twice the vertical components instead of once.
\begin{figure}
\caption{Each pair of marked points and boundary components without marked points can replace a genus, as well as each boundary with marked points.}
\label{fi:replacement}
\end{figure}
If $g=1$ we need to place at least one feature at one of the ends to prevent the curve $G_1$ from being contractible or parallel to an unmarked boundary, so if we have marked points or boundaries without marked points we place these, as boundaries with marked points are more effective at increasing the dimension. In this way we get that if $b_u+p\ge 1$ then
\[
\dim(\overline{\mathcal{B}}(q))\ge 2 \left\lfloor \frac{g+b_m}{2}+\frac{b_u+p-1}{4} \right\rfloor
\]
and if $b_u+p=0$ then
\[
\dim(\overline{\mathcal{B}}(q))\ge 2 \left\lfloor \frac{g+b_m-1}{2}\right\rfloor.
\]
Lastly, if $g=0$ we need to place two elements, one at each end. Using the same choice as we took for $g=1$ we get
\[
\dim(\overline{\mathcal{B}}(q))\ge 2 \left\lfloor \frac{b_m}{2}+\frac{b_u+p-2}{4} \right\rfloor \text{ for } b_u+p\ge 2,
\]
\[
\dim(\overline{\mathcal{B}}(q))\ge 2 \left\lfloor \frac{b_m-1}{2}\right\rfloor \text{ for } b_u+p=1 \text{ and }
\]
\[
\dim(\overline{\mathcal{B}}(q))\ge 2 \left\lfloor \frac{b_m-2}{2}\right\rfloor \text{ for }b_u+p=0.
\]
\end{proof}
We would like to note that this lower bound does not look optimal to us. Furthermore, the method used is restricted to getting to the dimension of the closure of Busemann points, so
the dimension of the whole fiber may be significantly larger than what could be achieved by refining the strategy from the proof.
\section{Busemann points are not dense}\label{se:nondensity}
By Proposition \ref{pr:busemanclosureshape} we know that points in the closure of Busemann points are smooth in the Gardiner--Masur representation with respect to certain variables. By showing that at least one point in the horoboundary is not smooth we will prove that Busemann points are not dense. The points we use for this analysis are once again the ones found by Fortier Bourque in Theorem \ref{th:maxsresult}.
Following Fortier Bourque's reasoning, we shall first prove the non density for the sphere with five marked points, and then lift to general closed surfaces by using the branched coverings given by the following Lemma, found in \cite[Lemma 7.1]{Gekhtman}.
\begin{lemma}[Gekhtman--Markovic]\label{le:branchingcover}
Let $S$ be a closed surface of genus $g$ with $p$ marked points, such that $3g+p\ge5$. Then there is a branched cover $\overline{S_{g,p}}\to\overline{S_{0,5}}$ that branches at all preimages of marked points that are not marked and induces an isometric embedding $\mathcal{T}(S_{0,5})\hookrightarrow\mathcal{T}(S_{g,p})$.
\end{lemma}
The particular conformal structure given to $S_{0,5}$ is obtained as follows. Let $S^1=\mathbb{R}/\mathbb{Z}$ and let $C=S^1\times [-1,1]$. We obtain a sphere $\Sigma$ by sealing the top and bottom of $C$ via the relation $(x,y)\sim(-x,y)$ for all $(x,y)\in S^1\times\{-1,1\}$. Let $P$ be the set consisting of the five points $(0,\pm 1)$, $(1/2,\pm 1)$ and $(0,0)$. The pair $S=(\Sigma,P)$, where we view $\Sigma$ as a topological space, is the sphere with five marked points. We get a point $X$ in $\mathcal{T}(S)$ by considering the complex structure on $\Sigma$ obtained by the construction, using the identity map as our marking.
\begin{figure}
\caption{Sphere with five marked points, with curves $\alpha$ and $\beta$. We show that the extremal length is not $C^{2+\varepsilon}$ along the path $\alpha+t\beta$, $t\in[0,t_0]$.}
\label{fi:pillowcase}
\end{figure}
Let $\alpha(t)=(t,3/4)$ and $\beta(t)=(t,1/4)$ for $t\in S^1$. Denote $\tau_\alpha$ and $\tau_\beta$ the Dehn twists along $\alpha$ and $\beta$. By Fortier Bourque's theorem, the sequence $(X_n)=((\tau_\alpha\circ\tau_\beta)^n X)$ converges to a multiple of $\operatorname{Ext}(i(\alpha,\cdot)\alpha+i(\beta,\cdot)\beta, X)$ in the Gardiner--Masur compactification. Furthermore, the sequence $(X_n)$ converges to the quadratic differential with vertical components $\alpha$ and $\beta$. Hence, it is enough to show that $\operatorname{Ext}(i(\alpha,\cdot)\alpha+i(\beta,\cdot)\beta, X)$ is not smooth with respect to the values of $i(\alpha,\cdot)$ and $i(\beta,\cdot)$, as these correspond to the variables $x_i$ from \cref{pr:busemanclosureshape}.
\begin{lemma}\label{le:extremallengthnotsmooth}
Let $G_t$, $t\in[0,t_0]$ be the foliation $\alpha+t\beta$ on $X$. The map $f(t):=\operatorname{Ext}(G_t,X)$ is not $C^{2+\varepsilon}$ for any $\varepsilon>0$. \end{lemma} \begin{proof}
Assume $f(t)$ is $C^{2+\varepsilon}$ for some $\varepsilon>0$. Then, by Miyachi's Theorem \ref{th:differentialextremallenght} we have
\[\frac{d}{dt}\operatorname{Ext}(G_t,X)=2 i(\beta,F_{G_t,X}),\]
where we remind that $F_{G_t,X}$ is the horizontal foliation of the unique Hubbard--Masur differential associated to $G_t$ on $X$. Hence, $g(t)=i(\beta,F_{G_t,X})$ should be $C^{1+\varepsilon}$.
For a general $X$ finding a precise expression of $F_{G,X}$ is a complicated problem, as the relation established by Hubbard and Masur is not explicit. However, in our case $X$ is topologically simple, and one can use Schwarz--Christoffel maps to get a map from $G$ to $F_{G,X}$. In particular, it is possible to show that the sphere with 5 marked points is conformally equivalent to the Riemann surface obtained by doubling an $L$-shaped polygon, marking the inner angles as shown in \cref{fi:lshaped} and setting certain values for $a,b$ and $l$. Furthermore, the quadratic differential obtained by $dz^2$ has $\alpha$ and $\beta$ as vertical foliations, with weights $a$ and $b$. Hence $q_{G_t,X}$ is $dz^2$ on the $L$-shaped pillowcase where $a=1$ and $b=t$, so $i(\beta,F_{G_t,X})=2l$. Markovic estimated in \cite[Section 9]{Markovic} the values of $a,b$ and $l$ around $b=0$ depending on a common parameter $r$. Up to rescaling, these values are given by
\begin{align*}
a(r)=&a(0)+D_1r+O(r^2),\\
b(r)=&D_2r+O(r^2) \text{ and } \\
l(r)=&l(0)+D_3r/\log r + o(r/\log r),
\end{align*}
where $A(r)=B(r)+O(f(r))$ means $\frac{|A(r)-B(r)|}{f(r)}$ is bounded around $r=0$, and $A(r)=B(r)+o(f(r))$ means $\frac{|A(r)-B(r)|}{f(r)}$ converges to $0$ as $r$ converges to $0$.
\begin{figure}
\caption{Doubling of the $L$-shaped polygon together with the curves $\alpha$ and $\beta$.}
\label{fi:lshaped}
\end{figure}
Rescaling the pillowcase by $1/a(r)$ we see that the parameter $t$ can be expressed as $t(r)=b(r)/a(r)$, and $g(t(r))=i(\beta,F_{G_t,X})=2l(r)/a(r)$. Observing that $t(0)=0$, we can get the first derivative of $g(t)$ at $0$ by evaluating the limit
\begin{multline*}
\lim_{h\to 0}\frac{g(h)-g(0)}{h}=\lim_{r\to 0}\frac{g(t(r))-g(0)}{t(r)}=2\lim_{r\to 0}\frac{l(r)-l(0)a(r)/a(0)}{b(r)}\\
=2\lim_{r\to 0}\frac{-\frac{l(0)D_1}{a(0)} r+D_3r/\log(r)+o(r/\log r)}{D_2r+O(r^2)}=-2\frac{l(0)D_1}{a(0)D_2}.
\end{multline*}
By our assumption, $g$ is $C^{1+\varepsilon}$, so around $t=0$ we can write
\[g(t(r))=g(0)+g'(0)t(r)+O(t(r)^{1+\varepsilon}).\]
However, we have
\begin{multline*}
\lim_{r\to 0}\frac{g(t(r))-g(0)-g'(0)t(r)}{t(r)^{1+\varepsilon}}=2\lim_{r\to 0}\frac{D_3/\log r+o(1/\log r)}{(D_2+O(r))t(r)^{\varepsilon}}\\
=2\lim_{r\to 0}\frac{D_3a(0)+o(1)}{D^2_2r^\varepsilon \log r+O(r^{2\varepsilon}\log r)}=+\infty,
\end{multline*}
giving us a contradiction. \end{proof}
Repeating Fortier Bourque's reasoning we can lift this example to any surface of genus $g$ with $p$ marked points as long as $3g+p\ge5$. Besides Gekhtman--Markovic's Lemma \ref{le:branchingcover}, the other key ingredient for the lifting is the following result. \begin{lemma}[Fortier Bourque]\label{le:lifting}
Let $\pi:S_{g,p}\to S_{0,5}$ be a branched cover of degree $d$ and let $\iota:\mathcal{T}(S_{0,5})\hookrightarrow \mathcal{T}(S_{g,p})$ be the induced isometric embedding. For any measured foliation $F$ on $S_{0,5}$ and any $X\in\mathcal{T}(S_{0,5})$, we have the identity
\[\operatorname{Ext}(\pi^{-1}(F),\iota(X))=d\operatorname{Ext}(F,X).\] \end{lemma} \begin{proof}
Recall that $q_{F,X}$ is the Hubbard--Masur differential associated to $F$ on $X$. We have that $\pi^*q_{F,X}=q_{\pi^{-1}(F),\iota(X)}$, so
\[
\operatorname{Ext}(\pi^{-1}(F),\iota(X))=
\int_{\iota(X)}|q_{\pi^{-1}(F),\iota(X)}|=
d\int_X|q_{F,X}|=d\operatorname{Ext}(F,X).
\] \end{proof}
Lifting the foliation $G_t$ from Lemma \ref{le:extremallengthnotsmooth} we get an upper bound for the smoothness of the extremal length.
\begin{theorem}\label{th:extremallengthnotsmooth}
Let $S$ be a closed surface of genus $g$ with $p$ marked points, such that $3g+p\ge5$. Then there exist two non-intersecting multicurves $\hat{\alpha}$, $\hat{\beta}$ and some $X\in \mathcal{T}(S)$ such that the map $f(t):=\operatorname{Ext}(\hat\alpha+t\hat\beta,X)$, $t\in[0,t_0]$ is not $C^{2+\varepsilon}$ for any $\varepsilon>0$. \end{theorem} \begin{proof}
Since $3g+p\ge5$ we have a map $\pi:S_{g,p}\to S_{0,5}$, with an induced isometric embedding $\iota:\mathcal{T}(S_{0,5})\hookrightarrow\mathcal{T}(S_{g,p})$. By Lemma \ref{le:extremallengthnotsmooth} we have two curves $\alpha,\beta\in S_{0,5}$ and some conformal structure $X\in\mathcal{T}(S_{0,5})$ such that $t\mapsto \operatorname{Ext}(\alpha+t\beta,X)$ is not smooth. Let $\hat\alpha=\pi^{-1}(\alpha)$ and $\hat\beta=\pi^{-1}(\beta)$. We have $\hat\alpha+t\hat\beta=\pi^{-1}(\alpha+t\beta)$, so applying Lemma \ref{le:lifting} we get $\operatorname{Ext}(\hat\alpha+t\hat\beta,\iota(X))=d\operatorname{Ext}(\alpha+t\beta,X)$. By Lemma \ref{le:extremallengthnotsmooth} the function $\operatorname{Ext}(\alpha+t\beta,X)$ is not $C^{2+\varepsilon}$ for any $\varepsilon>0$, so we get the theorem. \end{proof}
Theorem \ref{th:extremallengthnotsmoothintro} is essentially a rephrasing of the previous theorem. Finally, we are able to prove that Busemann points are not dense.
\begin{proof}[Proof of Theorem \ref{th:busemannnotdense}]
Let $\hat\alpha=\{\hat\alpha_1,\ldots,\hat\alpha_n\}$ and $\hat\beta=\{\hat\beta_1,\ldots,\hat\beta_m\}$ be multicurves in $S$ as in Theorem \ref{th:extremallengthnotsmooth}. Furthermore, let $q$ be the quadratic differential such that $V(q)=\sum_i{\hat\alpha_i}+\sum_j{\hat\beta_j}$. By Fortier Bourque's Theorem \ref{th:maxsresult} there exists some $f\in\Xi^{-1}\Pi^{-1}(q)$ of the form
\[
f^2(G)=\lambda\operatorname{Ext}\left(\sum_i i(G,\hat\alpha_i)\hat\alpha_i+\sum_j i(G,\hat\beta_j)\hat\beta_j,X\right)
\]
for some $\lambda>0$. Let $\pi$ be the projection given by the branched cover of the sphere with five marked points of Lemma \ref{le:branchingcover} and let $G_t$ be a path of foliations on the sphere with five marked points satisfying $i(G_t,\alpha)=1$ and $i(G_t,\beta)=t$. Finally, let $\hat{G}_t=\pi^{-1}(G_t)$. We have that $i(\hat{G}_t,\hat\alpha_i)=1$ for all $i$, and $i(\hat{G}_t,\hat\beta_j)=t$ for all $j$, so
\[
f^2(G_t)=\lambda\operatorname{Ext}\left(\sum_i \hat\alpha_i+\sum_j t \hat\beta_j,X\right)= \lambda\operatorname{Ext}(\hat\alpha+t \hat\beta,X),
\]
which by \cref{th:extremallengthnotsmooth} is not smooth with respect to $t$. However, $x_{\hat\alpha_i}(G_t)$ and $x_{\hat\beta_j}(G_t)$ depend smoothly on $t$ so, by \cref{pr:busemanclosureshape}, $f$ is not contained in $\overline{\mathcal{B}}(q)$, and hence it is not contained in the closure of Busemann points. \end{proof}
\subsection{Measures on the horofunction boundary}
The Thurston compactification can be built in a similar way as the Gardiner--Masur compactification, by using the hyperbolic length of the curves instead of the extremal length. Let $\phi$ be the map between $\mathcal{T}(S)$ and $P\mathbb{R}_+^\mathcal{S}$ defined by sending $X\in \mathcal{T}(S)$ to the projective vector $[\ell(\alpha,X)]_{\alpha\in \mathcal{S}}$. The pair $(\phi,\overline{\phi(\mathcal{T}(S))})$ defines a compactification, and the boundary is given by the space of projective measured foliations, denoted $\mathcal{PMF}$.
As explained by Miyachi \cite{Miyachi}, neither the Thurston nor the horofunction compactification is finer than the other one. However, if we restrict to uniquely ergodic foliations we can build a bicontinuous map $\mathcal{G}_x$. By the work of Masur \cite{Masur2}, the set of uniquely ergodic projective foliations has full Lebesgue measure within the Thurston boundary. However, we shall now see that the same cannot be true for the corresponding set in the horofunction boundary for any strictly positive measure.
Within the boundary we have $\mathcal{G}_x(F)=B(q_{F,x})$, where we recall that $q_{F,x}$ is the quadratic differential on $x$ with $V(q_{F,x})=F$. Denote $\mathcal{B}_1$ the set of Busemann points associated to foliations with one indecomposable component, and $\mathcal{PMF}^{UE}$ the uniquely ergodic projective measured foliations. We have $\mathcal{G}_x(\mathcal{PMF}^{UE})\subset \mathcal{B}_1$. However, the following is also satisfied.
\begin{theorem}\label{th:nowheredense}
Let $S$ be a closed surface of genus $g$ with $p$ marked points, such that $3g+p\ge5$. Then the set $\mathcal{B}_1$ is nowhere dense in the horoboundary. \end{theorem} \begin{proof}
The action of $\MCG(S)$ on $\mathcal{T}(S)$ is extended to the projectivized version of the Gardiner--Masur compactification by $\psi[f(\alpha)]_{\alpha\in \mathcal{S}}=[f(\psi\alpha)]_{\alpha\in \mathcal{S}}$. For any $q$ such that $V(q)$ is uniquely ergodic, $\mathcal{E}_q=\Xi^{-1}B(q)=[i(V(q),\alpha)]_{\alpha\in\mathcal{S}}$, so $\psi \mathcal{E}_q=[i(V(q),\psi(\alpha))]_{\alpha\in\mathcal{S}}=[i(\psi^{-1}(V(q)),\alpha)]_{\alpha\in\mathcal{S}}.$ Hence, $\psi \mathcal{E}_q$ is equal to the representation of the Busemann point in the Gardiner--Masur compactification associated to the quadratic differential with uniquely ergodic vertical foliation $\psi^{-1}V(q)$. Therefore, $\mathcal{B}_1$ is invariant under the action of $\MCG(S)$, and since $\MCG(S)$ acts by homeomorphisms, the complement of the closure is also invariant.
Let $q_0$ be a quadratic differential such that there is some $f\in \Xi^{-1}\Pi^{-1}(q_0)$ not in $\Xi^{-1}\overline{\mathcal{B}}$. This exists by \cref{th:busemannnotdense}. Furthermore, let $q$ be a quadratic differential such that $V(q)$ and $H(q)$ are the stable and unstable foliations respectively of some pseudo-Anosov element $\phi\in \MCG(S)$. It is well known \cite[Expos\'e 12]{FLP} that for any closed curve $\alpha$ we have that $\lambda^{-n}\phi^n(\alpha)$ converges to $\frac{i(\alpha,V(q))}{i(H(q),V(q))}H(q)$, where $\lambda$ is the stretch factor of $\phi$. For any foliation $F$ we have that $\Pi^{-1}M(q_0)(F)=0$ if and only if $i(V(q_0),F)=0$. Hence, since $H(q)$ is the unstable foliation of a pseudo-Anosov element and $V(q_0)$ is a multicurve, we have $i(V(q_0),H(q))\neq 0$, and so $f(H(q))\ge\Pi^{-1}M(q_0)(H(q))>0$, where $M$ is the minimal point defined in section \ref{se:shapeoffibers}. We have $\phi^{n}[f(\alpha)]_{\alpha\in\mathcal{S}}=[f(\phi^n(\alpha))]_{\alpha\in\mathcal{S}}$.
Taking limits and using that the functions in the Gardiner--Masur compactification are homogeneous of degree 1, we get that
\[
\lim_{n\to\infty}[\phi^nf(\alpha)]_{\alpha\in\mathcal{S}}=\left[i(\alpha,V(q))f\left(\frac{H(q)}{i(V(q),H(q))}\right)\right]_{\alpha\in\mathcal{S}}=[i(\alpha,V(q))]_{\alpha\in\mathcal{S}}.
\]
Hence, in the normalized version, $\phi^n f$ converges to $i(\cdot ,V(q))=\Xi^{-1} B(q)$, as $V(q)$ is uniquely ergodic and therefore indecomposable. That is, $B(q)$ can be approached through a sequence of elements contained in the complement of the closure of $\mathcal{B}_1$.
Let $B(q)$ be any element in $\mathcal{B}_1$, where $q$ is any quadratic differential such that $V(q)$ has one indecomposable component. The set of pseudo-Anosov foliations is dense in $\mathcal{MF}(S)$, so we have a sequence of quadratic differentials $(q_n)$ converging to $q$ with $V(q_n)$ being a pseudo-Anosov foliation. Since $V(q)$ has one indecomposable component, the convergence is strong, and so $B(q_n)$ converges to $B(q)$. Each $B(q_n)$ can be approached through a sequence of elements contained in the complement of the closure of $\mathcal{B}_1$, so taking a diagonal sequence the same can be said for $B(q)$. \end{proof}
\begin{corollary}\label{co:nofullmeasure}
Let $S$ be a closed surface of genus $g$ with $p$ marked points, such that $3g+p\ge5$. Then, for any finite strictly positive measure $\nu$ on the horoboundary, the set $\mathcal{B}_1$ does not have full $\nu$-measure. \end{corollary} \begin{proof}
By Theorem \ref{th:nowheredense}, the complement of the closure of $\mathcal{B}_1$ is open and nonempty, so it must have positive $\nu$-measure. \end{proof}
Another natural family of measures on the boundary is obtained by considering harmonic measures. Given a non-elementary measure $\mu$ on $\MCG(S)$ it is possible to define a random walk $(w_n)$ as the sequence of random variables defined by \[ w_n=g_0g_1g_2\ldots g_n, \] where $g_i$ are independent, identically distributed random variables on $\MCG(S)$ sampled according to the distribution $\mu$. As proven by Kaimanovich and Masur in \cite[Theorem 2.2.4]{Kaimanovich}, random walks generated by a non-elementary probability measure converge almost surely in Thurston's compactification, so we can define the hitting measure $\nu$ in $\mathcal{PMF}$. Furthermore, the walk converges almost surely to uniquely ergodic projective foliations, so we can translate this result to the horofunction compactification in the following way. \begin{corollary}
Let $\mu$ be a non-elementary measure on $\MCG(S)$. Then the associated harmonic measure on the horoboundary is supported in a nowhere dense set. \end{corollary} \begin{proof}
For any $x\in\mathcal{T}(S)$ the sequence $(w_nx)$ converges almost surely in Thurston compactification to some $F\in \mathcal{PMF}$. Hence, by \cite[Corollary 1]{Miyachi}, the sequence $(w_nx)$ converges almost surely to the Busemann point generated by a quadratic differential $q$ with $V(q)$ being a multiple of $F$. Hence, the support of the harmonic measure is contained in $\mathcal{B}_1$, which is nowhere dense by Theorem \ref{th:nowheredense}. \end{proof}
\section{Topology of the Horoboundary}\label{se:globaltopology}
In this section we make some progress towards determining the global topology of the horoboundary. We begin by showing that the minimal point $M(q)$ introduced in Proposition \ref{pr:lowerboundGM} serves as a section for the map $\Pi$ whenever $S$ does not have a boundary. Our main goal for this section is proving the following Theorem.
\begin{theorem}\label{th:globalsection}
Let $S$ be a surface of genus $g$ with $b_m$ and $b_u$ boundaries with and without marked points respectively and $p$ interior marked points. Then, the map $\Pi$ restricted to the boundary has a global continuous section $\vbd{\mathcal{T}} \to \hbd{\mathcal{T}}$ if and only if at least one of the two following conditions is satisfied:
\begin{itemize}
\item $b_m=b_u=0$ or
\item $2g+2b_m+b_u+p-\max(1-b_u,0)\le 4$.
\end{itemize}
The section is given by sending the ray in the direction of $q$ to the point $M(q)$ defined before Proposition \ref{pr:lowerboundGM}.
Furthermore, if the map does not admit a global section, then it does not admit any local section around some points.
\end{theorem}
We begin by proving the theorem for surfaces without boundary, as it is significantly easier to prove.
\begin{proposition}\label{pr:continuoussection}
Let $S$ be a surface without boundary. Then the projection map $\Pi$ restricted to the boundary admits a global section, given by the map $M:\vbd{\mathcal{T}}\to \hbd{\mathcal{T}}$.
\end{proposition}
\begin{proof}
By Proposition \ref{pr:lowerboundGM} every preimage $\Pi^{-1}(q)$ contains $M(q)$. We have $M(q)=\Xi (i(V(q),\cdot))$, which is continuous, as the map $\Xi$ is continuous.
\end{proof}
The rest of the cases of Theorem \ref{th:globalsection} require a more careful analysis.
\begin{proposition}\label{pr:continuoussection2}
Let $S$ be either
\begin{itemize}
\item a torus with at most two unmarked boundaries or interior marked points,
\item a torus with one marked boundary and one interior marked point,
\item a sphere with one marked boundary and up to three interior marked points or
\item a sphere with two marked boundaries and one interior marked point or unmarked boundary.
\end{itemize}
Then the projection map $\Pi$ restricted to the boundary admits a global section, given by the map $M:\vbd{\mathcal{T}}\to \hbd{\mathcal{T}}$.
\end{proposition}
\begin{proof}
We shall build the section in the same way we built it in Proposition \ref{pr:continuoussection}, that is, sending $q$ to $M(q)$.
Our first step in the proof is seeing that if $V(q)$ contains a separating proper arc then only one of the two parts separated by the proper arc admit interior components. We shall do this by inspecting each possible case. Assume then that $V(q)$ has a separating proper arc.
If $S$ is a torus with up to two unmarked boundaries or marked points or a torus with one marked boundary and one marked point, then the separating proper arc splits the surface into a torus with a marked boundary and a sphere with a marked boundary and a marked point or unmarked boundary. The latter does not admit an interior component.
If $S$ is a sphere with one marked boundary and up to three interior marked points then the separating proper arc splits the surface into two spheres, both with one marked boundary, one of them with two marked points and the other one with one marked point. Again, the latter does not admit an interior component.
Finally, if $S$ is a sphere with two marked boundaries and one marked point or unmarked boundary, the proper arc splits the surface into one sphere with two marked boundaries and a sphere with one marked boundary and one marked point, which again does not admit an interior component.
Take then a sequence of unit quadratic differentials $(q_n)$ converging to $q$. Let $P_i$, $i\in \{1,\ldots, c\}$ be the boundary components of $V(q)$. Furthermore, denote $G$ the union of the interior components. By the first part of the proof, all the interior components are contained in the same interior part. We thus have
\[\Xi^{-1}M(q)=\left(\sum_i \mathcal{W}^q(P_i) + \mathcal{W}^q( G)\right)^{1/2}.\]
By Proposition \ref{pr:notsplittingboundary} all boundary components of $V(q)$ are contained in $V(q_n)$ for $n$ big enough, and all other boundary components of $V(q_n)$, denoted $P^n$, vanish in the limit. Denote $G^n$ the union of the interior components of $V(q_n)$. As before, each indecomposable component of $G^n$ is contained in the same interior part, so we have
\[
\Xi^{-1}M(q_n)=\left(\sum_i \mathcal{W}^{q_n}(\alpha_i^nP_i) +\mathcal{W}^{q_n}(P^n)+ \mathcal{W}^{q_n}(G^n)\right)^{1/2},
\]
which converges to $\Xi^{-1}M(q)$.
\end{proof}
\begin{proposition}\label{pr:continuoussection3}
Let $S$ be either
\begin{itemize}
\item a surface of genus at least two and at least one boundary;
\item a torus with at least one boundary and two more boundaries or interior marked points;
\item a torus with at least two boundaries, one being marked, and possibly interior marked points;
\item a sphere with at least one boundary, and four more boundaries or interior marked points;
\item a sphere with at least two boundaries, one being marked, and two interior marked points or
\item a sphere with at least three boundaries, two being marked, and possibly interior marked points.
\end{itemize}
Then the projection map $\Pi$ restricted to the boundary does not admit a local section around some points.
\end{proposition}
\begin{proof}
We shall prove this by finding a quadratic differential $q$ and sequences $(q_n^1)$ and $(q_n^2)$ converging to $q$ such that their preimages by $\Pi$ are singletons, but such that $\Pi^{-1}(q_n^1)$ and $\Pi^{-1}(q_n^2)$ converge to different points in $\Pi^{-1}(q)$. If we had a section around $q$, then its value at $q_n^1$ and $q_n^2$ would be $\Pi^{-1}(q_n^1)$ and $\Pi^{-1}(q_n^2)$ respectively, giving us a contradiction.
In all cases the construction will be similar. For $q_n^1$ we build a foliation with a separating proper arc $P$ such that each of the parts has precisely one interior component consisting of a closed curve, which we denote $G_1$ and $G_2$. Letting the weight of the proper arc diminish to $0$ we can get a sequence of quadratic differentials $(q_n^1)$ converging to a quadratic differential $q$ such that $V(q)=G_1+G_2$. Let $F_n^1=P+nG_1+nG_2$, $A_n^1$ and $A$ the area of the Hubbard--Masur differentials $q_{F_n^1,X}$ and $q_{G_1+G_2,X}$ respectively. Denote $\frac{1}{\sqrt{A_n^1}}q_{F_n^1,X}$ as $q_n^1$. These quadratic differentials have unit area, and converge to $\frac{1}{\sqrt{A}}q_{G_1+G_2,X}$, which we denote $q$. By construction, $V(q_n^1)$ is internally indecomposable, so $\Pi^{-1}(q_n^1)$ is a singleton, and $\Xi^{-1}\Pi^{-1}(q_n^1)=\left\{\left(\frac{\mathcal{W}^{q^1_n}(P)+n\mathcal{W}^{q^1_n}(G_1)+n\mathcal{W}^{q^1_n}(G_2)}{\sqrt{A_n^1}}\right)^{1/2}\right\}$.
The sequences $\frac{P}{\sqrt{A_n^1}}$, $\frac{nG_1}{\sqrt{A_n^1}}$ and $\frac{nG_2}{\sqrt{A_n^1}}$ converge respectively to $0$, $\frac{G_1}{\sqrt{A}}$ and $\frac{G_2}{\sqrt{A}}$. Hence, by Lemma \ref{le:limith} the sequence $\Pi^{-1}(q_n^1)$ converges to $\left\{\left(\frac{\mathcal{W}^q(G_1)+\mathcal{W}^q(G_2)}{\sqrt{A}}\right)^{1/2}\right\}$.
For building $q_n^2$ we take a curve $\gamma$ intersecting $G_1$ and $G_2$ at $b_1$ and $b_2$ times, where $b_1,b_2\in\{1,2\}$. Denote $\tau_1$ and $\tau_2$ the Dehn twists around $G_1$ and $G_2$. Let $F_n^2=\tau_1^{2n/b_1}\tau_2^{2n/b_2}\gamma$ and $A_n^2$ the area of the Hubbard--Masur differential $q_{F_n^2,X}$. As before, denote $q_n^2$ the quadratic differentials $\frac{1}{\sqrt{A_n^2}}q_{F_n^2,X}$. These quadratic differentials have unit area, and converge to $q$. Furthermore, each $V(q_n^2)$ is a singleton and $\Xi^{-1}\Pi^{-1}(q_n^2)=\left\{\left(\frac{\mathcal{W}^{q^2_n}((\tau_1\tau_2)^n\gamma)}{\sqrt{A_n^2}}\right)^{1/2}\right\}$. The sequence $\frac{(\tau_1\tau_2)^n\gamma}{\sqrt{A_n^2}}$ converges to $\frac{G_1+G_2}{\sqrt{A}}$, so by \cref{le:limith} the sequence $\Xi^{-1}\Pi^{-1}(q_n^2)$ converges to $\left\{\left(\frac{\mathcal{W}^q(G_1+G_2)}{\sqrt{A}}\right)^{1/2}\right\}$, which is different from the limit of $\Xi^{-1}\Pi^{-1}(q_n^1)$.
It remains then to find such a multicurve. For genus at least two we take $P$ to be a separating proper arc such that each of the parts is of genus at least one, and $G_1$ and $G_2$ to be non contractible curves, not parallel to unmarked boundaries on each part, as shown in \cref{fi:noncontractible1}.
For the torus we take $P$ to be a separating proper arc with both endpoints in the unmarked boundary, or a marked boundary if there are no unmarked boundaries. Further, we choose the proper arc such that, after cutting along the arc, one part is a torus with one boundary. That is, every other feature of the surface lies in the other part. Then we let $G_1$ and $G_2$ be non contractible curves on each part, as shown in \cref{fi:noncontractible2}.
Finally, for the sphere we let $P$ be a separating proper arc with both endpoints on an unmarked boundary, or a marked boundary if there are no boundaries without marked points. Further, we choose the arc such that each interior part has at least either a combination of two marked points or boundaries without marked points, or a boundary with marked points. Hence, each interior part supports an interior component formed by a curve, as shown in \cref{fi:noncontractible3}.
\begin{figure}
\caption{Curves chosen in the proof of Proposition \ref{pr:continuoussection3}}
\label{fi:noncontractible1}
\label{fi:noncontractible2}
\label{fi:noncontractible3}
\end{figure}
\end{proof}
\begin{proof}[Proof of Theorem \ref{th:globalsection}]
This is a combination of the results from Propositions \ref{pr:continuoussection}, \ref{pr:continuoussection2} and \ref{pr:continuoussection3}.
\end{proof}
By Proposition \ref{pr:finslerconnected} we know that the horoboundary is connected whenever the real dimension of Teichmüller space is at least 2. In the following result we go a bit further, by showing that it is actually path connected.
\begin{proof}[Proof of Theorem \ref{th:teichconnected}]
Let $x,y\in \hbd{\mathcal{T}(S)}$. If $S$ does not have boundary then $\Pi$ has a global section, so we can lift any path between $\Pi(x)$ and $\Pi(y)$ to a path between $M(\Pi(x))$ and $M(\Pi(y))$. Then, since $\Pi^{-1}\Pi(x)$ and $\Pi^{-1}\Pi(y)$ are path connected, we can connect $x$ to $M(\Pi(x))$ and $y$ to $M(\Pi(y))$ via paths.
If $S$ has boundary we might have to be a bit more careful, as we might not have a global section. However, as we shall see, we can take a path $q_t$ between $\Pi(x)$ and $\Pi(y)$ such that $B(q_t)$ has finitely many discontinuities. Then, since each of the preimages is path connected these discontinuities can be fixed by using paths in the fibers, so we will have a path between $x$ and $y$.
Choose a boundary component of $S$, denote $b$ a curve parallel to that boundary and let $F_x=V(\Pi(x))$. If $F_x$ contains $b$ then all the expressions of the form $(1-t)F_x+tb$ with $t\in[0,1]$ correspond to foliations on $S$, which we denote $F_t$. Denote $q_t$ the unit area quadratic differential such that $V(q_t)$ is a multiple of $F_t$. This defines a continuous path joining $\Pi(x)$ and the unit area quadratic differential associated to a multiple of $b$. Let $V_i$ be the vertical components of $F_x$ that are not $b$, and let $w_0$ be the weight of $b$ in $F_x$. Then, $B(q_t)^2=\frac{1}{\sqrt{\operatorname{Area}(q_{F_t,X})}}\left((1-t)\sum \mathcal{W}^{q_t} (V_i)+(t+(1-t)w_0)\mathcal{W}^{q_t}(b)\right)$, which gives a continuous path from $B(q_0)\in\Pi^{-1}\Pi(x)$ to $B(q_1)\in \Pi^{-1}(q_1)$. If $F_x$ does not contain $b$, but $b$ can be added to the foliation then we proceed just as before. Hence, if both $x$ and $y$ result in foliations where $b$ can be added, we create a path by concatenating the paths between $x$, the Busemann point in $\Pi^{-1}\Pi(x)$, the Busemann point associated to $b$, the Busemann point in $\Pi^{-1}\Pi(y)$ and $y$.
If $b$ cannot be added to the foliation $F_x$ then there must be some set $P$ of proper arcs in $F_x$ incident to the boundary component associated to $b$. Let $F'_x$ be the foliation $F_x$ without the proper arcs $P$ and assume $F'_x$ is nonempty. Denote $F_t$ the foliations $(1-t)P+(1+t)F'_x$, $t\in [0,1]$, and $q_t$ the unit area quadratic differentials such that $V(q_t)$ is a multiple of $F_t$. Denoting $V_i$ the vertical components of $F'_x$, and $P_j$ the proper arcs incident to the boundary component associated to $b$, we have $B(q_t)^2=\frac{1}{\sqrt{\operatorname{Area}(q_{F_t,X})}}\left((1-t)\sum_j\mathcal{W}^{q_t}(P_j)+(1+t)\sum\mathcal{W}^{q_t}(V_i)\right)$ for $t<1$, which is continuous. Furthermore, $\lim_{t\to 1} B(q_t)\in \Pi^{-1}(q_1)$. Hence, we can concatenate paths between $x$, the Busemann point in $\Pi^{-1}\Pi(x)$, the limit $\lim_{t\to 1} B(q_t)$, the Busemann point $B(q_1)$ and the Busemann point associated to $b$.
If $F'_x$ is empty we want to add some other components to $F_x$. If it admits some other component $k$ then we repeat the previous reasoning with $F_t=(1-\frac{t}{2})F_x+\frac{t}{2}k$, which does not result in any discontinuity. If $F_x$ does not admit any other component then there must be at least 2 proper arcs incident to the boundary component associated to $b$, so we choose one of them, denoted $p$, and repeat the previous reasoning with $F_t=(1-t)F_x+tp$, which does not result in any discontinuity. Finally, we concatenate this last path with the previous paths.
\end{proof}
\section{Formulas for limits of extremal lengths}\label{se:formulas}
We finish by reframing the bounds we got for the elements of $\Xi^{-1}\Pi^{-1}(q)$ as results regarding limits of extremal lengths, getting in this way some extensions of \cite[Theorem 1]{Walsh}.
\begin{proposition}\label{pr:boundslimits}
Let $F$ be a measured foliation, $(q_n)$ a sequence of unit area quadratic differentials converging to a quadratic differential $q$ and $(t_n)$ a sequence of real numbers converging to infinity. Then,
\[\left(\Xi^{-1}M(q)\right)^2\le \liminf_{n\to\infty} e^{-2t_n} \operatorname{Ext}_\tray{q_n}{t_n}(F)\le\limsup_{n\to\infty} e^{-2t_n} \operatorname{Ext}_\tray{q_n}{t_n}(F)\le
\left(\Xi^{-1}B(q)\right)^2
\]
\end{proposition}
\begin{proof}
Take a subsequence such that $e^{-2t_n} \operatorname{Ext}_\tray{q_n}{t_n}(F)$ converges to the liminf. Furthermore, take a subsequence such that $\tray{q_n}{t_n}$ converge to a point $\xi\in \Pi^{-1}(q)$. By Proposition \ref{pr:lowerboundGM} we have $(\Xi^{-1}M(q))^2\le \xi^2$. Since $e^{-2t_n} \operatorname{Ext}_\tray{q_n}{t_n}(F)$ converges to $\xi^2(F)$ we have the lower bound. For the upper bound we repeat the process taking the limsup and using Proposition \ref{pr:upperboundGM}.
\end{proof}
By noting that $\Xi^{-1}M(q)(F)$ and $\Xi^{-1}B(q)(F)$ evaluate to $0$ if and only if $i(V(q),F)=0$, we get the following corollary.
\begin{corollary}\label{pr:relationqdtoGM}
Let $(q_n)$ be a sequence of unit area quadratic differentials converging to a quadratic differential $q$, and $(t_n)$ a sequence of real numbers converging to infinity. Then,
\[\liminf_{n\to\infty} e^{-2t_n} \operatorname{Ext}_\tray{q_n}{t_n}(F)=0 \iff i(V(q),F)=0.\]
\end{corollary}
Proposition \ref{pr:boundslimits} can be strengthened slightly in the following manner.
\begin{proposition}\label{pr:lowerboundrefinedlimit}
Let $(q_n)$ be a sequence of unit area quadratic differentials converging to a quadratic differential $q$, and $(t_n)$ a sequence of real numbers converging to infinity. Furthermore, denote $V_i^n$ the indecomposable components of $V(q_n)$. If the vertical components can be reordered so that for each $i$ we have that $V_i^n$ converges to a foliation $V_i$, then
\[\liminf_{n\to\infty} e^{-2t_n} \operatorname{Ext}_\tray{q_n}{t_n}(F)\ge
\sum_i \mathcal{W}^q(V_i).\]
\end{proposition}
\begin{proof}
Take a sequence such that the limit is equal to the liminf, and such that we have convergence in the Gardiner--Masur compactification. Let $\xi$ be the limit in the horofunction compactification. By
Lemma \ref{le:walshlowerbound} we have $e^{-2t_n} \operatorname{Ext}_\tray{q_n}{t_n}(F)\ge \left(\Xi^{-1}B(q_n)\right)^2$, and by \cref{co:walshbusemanshape} we have $\left(\Xi^{-1}B(q_n)\right)^2=\sum_i \mathcal{W}^{q_n}(V_i^n)$. Hence, by Lemma \ref{le:limith}, taking limits on both sides we get the proposition.
\end{proof}
If we have strong convergence the upper bound from Proposition \ref{pr:boundslimits} and the lower bound from Proposition \ref{pr:lowerboundrefinedlimit} coincide, giving us a proof of Theorem \ref{th:limits}.
Finally, the path connectedness of the fibers can be translated to the following result.
\begin{proposition}\label{pr:pathGM}
Let $(q_n)$ be a sequence of unit quadratic differentials converging to $q$, and $(t_n)$ be a sequence of times converging to infinity. Further, for any $F\in \mathcal{MF}$ denote $L(F):=\liminf_{n\to\infty}e^{-2t_n}\operatorname{Ext}_\tray{q_n}{t_n}(F)$. Then, for any $s\in [L(F), \mathcal{E}^2_q(F)]$ there is a subsequence of $q_{n_k^s}$ and a sequence $(t_k^s)$ of times such that, for any $G\in \mathcal{MF}$ the limit \[\lim_{k\to\infty} e^{-2t_{k}^s} \operatorname{Ext}_\tray{q_{n_k^s}}{t_k^s}(G)\] is defined, and for $G=F$ it has value $s$.
\end{proposition}
\begin{proof}
We can take a subsequence such that $\lim_{n\to\infty}\operatorname{Ext}_\tray{q_n}{t_n}(F)$ converges to the liminf, and a further subsequence such that we have convergence in the Gardiner--Masur compactification to a point $\Xi^{-1}\xi\in\Xi^{-1}\Pi^{-1}(q)$. By Theorem \ref{pr:pathconnected} we have a path between $\xi$ and $B(q)$ contained in $\Pi^{-1}(q)$, and hence a path $\gamma$ between $\Xi^{-1}\xi$ and $\Xi^{-1}B(q)$ contained in $\Xi^{-1}\Pi^{-1}(q)$. By continuity there is a point in that path such that $\gamma_t(F)^2=s$, and by the way we constructed $\gamma_t$, it is reached by taking a subsequence of $(q_{n_k^s})$ and a sequence $(t_k^s)$ of times converging to infinity. Finally, since $\gamma_t$ is a point in the Gardiner--Masur compactification approached by $\tray{q_{n_k^s}}{t_{k}^s}$, the value of $\gamma_t(G)^2$ is equal to the limit from the proposition.
\end{proof}
{}
\end{document} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.